diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index ecccc0473da9c1..6de6b0e6abf3a0 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -648,6 +648,38 @@ Examples::
 	%p4cc	Y10  little-endian (0x20303159)
 	%p4cc	NV12 big-endian (0xb231564e)
 
+Generic FourCC code
+-------------------
+
+::
+
+	%p4c[hrbl]	gP00 (0x67503030)
+
+Print a generic FourCC code as both its ASCII characters and its numerical
+value in hexadecimal.
+
+The additional ``h``, ``r``, ``b``, and ``l`` specifiers select host, reversed,
+big-endian, or little-endian byte order respectively. Host endian
+order means the data is interpreted as a 32-bit integer and the most
+significant byte is printed first; that is, the character code as printed
+matches the byte order stored in memory on big-endian systems, and is reversed
+on little-endian systems.
+
+Passed by reference.
+
+Examples for a little-endian machine, given &(u32)0x67503030::
+
+	%p4ch	gP00 (0x67503030)
+	%p4cl	gP00 (0x67503030)
+	%p4cb	00Pg (0x30305067)
+	%p4cr	00Pg (0x30305067)
+
+Examples for a big-endian machine, given &(u32)0x67503030::
+
+	%p4ch	gP00 (0x67503030)
+	%p4cl	00Pg (0x30305067)
+	%p4cb	gP00 (0x67503030)
+	%p4cr	00Pg (0x30305067)
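+
+For example, a driver might print a FourCC value held in host byte order as
+follows (a minimal sketch; the variable name is only illustrative)::
+
+	u32 fourcc = 0x67503030;
+
+	/* Passed by reference; prints "gP00 (0x67503030)" on any host */
+	pr_info("FourCC: %p4ch\n", &fourcc);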
+
 Rust
 ----
 
diff --git a/Documentation/devicetree/bindings/arm/apple.yaml b/Documentation/devicetree/bindings/arm/apple.yaml
index dc9aab19ff11d5..da60e9de1cfbd0 100644
--- a/Documentation/devicetree/bindings/arm/apple.yaml
+++ b/Documentation/devicetree/bindings/arm/apple.yaml
@@ -57,6 +57,25 @@ description: |
   - iPad Pro (2nd Generation) (10.5 Inch)
   - iPad Pro (2nd Generation) (12.9 Inch)
 
+  Devices based on the "T2" SoC:
+
+  - Apple T2 MacBookPro15,2 (j132)
+  - Apple T2 iMacPro1,1 (j137)
+  - Apple T2 MacBookAir8,2 (j140a)
+  - Apple T2 MacBookAir8,1 (j140k)
+  - Apple T2 MacBookPro16,1 (j152f)
+  - Apple T2 MacPro7,1 (j160)
+  - Apple T2 Macmini8,1 (j174)
+  - Apple T2 iMac20,1 (j185)
+  - Apple T2 iMac20,2 (j185f)
+  - Apple T2 MacBookPro15,4 (j213)
+  - Apple T2 MacBookPro16,2 (j214k)
+  - Apple T2 MacBookPro16,4 (j215)
+  - Apple T2 MacBookPro16,3 (j223)
+  - Apple T2 MacBookAir9,1 (j230k)
+  - Apple T2 MacBookPro15,1 (j680)
+  - Apple T2 MacBookPro15,3 (j780)
+
   Devices based on the "A11" SoC:
 
   - iPhone 8
@@ -211,6 +230,28 @@ properties:
           - const: apple,t8011
           - const: apple,arm-platform
 
+      - description: Apple T2 SoC based platforms
+        items:
+          - enum:
+              - apple,j132  # Apple T2 MacBookPro15,2 (j132)
+              - apple,j137  # Apple T2 iMacPro1,1 (j137)
+              - apple,j140a # Apple T2 MacBookAir8,2 (j140a)
+              - apple,j140k # Apple T2 MacBookAir8,1 (j140k)
+              - apple,j152f # Apple T2 MacBookPro16,1 (j152f)
+              - apple,j160  # Apple T2 MacPro7,1 (j160)
+              - apple,j174  # Apple T2 Macmini8,1 (j174)
+              - apple,j185  # Apple T2 iMac20,1 (j185)
+              - apple,j185f # Apple T2 iMac20,2 (j185f)
+              - apple,j213  # Apple T2 MacBookPro15,4 (j213)
+              - apple,j214k # Apple T2 MacBookPro16,2 (j214k)
+              - apple,j215  # Apple T2 MacBookPro16,4 (j215)
+              - apple,j223  # Apple T2 MacBookPro16,3 (j223)
+              - apple,j230k # Apple T2 MacBookAir9,1 (j230k)
+              - apple,j680  # Apple T2 MacBookPro15,1 (j680)
+              - apple,j780  # Apple T2 MacBookPro15,3 (j780)
+          - const: apple,t8012
+          - const: apple,arm-platform
+
       - description: Apple A11 SoC based platforms
         items:
           - enum:
diff --git a/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml b/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
index 673277a7a22440..5001f4d5a0dc17 100644
--- a/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
+++ b/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
@@ -22,6 +22,11 @@ properties:
   compatible:
     items:
       - enum:
+          - apple,s5l8960x-pmgr
+          - apple,t7000-pmgr
+          - apple,s8000-pmgr
+          - apple,t8010-pmgr
+          - apple,t8015-pmgr
           - apple,t8103-pmgr
           - apple,t8112-pmgr
           - apple,t6000-pmgr
diff --git a/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml b/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
new file mode 100644
index 00000000000000..5e6da66499a508
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/apple,h7-display-pipe-mipi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple pre-DCP display controller MIPI interface
+
+maintainers:
+  - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description:
+  The MIPI controller part of the pre-DCP Apple display controller
+
+allOf:
+  - $ref: dsi-controller.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t8112-display-pipe-mipi
+          - apple,t8103-display-pipe-mipi
+      - const: apple,h7-display-pipe-mipi
+
+  reg:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Input port. Always connected to the primary controller
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: Output MIPI DSI port to the panel
+
+    required:
+      - port@0
+      - port@1
+
+required:
+  - compatible
+  - reg
+  - ports
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    dsi@28200000 {
+        compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi";
+        reg = <0x28200000 0xc000>;
+        power-domains = <&ps_dispdfr_mipi>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                dfr_mipi_in_adp: endpoint {
+                    remote-endpoint = <&dfr_adp_out_mipi>;
+                };
+            };
+
+            port@1 {
+                reg = <1>;
+
+                dfr_panel_in: endpoint {
+                    remote-endpoint = <&dfr_mipi_out_panel>;
+                };
+            };
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml b/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
new file mode 100644
index 00000000000000..102fb1804c0c0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/apple,h7-display-pipe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple pre-DCP display controller
+
+maintainers:
+  - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description:
+  A secondary display controller used to drive the "touchbar" on
+  certain Apple laptops.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t8112-display-pipe
+          - apple,t8103-display-pipe
+      - const: apple,h7-display-pipe
+
+  reg:
+    items:
+      - description: Primary register block, controls planes and blending
+      - description:
+          Contains other configuration registers like interrupt
+          and FIFO control
+
+  reg-names:
+    items:
+      - const: be
+      - const: fe
+
+  power-domains:
+    description:
+      Phandles to pmgr entries that are needed for this controller to turn on.
+      Aside from that, their specific functions are unknown
+    maxItems: 2
+
+  interrupts:
+    items:
+      - description: Unknown function
+      - description: Primary interrupt. Vsync events are reported via it
+
+  interrupt-names:
+    items:
+      - const: be
+      - const: fe
+
+  iommus:
+    maxItems: 1
+
+  port:
+    $ref: /schemas/graph.yaml#/properties/port
+    description: Output port. Always connected to apple,h7-display-pipe-mipi
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/apple-aic.h>
+    display-pipe@28200000 {
+        compatible = "apple,t8103-display-pipe", "apple,h7-display-pipe";
+        reg = <0x28200000 0xc000>,
+              <0x28400000 0x4000>;
+        reg-names = "be", "fe";
+        power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>;
+        interrupt-parent = <&aic>;
+        interrupts = <AIC_IRQ 502 IRQ_TYPE_LEVEL_HIGH>,
+                     <AIC_IRQ 506 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "be", "fe";
+        iommus = <&displaydfr_dart 0>;
+
+        port {
+            dfr_adp_out_mipi: endpoint {
+                remote-endpoint = <&dfr_mipi_in_adp>;
+            };
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/apple,summit.yaml b/Documentation/devicetree/bindings/display/panel/apple,summit.yaml
new file mode 100644
index 00000000000000..f081755325e97a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/apple,summit.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/apple,summit.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple "Summit" display panel
+
+maintainers:
+  - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description:
+  An OLED panel used as a touchbar on certain Apple laptops.
+  Contains a backlight device, which controls the brightness of the panel itself.
+  The common backlight properties are included for this reason.
+
+allOf:
+  - $ref: panel-common.yaml#
+  - $ref: /schemas/leds/backlight/common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,j293-summit
+          - apple,j493-summit
+      - const: apple,summit
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - max-brightness
+  - port
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "apple,j293-summit", "apple,summit";
+            reg = <0>;
+            max-brightness = <255>;
+
+            port {
+                endpoint {
+                    remote-endpoint = <&dfr_bridge_out>;
+                };
+            };
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
index 296500f9da05e2..0a901db31c5f2b 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -26,7 +26,8 @@ description: |+
   over control to a driver for the real hardware. The bindings for the
   hw nodes must specify which node is considered the primary node.
 
-  If a panel node is given, then the driver uses this to configure the
+  If a panel or panel-dimensions node is given, then the driver uses
+  this to configure the
   physical width and height of the display. If no panel node is given,
   then the driver uses the width and height properties of the simplefb
   node to estimate it.
@@ -126,6 +127,10 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: Display panel node
 
+  panel-dimensions:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: Display panel node (without enforcing probe order)
+
   allwinner,pipeline:
     description: Pipeline used by the framebuffer on Allwinner SoCs
     enum:
diff --git a/Documentation/devicetree/bindings/dma/apple,sio.yaml b/Documentation/devicetree/bindings/dma/apple,sio.yaml
new file mode 100644
index 00000000000000..0e3780ad9dd79a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/apple,sio.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/apple,sio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple SIO Coprocessor
+
+description:
+  SIO is a coprocessor on Apple M1 and later chips (and maybe also on earlier
+  chips). Its role is to offload SPI, UART and DisplayPort audio transfers,
+  presenting itself to the host as a DMA controller.
+
+maintainers:
+  - Martin Povišer <povik+lin@cutebit.org>
+
+allOf:
+  - $ref: dma-controller.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t6000-sio
+          - apple,t8103-sio
+      - const: apple,sio
+
+  reg:
+    maxItems: 1
+
+  '#dma-cells':
+    const: 1
+    description:
+      DMA clients specify a single cell that corresponds to the RTKit endpoint
+      number used for arranging the transfers in question
+
+  dma-channels:
+    maximum: 128
+
+  mboxes:
+    maxItems: 1
+
+  iommus:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  memory-region:
+    minItems: 2
+    maxItems: 8
+    description:
+      References to reserved memory regions, among which are the DATA/TEXT
+      sections of the coprocessor's executable firmware and auxiliary firmware
+      data describing the available DMA-enabled peripherals
+
+  apple,sio-firmware-params:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    description: |
+      Parameters in the form of opaque key/value pairs that are to be sent to the SIO
+      coprocessor once it boots. These parameters can point into the reserved memory
+      regions (in device address space).
+
+      Note that unlike Apple's firmware, we treat the parameters, and the data they
+      refer to, as opaque. Apple embeds short data blobs into its SIO devicetree node
+      that describe the DMA-enabled peripherals (presumably with defined semantics).
+      Apple's driver processes those blobs and sets up data structures in mapped device
+      memory, then references this memory in the parameters sent to the SIO. At the
+      level of description we are opting for in this binding, we assume the job of
+      constructing those data structures has been done in advance, leaving behind an
+      opaque list of key/value parameter pairs to be sent by a prospective driver.
+
+      This approach is chosen for two reasons:
+
+       - It means we don't need to try to understand the semantics of Apple's blobs
+         as long as we know the transformation we need to do from Apple's devicetree
+         data to SIO data (which can be shoved away into a loader). It also means the
+         semantics of Apple's blobs (or of something to replace them) need not be part
+         of the binding and be kept up with Apple's firmware changes in the future.
+
+       - It leaves less work for the driver attaching on this binding. Instead the work
+         is done upfront in the loader which can be better suited for keeping up with
+         Apple's firmware changes.
+
+required:
+  - compatible
+  - reg
+  - '#dma-cells'
+  - dma-channels
+  - mboxes
+  - iommus
+  - power-domains
+
+additionalProperties: false
+
+examples:
+  - |
+    sio: dma-controller@36400000 {
+      compatible = "apple,t8103-sio", "apple,sio";
+      reg = <0x36400000 0x8000>;
+      dma-channels = <128>;
+      #dma-cells = <1>;
+      mboxes = <&sio_mbox>;
+      iommus = <&sio_dart 0>;
+      power-domains = <&ps_sio_cpu>;
+      memory-region = <&sio_text>, <&sio_data>,
+                      <&sio_auxdata1>, <&sio_auxdata2>; /* Filled by loader */
+      apple,sio-firmware-params = <0xb 0x10>, <0xc 0x1b80>, <0xf 0x14>,
+                                  <0x10 0x1e000>, <0x30d 0x34>, <0x30e 0x4000>,
+                                  <0x1a 0x38>, <0x1b 0x50>; /* Filled by loader */
+    };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/apple,z2-multitouch.yaml b/Documentation/devicetree/bindings/input/touchscreen/apple,z2-multitouch.yaml
new file mode 100644
index 00000000000000..402ca6bffd3473
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/apple,z2-multitouch.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/apple,z2-multitouch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple touchscreens attached using the Z2 protocol
+
+maintainers:
+  - Sasha Finkelstein <fnkl.kernel@gmail.com>
+
+description: A series of touchscreen controllers used in Apple products
+
+allOf:
+  - $ref: touchscreen.yaml#
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+  compatible:
+    enum:
+      - apple,j293-touchbar
+      - apple,j493-touchbar
+
+  interrupts:
+    maxItems: 1
+
+  reset-gpios:
+    maxItems: 1
+
+  firmware-name:
+    maxItems: 1
+
+  apple,z2-cal-blob:
+    $ref: /schemas/types.yaml#/definitions/uint8-array
+    maxItems: 4096
+    description:
+      Calibration blob supplied by the bootloader
+
+required:
+  - compatible
+  - interrupts
+  - reset-gpios
+  - firmware-name
+  - touchscreen-size-x
+  - touchscreen-size-y
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        touchscreen@0 {
+            compatible = "apple,j293-touchbar";
+            reg = <0>;
+            spi-max-frequency = <11500000>;
+            reset-gpios = <&pinctrl_ap 139 GPIO_ACTIVE_LOW>;
+            interrupts-extended = <&pinctrl_ap 194 IRQ_TYPE_EDGE_FALLING>;
+            firmware-name = "apple/dfrmtfw-j293.bin";
+            touchscreen-size-x = <23045>;
+            touchscreen-size-y = <640>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
index c8775f9cb07133..6b9d0dcfd6094f 100644
--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml
@@ -17,6 +17,10 @@ description: |
   implements its root ports.  But the ATU found on most DesignWare
   PCIe host bridges is absent.
 
+  On systems derived from T602x, the PHY registers are in a region
+  separate from the port registers. In that case, there is one PHY
+  register range per port register range.
+
   All root ports share a single ECAM space, but separate GPIOs are
   used to take the PCI devices on those ports out of reset.  Therefore
   the standard "reset-gpios" and "max-link-speed" properties appear on
@@ -35,11 +39,12 @@ properties:
           - apple,t8103-pcie
           - apple,t8112-pcie
           - apple,t6000-pcie
+          - apple,t6020-pcie
       - const: apple,pcie
 
   reg:
     minItems: 3
-    maxItems: 6
+    maxItems: 10
 
   reg-names:
     minItems: 3
@@ -50,6 +55,10 @@ properties:
       - const: port1
       - const: port2
       - const: port3
+      - const: phy0
+      - const: phy1
+      - const: phy2
+      - const: phy3
 
   ranges:
     minItems: 2
@@ -72,6 +81,27 @@ properties:
   power-domains:
     maxItems: 1
 
+patternProperties:
+  "^pci@":
+    $ref: /schemas/pci/pci-bus.yaml#
+    type: object
+    description: A single PCI root port
+
+    properties:
+      reg:
+        maxItems: 1
+
+      pwren-gpios:
+        description: Optional GPIO to power on the device
+        maxItems: 1
+
+    required:
+      - reset-gpios
+      - interrupt-controller
+      - "#interrupt-cells"
+      - interrupt-map-mask
+      - interrupt-map
+
 required:
   - compatible
   - reg
@@ -142,7 +172,7 @@ examples:
         pinctrl-0 = <&pcie_pins>;
         pinctrl-names = "default";
 
-        pci@0,0 {
+        port00: pci@0,0 {
           device_type = "pci";
           reg = <0x0 0x0 0x0 0x0 0x0>;
           reset-gpios = <&pinctrl_ap 152 0>;
@@ -150,9 +180,17 @@ examples:
           #address-cells = <3>;
           #size-cells = <2>;
           ranges;
+
+          interrupt-controller;
+          #interrupt-cells = <1>;
+          interrupt-map-mask = <0 0 0 7>;
+          interrupt-map = <0 0 0 1 &port00 0 0 0 0>,
+                          <0 0 0 2 &port00 0 0 0 1>,
+                          <0 0 0 3 &port00 0 0 0 2>,
+                          <0 0 0 4 &port00 0 0 0 3>;
         };
 
-        pci@1,0 {
+        port01: pci@1,0 {
           device_type = "pci";
           reg = <0x800 0x0 0x0 0x0 0x0>;
           reset-gpios = <&pinctrl_ap 153 0>;
@@ -160,9 +198,17 @@ examples:
           #address-cells = <3>;
           #size-cells = <2>;
           ranges;
+
+          interrupt-controller;
+          #interrupt-cells = <1>;
+          interrupt-map-mask = <0 0 0 7>;
+          interrupt-map = <0 0 0 1 &port01 0 0 0 0>,
+                          <0 0 0 2 &port01 0 0 0 1>,
+                          <0 0 0 3 &port01 0 0 0 2>,
+                          <0 0 0 4 &port01 0 0 0 3>;
         };
 
-        pci@2,0 {
+        port02: pci@2,0 {
           device_type = "pci";
           reg = <0x1000 0x0 0x0 0x0 0x0>;
           reset-gpios = <&pinctrl_ap 33 0>;
@@ -170,6 +216,14 @@ examples:
           #address-cells = <3>;
           #size-cells = <2>;
           ranges;
+
+          interrupt-controller;
+          #interrupt-cells = <1>;
+          interrupt-map-mask = <0 0 0 7>;
+          interrupt-map = <0 0 0 1 &port02 0 0 0 0>,
+                          <0 0 0 2 &port02 0 0 0 1>,
+                          <0 0 0 3 &port02 0 0 0 2>,
+                          <0 0 0 4 &port02 0 0 0 3>;
         };
       };
     };
diff --git a/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml b/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
index 59a6af735a2167..38b02ca8b46319 100644
--- a/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
+++ b/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
@@ -31,6 +31,11 @@ properties:
   compatible:
     items:
       - enum:
+          - apple,s5l8960x-pmgr-pwrstate
+          - apple,t7000-pmgr-pwrstate
+          - apple,s8000-pmgr-pwrstate
+          - apple,t8010-pmgr-pwrstate
+          - apple,t8015-pmgr-pwrstate
           - apple,t8103-pmgr-pwrstate
           - apple,t8112-pmgr-pwrstate
           - apple,t6000-pmgr-pwrstate
@@ -70,6 +75,18 @@ properties:
     minimum: 0
     maximum: 15
 
+  apple,force-disable:
+    description:
+      Forces this device to be disabled (bus access blocked) when the power
+      domain is powered down.
+    type: boolean
+
+  apple,force-reset:
+    description:
+      Forces a reset/error recovery of the power control logic when the power
+      domain is powered down.
+    type: boolean
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/sound/apple,macaudio.yaml b/Documentation/devicetree/bindings/sound/apple,macaudio.yaml
new file mode 100644
index 00000000000000..8fe22dec3015d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/apple,macaudio.yaml
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/apple,macaudio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple Silicon Macs integrated sound peripherals
+
+description:
+  This binding represents the overall machine-level integration of sound
+  peripherals on 'Apple Silicon' machines by Apple.
+
+maintainers:
+  - Martin Povišer <povik+lin@cutebit.org>
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,j274-macaudio
+          - apple,j293-macaudio
+          - apple,j314-macaudio
+      - const: apple,macaudio
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  model:
+    description:
+      Model name for presentation to users
+    $ref: /schemas/types.yaml#/definitions/string
+
+patternProperties:
+  "^dai-link(@[0-9a-f]+)?$":
+    description: |
+      Node for each sound peripheral such as the speaker array, headphones jack,
+      or microphone.
+    type: object
+
+    additionalProperties: false
+
+    properties:
+      reg:
+        maxItems: 1
+
+      link-name:
+        description: |
+          Name for the peripheral, expecting 'Speaker' or 'Speakers' if this is
+          the speaker array.
+        $ref: /schemas/types.yaml#/definitions/string
+
+      cpu:
+        type: object
+
+        properties:
+          sound-dai:
+            description: |
+              DAI list with CPU-side I2S ports involved in this peripheral.
+            minItems: 1
+            maxItems: 2
+
+        required:
+          - sound-dai
+
+      codec:
+        type: object
+
+        properties:
+          sound-dai:
+            minItems: 1
+            maxItems: 8
+            description: |
+              DAI list with the CODEC-side DAIs connected to the above CPU-side
+              DAIs and involved in this sound peripheral.
+
+              The list is in left/right order if applicable. If there is more
+              than one CPU-side DAI (there can be two), the CODECs connected to
+              the first CPU must be listed first, followed by those connected
+              to the second.
+
+              In addition, on some machines with many speaker codecs, the CODECs
+              are listed in this fixed order:
+
+              J293: Left Front, Left Rear, Right Front, Right Rear
+              J314: Left Woofer 1, Left Tweeter, Left Woofer 2,
+                    Right Woofer 1, Right Tweeter, Right Woofer 2
+
+        required:
+          - sound-dai
+
+    required:
+      - reg
+      - cpu
+      - codec
+
+required:
+  - compatible
+  - model
+
+additionalProperties: false
+
+examples:
+  - |
+    mca: mca@9b600000 {
+      compatible = "apple,t6000-mca", "apple,mca";
+      reg = <0x9b600000 0x10000>,
+            <0x9b500000 0x20000>;
+
+      clocks = <&nco 0>, <&nco 1>, <&nco 2>, <&nco 3>;
+      power-domains = <&ps_audio_p>, <&ps_mca0>, <&ps_mca1>,
+                      <&ps_mca2>, <&ps_mca3>;
+      dmas = <&admac 0>, <&admac 1>, <&admac 2>, <&admac 3>,
+             <&admac 4>, <&admac 5>, <&admac 6>, <&admac 7>,
+             <&admac 8>, <&admac 9>, <&admac 10>, <&admac 11>,
+             <&admac 12>, <&admac 13>, <&admac 14>, <&admac 15>;
+      dma-names = "tx0a", "rx0a", "tx0b", "rx0b",
+                  "tx1a", "rx1a", "tx1b", "rx1b",
+                  "tx2a", "rx2a", "tx2b", "rx2b",
+                  "tx3a", "rx3a", "tx3b", "rx3b";
+
+      #sound-dai-cells = <1>;
+    };
+
+    sound {
+      compatible = "apple,j314-macaudio", "apple,macaudio";
+      model = "MacBook Pro J314 integrated audio";
+
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      dai-link@0 {
+        reg = <0>;
+        link-name = "Speakers";
+
+        cpu {
+          sound-dai = <&mca 0>, <&mca 1>;
+        };
+        codec {
+          sound-dai = <&speaker_left_woof1>,
+                      <&speaker_left_tweet>,
+                      <&speaker_left_woof2>,
+                      <&speaker_right_woof1>,
+                      <&speaker_right_tweet>,
+                      <&speaker_right_woof2>;
+        };
+      };
+
+      dai-link@1 {
+        reg = <1>;
+        link-name = "Headphones Jack";
+
+        cpu {
+          sound-dai = <&mca 2>;
+        };
+        codec {
+          sound-dai = <&jack_codec>;
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/usb/apple,dwc3.yaml b/Documentation/devicetree/bindings/usb/apple,dwc3.yaml
new file mode 100644
index 00000000000000..fb3b3489e6b263
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/apple,dwc3.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/apple,dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple Silicon DWC3 USB controller
+
+maintainers:
+  - Sven Peter <sven@svenpeter.dev>
+
+description:
+  On Apple Silicon SoCs such as the M1, each Type-C port has a corresponding
+  USB controller based on the Synopsys DesignWare USB3 controller.
+
+  The common content of this binding is defined in snps,dwc3.yaml.
+
+allOf:
+  - $ref: snps,dwc3.yaml#
+
+select:
+  properties:
+    compatible:
+      contains:
+        const: apple,dwc3
+  required:
+    - compatible
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t8103-dwc3
+          - apple,t6000-dwc3
+      - const: apple,dwc3
+      - const: snps,dwc3
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/apple-aic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    usb@82280000 {
+      compatible = "apple,t8103-dwc3", "apple,dwc3", "snps,dwc3";
+      reg = <0x82280000 0x10000>;
+      interrupts = <AIC_IRQ 777 IRQ_TYPE_LEVEL_HIGH>;
+
+      dr_mode = "otg";
+      usb-role-switch;
+      role-switch-default-mode = "host";
+    };
diff --git a/MAINTAINERS b/MAINTAINERS
index c0d5232a473b8a..984c9fd848f473 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1702,6 +1702,13 @@ L:	linux-input@vger.kernel.org
 S:	Odd fixes
 F:	drivers/input/mouse/bcm5974.c
 
+APPLE DRM DISPLAY DRIVER
+M:	Janne Grunau <j@jannau.net>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+F:	drivers/gpu/drm/apple/
+
 APPLE PCIE CONTROLLER DRIVER
 M:	Alyssa Rosenzweig <alyssa@rosenzweig.io>
 M:	Marc Zyngier <maz@kernel.org>
@@ -2201,9 +2208,11 @@ M:	Martin Povišer <povik+lin@cutebit.org>
 L:	asahi@lists.linux.dev
 L:	linux-sound@vger.kernel.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/dma/apple,sio.yaml
 F:	Documentation/devicetree/bindings/sound/adi,ssm3515.yaml
 F:	Documentation/devicetree/bindings/sound/cirrus,cs42l84.yaml
 F:	Documentation/devicetree/bindings/sound/apple,*
+F:	drivers/dma/apple-sio.c
 F:	sound/soc/apple/*
 F:	sound/soc/codecs/cs42l83-i2c.c
 F:	sound/soc/codecs/cs42l84.*
@@ -2227,6 +2236,7 @@ F:	Documentation/devicetree/bindings/clock/apple,nco.yaml
 F:	Documentation/devicetree/bindings/cpufreq/apple,cluster-cpufreq.yaml
 F:	Documentation/devicetree/bindings/dma/apple,admac.yaml
 F:	Documentation/devicetree/bindings/i2c/apple,i2c.yaml
+F:	Documentation/devicetree/bindings/input/touchscreen/apple,z2-multitouch.yaml
 F:	Documentation/devicetree/bindings/interrupt-controller/apple,*
 F:	Documentation/devicetree/bindings/iommu/apple,dart.yaml
 F:	Documentation/devicetree/bindings/iommu/apple,sart.yaml
@@ -2239,6 +2249,7 @@ F:	Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
 F:	Documentation/devicetree/bindings/power/apple*
 F:	Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml
 F:	Documentation/devicetree/bindings/spi/apple,spi.yaml
+F:	Documentation/devicetree/bindings/usb/apple,dwc3.yaml
 F:	Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
 F:	arch/arm64/boot/dts/apple/
 F:	drivers/bluetooth/hci_bcm4377.c
@@ -2248,6 +2259,7 @@ F:	drivers/dma/apple-admac.c
 F:	drivers/pmdomain/apple/
 F:	drivers/i2c/busses/i2c-pasemi-core.c
 F:	drivers/i2c/busses/i2c-pasemi-platform.c
+F:	drivers/input/touchscreen/apple_z2.c
 F:	drivers/iommu/apple-dart.c
 F:	drivers/iommu/io-pgtable-dart.c
 F:	drivers/irqchip/irq-apple-aic.c
@@ -2262,6 +2274,15 @@ F:	include/dt-bindings/interrupt-controller/apple-aic.h
 F:	include/dt-bindings/pinctrl/apple.h
 F:	include/linux/soc/apple/*
 
+ARM/APPLE SMC HWMON DRIVER
+M:	James Calligeros <jcalligeros99@gmail.com>
+L:	asahi@lists.linux.dev
+S:	Maintained
+W:	https://asahilinux.org
+B:	https://github.com/AsahiLinux/linux/issues
+C:	irc://irc.oftc.net/asahi-dev
+F:	drivers/hwmon/macsmc-hwmon.c
+
 ARM/ARTPEC MACHINE SUPPORT
 M:	Jesper Nilsson <jesper.nilsson@axis.com>
 M:	Lars Persson <lars.persson@axis.com>
@@ -6907,6 +6928,19 @@ F:	include/linux/dma-mapping.h
 F:	include/linux/swiotlb.h
 F:	kernel/dma/
 
+DMA MAPPING HELPERS DEVICE DRIVER API [RUST]
+M:	Abdiel Janulgue <abdiel.janulgue@gmail.com>
+M:	Danilo Krummrich <dakr@kernel.org>
+R:	Daniel Almeida <daniel.almeida@collabora.com>
+R:	Robin Murphy <robin.murphy@arm.com>
+R:	Andreas Hindborg <a.hindborg@kernel.org>
+L:	rust-for-linux@vger.kernel.org
+S:	Supported
+W:	https://rust-for-linux.com
+T:	git https://github.com/Rust-for-Linux/linux.git alloc-next
+F:	rust/kernel/dma.rs
+F:	samples/rust/rust_dma.rs
+
 DMA-BUF HEAPS FRAMEWORK
 M:	Sumit Semwal <sumit.semwal@linaro.org>
 R:	Benjamin Gaignard <benjamin.gaignard@collabora.com>
@@ -7844,6 +7878,22 @@ F:	drivers/gpu/host1x/
 F:	include/linux/host1x.h
 F:	include/uapi/drm/tegra_drm.h
 
+DRM DRIVERS FOR PRE-DCP APPLE DISPLAY OUTPUT
+M:	Sasha Finkelstein <fnkl.kernel@gmail.com>
+R:	Janne Grunau <j@jannau.net>
+L:	dri-devel@lists.freedesktop.org
+L:	asahi@lists.linux.dev
+S:	Maintained
+W:	https://asahilinux.org
+B:	https://github.com/AsahiLinux/linux/issues
+C:	irc://irc.oftc.net/asahi-dev
+T:	git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F:	Documentation/devicetree/bindings/display/apple,h7-display-pipe-mipi.yaml
+F:	Documentation/devicetree/bindings/display/apple,h7-display-pipe.yaml
+F:	Documentation/devicetree/bindings/display/panel/apple,summit.yaml
+F:	drivers/gpu/drm/adp/
+F:	drivers/gpu/drm/panel/panel-summit.c
+
 DRM DRIVERS FOR RENESAS R-CAR
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 M:	Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
@@ -15992,6 +16042,8 @@ F:	include/linux/kmod.h
 F:	include/linux/module*.h
 F:	kernel/module/
 F:	lib/test_kmod.c
+F:	rust/kernel/module_param.rs
+F:	rust/macros/module.rs
 F:	scripts/module*
 F:	tools/testing/selftests/kmod/
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3e7483ad5276c3..e0e81f27143612 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -435,6 +435,9 @@ config KASAN_SHADOW_OFFSET
 config UNWIND_TABLES
 	bool
 
+config ARM64_ACTLR_STATE
+	bool
+
 source "arch/arm64/Kconfig.platforms"
 
 menu "Kernel Features"
@@ -2314,6 +2317,17 @@ config ARM64_DEBUG_PRIORITY_MASKING
 	  If unsure, say N
 endif # ARM64_PSEUDO_NMI
 
+config ARM64_MEMORY_MODEL_CONTROL
+	bool "Runtime memory model control"
+	default ARCH_APPLE
+	select ARM64_ACTLR_STATE
+	help
+	  Some ARM64 CPUs support runtime switching of the CPU memory
+	  model, which can be useful to emulate other CPU architectures
+	  which have different memory models. Say Y to enable support
+	  for the PR_SET_MEM_MODEL/PR_GET_MEM_MODEL prctl() calls on
+	  CPUs with this feature.
+
 config RELOCATABLE
 	bool "Build a relocatable kernel image" if EXPERT
 	select ARCH_HAS_RELR
diff --git a/arch/arm64/boot/dts/apple/Makefile b/arch/arm64/boot/dts/apple/Makefile
index ab6ebb53218ad9..913857d6662505 100644
--- a/arch/arm64/boot/dts/apple/Makefile
+++ b/arch/arm64/boot/dts/apple/Makefile
@@ -46,6 +46,22 @@ dtb-$(CONFIG_ARCH_APPLE) += t8011-j120.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8011-j121.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8011-j207.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8011-j208.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j132.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j137.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j140a.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j140k.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j152f.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j160.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j174.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j185.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j185f.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j213.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j214k.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j215.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j223.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j230k.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j680.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j780.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8015-d201.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8015-d20.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8015-d211.dtb
@@ -64,5 +80,14 @@ dtb-$(CONFIG_ARCH_APPLE) += t6001-j316c.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t6001-j375c.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t6002-j375d.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8112-j413.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8112-j415.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8112-j473.dtb
 dtb-$(CONFIG_ARCH_APPLE) += t8112-j493.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6020-j414s.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6021-j414c.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6020-j416s.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6021-j416c.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6020-j474s.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6021-j475c.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6022-j475d.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t6022-j180d.dtb
diff --git a/arch/arm64/boot/dts/apple/hwmon-common.dtsi b/arch/arm64/boot/dts/apple/hwmon-common.dtsi
new file mode 100644
index 00000000000000..1f9a2435e14cb7
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/hwmon-common.dtsi
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * hwmon sensors expected on all systems
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+&smc {
+	hwmon {
+		apple,power-keys {
+			power-PSTR {
+				apple,key-id = "PSTR";
+				label = "Total System Power";
+			};
+			power-PDTR {
+				apple,key-id = "PDTR";
+				label = "AC Input Power";
+			};
+			power-PMVR {
+				apple,key-id = "PMVR";
+				label = "3.8 V Rail Power";
+			};
+		};
+		apple,temp-keys {
+			temp-TH0x {
+				apple,key-id = "TH0x";
+				label = "NAND Flash Temperature";
+			};
+		};
+		apple,volt-keys {
+			volt-VD0R {
+				apple,key-id = "VD0R";
+				label = "AC Input Voltage";
+			};
+		};
+		apple,current-keys {
+			current-ID0R {
+				apple,key-id = "ID0R";
+				label = "AC Input Current";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/hwmon-fan-dual.dtsi b/arch/arm64/boot/dts/apple/hwmon-fan-dual.dtsi
new file mode 100644
index 00000000000000..782b6051a3866e
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/hwmon-fan-dual.dtsi
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright The Asahi Linux Contributors
+ *
+ * Fan hwmon sensors for machines with two fans.
+ */
+
+#include "hwmon-fan.dtsi"
+
+&smc {
+	hwmon {
+		apple,fan-keys {
+			fan-F0Ac {
+				label = "Fan 1";
+			};
+			fan-F1Ac {
+				apple,key-id = "F1Ac";
+				label = "Fan 2";
+				apple,fan-minimum = "F1Mn";
+				apple,fan-maximum = "F1Mx";
+				apple,fan-target = "F1Tg";
+				apple,fan-mode = "F1Md";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/hwmon-fan.dtsi b/arch/arm64/boot/dts/apple/hwmon-fan.dtsi
new file mode 100644
index 00000000000000..8f329ac4ff9fef
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/hwmon-fan.dtsi
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright The Asahi Linux Contributors
+ *
+ * Fan hwmon sensors for machines with a single fan.
+ */
+
+&smc {
+	hwmon {
+		apple,fan-keys {
+			fan-F0Ac {
+				apple,key-id = "F0Ac";
+				label = "Fan";
+				apple,fan-minimum = "F0Mn";
+				apple,fan-maximum = "F0Mx";
+				apple,fan-target = "F0Tg";
+				apple,fan-mode = "F0Md";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/hwmon-laptop.dtsi b/arch/arm64/boot/dts/apple/hwmon-laptop.dtsi
new file mode 100644
index 00000000000000..2583ef379dfac9
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/hwmon-laptop.dtsi
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * hwmon sensors expected on all laptops
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+&smc {
+	hwmon {
+		apple,power-keys {
+			power-PHPC {
+				apple,key-id = "PHPC";
+				label = "Heatpipe Power";
+			};
+		};
+		apple,temp-keys {
+			temp-TB0T {
+				apple,key-id = "TB0T";
+				label = "Battery Hotspot";
+			};
+			temp-TCHP {
+				apple,key-id = "TCHP";
+				label = "Charge Regulator Temp";
+			};
+			temp-TW0P {
+				apple,key-id = "TW0P";
+				label = "WiFi/BT Module Temp";
+			};
+		};
+		apple,volt-keys {
+			volt-SBAV {
+				apple,key-id = "SBAV";
+				label = "Battery Voltage";
+			};
+			volt-VD0R {
+				apple,key-id = "VD0R";
+				label = "Charger Input Voltage";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/hwmon-mini.dtsi b/arch/arm64/boot/dts/apple/hwmon-mini.dtsi
new file mode 100644
index 00000000000000..bd0c22786d4226
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/hwmon-mini.dtsi
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * hwmon sensors common to the Mac mini desktop
+ * models, but not the Studio or Pro.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include "hwmon-fan.dtsi"
+
+&smc {
+	hwmon {
+		apple,temp-keys {
+			temp-TW0P {
+				apple,key-id = "TW0P";
+				label = "WiFi/BT Module Temp";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/isp-common.dtsi b/arch/arm64/boot/dts/apple/isp-common.dtsi
new file mode 100644
index 00000000000000..739e6e9e66e740
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/isp-common.dtsi
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Common ISP configuration for Apple silicon platforms.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/ {
+	aliases {
+		isp = &isp;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		isp_heap: isp-heap {
+			compatible = "apple,asc-mem";
+			/* Filled in by bootloader */
+			reg = <0 0 0 0>;
+			no-map;
+		};
+	};
+};
+
+&isp {
+	memory-region = <&isp_heap>;
+	memory-region-names = "heap";
+	status = "okay";
+};
+
+&isp_dart0 {
+	status = "okay";
+};
+
+&isp_dart1 {
+	status = "okay";
+};
+
+&isp_dart2 {
+	status = "okay";
+};
+
+&ps_isp_sys {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/isp-imx248.dtsi b/arch/arm64/boot/dts/apple/isp-imx248.dtsi
new file mode 100644
index 00000000000000..0a4ac1a0152c2c
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/isp-imx248.dtsi
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * ISP configuration for platforms with IMX248 sensor.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include "isp-common.dtsi"
+
+&isp {
+	apple,temporal-filter = <0>;
+
+	sensor-presets {
+		/* 1280x720 */
+		preset0 {
+			apple,config-index = <0>;
+			apple,input-size = <1296 736>;
+			apple,output-size = <1280 720>;
+			apple,crop = <8 8 1280 720>;
+		};
+		/* 960x720 (4:3) */
+		preset1 {
+			apple,config-index = <0>;
+			apple,input-size = <1296 736>;
+			apple,output-size = <960 720>;
+			apple,crop = <168 8 960 720>;
+		};
+		/* 960x540 (16:9) */
+		preset2 {
+			apple,config-index = <0>;
+			apple,input-size = <1296 736>;
+			apple,output-size = <960 540>;
+			apple,crop = <8 8 1280 720>;
+		};
+		/* 640x480 (4:3) */
+		preset3 {
+			apple,config-index = <0>;
+			apple,input-size = <1296 736>;
+			apple,output-size = <640 480>;
+			apple,crop = <168 8 960 720>;
+		};
+		/* 640x360 (16:9) */
+		preset4 {
+			apple,config-index = <0>;
+			apple,input-size = <1296 736>;
+			apple,output-size = <640 360>;
+			apple,crop = <8 8 1280 720>;
+		};
+		/* 320x180 (16:9) */
+		preset5 {
+			apple,config-index = <0>;
+			apple,input-size = <1296 736>;
+			apple,output-size = <320 180>;
+			apple,crop = <8 8 1280 720>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/isp-imx364.dtsi b/arch/arm64/boot/dts/apple/isp-imx364.dtsi
new file mode 100644
index 00000000000000..55484d86523657
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/isp-imx364.dtsi
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * ISP configuration for platforms with IMX364 sensor.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include "isp-common.dtsi"
+
+&isp {
+	apple,temporal-filter = <0>;
+
+	sensor-presets {
+		/* 1920x1080 */
+		preset0 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <1920 1080>;
+			apple,crop = <0 0 1920 1080>;
+		};
+		/* 1440x720 (4:3) */
+		preset1 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <1440 1080>;
+			apple,crop = <240 0 1440 1080>;
+		};
+		/* 1280x720 (16:9) */
+		preset2 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <1280 720>;
+			apple,crop = <0 0 1920 1080>;
+		};
+		/* 960x720 (4:3) */
+		preset3 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <960 720>;
+			apple,crop = <240 0 1440 1080>;
+		};
+		/* 960x540 (16:9) */
+		preset4 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <960 540>;
+			apple,crop = <0 0 1920 1080>;
+		};
+		/* 640x480 (4:3) */
+		preset5 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <640 480>;
+			apple,crop = <240 0 1440 1080>;
+		};
+		/* 640x360 (16:9) */
+		preset6 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <640 360>;
+			apple,crop = <0 0 1920 1080>;
+		};
+		/* 320x180 (16:9) */
+		preset7 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <320 180>;
+			apple,crop = <0 0 1920 1080>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/isp-imx558-cfg0.dtsi b/arch/arm64/boot/dts/apple/isp-imx558-cfg0.dtsi
new file mode 100644
index 00000000000000..729b97829cbb7e
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/isp-imx558-cfg0.dtsi
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * ISP configuration for platforms with IMX558 sensor in
+ * config #0 mode.
+ *
+ * These platforms enable MLVNR for all configs except
+ * #0, which we don't support. Config #0 is an uncropped
+ * square 1920x1920 sensor, with dark corners.
+ * Therefore, we synthesize common resolutions by using
+ * crop/scale while always choosing config #0.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include "isp-common.dtsi"
+
+&isp {
+	apple,temporal-filter = <0>;
+
+	sensor-presets {
+		/* 1920x1080 */
+		preset0 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <1920 1080>;
+			apple,crop = <0 420 1920 1080>;
+		};
+		/* 1080x1920 */
+		preset1 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <1080 1920>;
+			apple,crop = <420 0 1080 1920>;
+		};
+		/* 1920x1440 */
+		preset2 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <1920 1440>;
+			apple,crop = <0 240 1920 1440>;
+		};
+		/* 1440x1920 */
+		preset3 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <1440 1920>;
+			apple,crop = <240 0 1440 1920>;
+		};
+		/* 1280x720 */
+		preset4 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <1280 720>;
+			apple,crop = <0 420 1920 1080>;
+		};
+		/* 720x1280 */
+		preset5 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <720 1280>;
+			apple,crop = <420 0 1080 1920>;
+		};
+		/* 1280x960 */
+		preset6 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <1280 960>;
+			apple,crop = <0 240 1920 1440>;
+		};
+		/* 960x1280 */
+		preset7 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <960 1280>;
+			apple,crop = <240 0 1440 1920>;
+		};
+		/* 640x480 */
+		preset8 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <640 480>;
+			apple,crop = <0 240 1920 1440>;
+		};
+		/* 480x640 */
+		preset9 {
+			apple,config-index = <0>;
+			apple,input-size = <1920 1920>;
+			apple,output-size = <480 640>;
+			apple,crop = <240 0 1440 1920>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/isp-imx558.dtsi b/arch/arm64/boot/dts/apple/isp-imx558.dtsi
new file mode 100644
index 00000000000000..d55854c883f5b6
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/isp-imx558.dtsi
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * ISP configuration for platforms with IMX558 sensor.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include "isp-common.dtsi"
+
+&isp {
+	apple,temporal-filter = <0>;
+
+	sensor-presets {
+		/* 1920x1080 */
+		preset0 {
+			apple,config-index = <1>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <1920 1080>;
+			apple,crop = <0 0 1920 1080>;
+		};
+		/* 1080x1920 */
+		preset1 {
+			apple,config-index = <2>;
+			apple,input-size = <1080 1920>;
+			apple,output-size = <1080 1920>;
+			apple,crop = <0 0 1080 1920>;
+		};
+		/* 1760x1328 */
+		preset2 {
+			apple,config-index = <3>;
+			apple,input-size = <1760 1328>;
+			apple,output-size = <1760 1328>;
+			apple,crop = <0 0 1760 1328>;
+		};
+		/* 1328x1760 */
+		preset3 {
+			apple,config-index = <4>;
+			apple,input-size = <1328 1760>;
+			apple,output-size = <1328 1760>;
+			apple,crop = <0 0 1328 1760>;
+		};
+		/* 1152x1152 */
+		preset4 {
+			apple,config-index = <5>;
+			apple,input-size = <1152 1152>;
+			apple,output-size = <1152 1152>;
+			apple,crop = <0 0 1152 1152>;
+		};
+		/* 1280x720 */
+		preset5 {
+			apple,config-index = <1>;
+			apple,input-size = <1920 1080>;
+			apple,output-size = <1280 720>;
+			apple,crop = <0 0 1920 1080>;
+		};
+		/* 720x1280 */
+		preset6 {
+			apple,config-index = <2>;
+			apple,input-size = <1080 1920>;
+			apple,output-size = <720 1280>;
+			apple,crop = <0 0 1080 1920>;
+		};
+		/* 1280x960 */
+		preset7 {
+			apple,config-index = <3>;
+			apple,input-size = <1760 1328>;
+			apple,output-size = <1280 960>;
+			apple,crop = <0 4 1760 1320>;
+		};
+		/* 960x1280 */
+		preset8 {
+			apple,config-index = <4>;
+			apple,input-size = <1328 1760>;
+			apple,output-size = <960 1280>;
+			apple,crop = <4 0 1320 1760>;
+		};
+		/* 640x480 */
+		preset9 {
+			apple,config-index = <3>;
+			apple,input-size = <1760 1328>;
+			apple,output-size = <640 480>;
+			apple,crop = <0 4 1760 1320>;
+		};
+		/* 480x640 */
+		preset10 {
+			apple,config-index = <4>;
+			apple,input-size = <1328 1760>;
+			apple,output-size = <480 640>;
+			apple,crop = <4 0 1320 1760>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi
index 0b16adf07f79b1..8868df1538d685 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi
@@ -8,6 +8,7 @@
 
 #include "s5l8960x.dtsi"
 #include "s5l8960x-common.dtsi"
+#include "s5l8960x-opp.dtsi"
 #include <dt-bindings/input/input.h>
 
 / {
@@ -49,3 +50,11 @@
 		};
 	};
 };
+
+&dwi_bl {
+	status = "okay";
+};
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi
index 741c5a9f21dd2f..dd57eb1d34c06e 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi
@@ -8,6 +8,7 @@
 
 #include "s5l8960x.dtsi"
 #include "s5l8960x-common.dtsi"
+#include "s5l8965x-opp.dtsi"
 #include <dt-bindings/input/input.h>
 
 / {
@@ -49,3 +50,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_dp>;
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi
index b27ef568062643..f3696d22e71cd1 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi
@@ -8,6 +8,7 @@
 
 #include "s5l8960x.dtsi"
 #include "s5l8960x-common.dtsi"
+#include "s5l8960x-opp.dtsi"
 #include <dt-bindings/input/input.h>
 
 / {
@@ -49,3 +50,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_dp>;
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi
new file mode 100644
index 00000000000000..e4d568c4a11955
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Operating points for Apple S5L8960X "A7" SoC, Up to 1296 MHz
+ *
+ * target-type: N51, N53, J85, J86, J87, J85m, J86m, J87m
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/ {
+	cyclone_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <15500>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-level = <2>;
+			clock-latency-ns = <43000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-level = <3>;
+			clock-latency-ns = <26000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <840000000>;
+			opp-level = <4>;
+			clock-latency-ns = <30000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1128000000>;
+			opp-level = <5>;
+			clock-latency-ns = <39500>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1296000000>;
+			opp-level = <6>;
+			clock-latency-ns = <45500>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi
new file mode 100644
index 00000000000000..da265f4843070a
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple S5L8960X "A7" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@20000 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@20008 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_secuart0: power-controller@200f0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "secuart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_secuart1: power-controller@200f8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "secuart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_cpm: power-controller@20010 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_lio: power-controller@20018 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "lio";
+		apple,always-on; /* Core device */
+	};
+
+	ps_iomux: power-controller@20020 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "iomux";
+		apple,always-on; /* Core device */
+	};
+
+	ps_aic: power-controller@20028 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_debug: power-controller@20030 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_dwi: power-controller@20038 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20038 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@20040 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_mca0: power-controller@20048 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@20050 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@20058 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@20060 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20060 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@20068 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20068 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@20070 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20070 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@20078 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20078 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@20080 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20080 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@20088 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20088 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@20090 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20090 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@20098 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20098 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@200a0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@200a8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@200b0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@200b8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@200c0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@200c8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@200d0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@200d8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@200e0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@200e8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio_p: power-controller@20110 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+	};
+
+	ps_usb: power-controller@20158 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@20160 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@20170 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@20180 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_disp_busmux: power-controller@201a8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp_busmux";
+	};
+
+	ps_media: power-controller@201d8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp: power-controller@201d0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp";
+	};
+
+	ps_msr: power-controller@201e0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@201e8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0: power-controller@201b0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_aes0: power-controller@20100 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aes0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@20108 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_hsic0_phy: power-controller@20118 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_hsic1_phy: power-controller@20120 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic1_phy";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_hsic2_phy: power-controller@20128 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic2_phy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_ispsens0: power-controller@20130 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens0";
+	};
+
+	ps_ispsens1: power-controller@20138 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens1";
+	};
+
+	ps_mcc: power-controller@20140 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Core device */
+	};
+
+	ps_mcu: power-controller@20148 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcu";
+		apple,always-on; /* Core device */
+	};
+
+	ps_amp: power-controller@20150 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "amp";
+		apple,always-on; /* Core device */
+	};
+
+	ps_usb2host0_ohci: power-controller@20168 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usb2host1_ohci: power-controller@20178 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1_ohci";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_usbotg: power-controller@20188 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@20190 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@20198 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_cp: power-controller@201a0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cp";
+		apple,always-on; /* Core device */
+	};
+
+	ps_mipi_dsi: power-controller@201b8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mipi_dsi";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_dp: power-controller@201c0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0>;
+	};
+
+	ps_disp1: power-controller@201c8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp1";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_vdec: power-controller@201f0 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec";
+		power-domains = <&ps_media>;
+	};
+
+	ps_venc: power-controller@201f8 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc";
+		power-domains = <&ps_media>;
+	};
+
+	ps_ans: power-controller@20200 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ans";
+	};
+
+	ps_ans_dll: power-controller@20208 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ans_dll";
+		power-domains = <&ps_ans>;
+	};
+
+	ps_gfx: power-controller@20218 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_sep: power-controller@20268 {
+		compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		power-domains = <&ps_secuart1>, <&ps_secuart0>;
+		apple,always-on; /* Locked on */
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x.dtsi b/arch/arm64/boot/dts/apple/s5l8960x.dtsi
index 0218ecac1d8364..d820b0e430507f 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x.dtsi
@@ -33,6 +33,8 @@
 			compatible = "apple,cyclone";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&cyclone_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -41,6 +43,8 @@
 			compatible = "apple,cyclone";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&cyclone_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -53,6 +57,12 @@
 		nonposted-mmio;
 		ranges;
 
+		cpufreq: performance-controller@202220000 {
+			compatible = "apple,s5l8960x-cluster-cpufreq";
+			reg = <0x2 0x02220000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@20a0a0000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x0a0a0000 0x0 0x4000>;
@@ -62,9 +72,18 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
+		pmgr: power-management@20e000000 {
+			compatible = "apple,s5l8960x-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x24000>;
+		};
+
 		wdt: watchdog@20e027000 {
 			compatible = "apple,s5l8960x-wdt", "apple,wdt";
 			reg = <0x2 0x0e027000 0x0 0x1000>;
@@ -78,11 +97,20 @@
 			reg = <0x2 0x0e100000 0x0 0x100000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
+		};
+
+		dwi_bl: backlight@20e200010 {
+			compatible = "apple,s5l8960x-dwi-bl", "apple,dwi-bl";
+			reg = <0x2 0x0e200010 0x0 0x8>;
+			power-domains = <&ps_dwi>;
+			status = "disabled";
 		};
 
 		pinctrl: pinctrl@20e300000 {
 			compatible = "apple,s5l8960x-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x0e300000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -111,3 +139,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "s5l8960x-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi b/arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi
new file mode 100644
index 00000000000000..d34dae74a90c52
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Operating points for Apple S5L8965X "A7" Rev A SoC, up to 1392 MHz
+ *
+ * target-type: J71, J72, J73
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/ {
+	cyclone_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <10000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-level = <2>;
+			clock-latency-ns = <49000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <840000000>;
+			opp-level = <3>;
+			clock-latency-ns = <30000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1128000000>;
+			opp-level = <4>;
+			clock-latency-ns = <39500>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1296000000>;
+			opp-level = <5>;
+			clock-latency-ns = <45500>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1392000000>;
+			opp-level = <6>;
+			clock-latency-ns = <46500>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi b/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi
index 4276bd890e81b1..cb42c5f2c1b6ca 100644
--- a/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi
+++ b/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi
@@ -43,6 +43,10 @@
 	};
 };
 
+&dwi_bl {
+	status = "okay";
+};
+
 &serial0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi b/arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi
new file mode 100644
index 00000000000000..196b8e745a957c
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple S8000/3 "A9" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@80000 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@80008 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@80040 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_busif: power-controller@80150 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_busif";
+	};
+
+	ps_sio_p: power-controller@80158 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+		power-domains = <&ps_sio_busif>;
+	};
+
+	ps_sbr: power-controller@80100 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sbr";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_aic: power-controller@80108 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_dwi: power-controller@80110 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@80118 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_pms: power-controller@80120 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms";
+		apple,always-on; /* Core device */
+	};
+
+	ps_pcie_ref: power-controller@80148 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_mca0: power-controller@80168 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@80170 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@80178 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@80180 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@80188 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@80190 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@80198 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@801a0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@801a8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@801b0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@801b8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@801c0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@801c8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@801d0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@801d8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@801e0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@801e8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@801f0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@801f8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@80160 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_hsic0_phy: power-controller@80128 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_hsic1_phy: power-controller@80130 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic1_phy";
+		power-domains = <&ps_usb2host2>;
+	};
+
+	ps_isp_sens0: power-controller@80138 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens0";
+	};
+
+	ps_isp_sens1: power-controller@80140 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens1";
+	};
+
+	ps_usb: power-controller@80250 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@80258 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@80260 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@80270 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host2: power-controller@80280 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host2";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_rtmux: power-controller@802a8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "rtmux";
+	};
+
+	ps_media: power-controller@802d0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp: power-controller@802c8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_msr: power-controller@802e0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@802d8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0: power-controller@802b0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_pmp: power-controller@802e8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pmp";
+	};
+
+	ps_pms_sram: power-controller@802f0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms_sram";
+	};
+
+	ps_uart5: power-controller@80200 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@80208 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@80210 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@80218 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_aes0: power-controller@80220 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aes0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mcc: power-controller@80228 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_dcs0: power-controller@80230 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs0";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs1: power-controller@80238 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs1";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs2: power-controller@80240 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs2";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs3: power-controller@80248 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs3";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_usb2host0_ohci: power-controller@80268 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usb2host1_ohci: power-controller@80278 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1_ohci";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_usb2host2_ohci: power-controller@80288 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host2_ohci";
+		power-domains = <&ps_usb2host2>;
+	};
+
+	ps_usbotg: power-controller@80290 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@80298 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@802a0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_mipi_dsi: power-controller@802b8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mipi_dsi";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_dp: power-controller@802c0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0>;
+	};
+
+	ps_vdec: power-controller@802f8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec";
+		power-domains = <&ps_media>;
+	};
+
+	ps_venc: power-controller@80308 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pcie: power-controller@80310 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_pcie_aux: power-controller@80318 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_pcie_link0: power-controller@80320 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link0";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link1: power-controller@80328 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link1";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link2: power-controller@80330 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80330 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link2";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link3: power-controller@80338 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80338 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link3";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_gfx: power-controller@80340 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80340 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_sep: power-controller@80400 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_venc_pipe: power-controller@88000 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe";
+		power-domains = <&ps_venc>;
+	};
+
+	ps_venc_me0: power-controller@88008 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+	};
+
+	ps_venc_me1: power-controller@88010 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+	};
+};
+
+&pmgr_mini {
+	ps_aop: power-controller@80000 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop";
+		power-domains = <&ps_aop_busif &ps_aop_cpu &ps_aop_filter>;
+		apple,always-on; /* Always on processor */
+	};
+
+	ps_debug: power-controller@80008 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_aop_gpio: power-controller@80010 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_gpio";
+		power-domains = <&ps_aop>;
+	};
+
+	ps_aop_cpu: power-controller@80040 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_cpu";
+	};
+
+	ps_aop_filter: power-controller@80048 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_filter";
+	};
+
+	ps_aop_busif: power-controller@80050 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_busif";
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/s800-0-3.dtsi b/arch/arm64/boot/dts/apple/s800-0-3.dtsi
new file mode 100644
index 00000000000000..c0e9ae45627c81
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s800-0-3.dtsi
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple S8000/S8003 "A9" SoC
+ *
+ * This file contains parts common to both variants of the A9
+ *
+ * Copyright (c) 2022, Konrad Dybcio <konradybcio@kernel.org>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/apple.h>
+
+/ {
+	interrupt-parent = <&aic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	clkref: clock-ref {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+		clock-output-names = "clkref";
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			compatible = "apple,twister";
+			reg = <0x0 0x0>;
+			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			operating-points-v2 = <&twister_opp>;
+			performance-domains = <&cpufreq>;
+			enable-method = "spin-table";
+			device_type = "cpu";
+		};
+
+		cpu1: cpu@1 {
+			compatible = "apple,twister";
+			reg = <0x0 0x1>;
+			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			operating-points-v2 = <&twister_opp>;
+			performance-domains = <&cpufreq>;
+			enable-method = "spin-table";
+			device_type = "cpu";
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		nonposted-mmio;
+		ranges;
+
+		cpufreq: performance-controller@202220000 {
+			compatible = "apple,s8000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x02220000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
+		serial0: serial@20a0c0000 {
+			compatible = "apple,s5l-uart";
+			reg = <0x2 0x0a0c0000 0x0 0x4000>;
+			reg-io-width = <4>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 192 IRQ_TYPE_LEVEL_HIGH>;
+			/* Use the bootloader-enabled clocks for now. */
+			clocks = <&clkref>, <&clkref>;
+			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
+			status = "disabled";
+		};
+
+		pmgr: power-management@20e000000 {
+			compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x8c000>;
+		};
+
+		aic: interrupt-controller@20e100000 {
+			compatible = "apple,s8000-aic", "apple,aic";
+			reg = <0x2 0x0e100000 0x0 0x100000>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			power-domains = <&ps_aic>;
+		};
+
+		dwi_bl: backlight@20e200080 {
+			compatible = "apple,s8000-dwi-bl", "apple,dwi-bl";
+			reg = <0x2 0x0e200080 0x0 0x8>;
+			power-domains = <&ps_dwi>;
+			status = "disabled";
+		};
+
+		pinctrl_ap: pinctrl@20f100000 {
+			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
+			reg = <0x2 0x0f100000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_ap 0 0 208>;
+			apple,npins = <208>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 43 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 44 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_aop: pinctrl@2100f0000 {
+			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
+			reg = <0x2 0x100f0000 0x0 0x100000>;
+			power-domains = <&ps_aop_gpio>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_aop 0 0 42>;
+			apple,npins = <42>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 113 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 114 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 115 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 116 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 117 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 118 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 119 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pmgr_mini: power-management@210200000 {
+			compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x10200000 0 0x84000>;
+		};
+
+		wdt: watchdog@2102b0000 {
+			compatible = "apple,s8000-wdt", "apple,wdt";
+			reg = <0x2 0x102b0000 0x0 0x4000>;
+			clocks = <&clkref>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 4 IRQ_TYPE_LEVEL_HIGH>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&aic>;
+		interrupt-names = "phys", "virt";
+		/* Note that A9 doesn't actually have a hypervisor (EL2 is not implemented). */
+		interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
+
+#include "s800-0-3-pmgr.dtsi"
+
+/*
+ * The A9 was made by two separate fabs on two different process
+ * nodes: Samsung made the S8000 (APL0898) on 14nm and TSMC made
+ * the S8003 (APL1022) on 16nm. There are some minor differences
+ * such as timing in cpufreq state transitions.
+ */
diff --git a/arch/arm64/boot/dts/apple/s8000.dtsi b/arch/arm64/boot/dts/apple/s8000.dtsi
index 6e9046ea106c08..72322f5677ab15 100644
--- a/arch/arm64/boot/dts/apple/s8000.dtsi
+++ b/arch/arm64/boot/dts/apple/s8000.dtsi
@@ -4,141 +4,65 @@
  *
  * Other names: H8P, "Maui"
  *
- * Copyright (c) 2022, Konrad Dybcio <konradybcio@kernel.org>
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
  */
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/apple-aic.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/apple.h>
+#include "s800-0-3.dtsi"
 
 / {
-	interrupt-parent = <&aic>;
-	#address-cells = <2>;
-	#size-cells = <2>;
+	twister_opp: opp-table {
+		compatible = "operating-points-v2";
 
-	clkref: clock-ref {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <24000000>;
-		clock-output-names = "clkref";
-	};
-
-	cpus {
-		#address-cells = <2>;
-		#size-cells = <0>;
-
-		cpu0: cpu@0 {
-			compatible = "apple,twister";
-			reg = <0x0 0x0>;
-			cpu-release-addr = <0 0>; /* To be filled in by loader */
-			enable-method = "spin-table";
-			device_type = "cpu";
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <650>;
 		};
-
-		cpu1: cpu@1 {
-			compatible = "apple,twister";
-			reg = <0x0 0x1>;
-			cpu-release-addr = <0 0>; /* To be filled in by loader */
-			enable-method = "spin-table";
-			device_type = "cpu";
+		opp02 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-level = <2>;
+			clock-latency-ns = <75000>;
 		};
-	};
-
-	soc {
-		compatible = "simple-bus";
-		#address-cells = <2>;
-		#size-cells = <2>;
-		nonposted-mmio;
-		ranges;
-
-		serial0: serial@20a0c0000 {
-			compatible = "apple,s5l-uart";
-			reg = <0x2 0x0a0c0000 0x0 0x4000>;
-			reg-io-width = <4>;
-			interrupt-parent = <&aic>;
-			interrupts = <AIC_IRQ 192 IRQ_TYPE_LEVEL_HIGH>;
-			/* Use the bootloader-enabled clocks for now. */
-			clocks = <&clkref>, <&clkref>;
-			clock-names = "uart", "clk_uart_baud0";
-			status = "disabled";
+		opp03 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-level = <3>;
+			clock-latency-ns = <27000>;
 		};
-
-		aic: interrupt-controller@20e100000 {
-			compatible = "apple,s8000-aic", "apple,aic";
-			reg = <0x2 0x0e100000 0x0 0x100000>;
-			#interrupt-cells = <3>;
-			interrupt-controller;
+		opp04 {
+			opp-hz = /bits/ 64 <912000000>;
+			opp-level = <4>;
+			clock-latency-ns = <32000>;
 		};
-
-		pinctrl_ap: pinctrl@20f100000 {
-			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
-			reg = <0x2 0x0f100000 0x0 0x100000>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio-ranges = <&pinctrl_ap 0 0 208>;
-			apple,npins = <208>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-			interrupt-parent = <&aic>;
-			interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 43 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 44 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>;
+		opp05 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-level = <5>;
+			clock-latency-ns = <35000>;
 		};
-
-		pinctrl_aop: pinctrl@2100f0000 {
-			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
-			reg = <0x2 0x100f0000 0x0 0x100000>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio-ranges = <&pinctrl_aop 0 0 42>;
-			apple,npins = <42>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-			interrupt-parent = <&aic>;
-			interrupts = <AIC_IRQ 113 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 114 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 115 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 116 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 117 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 118 IRQ_TYPE_LEVEL_HIGH>,
-				     <AIC_IRQ 119 IRQ_TYPE_LEVEL_HIGH>;
+		opp06 {
+			opp-hz = /bits/ 64 <1512000000>;
+			opp-level = <6>;
+			clock-latency-ns = <45000>;
 		};
-
-		wdt: watchdog@2102b0000 {
-			compatible = "apple,s8000-wdt", "apple,wdt";
-			reg = <0x2 0x102b0000 0x0 0x4000>;
-			clocks = <&clkref>;
-			interrupt-parent = <&aic>;
-			interrupts = <AIC_IRQ 4 IRQ_TYPE_LEVEL_HIGH>;
+		opp07 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-level = <7>;
+			clock-latency-ns = <58000>;
 		};
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupt-parent = <&aic>;
-		interrupt-names = "phys", "virt";
-		/* Note that A9 doesn't actually have a hypervisor (EL2 is not implemented). */
-		interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
-			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp08 {
+			opp-hz = /bits/ 64 <1844000000>;
+			opp-level = <8>;
+			clock-latency-ns = <58000>;
+			turbo-mode;
+		};
+#endif
 	};
 };
 
 /*
  * The A9 was made by two separate fabs on two different process
  * nodes: Samsung made the S8000 (APL0898) on 14nm and TSMC made
- * the S8003 (APL1022) on 16nm. While they are seemingly the same,
- * they do have distinct part numbers and devices using them have
- * distinct model names. There are currently no known differences
- * between these as far as Linux is concerned, but let's keep things
- * structured properly to make it easier to alter the behaviour of
- * one of the chips if need be.
+ * the S8003 (APL1022) on 16nm. There are some minor differences
+ * such as timing in cpufreq state transitions.
  */
diff --git a/arch/arm64/boot/dts/apple/s8001-common.dtsi b/arch/arm64/boot/dts/apple/s8001-common.dtsi
index e94d0e77653a8a..91b06e1138943a 100644
--- a/arch/arm64/boot/dts/apple/s8001-common.dtsi
+++ b/arch/arm64/boot/dts/apple/s8001-common.dtsi
@@ -24,6 +24,7 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0 &ps_dp0>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi b/arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi
new file mode 100644
index 00000000000000..e66a4c1c138fe8
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple iPad Pro (12.9-inch)
+ *
+ * This file contains parts common to iPad Pro (12.9-inch).
+ *
+ * target-type: J98a, J99a
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+&ps_dcs4 {
+	apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs5 {
+	apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs6 {
+	apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs7 {
+	apple,always-on; /* LPDDR4 interface */
+};
diff --git a/arch/arm64/boot/dts/apple/s8001-j98a.dts b/arch/arm64/boot/dts/apple/s8001-j98a.dts
index 6d6b841e7ab0bd..162eca05c2d9fd 100644
--- a/arch/arm64/boot/dts/apple/s8001-j98a.dts
+++ b/arch/arm64/boot/dts/apple/s8001-j98a.dts
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include "s8001-pro.dtsi"
+#include "s8001-j98a-j99a.dtsi"
 
 / {
 	compatible = "apple,j98a", "apple,s8001", "apple,arm-platform";
diff --git a/arch/arm64/boot/dts/apple/s8001-j99a.dts b/arch/arm64/boot/dts/apple/s8001-j99a.dts
index d20194b1cae733..7b765820c69e96 100644
--- a/arch/arm64/boot/dts/apple/s8001-j99a.dts
+++ b/arch/arm64/boot/dts/apple/s8001-j99a.dts
@@ -7,6 +7,7 @@
 /dts-v1/;
 
 #include "s8001-pro.dtsi"
+#include "s8001-j98a-j99a.dtsi"
 
 / {
 	compatible = "apple,j99a", "apple,s8001", "apple,arm-platform";
diff --git a/arch/arm64/boot/dts/apple/s8001-pmgr.dtsi b/arch/arm64/boot/dts/apple/s8001-pmgr.dtsi
new file mode 100644
index 00000000000000..859ab77ae92b01
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s8001-pmgr.dtsi
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple S8001 "A9X" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@80000 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@80008 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@80040 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_busif: power-controller@80148 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_busif";
+	};
+
+	ps_sio_p: power-controller@80150 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+		power-domains = <&ps_sio_busif>;
+	};
+
+	ps_sbr: power-controller@80100 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sbr";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_aic: power-controller@80108 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_dwi: power-controller@80110 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@80118 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_pcie_ref: power-controller@80140 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_mca0: power-controller@80160 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@80168 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@80170 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@80178 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@80180 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@80188 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@80190 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@80198 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@801a0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@801a8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@801b0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@801b8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@801c0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@801c8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@801d0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@801d8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@801e0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@801e8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@801f0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@801f8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@80158 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_hsic0_phy: power-controller@80128 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_isp_sens0: power-controller@80130 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens0";
+	};
+
+	ps_isp_sens1: power-controller@80138 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens1";
+	};
+
+	ps_pms: power-controller@80120 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms";
+		apple,always-on; /* Core device */
+	};
+
+	ps_usb: power-controller@80278 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@80280 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@80288 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@80298 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host2: power-controller@802a8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host2";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_rtmux: power-controller@802d0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "rtmux";
+		apple,always-on; /* Core device */
+	};
+
+	ps_disp1mux: power-controller@802e8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp1mux";
+	};
+
+	ps_disp0: power-controller@802d8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_disp1: power-controller@802f0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp1";
+		power-domains = <&ps_disp1mux>;
+	};
+
+	ps_uart6: power-controller@80200 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@80208 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@80210 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_aes0: power-controller@80218 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aes0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mcc: power-controller@80230 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_dcs0: power-controller@80238 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs0";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs1: power-controller@80240 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs1";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs2: power-controller@80248 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs2";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs3: power-controller@80250 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs3";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs4: power-controller@80258 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs4";
+	};
+
+	ps_dcs5: power-controller@80260 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs5";
+	};
+
+	ps_dcs6: power-controller@80268 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs6";
+	};
+
+	ps_dcs7: power-controller@80270 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs7";
+	};
+
+	ps_usb2host0_ohci: power-controller@80290 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usbotg: power-controller@802b8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@802c0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@802c8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_dp0: power-controller@802e0 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp0";
+		power-domains = <&ps_disp0>;
+	};
+
+	ps_dp1: power-controller@802f8 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp1";
+		power-domains = <&ps_disp1>;
+	};
+
+	ps_dpa0: power-controller@80220 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dpa0";
+	};
+
+	ps_dpa1: power-controller@80228 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dpa1";
+	};
+
+	ps_media: power-controller@80308 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp: power-controller@80300 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_msr: power-controller@80318 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@80310 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_venc: power-controller@80340 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80340 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pcie: power-controller@80348 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80348 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_srs: power-controller@80390 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80390 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "srs";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pcie_aux: power-controller@80350 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80350 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_pcie_link0: power-controller@80358 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80358 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link0";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link1: power-controller@80360 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80360 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link1";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link2: power-controller@80368 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80368 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link2";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link3: power-controller@80370 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80370 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link3";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link4: power-controller@80378 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80378 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link4";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_pcie_link5: power-controller@80380 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80380 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_link5";
+		power-domains = <&ps_pcie>;
+	};
+
+	ps_vdec: power-controller@80330 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80330 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec";
+		power-domains = <&ps_media>;
+	};
+
+	ps_gfx: power-controller@80388 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80388 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_pmp: power-controller@80320 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pmp";
+	};
+
+	ps_pms_sram: power-controller@80328 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms_sram";
+	};
+
+	ps_sep: power-controller@80400 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_venc_pipe: power-controller@88000 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe";
+		power-domains = <&ps_venc>;
+	};
+
+	ps_venc_me0: power-controller@88008 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+	};
+
+	ps_venc_me1: power-controller@88010 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+	};
+};
+
+&pmgr_mini {
+	ps_aop: power-controller@80000 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop";
+		power-domains = <&ps_aop_cpu &ps_aop_filter &ps_aop_busif>;
+		apple,always-on; /* Always-on processor */
+	};
+
+	ps_debug: power-controller@80008 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_aop_gpio: power-controller@80010 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_gpio";
+	};
+
+	ps_aop_cpu: power-controller@80040 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_cpu";
+	};
+
+	ps_aop_filter: power-controller@80048 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_filter";
+	};
+
+	ps_aop_busif: power-controller@80050 {
+		compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_busif";
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/s8001.dtsi b/arch/arm64/boot/dts/apple/s8001.dtsi
index 23ee3238844d95..d56d49c048bbf5 100644
--- a/arch/arm64/boot/dts/apple/s8001.dtsi
+++ b/arch/arm64/boot/dts/apple/s8001.dtsi
@@ -32,6 +32,8 @@
 			compatible = "apple,twister";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			operating-points-v2 = <&twister_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -40,11 +42,62 @@
 			compatible = "apple,twister";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			operating-points-v2 = <&twister_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
 	};
 
+	twister_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <800>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-level = <2>;
+			clock-latency-ns = <53000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <792000000>;
+			opp-level = <3>;
+			clock-latency-ns = <18000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1080000000>;
+			opp-level = <4>;
+			clock-latency-ns = <21000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1440000000>;
+			opp-level = <5>;
+			clock-latency-ns = <25000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-level = <6>;
+			clock-latency-ns = <33000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <2160000000>;
+			opp-level = <7>;
+			clock-latency-ns = <45000>;
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp08 {
+			opp-hz = /bits/ 64 <2160000000>;
+			opp-level = <8>;
+			clock-latency-ns = <45000>;
+			turbo-mode;
+		};
+#endif
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -52,6 +105,12 @@
 		nonposted-mmio;
 		ranges;
 
+		cpufreq: performance-controller@202220000 {
+			compatible = "apple,s8000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x02220000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@20a0c0000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -61,19 +120,30 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
+		pmgr: power-management@20e000000 {
+			compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x8c000>;
+		};
+
 		aic: interrupt-controller@20e100000 {
 			compatible = "apple,s8000-aic", "apple,aic";
 			reg = <0x2 0x0e100000 0x0 0x100000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
 		};
 
 		pinctrl_ap: pinctrl@20f100000 {
 			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x0f100000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -95,6 +165,7 @@
 		pinctrl_aop: pinctrl@2100f0000 {
 			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x100f0000 0x0 0x100000>;
+			power-domains = <&ps_aop_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -113,6 +184,14 @@
 				     <AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		pmgr_mini: power-management@210200000 {
+			compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x10200000 0 0x84000>;
+		};
+
 		wdt: watchdog@2102b0000 {
 			compatible = "apple,s8000-wdt", "apple,wdt";
 			reg = <0x2 0x102b0000 0x0 0x4000>;
@@ -131,3 +210,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "s8001-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/s8003.dtsi b/arch/arm64/boot/dts/apple/s8003.dtsi
index 7e4ad4f7e49953..79df5c7832600b 100644
--- a/arch/arm64/boot/dts/apple/s8003.dtsi
+++ b/arch/arm64/boot/dts/apple/s8003.dtsi
@@ -4,18 +4,65 @@
  *
  * Other names: H8P, "Malta"
  *
- * Copyright (c) 2022, Konrad Dybcio <konradybcio@kernel.org>
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
  */
 
-#include "s8000.dtsi"
+#include "s800-0-3.dtsi"
+
+/ {
+	twister_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <500>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-level = <2>;
+			clock-latency-ns = <45000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-level = <3>;
+			clock-latency-ns = <22000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <912000000>;
+			opp-level = <4>;
+			clock-latency-ns = <25000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-level = <5>;
+			clock-latency-ns = <28000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1512000000>;
+			opp-level = <6>;
+			clock-latency-ns = <35000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-level = <7>;
+			clock-latency-ns = <38000>;
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp08 {
+			opp-hz = /bits/ 64 <1844000000>;
+			opp-level = <8>;
+			clock-latency-ns = <38000>;
+			turbo-mode;
+		};
+#endif
+	};
+};
 
 /*
  * The A9 was made by two separate fabs on two different process
  * nodes: Samsung made the S8000 (APL0898) on 14nm and TSMC made
- * the S8003 (APL1022) on 16nm. While they are seemingly the same,
- * they do have distinct part numbers and devices using them have
- * distinct model names. There are currently no known differences
- * between these as far as Linux is concerned, but let's keep things
- * structured properly to make it easier to alter the behaviour of
- * one of the chips if need be.
+ * the S8003 (APL1022) on 16nm. There are some minor differences
+ * such as the timing of cpufreq state transitions.
  */
diff --git a/arch/arm64/boot/dts/apple/s800x-6s.dtsi b/arch/arm64/boot/dts/apple/s800x-6s.dtsi
index 49b04db310c6e8..1dcf80cc292066 100644
--- a/arch/arm64/boot/dts/apple/s800x-6s.dtsi
+++ b/arch/arm64/boot/dts/apple/s800x-6s.dtsi
@@ -47,3 +47,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi b/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi
index 32570ed3cdf009..c1701e81f0c1a2 100644
--- a/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi
+++ b/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi
@@ -41,3 +41,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_dp>;
+};
diff --git a/arch/arm64/boot/dts/apple/s800x-se.dtsi b/arch/arm64/boot/dts/apple/s800x-se.dtsi
index a1a5690e83713c..deb7c7cc90f625 100644
--- a/arch/arm64/boot/dts/apple/s800x-se.dtsi
+++ b/arch/arm64/boot/dts/apple/s800x-se.dtsi
@@ -47,3 +47,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/spi1-nvram.dtsi b/arch/arm64/boot/dts/apple/spi1-nvram.dtsi
new file mode 100644
index 00000000000000..3df2fd3993b528
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/spi1-nvram.dtsi
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Devicetree include for common spi-nor nvram flash.
+//
+// Apple uses a consistent configuration for the nvram on all known M1* and
+// M2* devices.
+//
+// Copyright The Asahi Linux Contributors
+
+/ {
+	aliases {
+		nvram = &nvram;
+	};
+};
+
+&spi1 {
+	status = "okay";
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0x0>;
+		spi-max-frequency = <25000000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			nvram: partition@700000 {
+				label = "nvram";
+				/* To be filled by the loader */
+				reg = <0x0 0x0>;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t6000-j314s.dts b/arch/arm64/boot/dts/apple/t6000-j314s.dts
index c9e192848fe3f9..afa86668440f04 100644
--- a/arch/arm64/boot/dts/apple/t6000-j314s.dts
+++ b/arch/arm64/boot/dts/apple/t6000-j314s.dts
@@ -16,3 +16,28 @@
 	compatible = "apple,j314s", "apple,t6000", "apple,arm-platform";
 	model = "Apple MacBook Pro (14-inch, M1 Pro, 2021)";
 };
+
+&wifi0 {
+	brcm,board-type = "apple,maldives";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,maldives";
+};
+
+&panel {
+	compatible = "apple,panel-j314", "apple,panel-mini-led", "apple,panel";
+	width-mm = <302>;
+	height-mm = <196>;
+	adj-height-mm = <189>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J314";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j314-macaudio", "apple,macaudio";
+	model = "MacBook Pro J314";
+};
diff --git a/arch/arm64/boot/dts/apple/t6000-j316s.dts b/arch/arm64/boot/dts/apple/t6000-j316s.dts
index ff1803ce23001c..ddfc3c530923c7 100644
--- a/arch/arm64/boot/dts/apple/t6000-j316s.dts
+++ b/arch/arm64/boot/dts/apple/t6000-j316s.dts
@@ -16,3 +16,28 @@
 	compatible = "apple,j316s", "apple,t6000", "apple,arm-platform";
 	model = "Apple MacBook Pro (16-inch, M1 Pro, 2021)";
 };
+
+&wifi0 {
+	brcm,board-type = "apple,madagascar";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,madagascar";
+};
+
+&panel {
+	compatible = "apple,panel-j316", "apple,panel-mini-led", "apple,panel";
+	width-mm = <346>;
+	height-mm = <223>;
+	adj-height-mm = <216>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J316";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j316-macaudio", "apple,macaudio";
+	model = "MacBook Pro J316";
+};
diff --git a/arch/arm64/boot/dts/apple/t6000.dtsi b/arch/arm64/boot/dts/apple/t6000.dtsi
index 89c3b211b116e9..c9e4e52d9aac92 100644
--- a/arch/arm64/boot/dts/apple/t6000.dtsi
+++ b/arch/arm64/boot/dts/apple/t6000.dtsi
@@ -9,6 +9,8 @@
 
 /* This chip is just a cut down version of t6001, so include it and disable the missing parts */
 
+#define GPU_REPEAT(x) <x x>
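+/*
+ * GPU_REPEAT() expands a single value into one entry per GPU cluster for
+ * the shared t600x GPU tables (opp-microvolt, leakage coefficients); two
+ * entries are assumed here for t6000, overriding the four-entry default
+ * in t6001.dtsi.
+ */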
+
 #include "t6001.dtsi"
 
 / {
@@ -16,3 +18,7 @@
 };
 
 /delete-node/ &pmgr_south;
+
+&gpu {
+	compatible = "apple,agx-t6000", "apple,agx-g13x";
+};
diff --git a/arch/arm64/boot/dts/apple/t6001-j314c.dts b/arch/arm64/boot/dts/apple/t6001-j314c.dts
index 1761d15b98c12f..245df6d03ee422 100644
--- a/arch/arm64/boot/dts/apple/t6001-j314c.dts
+++ b/arch/arm64/boot/dts/apple/t6001-j314c.dts
@@ -16,3 +16,28 @@
 	compatible = "apple,j314c", "apple,t6001", "apple,arm-platform";
 	model = "Apple MacBook Pro (14-inch, M1 Max, 2021)";
 };
+
+&wifi0 {
+	brcm,board-type = "apple,maldives";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,maldives";
+};
+
+&panel {
+	compatible = "apple,panel-j314", "apple,panel-mini-led", "apple,panel";
+	width-mm = <302>;
+	height-mm = <196>;
+	adj-height-mm = <189>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J314";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j314-macaudio", "apple,macaudio";
+	model = "MacBook Pro J314";
+};
diff --git a/arch/arm64/boot/dts/apple/t6001-j316c.dts b/arch/arm64/boot/dts/apple/t6001-j316c.dts
index 750e9beeffc0aa..a000d497b705fa 100644
--- a/arch/arm64/boot/dts/apple/t6001-j316c.dts
+++ b/arch/arm64/boot/dts/apple/t6001-j316c.dts
@@ -16,3 +16,28 @@
 	compatible = "apple,j316c", "apple,t6001", "apple,arm-platform";
 	model = "Apple MacBook Pro (16-inch, M1 Max, 2021)";
 };
+
+&wifi0 {
+	brcm,board-type = "apple,madagascar";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,madagascar";
+};
+
+&panel {
+	compatible = "apple,panel-j316", "apple,panel-mini-led", "apple,panel";
+	width-mm = <346>;
+	height-mm = <223>;
+	adj-height-mm = <216>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J316";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j316-macaudio", "apple,macaudio";
+	model = "MacBook Pro J316";
+};
diff --git a/arch/arm64/boot/dts/apple/t6001-j375c.dts b/arch/arm64/boot/dts/apple/t6001-j375c.dts
index 62ea437b58b25c..40aef1386adfd1 100644
--- a/arch/arm64/boot/dts/apple/t6001-j375c.dts
+++ b/arch/arm64/boot/dts/apple/t6001-j375c.dts
@@ -16,3 +16,39 @@
 	compatible = "apple,j375c", "apple,t6001", "apple,arm-platform";
 	model = "Apple Mac Studio (M1 Max, 2022)";
 };
+
+&dpaudio0 {
+	status = "okay";
+};
+
+&sound {
+	compatible = "apple,j375-macaudio", "apple,macaudio";
+	model = "Mac Studio J375";
+};
+
+&pinctrl_ap {
+	usb_hub_oe-hog {
+		gpio-hog;
+		gpios = <230 0>;
+		input;
+		line-name = "usb-hub-oe";
+	};
+
+	usb_hub_rst-hog {
+		gpio-hog;
+		gpios = <231 GPIO_ACTIVE_LOW>;
+		output-low;
+		line-name = "usb-hub-rst";
+	};
+};
+
+&gpu {
+	apple,avg-power-ki-only = <0.6375>;
+	apple,avg-power-kp = <0.58>;
+	apple,avg-power-target-filter-tc = <1>;
+	apple,perf-base-pstate = <3>;
+	apple,ppm-ki = <5.8>;
+	apple,ppm-kp = <0.355>;
+};
+
+#include "hwmon-fan-dual.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t6001.dtsi b/arch/arm64/boot/dts/apple/t6001.dtsi
index 620b17e4031f06..20e7c1cf383562 100644
--- a/arch/arm64/boot/dts/apple/t6001.dtsi
+++ b/arch/arm64/boot/dts/apple/t6001.dtsi
@@ -11,9 +11,15 @@
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/spmi/spmi.h>
 
 #include "multi-die-cpp.h"
 
+#ifndef GPU_REPEAT
+# define GPU_REPEAT(x) <x x x x>
+#endif
+
 #include "t600x-common.dtsi"
 
 / {
@@ -26,6 +32,8 @@
 
 		ranges;
 		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
 
 		// filled via templated includes at the end of the file
 	};
@@ -61,3 +69,7 @@
 		};
 	};
 };
+
+&gpu {
+	compatible = "apple,agx-t6001", "apple,agx-g13x";
+};
diff --git a/arch/arm64/boot/dts/apple/t6002-j375d.dts b/arch/arm64/boot/dts/apple/t6002-j375d.dts
index 3365429bdc8be9..9f3f8d384317ca 100644
--- a/arch/arm64/boot/dts/apple/t6002-j375d.dts
+++ b/arch/arm64/boot/dts/apple/t6002-j375d.dts
@@ -15,6 +15,19 @@
 / {
 	compatible = "apple,j375d", "apple,t6002", "apple,arm-platform";
 	model = "Apple Mac Studio (M1 Ultra, 2022)";
+	aliases {
+		atcphy4 = &atcphy0_die1;
+		atcphy5 = &atcphy1_die1;
+	};
+};
+
+&dpaudio0 {
+	status = "okay";
+};
+
+&sound {
+	compatible = "apple,j375-macaudio", "apple,macaudio";
+	model = "Mac Studio J375";
 };
 
 /* USB Type C */
@@ -26,6 +39,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec4: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Front Right";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec4_con_hs: endpoint {
+						remote-endpoint = <&typec4_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec4_con_ss: endpoint {
+						remote-endpoint = <&typec4_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	/* front-left */
@@ -35,9 +72,82 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec5: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Front Left";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec5_con_hs: endpoint {
+						remote-endpoint = <&typec5_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec5_con_ss: endpoint {
+						remote-endpoint = <&typec5_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+/* USB controllers on die 1 */
+&dwc3_0_die1 {
+	port {
+		typec4_usb_hs: endpoint {
+			remote-endpoint = <&typec4_con_hs>;
+		};
+	};
+};
+
+&dwc3_1_die1 {
+	port {
+		typec5_usb_hs: endpoint {
+			remote-endpoint = <&typec5_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0_die1 {
+	port {
+		typec4_usb_ss: endpoint {
+			remote-endpoint = <&typec4_con_ss>;
+		};
+	};
+};
+
+&atcphy1_die1 {
+	port {
+		typec5_usb_ss: endpoint {
+			remote-endpoint = <&typec5_con_ss>;
+		};
 	};
 };
 
+/* delete unused USB nodes on die 1 */
+
+/delete-node/ &dwc3_2_dart_0_die1;
+/delete-node/ &dwc3_2_dart_1_die1;
+/delete-node/ &dwc3_2_die1;
+/delete-node/ &atcphy2_die1;
+/delete-node/ &atcphy2_xbar_die1;
+
+/delete-node/ &dwc3_3_dart_0_die1;
+/delete-node/ &dwc3_3_dart_1_die1;
+/delete-node/ &dwc3_3_die1;
+/delete-node/ &atcphy3_die1;
+/delete-node/ &atcphy3_xbar_die1;
+
 /* delete unused always-on power-domains on die 1 */
 
 /delete-node/ &ps_atc2_usb_aon_die1;
@@ -48,3 +158,14 @@
 
 /delete-node/ &ps_disp0_cpu0_die1;
 /delete-node/ &ps_disp0_fe_die1;
+
+&gpu {
+	apple,avg-power-ki-only = <0.6375>;
+	apple,avg-power-kp = <0.58>;
+	apple,avg-power-target-filter-tc = <1>;
+	apple,perf-base-pstate = <3>;
+	apple,ppm-ki = <5.8>;
+	apple,ppm-kp = <0.355>;
+};
+
+#include "hwmon-fan-dual.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t6002.dtsi b/arch/arm64/boot/dts/apple/t6002.dtsi
index a963a5011799a0..331cc49b42994d 100644
--- a/arch/arm64/boot/dts/apple/t6002.dtsi
+++ b/arch/arm64/boot/dts/apple/t6002.dtsi
@@ -11,9 +11,15 @@
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/spmi/spmi.h>
 
 #include "multi-die-cpp.h"
 
+#ifndef GPU_REPEAT
+# define GPU_REPEAT(x) <x x x x x x x x>
+#endif
+
 #include "t600x-common.dtsi"
 
 / {
@@ -234,6 +240,8 @@
 			 <0x5 0x80000000 0x5 0x80000000 0x1 0x80000000>,
 			 <0x7 0x0 0x7 0x0 0xf 0x80000000>;
 		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
 
 		// filled via templated includes at the end of the file
 	};
@@ -245,6 +253,8 @@
 		ranges = <0x2 0x0 0x22 0x0 0x4 0x0>,
 			 <0x7 0x0 0x27 0x0 0xf 0x80000000>;
 		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
 
 		// filled via templated includes at the end of the file
 	};
@@ -295,7 +305,21 @@
 	};
 };
 
+&dcpext0_die1 {
+	// TODO: verify
+	apple,bw-scratch = <&pmgr_dcp 0 4 0x9c0>;
+};
+
+&dcpext1_die1 {
+	// TODO: verify
+	apple,bw-scratch = <&pmgr_dcp 0 4 0x9c8>;
+};
+
 &ps_gfx {
 	// On t6002, the die0 GPU power domain needs both AFR power domains
 	power-domains = <&ps_afr>, <&ps_afr_die1>;
 };
+
+&gpu {
+	compatible = "apple,agx-t6002", "apple,agx-g13x";
+};
diff --git a/arch/arm64/boot/dts/apple/t600x-common.dtsi b/arch/arm64/boot/dts/apple/t600x-common.dtsi
index fa8ead69936366..5db701a508b59d 100644
--- a/arch/arm64/boot/dts/apple/t600x-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-common.dtsi
@@ -11,6 +11,10 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		gpu = &gpu;
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -225,26 +229,31 @@
 			opp-hz = /bits/ 64 <600000000>;
 			opp-level = <1>;
 			clock-latency-ns = <7500>;
+			opp-microwatt = <47296>;
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <972000000>;
 			opp-level = <2>;
 			clock-latency-ns = <23000>;
+			opp-microwatt = <99715>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1332000000>;
 			opp-level = <3>;
 			clock-latency-ns = <29000>;
+			opp-microwatt = <188860>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1704000000>;
 			opp-level = <4>;
 			clock-latency-ns = <40000>;
+			opp-microwatt = <288891>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <2064000000>;
 			opp-level = <5>;
 			clock-latency-ns = <50000>;
+			opp-microwatt = <412979>;
 		};
 	};
 
@@ -255,82 +264,139 @@
 			opp-hz = /bits/ 64 <600000000>;
 			opp-level = <1>;
 			clock-latency-ns = <8000>;
+			opp-microwatt = <290230>;
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <828000000>;
 			opp-level = <2>;
 			clock-latency-ns = <18000>;
+			opp-microwatt = <449013>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1056000000>;
 			opp-level = <3>;
 			clock-latency-ns = <19000>;
+			opp-microwatt = <647097>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1296000000>;
 			opp-level = <4>;
 			clock-latency-ns = <23000>;
+			opp-microwatt = <865620>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <1524000000>;
 			opp-level = <5>;
 			clock-latency-ns = <24000>;
+			opp-microwatt = <1112838>;
 		};
 		opp06 {
 			opp-hz = /bits/ 64 <1752000000>;
 			opp-level = <6>;
 			clock-latency-ns = <28000>;
+			opp-microwatt = <1453271>;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <1980000000>;
 			opp-level = <7>;
 			clock-latency-ns = <31000>;
+			opp-microwatt = <1776667>;
 		};
 		opp08 {
 			opp-hz = /bits/ 64 <2208000000>;
 			opp-level = <8>;
 			clock-latency-ns = <45000>;
+			opp-microwatt = <2366690>;
 		};
 		opp09 {
 			opp-hz = /bits/ 64 <2448000000>;
 			opp-level = <9>;
 			clock-latency-ns = <49000>;
+			opp-microwatt = <2892193>;
 		};
 		opp10 {
 			opp-hz = /bits/ 64 <2676000000>;
 			opp-level = <10>;
 			clock-latency-ns = <53000>;
+			opp-microwatt = <3475417>;
 		};
 		opp11 {
 			opp-hz = /bits/ 64 <2904000000>;
 			opp-level = <11>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <3959410>;
 		};
 		opp12 {
 			opp-hz = /bits/ 64 <3036000000>;
 			opp-level = <12>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <4540620>;
 		};
-		/* Not available until CPU deep sleep is implemented
 		opp13 {
 			opp-hz = /bits/ 64 <3132000000>;
 			opp-level = <13>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <4745031>;
 			turbo-mode;
 		};
 		opp14 {
 			opp-hz = /bits/ 64 <3168000000>;
 			opp-level = <14>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <4822390>;
 			turbo-mode;
 		};
 		opp15 {
 			opp-hz = /bits/ 64 <3228000000>;
 			opp-level = <15>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <4951324>;
 			turbo-mode;
 		};
-		*/
+	};
+
+	gpu_opp: opp-table-gpu {
+		compatible = "operating-points-v2";
+
+		/*
+		 * NOTE: The voltage and power values are device-specific and
+		 * must be filled in by the bootloader.
+		 */
+		opp00 {
+			opp-hz = /bits/ 64 <0>;
+			opp-microvolt = GPU_REPEAT(400000);
+			opp-microwatt = <0>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <388800000>;
+			opp-microvolt = GPU_REPEAT(634000);
+			opp-microwatt = <25011450>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <486000000>;
+			opp-microvolt = GPU_REPEAT(650000);
+			opp-microwatt = <31681170>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <648000000>;
+			opp-microvolt = GPU_REPEAT(668000);
+			opp-microwatt = <41685750>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <777600000>;
+			opp-microvolt = GPU_REPEAT(715000);
+			opp-microwatt = <56692620>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <972000000>;
+			opp-microvolt = GPU_REPEAT(778000);
+			opp-microwatt = <83371500>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1296000000>;
+			opp-microvolt = GPU_REPEAT(903000);
+			opp-microwatt = <166743000>;
+		};
 	};
 
 	pmu-e {
@@ -362,6 +428,47 @@
 		clock-output-names = "clkref";
 	};
 
+	clk_200m: clock-200m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <200000000>;
+		clock-output-names = "clk_200m";
+	};
+
+	clk_disp0: clock-disp0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <237333328>;
+		clock-output-names = "clk_disp0";
+	};
+
+	clk_dispext0: clock-dispext0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext0";
+	};
+
+	clk_dispext0_die1: clock-dispext0_die1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext0_die1";
+	};
+
+	clk_dispext1: clock-dispext1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext1";
+	};
+
+	clk_dispext1_die1: clock-dispext1_die1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext1_die1";
+	};
+
 	/*
 	 * This is a fabulated representation of the input clock
 	 * to NCO since we don't know the true clock tree.
@@ -371,4 +478,22 @@
 		#clock-cells = <0>;
 		clock-output-names = "nco_ref";
 	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
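+		/*
+		 * Zero-sized placeholders for the GPU UAT regions; these are
+		 * assumed to be filled in by the loader.
+		 */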
+		uat_handoff: uat-handoff {
+			reg = <0 0 0 0>;
+		};
+
+		uat_pagetables: uat-pagetables {
+			reg = <0 0 0 0>;
+		};
+
+		uat_ttbs: uat-ttbs {
+			reg = <0 0 0 0>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/apple/t600x-die0.dtsi b/arch/arm64/boot/dts/apple/t600x-die0.dtsi
index b1c875e692c8fb..b213fa2e683fc7 100644
--- a/arch/arm64/boot/dts/apple/t600x-die0.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-die0.dtsi
@@ -24,6 +24,60 @@
 		power-domains = <&ps_aic>;
 	};
 
+	pmgr_misc: power-management@28e20c000 {
+		compatible = "apple,t6000-pmgr-misc";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2 0x8e20c000 0 0x400>,
+			<0x2 0x8e20c800 0 0x400>;
+		reg-names = "fabric-ps", "dcs-ps";
+		apple,dcs-min-ps = <7>;
+	};
+
+	pmgr_dcp: power-management@28e3d0000 {
+		reg = <0x2 0x8e3d0000 0x0 0x4000>;
+		reg-names = "dcp-fw-pmgr";
+		#apple,bw-scratch-cells = <3>;
+	};
+
+	smc_mbox: mbox@290408000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0x90408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 754 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 755 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 756 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 757 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+	};
+
+	smc: smc@290400000 {
+		compatible = "apple,t6000-smc", "apple,smc";
+		reg = <0x2 0x90400000 0x0 0x4000>,
+			<0x2 0x91e00000 0x0 0x100000>;
+		reg-names = "smc", "sram";
+		mboxes = <&smc_mbox>;
+
+		smc_gpio: gpio {
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		smc_rtc: rtc {
+			nvmem-cells = <&rtc_offset>;
+			nvmem-cell-names = "rtc_offset";
+		};
+
+		smc_reboot: reboot {
+			nvmem-cells = <&shutdown_flag>, <&boot_stage>,
+				<&boot_error_count>, <&panic_count>, <&pm_setting>;
+			nvmem-cell-names = "shutdown_flag", "boot_stage",
+				"boot_error_count", "panic_count", "pm_setting";
+		};
+	};
+
 	pinctrl_smc: pinctrl@290820000 {
 		compatible = "apple,t6000-pinctrl", "apple,pinctrl";
 		reg = <0x2 0x90820000 0x0 0x4000>;
@@ -53,22 +107,278 @@
 		interrupts = <AIC_IRQ 0 631 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
-	sio_dart_0: iommu@39b004000 {
+	nub_spmi0: spmi@2920a1300 {
+		compatible = "apple,t6000-spmi", "apple,spmi";
+		reg = <0x2 0x920a1300 0x0 0x100>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmu1: pmu@f {
+			compatible = "apple,maverick-pmu", "apple,spmi-pmu";
+			reg = <0xf SPMI_USID>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			rtc_nvmem@1400 {
+				compatible = "apple,spmi-pmu-nvmem";
+				reg = <0x1400 0x20>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				pm_setting: pm-setting@5 {
+					reg = <0x5 0x1>;
+				};
+
+				rtc_offset: rtc-offset@11 {
+					reg = <0x11 0x6>;
+				};
+			};
+
+			legacy_nvmem@6000 {
+				compatible = "apple,spmi-pmu-nvmem";
+				reg = <0x6000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				boot_stage: boot-stage@1 {
+					reg = <0x1 0x1>;
+				};
+
+				boot_error_count: boot-error-count@2 {
+					reg = <0x2 0x1>;
+					bits = <0 4>;
+				};
+
+				panic_count: panic-count@2 {
+					reg = <0x2 0x1>;
+					bits = <4 4>;
+				};
+
+				boot_error_stage: boot-error-stage@3 {
+					reg = <0x3 0x1>;
+				};
+
+				shutdown_flag: shutdown-flag@f {
+					reg = <0xf 0x1>;
+					bits = <3 1>;
+				};
+			};
+
+			scrpad_nvmem@8000 {
+				compatible = "apple,spmi-pmu-nvmem";
+				reg = <0x8000 0x1000>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				fault_shadow: fault-shadow@67b {
+					reg = <0x67b 0x10>;
+				};
+
+				socd: socd@b00 {
+					reg = <0xb00 0x400>;
+				};
+			};
+		};
+	};
+
+	aop_mbox: mbox@293408000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0x93408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 582 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 583 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 584 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 585 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		status = "disabled";
+	};
+
+	aop_dart: iommu@293808000 {
 		compatible = "apple,t6000-dart";
-		reg = <0x3 0x9b004000 0x0 0x4000>;
+		reg = <0x2 0x93808000 0x0 0x4000>;
+		#iommu-cells = <1>;
 		interrupt-parent = <&aic>;
-		interrupts = <AIC_IRQ 0 1130 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <AIC_IRQ 0 597 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	aop_admac: dma-controller@293980000 {
+		/*
+		 * Use "admac2" until commit "dmaengine: apple-admac: Avoid
+		 * accessing registers in probe" has been upstream for long
+		 * enough (not yet as of 2024-12-30)
+		 */
+		// compatible = "apple,t6000-admac", "apple,admac";
+		compatible = "apple,t6000-admac2", "apple,admac2";
+		reg = <0x2 0x93980000 0x0 0x34000>;
+		#dma-cells = <1>;
+		dma-channels = <16>;
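+		/*
+		 * Only one of the four interrupt outputs is routed to the AIC
+		 * here; the <0> entries are assumed to mark the unconnected
+		 * ones.
+		 */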
+		interrupts-extended = <0>,
+				      <0>,
+				      <&aic AIC_IRQ 0 600 IRQ_TYPE_LEVEL_HIGH>,
+				      <0>;
+		iommus = <&aop_dart 7>;
+		status = "disabled";
+	};
+
+	aop: aop@293c00000 {
+		compatible = "apple,t6000-aop";
+		reg = <0x2 0x93c00000 0x0 0x250000>,
+		      <0x2 0x93400000 0x0 0x6c000>;
+		mboxes = <&aop_mbox>;
+		mbox-names = "mbox";
+		iommus = <&aop_dart 0>;
+
+		/* HACK: ensure probe order */
+		dmas = <&aop_admac 1023>;
+		dma-names = "invalid-order-only";
+
+		status = "disabled";
+
+		aop_audio: audio {
+			dmas = <&aop_admac 1>;
+			dma-names = "dma";
+		};
+
+		aop_als: als {
+			// intentionally empty
+		};
+	};
+
+	disp0_dart: iommu@38b304000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x8b304000 0x0 0x4000>;
 		#iommu-cells = <1>;
-		power-domains = <&ps_sio_cpu>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 821 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+		power-domains = <&ps_disp0_cpu0>;
+		apple,dma-range = <0x0 0x0 0x0 0xfc000000>;
 	};
 
-	sio_dart_1: iommu@39b008000 {
+	dcp_dart: iommu@38b30c000 {
 		compatible = "apple,t6000-dart";
-		reg = <0x3 0x9b008000 0x0 0x8000>;
+		reg = <0x3 0x8b30c000 0x0 0x4000>;
+		#iommu-cells = <1>;
 		interrupt-parent = <&aic>;
-		interrupts = <AIC_IRQ 0 1130 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <AIC_IRQ 0 821 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_disp0_cpu0>;
+		apple,dma-range = <0x1f0 0x0 0x0 0xfc000000>;
+	};
+
+	sep_dart: iommu@3952c0000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x952c0000 0x0 0x4000>;
 		#iommu-cells = <1>;
-		power-domains = <&ps_sio_cpu>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 551 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	sep: sep@396400000 {
+		compatible = "apple,sep";
+		reg = <0x3 0x96400000 0x0 0x6c000>;
+		mboxes = <&sep_mbox>;
+		mbox-names = "mbox";
+		iommus = <&sep_dart 0>;
+		power-domains = <&ps_sep>;
+		status = "disabled";
+	};
+
+	sep_mbox: mbox@396408000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x96408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 545 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 546 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 547 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 548 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+	};
+
+	dpaudio0: audio-controller@39b500000 {
+		compatible = "apple,t6000-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b500000 0x0 0x4000>;
+		dmas = <&sio 0x64>;
+		dma-names = "tx";
+		power-domains = <&ps_dpa0>;
+		reset-domains = <&ps_dpa0>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				dpaudio0_dcp: endpoint {
+					remote-endpoint = <&dcp_audio>;
+				};
+			};
+		};
+	};
+
+	dcp_mbox: mbox@38bc08000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x8bc08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 842 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 843 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 844 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 845 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&ps_disp0_cpu0>;
+	};
+
+	dcp: dcp@38bc00000 {
+		compatible = "apple,t6000-dcp", "apple,dcp";
+		mboxes = <&dcp_mbox>;
+		mbox-names = "mbox";
+		iommus = <&dcp_dart 0>;
+
+		reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+		reg = <0x3 0x8bc00000 0x0 0x4000>,
+			<0x3 0x8a000000 0x0 0x3000000>,
+			<0x3 0x8b320000 0x0 0x4000>,
+			<0x3 0x8b344000 0x0 0x4000>,
+			<0x3 0x8b800000 0x0 0x800000>;
+		apple,bw-scratch = <&pmgr_dcp 0 4 0x988>;
+		power-domains = <&ps_disp0_cpu0>;
+		resets = <&ps_disp0_cpu0>;
+		clocks = <&clk_disp0>;
+		phandle = <&dcp>;
+		// required bus properties for 'piodma' subdevice
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		disp0_piodma: piodma {
+			iommus = <&disp0_dart 4>;
+			phandle = <&disp0_piodma>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				dcp_audio: endpoint {
+					remote-endpoint = <&dpaudio0_dcp>;
+				};
+			};
+		};
+	};
+
+	display: display-subsystem {
+		compatible = "apple,display-subsystem";
+		iommus = <&disp0_dart 0>;
+		/* generate phandle explicitly for use in loader */
+		phandle = <&display>;
 	};
 
 	fpwm0: pwm@39b030000 {
@@ -163,6 +473,34 @@
 		status = "disabled";
 	};
 
+	spi1: spi@39b104000 {
+		compatible = "apple,t6000-spi", "apple,spi";
+		reg = <0x3 0x9b104000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1107 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clk_200m>;
+		pinctrl-0 = <&spi1_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_spi1>;
+		status = "disabled";
+	};
+
+	spi3: spi@39b10c000 {
+		compatible = "apple,t6000-spi", "apple,spi";
+		reg = <0x3 0x9b10c000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1109 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clkref>;
+		pinctrl-0 = <&spi3_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_spi3>;
+		status = "disabled";
+	};
+
 	serial0: serial@39b200000 {
 		compatible = "apple,s5l-uart";
 		reg = <0x3 0x9b200000 0x0 0x1000>;
@@ -207,16 +545,132 @@
 			    "tx2a", "rx2a", "tx2b", "rx2b",
 			    "tx3a", "rx3a", "tx3b", "rx3b";
 		interrupt-parent = <&aic>;
-		interrupts = <AIC_IRQ 0 1112 IRQ_TYPE_LEVEL_HIGH>,
+		interrupts = <AIC_IRQ 0 1111 IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_IRQ 0 1112 IRQ_TYPE_LEVEL_HIGH>,
 			     <AIC_IRQ 0 1113 IRQ_TYPE_LEVEL_HIGH>,
-			     <AIC_IRQ 0 1114 IRQ_TYPE_LEVEL_HIGH>,
-			     <AIC_IRQ 0 1115 IRQ_TYPE_LEVEL_HIGH>;
+			     <AIC_IRQ 0 1114 IRQ_TYPE_LEVEL_HIGH>;
 		power-domains = <&ps_audio_p>, <&ps_mca0>, <&ps_mca1>,
 				<&ps_mca2>, <&ps_mca3>;
 		resets = <&ps_audio_p>;
 		#sound-dai-cells = <1>;
 	};
 
+	gpu: gpu@406400000 {
+		compatible = "apple,agx-g13x";
+		reg = <0x4 0x6400000 0 0x40000>,
+			<0x4 0x4000000 0 0x1000000>;
+		reg-names = "asc", "sgx";
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1044 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1045 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1046 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1047 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1063 IRQ_TYPE_LEVEL_HIGH>;
+		mboxes = <&agx_mbox>;
+		power-domains = <&ps_gfx>;
+		memory-region = <&uat_ttbs>, <&uat_pagetables>, <&uat_handoff>;
+		memory-region-names = "ttbs", "pagetables", "handoff";
+
+		apple,firmware-version = <12 3 0>;
+		apple,firmware-compat = <12 3 0>;
+
+		operating-points-v2 = <&gpu_opp>;
+		apple,perf-base-pstate = <1>;
+		apple,min-sram-microvolt = <790000>;
+		apple,avg-power-filter-tc-ms = <1000>;
+		apple,avg-power-ki-only = <2.4>;
+		apple,avg-power-kp = <1.5>;
+		apple,avg-power-min-duty-cycle = <40>;
+		apple,avg-power-target-filter-tc = <125>;
+		apple,fast-die0-integral-gain = <500.0>;
+		apple,fast-die0-proportional-gain = <72.0>;
+		apple,perf-boost-ce-step = <50>;
+		apple,perf-boost-min-util = <90>;
+		apple,perf-filter-drop-threshold = <0>;
+		apple,perf-filter-time-constant = <5>;
+		apple,perf-filter-time-constant2 = <50>;
+		apple,perf-integral-gain = <6.3>;
+		apple,perf-integral-gain2 = <0.197392>;
+		apple,perf-integral-min-clamp = <0>;
+		apple,perf-proportional-gain = <15.75>;
+		apple,perf-proportional-gain2 = <6.853981>;
+		apple,perf-tgt-utilization = <85>;
+		apple,power-sample-period = <8>;
+		apple,ppm-filter-time-constant-ms = <100>;
+		apple,ppm-ki = <30.0>;
+		apple,ppm-kp = <1.5>;
+		apple,pwr-filter-time-constant = <313>;
+		apple,pwr-integral-gain = <0.0202129>;
+		apple,pwr-integral-min-clamp = <0>;
+		apple,pwr-min-duty-cycle = <40>;
+		apple,pwr-proportional-gain = <5.2831855>;
+
+		apple,core-leak-coef = GPU_REPEAT(1200.0);
+		apple,sram-leak-coef = GPU_REPEAT(20.0);
+	};
+
+	agx_mbox: mbox@406408000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x4 0x6408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1059 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1060 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1061 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1062 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+	};
+
+	isp_dart0: iommu@3860e8000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x860e8000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 543 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>;
+		status = "disabled";
+	};
+
+	isp_dart1: iommu@3860f4000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x860f4000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 543 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>;
+		status = "disabled";
+	};
+
+	isp_dart2: iommu@3860fc000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x860fc000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 543 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>;
+		status = "disabled";
+	};
+
+	isp: isp@384000000 {
+		compatible = "apple,t6000-isp", "apple,isp";
+		iommus = <&isp_dart0 0>, <&isp_dart1 0>, <&isp_dart2 0>;
+		reg-names = "coproc", "mbox", "gpio", "mbox2";
+		reg = <0x3 0x84000000 0x0 0x2000000>,
+			<0x3 0x86104000 0x0 0x100>,
+			<0x3 0x86104170 0x0 0x100>,
+			<0x3 0x861043f0 0x0 0x100>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 538 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>, <&ps_isp_set0>,
+			<&ps_isp_set1>, <&ps_isp_fe>, <&ps_isp_set3>,
+			<&ps_isp_set4>, <&ps_isp_set5>, <&ps_isp_set6>,
+			<&ps_isp_set7>, <&ps_isp_set8>;
+		apple,dart-vm-size = <0x0 0xa0000000>;
+
+		status = "disabled";
+	};
+
 	pcie0_dart_0: iommu@581008000 {
 		compatible = "apple,t6000-dart";
 		reg = <0x5 0x81008000 0x0 0x4000>;
@@ -294,6 +748,8 @@
 		pinctrl-0 = <&pcie_pins>;
 		pinctrl-names = "default";
 
+		dma-coherent;
+
 		port00: pci@0,0 {
 			device_type = "pci";
 			reg = <0x0 0x0 0x0 0x0 0x0>;
diff --git a/arch/arm64/boot/dts/apple/t600x-dieX.dtsi b/arch/arm64/boot/dts/apple/t600x-dieX.dtsi
index a32ff0c9d7b0c2..a68c4b739287b8 100644
--- a/arch/arm64/boot/dts/apple/t600x-dieX.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-dieX.dtsi
@@ -24,6 +24,160 @@
 		#performance-domain-cells = <0>;
 	};
 
+	DIE_NODE(dispext0_dart): iommu@289304000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x2 0x89304000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 873 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		apple,dma-range = <0x0 0x0 0x0 0xfc000000>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext0_dart): iommu@28930c000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x2 0x8930c000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 873 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		apple,dma-range = <0x1f0 0x0 0x0 0xfc000000>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext0_mbox): mbox@289c08000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0x89c08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 894 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 895 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 896 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 897 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext0_cpu0)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext0): dcp@289c00000 {
+		compatible = "apple,t6000-dcpext", "apple,dcpext";
+		mboxes = <&DIE_NODE(dcpext0_mbox)>;
+		mbox-names = "mbox";
+		iommus = <&DIE_NODE(dcpext0_dart) 0>;
+
+		reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+		reg = <0x2 0x89c00000 0x0 0x4000>,
+			<0x2 0x88000000 0x0 0x3000000>,
+			<0x2 0x89320000 0x0 0x4000>,
+			<0x2 0x89344000 0x0 0x4000>,
+			<0x2 0x89800000 0x0 0x800000>;
+		apple,bw-scratch = <&pmgr_dcp 0 4 0x990>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext0_cpu0)>;
+		clocks = <&DIE_NODE(clk_dispext0)>;
+		phandle = <&DIE_NODE(dcpext0)>;
+		apple,dcp-index = <1>;
+		status = "disabled";
+		// required bus properties for 'piodma' subdevice
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		piodma {
+			iommus = <&DIE_NODE(dispext0_dart) 4>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dcpext0_audio): endpoint {
+					remote-endpoint = <&DIE_NODE(dpaudio1_dcp)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(dispext1_dart): iommu@28c304000 {
+		compatible = "apple,t6000-dart", "apple,t8110-dart";
+		reg = <0x2 0x8c304000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 909 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		apple,dma-range = <0x0 0x0 0x0 0xfc000000>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext1_dart): iommu@28c30c000 {
+		compatible = "apple,t6000-dart", "apple,t8110-dart";
+		reg = <0x2 0x8c30c000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 909 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		apple,dma-range = <0x1f0 0x0 0x0 0xfc000000>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext1_mbox): mbox@28cc08000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0x8cc08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 930 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 931 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 932 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 933 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext1_cpu0)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext1): dcp@28cc00000 {
+		compatible = "apple,t6000-dcpext", "apple,dcpext";
+		mboxes = <&DIE_NODE(dcpext1_mbox)>;
+		mbox-names = "mbox";
+		iommus = <&DIE_NODE(dcpext1_dart) 0>;
+
+		reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+		reg = <0x2 0x8cc00000 0x0 0x4000>,
+			<0x2 0x8b000000 0x0 0x3000000>,
+			<0x2 0x8c320000 0x0 0x4000>,
+			<0x2 0x8c344000 0x0 0x4000>,
+			<0x2 0x8c800000 0x0 0x800000>;
+		apple,bw-scratch = <&pmgr_dcp 0 4 0x998>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext1_cpu0)>;
+		clocks = <&DIE_NODE(clk_dispext1)>;
+		phandle = <&DIE_NODE(dcpext1)>;
+		apple,dcp-index = <2>;
+		status = "disabled";
+		// required bus properties for 'piodma' subdevice
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		piodma {
+			iommus = <&DIE_NODE(dispext1_dart) 4>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dcpext1_audio): endpoint {
+					remote-endpoint = <&DIE_NODE(dpaudio2_dcp)>;
+				};
+			};
+		};
+	};
+
 	DIE_NODE(pmgr): power-management@28e080000 {
 		compatible = "apple,t6000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
 		#address-cells = <1>;
@@ -74,6 +228,193 @@
 		reg = <0x2 0x92280000 0 0x4000>;
 	};
 
+	DIE_NODE(efuse): efuse@2922bc000 {
+		compatible = "apple,t6000-efuses", "apple,efuses";
+		reg = <0x2 0x922bc000 0x0 0x2000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
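+		/*
+		 * Each cell below selects <bit-offset number-of-bits> within
+		 * the fuse word(s) addressed by its reg entry, per the
+		 * standard nvmem cell binding.
+		 */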
+		DIE_NODE(atcphy0_auspll_rodco_bias_adjust): efuse@a10,22 {
+			reg = <0xa10 4>;
+			bits = <22 3>;
+		};
+
+		DIE_NODE(atcphy0_auspll_rodco_encap): efuse@a10,25 {
+			reg = <0xa10 4>;
+			bits = <25 2>;
+		};
+
+		DIE_NODE(atcphy0_auspll_dtc_vreg_adjust): efuse@a10,27 {
+			reg = <0xa10 4>;
+			bits = <27 3>;
+		};
+
+		DIE_NODE(atcphy0_auspll_fracn_dll_start_capcode): efuse@a10,30 {
+			reg = <0xa10 4>;
+			bits = <30 2>;
+		};
+
+		DIE_NODE(atcphy0_aus_cmn_shm_vreg_trim): efuse@a14,0 {
+			reg = <0xa14 4>;
+			bits = <0 5>;
+		};
+
+		DIE_NODE(atcphy0_cio3pll_dco_coarsebin0): efuse@a14,5 {
+			reg = <0xa14 4>;
+			bits = <5 6>;
+		};
+
+		DIE_NODE(atcphy0_cio3pll_dco_coarsebin1): efuse@a14,11 {
+			reg = <0xa14 4>;
+			bits = <11 6>;
+		};
+
+		DIE_NODE(atcphy0_cio3pll_dll_start_capcode): efuse@a14,17 {
+			reg = <0xa14 4>;
+			bits = <17 2>;
+		};
+
+		DIE_NODE(atcphy0_cio3pll_dtc_vreg_adjust): efuse@a14,19 {
+			reg = <0xa14 4>;
+			bits = <19 3>;
+		};
+
+		DIE_NODE(atcphy1_auspll_rodco_bias_adjust): efuse@a18,0 {
+			reg = <0xa18 4>;
+			bits = <0 3>;
+		};
+
+		DIE_NODE(atcphy1_auspll_rodco_encap): efuse@a18,3 {
+			reg = <0xa18 4>;
+			bits = <3 2>;
+		};
+
+		DIE_NODE(atcphy1_auspll_dtc_vreg_adjust): efuse@a18,5 {
+			reg = <0xa18 4>;
+			bits = <5 3>;
+		};
+
+		DIE_NODE(atcphy1_auspll_fracn_dll_start_capcode): efuse@a18,8 {
+			reg = <0xa18 4>;
+			bits = <8 2>;
+		};
+
+		DIE_NODE(atcphy1_aus_cmn_shm_vreg_trim): efuse@a18,10 {
+			reg = <0xa18 4>;
+			bits = <10 5>;
+		};
+
+		DIE_NODE(atcphy1_cio3pll_dco_coarsebin0): efuse@a18,15 {
+			reg = <0xa18 4>;
+			bits = <15 6>;
+		};
+
+		DIE_NODE(atcphy1_cio3pll_dco_coarsebin1): efuse@a18,21 {
+			reg = <0xa18 4>;
+			bits = <21 6>;
+		};
+
+		DIE_NODE(atcphy1_cio3pll_dll_start_capcode): efuse@a18,27 {
+			reg = <0xa18 4>;
+			bits = <27 2>;
+		};
+
+		DIE_NODE(atcphy1_cio3pll_dtc_vreg_adjust): efuse@a18,29 {
+			reg = <0xa18 4>;
+			bits = <29 3>;
+		};
+
+		DIE_NODE(atcphy2_auspll_rodco_bias_adjust): efuse@a1c,10 {
+			reg = <0xa1c 4>;
+			bits = <10 3>;
+		};
+
+		DIE_NODE(atcphy2_auspll_rodco_encap): efuse@a1c,13 {
+			reg = <0xa1c 4>;
+			bits = <13 2>;
+		};
+
+		DIE_NODE(atcphy2_auspll_dtc_vreg_adjust): efuse@a1c,15 {
+			reg = <0xa1c 4>;
+			bits = <15 3>;
+		};
+
+		DIE_NODE(atcphy2_auspll_fracn_dll_start_capcode): efuse@a1c,18 {
+			reg = <0xa1c 4>;
+			bits = <18 2>;
+		};
+
+		DIE_NODE(atcphy2_aus_cmn_shm_vreg_trim): efuse@a1c,20 {
+			reg = <0xa1c 4>;
+			bits = <20 5>;
+		};
+
+		DIE_NODE(atcphy2_cio3pll_dco_coarsebin0): efuse@a1c,25 {
+			reg = <0xa1c 4>;
+			bits = <25 6>;
+		};
+
+		DIE_NODE(atcphy2_cio3pll_dco_coarsebin1): efuse@a1c,31 {
+			reg = <0xa1c 8>;
+			bits = <31 6>;
+		};
+
+		DIE_NODE(atcphy2_cio3pll_dll_start_capcode): efuse@a20,5 {
+			reg = <0xa20 4>;
+			bits = <5 2>;
+		};
+
+		DIE_NODE(atcphy2_cio3pll_dtc_vreg_adjust): efuse@a20,7 {
+			reg = <0xa20 4>;
+			bits = <7 3>;
+		};
+
+		DIE_NODE(atcphy3_auspll_rodco_bias_adjust): efuse@a20,20 {
+			reg = <0xa20 4>;
+			bits = <20 3>;
+		};
+
+		DIE_NODE(atcphy3_auspll_rodco_encap): efuse@a20,23 {
+			reg = <0xa20 4>;
+			bits = <23 2>;
+		};
+
+		DIE_NODE(atcphy3_auspll_dtc_vreg_adjust): efuse@a20,25 {
+			reg = <0xa20 4>;
+			bits = <25 3>;
+		};
+
+		DIE_NODE(atcphy3_auspll_fracn_dll_start_capcode): efuse@a20,28 {
+			reg = <0xa20 4>;
+			bits = <28 2>;
+		};
+
+		DIE_NODE(atcphy3_aus_cmn_shm_vreg_trim): efuse@a20,30 {
+			reg = <0xa20 8>;
+			bits = <30 5>;
+		};
+
+		DIE_NODE(atcphy3_cio3pll_dco_coarsebin0): efuse@a24,3 {
+			reg = <0xa24 4>;
+			bits = <3 6>;
+		};
+
+		DIE_NODE(atcphy3_cio3pll_dco_coarsebin1): efuse@a24,9 {
+			reg = <0xa24 4>;
+			bits = <9 6>;
+		};
+
+		DIE_NODE(atcphy3_cio3pll_dll_start_capcode): efuse@a24,15 {
+			reg = <0xa24 4>;
+			bits = <15 2>;
+		};
+
+		DIE_NODE(atcphy3_cio3pll_dtc_vreg_adjust): efuse@a24,17 {
+			reg = <0xa24 4>;
+			bits = <17 3>;
+		};
+	};
+
 	DIE_NODE(pinctrl_aop): pinctrl@293820000 {
 		compatible = "apple,t6000-pinctrl", "apple,pinctrl";
 		reg = <0x2 0x93820000 0x0 0x4000>;
@@ -95,6 +436,24 @@
 				<AIC_IRQ DIE_NO 573 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
+	DIE_NODE(sio_dart_0): iommu@39b004000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x9b004000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1130 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		power-domains = <&DIE_NODE(ps_sio_cpu)>;
+	};
+
+	DIE_NODE(sio_dart_1): iommu@39b008000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x3 0x9b008000 0x0 0x8000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1130 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		power-domains = <&DIE_NODE(ps_sio_cpu)>;
+	};
+
 	DIE_NODE(pinctrl_ap): pinctrl@39b028000 {
 		compatible = "apple,t6000-pinctrl", "apple,pinctrl";
 		reg = <0x3 0x9b028000 0x0 0x4000>;
@@ -119,3 +478,441 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 	};
+
+	DIE_NODE(sio_mbox): mbox@39bc08000 {
+		compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x9bc08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1147 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1148 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1149 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1150 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&DIE_NODE(ps_sio_cpu)>;
+	};
+
+	DIE_NODE(sio): sio@39bc00000 {
+		compatible = "apple,t6000-sio", "apple,sio";
+		reg = <0x3 0x9bc00000 0x0 0x8000>;
+		dma-channels = <128>;
+		#dma-cells = <1>;
+		mboxes = <&DIE_NODE(sio_mbox)>;
+		iommus = <&DIE_NODE(sio_dart_0) 0>, <&DIE_NODE(sio_dart_1) 0>;
+		power-domains = <&DIE_NODE(ps_sio_cpu)>;
+		resets = <&DIE_NODE(ps_sio)>; /* TODO: verify reset does something */
+		status = "disabled";
+	};
+
+	DIE_NODE(dpaudio1): audio-controller@39b504000 {
+		compatible = "apple,t6000-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b540000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x66>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa1)>;
+		reset-domains = <&DIE_NODE(ps_dpa1)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio1_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext0_audio)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(dpaudio2): audio-controller@39b508000 {
+		compatible = "apple,t6000-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b580000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x68>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa2)>;
+		reset-domains = <&DIE_NODE(ps_dpa2)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio2_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext1_audio)>;
+				};
+			};
+		};
+	};
+
+	/*
+	 * omit dpaudio3 / 4 as long as the linked dcpext nodes don't exist
+	 *
+	DIE_NODE(dpaudio3): audio-controller@39b50c000 {
+		compatible = "apple,t6000-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b5c0000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x6a>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa3)>;
+		reset-domains = <&DIE_NODE(ps_dpa3)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio3_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext2_audio)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(dpaudio4): audio-controller@39b510000 {
+		compatible = "apple,t6000-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b500000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x6c>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa4)>;
+		reset-domains = <&DIE_NODE(ps_dpa4)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio4_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext3_audio)>;
+				};
+			};
+		};
+	};
+	*/
+
+	DIE_NODE(dwc3_0_dart_0): iommu@702f00000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x7 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1194 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_0_dart_1): iommu@702f80000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x7 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1194 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_0): usb@702280000 {
+		compatible = "apple,t6000-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0x7 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1190 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_0_dart_0) 0>,
+			<&DIE_NODE(dwc3_0_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy0)>;
+		phys = <&DIE_NODE(atcphy0) PHY_TYPE_USB2>, <&DIE_NODE(atcphy0) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy0): phy@703000000 {
+		compatible = "apple,t6000-atcphy", "apple,t8103-atcphy";
+		reg = <0x7 0x03000000 0x0 0x4c000>,
+			<0x7 0x03050000 0x0 0x8000>,
+			<0x7 0x00000000 0x0 0x4000>,
+			<0x7 0x02a90000 0x0 0x4000>,
+			<0x7 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		nvmem-cells = <&DIE_NODE(atcphy0_aus_cmn_shm_vreg_trim)>,
+			<&DIE_NODE(atcphy0_auspll_rodco_encap)>,
+			<&DIE_NODE(atcphy0_auspll_rodco_bias_adjust)>,
+			<&DIE_NODE(atcphy0_auspll_fracn_dll_start_capcode)>,
+			<&DIE_NODE(atcphy0_auspll_dtc_vreg_adjust)>,
+			<&DIE_NODE(atcphy0_cio3pll_dco_coarsebin0)>,
+			<&DIE_NODE(atcphy0_cio3pll_dco_coarsebin1)>,
+			<&DIE_NODE(atcphy0_cio3pll_dll_start_capcode)>,
+			<&DIE_NODE(atcphy0_cio3pll_dtc_vreg_adjust)>;
+		nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+			"auspll_rodco_encap",
+			"auspll_rodco_bias_adjust",
+			"auspll_fracn_dll_start_capcode",
+			"auspll_dtc_vreg_adjust",
+			"cio3pll_dco_coarsebin0",
+			"cio3pll_dco_coarsebin1",
+			"cio3pll_dll_start_capcode",
+			"cio3pll_dtc_vreg_adjust";
+
+		orientation-switch;
+		mode-switch;
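+		/* Alt Mode SVIDs: 0xff01 is DisplayPort, 0x8087 is Thunderbolt */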
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+	};
+
+	DIE_NODE(atcphy0_xbar): mux@70304c000 {
+		compatible = "apple,t6000-display-crossbar";
+		reg = <0x7 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dwc3_1_dart_0): iommu@b02f00000 {
+		compatible = "apple,t6000-dart";
+		reg = <0xb 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1211 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_1_dart_1): iommu@b02f80000 {
+		compatible = "apple,t6000-dart";
+		reg = <0xb 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1211 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_1): usb@b02280000 {
+		compatible = "apple,t6000-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0xb 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1207 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_1_dart_0) 0>,
+			<&DIE_NODE(dwc3_1_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy1)>;
+		phys = <&DIE_NODE(atcphy1) PHY_TYPE_USB2>, <&DIE_NODE(atcphy1) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy1): phy@b03000000 {
+		compatible = "apple,t6000-atcphy", "apple,t8103-atcphy";
+		reg = <0xb 0x03000000 0x0 0x4c000>,
+			<0xb 0x03050000 0x0 0x8000>,
+			<0xb 0x00000000 0x0 0x4000>,
+			<0xb 0x02a90000 0x0 0x4000>,
+			<0xb 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		nvmem-cells = <&DIE_NODE(atcphy1_aus_cmn_shm_vreg_trim)>,
+			<&DIE_NODE(atcphy1_auspll_rodco_encap)>,
+			<&DIE_NODE(atcphy1_auspll_rodco_bias_adjust)>,
+			<&DIE_NODE(atcphy1_auspll_fracn_dll_start_capcode)>,
+			<&DIE_NODE(atcphy1_auspll_dtc_vreg_adjust)>,
+			<&DIE_NODE(atcphy1_cio3pll_dco_coarsebin0)>,
+			<&DIE_NODE(atcphy1_cio3pll_dco_coarsebin1)>,
+			<&DIE_NODE(atcphy1_cio3pll_dll_start_capcode)>,
+			<&DIE_NODE(atcphy1_cio3pll_dtc_vreg_adjust)>;
+		nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+			"auspll_rodco_encap",
+			"auspll_rodco_bias_adjust",
+			"auspll_fracn_dll_start_capcode",
+			"auspll_dtc_vreg_adjust",
+			"cio3pll_dco_coarsebin0",
+			"cio3pll_dco_coarsebin1",
+			"cio3pll_dll_start_capcode",
+			"cio3pll_dtc_vreg_adjust";
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+	};
+
+	DIE_NODE(atcphy1_xbar): mux@b0304c000 {
+		compatible = "apple,t6000-display-crossbar";
+		reg = <0xb 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dwc3_2_dart_0): iommu@f02f00000 {
+		compatible = "apple,t6000-dart";
+		reg = <0xf 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1228 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_2_dart_1): iommu@f02f80000 {
+		compatible = "apple,t6000-dart";
+		reg = <0xf 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1228 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_2): usb@f02280000 {
+		compatible = "apple,t6000-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0xf 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1224 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_2_dart_0) 0>,
+			<&DIE_NODE(dwc3_2_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy2)>;
+		phys = <&DIE_NODE(atcphy2) PHY_TYPE_USB2>, <&DIE_NODE(atcphy2) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy2): phy@f03000000 {
+		compatible = "apple,t6000-atcphy", "apple,t8103-atcphy";
+		reg = <0xf 0x03000000 0x0 0x4c000>,
+			<0xf 0x03050000 0x0 0x8000>,
+			<0xf 0x00000000 0x0 0x4000>,
+			<0xf 0x02a90000 0x0 0x4000>,
+			<0xf 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		nvmem-cells = <&DIE_NODE(atcphy2_aus_cmn_shm_vreg_trim)>,
+			<&DIE_NODE(atcphy2_auspll_rodco_encap)>,
+			<&DIE_NODE(atcphy2_auspll_rodco_bias_adjust)>,
+			<&DIE_NODE(atcphy2_auspll_fracn_dll_start_capcode)>,
+			<&DIE_NODE(atcphy2_auspll_dtc_vreg_adjust)>,
+			<&DIE_NODE(atcphy2_cio3pll_dco_coarsebin0)>,
+			<&DIE_NODE(atcphy2_cio3pll_dco_coarsebin1)>,
+			<&DIE_NODE(atcphy2_cio3pll_dll_start_capcode)>,
+			<&DIE_NODE(atcphy2_cio3pll_dtc_vreg_adjust)>;
+		nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+			"auspll_rodco_encap",
+			"auspll_rodco_bias_adjust",
+			"auspll_fracn_dll_start_capcode",
+			"auspll_dtc_vreg_adjust",
+			"cio3pll_dco_coarsebin0",
+			"cio3pll_dco_coarsebin1",
+			"cio3pll_dll_start_capcode",
+			"cio3pll_dtc_vreg_adjust";
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+	};
+
+	DIE_NODE(atcphy2_xbar): mux@f0304c000 {
+		compatible = "apple,t6000-display-crossbar";
+		reg = <0xf 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dwc3_3_dart_0): iommu@1302f00000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x13 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1245 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_3_dart_1): iommu@1302f80000 {
+		compatible = "apple,t6000-dart";
+		reg = <0x13 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1245 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_3): usb@1302280000 {
+		compatible = "apple,t6000-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0x13 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1241 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_3_dart_0) 0>,
+			<&DIE_NODE(dwc3_3_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy3)>;
+		phys = <&DIE_NODE(atcphy3) PHY_TYPE_USB2>, <&DIE_NODE(atcphy3) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy3): phy@1303000000 {
+		compatible = "apple,t6000-atcphy", "apple,t8103-atcphy";
+		reg = <0x13 0x03000000 0x0 0x4c000>,
+			<0x13 0x03050000 0x0 0x8000>,
+			<0x13 0x00000000 0x0 0x4000>,
+			<0x13 0x02a90000 0x0 0x4000>,
+			<0x13 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		nvmem-cells = <&DIE_NODE(atcphy3_aus_cmn_shm_vreg_trim)>,
+			<&DIE_NODE(atcphy3_auspll_rodco_encap)>,
+			<&DIE_NODE(atcphy3_auspll_rodco_bias_adjust)>,
+			<&DIE_NODE(atcphy3_auspll_fracn_dll_start_capcode)>,
+			<&DIE_NODE(atcphy3_auspll_dtc_vreg_adjust)>,
+			<&DIE_NODE(atcphy3_cio3pll_dco_coarsebin0)>,
+			<&DIE_NODE(atcphy3_cio3pll_dco_coarsebin1)>,
+			<&DIE_NODE(atcphy3_cio3pll_dll_start_capcode)>,
+			<&DIE_NODE(atcphy3_cio3pll_dtc_vreg_adjust)>;
+		nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+			"auspll_rodco_encap",
+			"auspll_rodco_bias_adjust",
+			"auspll_fracn_dll_start_capcode",
+			"auspll_dtc_vreg_adjust",
+			"cio3pll_dco_coarsebin0",
+			"cio3pll_dco_coarsebin1",
+			"cio3pll_dll_start_capcode",
+			"cio3pll_dtc_vreg_adjust";
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+	};
+
+	DIE_NODE(atcphy3_xbar): mux@130304c000 {
+		compatible = "apple,t6000-display-crossbar";
+		reg = <0x13 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		status = "disabled";
+	};
diff --git a/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi b/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi
index b31f1a7a2b3fc3..1a994c3c1b79f0 100644
--- a/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi
@@ -36,6 +36,20 @@
 			<APPLE_PINMUX(101, 1)>;
 	};
 
+	spi1_pins: spi1-pins {
+		pinmux = <APPLE_PINMUX(10, 1)>,
+			<APPLE_PINMUX(11, 1)>,
+			<APPLE_PINMUX(32, 1)>,
+			<APPLE_PINMUX(33, 1)>;
+	};
+
+	spi3_pins: spi3-pins {
+		pinmux = <APPLE_PINMUX(52, 1)>,
+			<APPLE_PINMUX(53, 1)>,
+			<APPLE_PINMUX(54, 1)>,
+			<APPLE_PINMUX(55, 1)>;
+	};
+
 	pcie_pins: pcie-pins {
 		pinmux = <APPLE_PINMUX(0, 1)>,
 				<APPLE_PINMUX(1, 1)>,
diff --git a/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi b/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
index 2e471dfe43cf88..ada719f244c0a1 100644
--- a/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
@@ -13,7 +13,17 @@
 
 / {
 	aliases {
+		atcphy0 = &atcphy0;
+		atcphy1 = &atcphy1;
+		atcphy2 = &atcphy2;
+		atcphy3 = &atcphy3;
+		bluetooth0 = &bluetooth0;
+		dcp = &dcp;
+		dcpext0 = &dcpext0;
+		disp0 = &display;
+		disp0_piodma = &disp0_piodma;
 		serial0 = &serial0;
+		sio = &sio;
 		wifi0 = &wifi0;
 	};
 
@@ -29,9 +39,18 @@
 			reg = <0 0 0 0>; /* To be filled by loader */
 			/* Format properties will be added by loader */
 			status = "disabled";
+			panel-dimensions = <&panel>;
+			power-domains = <&ps_disp0_cpu0>;
 		};
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		/* To be filled by loader */
+	};
+
 	memory@10000000000 {
 		device_type = "memory";
 		reg = <0x100 0 0x2 0>; /* To be filled by loader */
@@ -54,6 +73,64 @@
 	status = "okay";
 };
 
+&dcp {
+	panel: panel {
+		apple,max-brightness = <500>;
+	};
+};
+
+&display {
+	iommus = <&disp0_dart 0>, <&dispext0_dart 0>;
+};
+
+&dispext0_dart {
+	status = "okay";
+};
+
+&dcpext0_dart {
+	status = "okay";
+};
+
+&dcpext0_mbox {
+	status = "okay";
+};
+
+&dcpext0 {
+	/* enabled by the loader */
+	apple,connector-type = "HDMI-A";
+
+	/* HDMI HPD gpio, used as interrupt */
+	hdmi-hpd-gpios = <&pinctrl_nub 15 GPIO_ACTIVE_HIGH>;
+
+	hdmi-pwren-gpios = <&smc_gpio 23 GPIO_ACTIVE_HIGH>;
+	dp2hdmi-pwren-gpios = <&smc_gpio 6 GPIO_ACTIVE_HIGH>;
+
+	phys = <&atcphy3 PHY_TYPE_DP>;
+	phy-names = "dp-phy";
+	mux-controls = <&atcphy3_xbar 0>;
+	mux-control-names = "dp-xbar";
+	mux-index = <0>;
+	apple,dptx-phy = <3>;
+};
+
+/* remove once m1n1 enables sio nodes after setup */
+&sio {
+	status = "okay";
+};
+
+&dpaudio1 {
+	status = "okay";
+};
+
+&atcphy3 {
+	apple,mode-fixed-dp;
+};
+
+&atcphy3_xbar {
+	status = "okay";
+};
+
 /* USB Type C */
 &i2c0 {
 	hpm0: usb-pd@38 {
@@ -62,6 +139,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec0: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Left Rear";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec0_con_hs: endpoint {
+						remote-endpoint = <&typec0_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec0_con_ss: endpoint {
+						remote-endpoint = <&typec0_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm1: usb-pd@3f {
@@ -70,6 +171,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec1: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Left Front";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec1_con_hs: endpoint {
+						remote-endpoint = <&typec1_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec1_con_ss: endpoint {
+						remote-endpoint = <&typec1_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm2: usb-pd@3b {
@@ -78,6 +203,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec2: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Right";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec2_con_hs: endpoint {
+						remote-endpoint = <&typec2_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec2_con_ss: endpoint {
+						remote-endpoint = <&typec2_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	/* MagSafe port */
@@ -90,24 +239,159 @@
 	};
 };
 
+/* Virtual regulator representing the shared shutdown GPIO */
+/ {
+	speaker_sdz: fixed-regulator-sn012776-sdz {
+		compatible = "regulator-fixed";
+		regulator-name = "sn012776-sdz";
+		startup-delay-us = <5000>;
+		gpios = <&pinctrl_ap 178 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+
+	speaker_left_tweet: codec@3a {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3a>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Tweeter";
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
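+		/* TDM TX slots carrying the amp's current/voltage sense feedback */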
+		ti,imon-slot-no = <8>;
+		ti,vmon-slot-no = <10>;
+	};
+
+	speaker_left_woof1: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Woofer 1";
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,sdout-force-zero-mask = <0xf0f0f0>;
+	};
+
+	speaker_left_woof2: codec@39 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x39>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Woofer 2";
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <16>;
+		ti,vmon-slot-no = <18>;
+	};
+};
+
+&i2c2 {
+	status = "okay";
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 4 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 180 IRQ_TYPE_LEVEL_LOW>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&i2c3 {
+	status = "okay";
+
+	speaker_right_tweet: codec@3d {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3d>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Tweeter";
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <12>;
+		ti,vmon-slot-no = <14>;
+	};
+
+	speaker_right_woof1: codec@3b {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3b>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Woofer 1";
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+		ti,sdout-force-zero-mask = <0x0f0f0f>;
+	};
+
+	speaker_right_woof2: codec@3c {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3c>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Woofer 2";
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <20>;
+		ti,vmon-slot-no = <22>;
+	};
+};
+
 &nco_clkref {
 	clock-frequency = <1068000000>;
 };
 
+#ifndef NO_SPI_TRACKPAD
+&spi3 {
+	status = "okay";
+
+	hid-transport@0 {
+		compatible = "apple,spi-hid-transport";
+		reg = <0>;
+		spi-max-frequency = <8000000>;
+		/*
+		 * Apple's ADT specifies 20us CS change delays, and the
+		 * SPI HID interface metadata specifies 45us. Neither value
+		 * alone is reliable, but adding both works, so the best
+		 * guess is that they are cumulative.
+		 */
+		spi-cs-setup-delay-ns = <65000>;
+		spi-cs-hold-delay-ns = <65000>;
+		spi-cs-inactive-delay-ns = <250000>;
+		spien-gpios = <&pinctrl_ap 194 0>;
+		interrupts-extended = <&pinctrl_nub 6 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+#endif
+
 /* PCIe devices */
 &port00 {
 	/* WLAN */
 	bus-range = <1 1>;
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
 	wifi0: wifi@0,0 {
+		compatible = "pci14e4,4433";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
 		/* To be filled by the loader */
 		local-mac-address = [00 10 18 00 00 10];
+		apple,antenna-sku = "XX";
+	};
+
+	bluetooth0: network@0,1 {
+		compatible = "pci14e4,5f71";
+		reg = <0x10100 0x0 0x0 0x0 0x0>;
+		/* To be filled by the loader */
+		local-bd-address = [00 00 00 00 00 00];
 	};
 };
 
 &port01 {
 	/* SD card reader */
 	bus-range = <2 2>;
+	pwren-gpios = <&smc_gpio 26 GPIO_ACTIVE_HIGH>;
+	status = "okay";
 	sdhci0: mmc@0,0 {
 		compatible = "pci17a0,9755";
 		reg = <0x20000 0x0 0x0 0x0 0x0>;
@@ -119,3 +403,139 @@
 &fpwm0 {
 	status = "okay";
 };
+
+&pcie0_dart_1 {
+	status = "okay";
+};
+
+/* USB controllers */
+&dwc3_0 {
+	port {
+		typec0_usb_hs: endpoint {
+			remote-endpoint = <&typec0_con_hs>;
+		};
+	};
+};
+
+&dwc3_1 {
+	port {
+		typec1_usb_hs: endpoint {
+			remote-endpoint = <&typec1_con_hs>;
+		};
+	};
+};
+
+&dwc3_2 {
+	port {
+		typec2_usb_hs: endpoint {
+			remote-endpoint = <&typec2_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0 {
+	port {
+		typec0_usb_ss: endpoint {
+			remote-endpoint = <&typec0_con_ss>;
+		};
+	};
+};
+
+&atcphy1 {
+	port {
+		typec1_usb_ss: endpoint {
+			remote-endpoint = <&typec1_con_ss>;
+		};
+	};
+};
+
+&atcphy2 {
+	port {
+		typec2_usb_ss: endpoint {
+			remote-endpoint = <&typec2_con_ss>;
+		};
+	};
+};
+
+/* ATC3 is used for DisplayPort -> HDMI only */
+&dwc3_3_dart_0 {
+	status = "disabled";
+};
+
+&dwc3_3_dart_1 {
+	status = "disabled";
+};
+
+&dwc3_3 {
+	status = "disabled";
+};
+/*
+ * Delete the unused dwc3_3 node to prevent dt_disable_missing_devs() from
+ * disabling atcphy3 via phandle references from a disabled device.
+ */
+/delete-node/ &dwc3_3;
+
+&ps_atc3_usb_aon {
+	/delete-property/ apple,always-on;
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+/ {
+	sound: sound {
+		/* compatible is set per machine */
+
+		dai-link@0 {
+			link-name = "Speakers";
+
+			cpu {
+				sound-dai = <&mca 0>, <&mca 1>;
+			};
+			codec {
+				sound-dai = <&speaker_left_woof1>,
+					    <&speaker_left_tweet>,
+					    <&speaker_left_woof2>,
+					    <&speaker_right_woof1>,
+					    <&speaker_right_tweet>,
+					    <&speaker_right_woof2>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+#include "spi1-nvram.dtsi"
+
+#include "isp-imx558.dtsi"
+
+&isp {
+	apple,platform-id = <3>;
+};
+
+#include "hwmon-common.dtsi"
+#include "hwmon-fan-dual.dtsi"
+#include "hwmon-laptop.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t600x-j375.dtsi b/arch/arm64/boot/dts/apple/t600x-j375.dtsi
index 1e5a19e49b089d..43a71a6bab248d 100644
--- a/arch/arm64/boot/dts/apple/t600x-j375.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-j375.dtsi
@@ -11,7 +11,20 @@
 
 / {
 	aliases {
+		atcphy0 = &atcphy0;
+		atcphy1 = &atcphy1;
+		atcphy2 = &atcphy2;
+		atcphy3 = &atcphy3;
+		bluetooth0 = &bluetooth0;
+		#ifndef NO_DCP
+		dcp = &dcp;
+		disp0 = &display;
+		disp0_piodma = &disp0_piodma;
+		#endif
+		ethernet0 = &ethernet0;
+		nvram = &nvram;
 		serial0 = &serial0;
+		sio = &sio;
 		wifi0 = &wifi0;
 	};
 
@@ -27,9 +40,17 @@
 			reg = <0 0 0 0>; /* To be filled by loader */
 			/* Format properties will be added by loader */
 			status = "disabled";
+			power-domains = <&ps_disp0_cpu0>;
 		};
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		/* To be filled by loader */
+	};
+
 	memory@10000000000 {
 		device_type = "memory";
 		reg = <0x100 0 0x2 0>; /* To be filled by loader */
@@ -40,6 +61,15 @@
 	status = "okay";
 };
 
+&dcp {
+	apple,connector-type = "HDMI-A";
+};
+
+/* remove once m1n1 enables sio nodes after setup */
+&sio {
+	status = "okay";
+};
+
 /* USB Type C */
 &i2c0 {
 	hpm0: usb-pd@38 {
@@ -48,6 +78,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec0: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back Left";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec0_con_hs: endpoint {
+						remote-endpoint = <&typec0_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec0_con_ss: endpoint {
+						remote-endpoint = <&typec0_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm1: usb-pd@3f {
@@ -56,6 +110,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec1: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back Left Middle";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec1_con_hs: endpoint {
+						remote-endpoint = <&typec1_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec1_con_ss: endpoint {
+						remote-endpoint = <&typec1_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm2: usb-pd@3b {
@@ -64,6 +142,30 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec2: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back Right Middle";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec2_con_hs: endpoint {
+						remote-endpoint = <&typec2_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec2_con_ss: endpoint {
+						remote-endpoint = <&typec2_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm3: usb-pd@3c {
@@ -72,6 +174,124 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <174 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec3: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back Right";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec3_con_hs: endpoint {
+						remote-endpoint = <&typec3_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec3_con_ss: endpoint {
+						remote-endpoint = <&typec3_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+/* USB controllers */
+&dwc3_0 {
+	port {
+		typec0_usb_hs: endpoint {
+			remote-endpoint = <&typec0_con_hs>;
+		};
+	};
+};
+
+&dwc3_1 {
+	port {
+		typec1_usb_hs: endpoint {
+			remote-endpoint = <&typec1_con_hs>;
+		};
+	};
+};
+
+&dwc3_2 {
+	port {
+		typec2_usb_hs: endpoint {
+			remote-endpoint = <&typec2_con_hs>;
+		};
+	};
+};
+
+&dwc3_3 {
+	port {
+		typec3_usb_hs: endpoint {
+			remote-endpoint = <&typec3_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0 {
+	port {
+		typec0_usb_ss: endpoint {
+			remote-endpoint = <&typec0_con_ss>;
+		};
+	};
+};
+
+&atcphy1 {
+	port {
+		typec1_usb_ss: endpoint {
+			remote-endpoint = <&typec1_con_ss>;
+		};
+	};
+};
+
+&atcphy2 {
+	port {
+		typec2_usb_ss: endpoint {
+			remote-endpoint = <&typec2_con_ss>;
+		};
+	};
+};
+
+&atcphy3 {
+	port {
+		typec3_usb_ss: endpoint {
+			remote-endpoint = <&typec3_con_ss>;
+		};
+	};
+};
+
+/* Audio */
+&i2c1 {
+	status = "okay";
+
+	speaker: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		shutdown-gpios = <&pinctrl_ap 178 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 179 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+	};
+};
+
+&i2c2 {
+	status = "okay";
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 4 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 180 IRQ_TYPE_LEVEL_LOW>;
+		sound-name-prefix = "Jack";
 	};
 };
 
@@ -79,20 +299,62 @@
 	clock-frequency = <1068000000>;
 };
 
+/ {
+	sound: sound {
+		/* compatible is set per machine */
+
+		dai-link@0 {
+			link-name = "Speaker";
+
+			cpu {
+				sound-dai = <&mca 0>;
+			};
+			codec {
+				sound-dai = <&speaker>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
 /* PCIe devices */
 &port00 {
 	/* WLAN */
 	bus-range = <1 1>;
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
 	wifi0: wifi@0,0 {
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
+		compatible = "pci14e4,4433";
+		brcm,board-type = "apple,okinawa";
+		apple,antenna-sku = "XX";
 		/* To be filled by the loader */
 		local-mac-address = [00 10 18 00 00 10];
 	};
+
+	bluetooth0: network@0,1 {
+		compatible = "pci14e4,5f71";
+		brcm,board-type = "apple,okinawa";
+		reg = <0x10100 0x0 0x0 0x0 0x0>;
+		/* To be filled by the loader */
+		local-bd-address = [00 00 00 00 00 00];
+	};
 };
 
+#ifndef NO_PCIE_SDHC
 &port01 {
 	/* SD card reader */
 	bus-range = <2 2>;
+	pwren-gpios = <&smc_gpio 26 GPIO_ACTIVE_HIGH>;
 	sdhci0: mmc@0,0 {
 		compatible = "pci17a0,9755";
 		reg = <0x20000 0x0 0x0 0x0 0x0>;
@@ -100,6 +362,7 @@
 		wp-inverted;
 	};
 };
+#endif
 
 &port02 {
 	/* 10 Gbit Ethernet */
@@ -115,6 +378,7 @@
 &port03 {
 	/* USB xHCI */
 	bus-range = <4 4>;
+	pwren-gpios = <&smc_gpio 20 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -126,3 +390,7 @@
 &pcie0_dart_3 {
 	status = "okay";
 };
+
+#include "spi1-nvram.dtsi"
+
+#include "hwmon-common.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t600x-pmgr.dtsi b/arch/arm64/boot/dts/apple/t600x-pmgr.dtsi
index 0bd44753b76a0c..3517b2aeb5f61f 100644
--- a/arch/arm64/boot/dts/apple/t600x-pmgr.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-pmgr.dtsi
@@ -396,6 +396,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(dispext0_cpu0);
 		power-domains = <&DIE_NODE(ps_dispext0_fe)>;
+		apple,min-state = <4>;
 	};
 
 	DIE_NODE(ps_dispext1_cpu0): power-controller@2a8 {
@@ -405,6 +406,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(dispext1_cpu0);
 		power-domains = <&DIE_NODE(ps_dispext1_fe)>;
+		apple,min-state = <4>;
 	};
 
 	DIE_NODE(ps_ane_sys_cpu): power-controller@2c8 {
@@ -824,7 +826,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = DIE_LABEL(sio_cpu);
-		power-domains = <&DIE_NODE(ps_sio)>;
+		power-domains = <&DIE_NODE(ps_sio)>, <&DIE_NODE(ps_uart_p)>,
+			<&DIE_NODE(ps_spi_p)>, <&DIE_NODE(ps_audio_p)>;
 	};
 
 	DIE_NODE(ps_fpwm0): power-controller@190 {
@@ -1113,6 +1115,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(mca0);
 		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
 	};
 
 	DIE_NODE(ps_mca1): power-controller@290 {
@@ -1122,6 +1125,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(mca1);
 		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
 	};
 
 	DIE_NODE(ps_mca2): power-controller@298 {
@@ -1131,6 +1135,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(mca2);
 		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
 	};
 
 	DIE_NODE(ps_mca3): power-controller@2a0 {
@@ -1140,6 +1145,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(mca3);
 		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
 	};
 
 	DIE_NODE(ps_dpa0): power-controller@2a8 {
@@ -1293,7 +1299,6 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(disp0_fe);
 		power-domains = <&DIE_NODE(ps_afnc2_lw0)>;
-		apple,always-on; /* TODO: figure out if we can enable PM here */
 	};
 
 	DIE_NODE(ps_disp0_cpu0): power-controller@350 {
@@ -1303,7 +1308,6 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(disp0_cpu0);
 		power-domains = <&DIE_NODE(ps_disp0_fe)>;
-		apple,always-on; /* TODO: figure out if we can enable PM here */
 		apple,min-state = <4>;
 	};
 
@@ -1368,6 +1372,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(isp_sys);
 		power-domains = <&DIE_NODE(ps_afnc2_lw1)>;
+		status = "disabled";
 	};
 
 	DIE_NODE(ps_venc_sys): power-controller@3b0 {
@@ -1385,12 +1390,6 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = DIE_LABEL(ans2);
-		/*
-		 * The ADT makes ps_apcie_st[1]_sys depend on ps_ans2 instead,
-		 * but we'd rather have a single power domain for the downstream
-		 * device to depend on, so use this node as the child.
-		 * This makes more sense anyway (since ANS2 uses APCIE_ST).
-		 */
 		power-domains = <&DIE_NODE(ps_afnc2_lw0)>;
 	};
 
@@ -1456,6 +1455,86 @@
 		label = DIE_LABEL(venc_me1);
 		power-domains = <&DIE_NODE(ps_venc_me0)>;
 	};
+
+	/* There is a dependency tree involved with these PDs,
+	 * but we do not express it here since the ISP driver
+	 * is supposed to sequence them in the right order anyway
+	 * (and we do not know the exact tree structure).
+	 *
+	 * This also works around spurious parent PD activation
+	 * on machines with ISP disabled (desktops).
+	 */
+	DIE_NODE(ps_isp_set0): power-controller@4000 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set0";
+	};
+
+	DIE_NODE(ps_isp_set1): power-controller@4010 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set1";
+	};
+
+	DIE_NODE(ps_isp_fe): power-controller@4008 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set2";
+	};
+
+	DIE_NODE(ps_isp_set3): power-controller@4028 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set3";
+	};
+
+	DIE_NODE(ps_isp_set4): power-controller@4020 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set4";
+	};
+
+	DIE_NODE(ps_isp_set5): power-controller@4030 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set5";
+	};
+
+	DIE_NODE(ps_isp_set6): power-controller@4018 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set6";
+	};
+
+	DIE_NODE(ps_isp_set7): power-controller@4038 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4038 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set7";
+	};
+
+	DIE_NODE(ps_isp_set8): power-controller@4040 {
+		compatible = "apple,t6000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set8";
+	};
 };
 
 &DIE_NODE(pmgr_south) {
@@ -1715,6 +1794,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(dispext2_cpu0);
 		power-domains = <&DIE_NODE(ps_dispext2_fe)>;
+		apple,min-state = <4>;
 	};
 
 	DIE_NODE(ps_dispext3_fe): power-controller@210 {
@@ -1733,6 +1813,7 @@
 		#reset-cells = <0>;
 		label = DIE_LABEL(dispext3_cpu0);
 		power-domains = <&DIE_NODE(ps_dispext3_fe)>;
+		apple,min-state = <4>;
 	};
 
 	DIE_NODE(ps_msr1): power-controller@250 {
@@ -1881,6 +1962,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = DIE_LABEL(msg);
+		apple,always-on; /* Core AON device? */
 	};
 
 	DIE_NODE(ps_nub_gpio): power-controller@80 {
diff --git a/arch/arm64/boot/dts/apple/t6020-j414s.dts b/arch/arm64/boot/dts/apple/t6020-j414s.dts
new file mode 100644
index 00000000000000..a227069727dd8f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6020-j414s.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * MacBook Pro (14-inch, M2 Pro, 2023)
+ *
+ * target-type: J414s
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6020.dtsi"
+#include "t602x-j414-j416.dtsi"
+
+/ {
+	compatible = "apple,j414s", "apple,t6020", "apple,arm-platform";
+	model = "Apple MacBook Pro (14-inch, M2 Pro, 2023)";
+};
+
+&wifi0 {
+	brcm,board-type = "apple,tokara";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,tokara";
+};
+
+&panel {
+	compatible = "apple,panel-j414", "apple,panel-mini-led", "apple,panel";
+	width-mm = <302>;
+	height-mm = <196>;
+	adj-height-mm = <189>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J414";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j414-macaudio", "apple,j314-macaudio", "apple,macaudio";
+	model = "MacBook Pro J414";
+};
+
+&mtp_mt {
+	firmware-name = "apple/tpmtfw-j414s.bin";
+};
diff --git a/arch/arm64/boot/dts/apple/t6020-j416s.dts b/arch/arm64/boot/dts/apple/t6020-j416s.dts
new file mode 100644
index 00000000000000..3ea2b1d52593e2
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6020-j416s.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * MacBook Pro (16-inch, M2 Pro, 2023)
+ *
+ * target-type: J416s
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6020.dtsi"
+#include "t602x-j414-j416.dtsi"
+
+/ {
+	compatible = "apple,j416s", "apple,t6020", "apple,arm-platform";
+	model = "Apple MacBook Pro (16-inch, M2 Pro, 2023)";
+};
+
+&wifi0 {
+	brcm,board-type = "apple,amami";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,amami";
+};
+
+&panel {
+	compatible = "apple,panel-j416", "apple,panel-mini-led", "apple,panel";
+	width-mm = <346>;
+	height-mm = <223>;
+	adj-height-mm = <216>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J416";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j416-macaudio", "apple,j316-macaudio", "apple,macaudio";
+	model = "MacBook Pro J416";
+};
+
+&mtp_mt {
+	firmware-name = "apple/tpmtfw-j416s.bin";
+};
diff --git a/arch/arm64/boot/dts/apple/t6020-j474s.dts b/arch/arm64/boot/dts/apple/t6020-j474s.dts
new file mode 100644
index 00000000000000..bf64a9c47cd807
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6020-j474s.dts
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Mac mini (M2 Pro, 2023)
+ *
+ * target-type: J474s
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6020.dtsi"
+
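+/* The Mac mini has no SD card reader; skip the shared include's PCIe SDHC node */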
+#define NO_PCIE_SDHC
+#include "t602x-j474-j475.dtsi"
+
+/ {
+	compatible = "apple,j474s", "apple,t6020", "apple,arm-platform";
+	model = "Apple Mac mini (M2 Pro, 2023)";
+};
+
+&wifi0 {
+	compatible = "pci14e4,4434";
+	brcm,board-type = "apple,tasmania";
+};
+
+&bluetooth0 {
+	compatible = "pci14e4,5f72";
+	brcm,board-type = "apple,tasmania";
+};
+
+/* PCIe devices */
+&port01 {
+	/*
+	 * TODO: do not enable port without device. This works around a Linux
+	 * bug which results in mismatched iommus on gaps in PCI(e) ports / bus
+	 * numbers.
+	 */
+	bus-range = <2 2>;
+	status = "okay";
+};
+
+&sound {
+	compatible = "apple,j474-macaudio", "apple,j473-macaudio", "apple,macaudio";
+	model = "Mac mini J474";
+};
+
+&lpdptxphy {
+	status = "okay";
+};
+
+#define USE_DCPEXT0 1
+
+#if USE_DCPEXT0
+/ {
+	aliases {
+		dcpext0 = &dcpext0;
+		/delete-property/ dcp;
+	};
+};
+
+&framebuffer0 {
+	power-domains = <&ps_dispext0_cpu0>, <&ps_dptx_phy_ps>;
+};
+
+&dcp {
+	status = "disabled";
+};
+&display {
+	iommus = <&dispext0_dart 0>;
+};
+&dispext0_dart {
+	status = "okay";
+};
+&dcpext0_dart {
+	status = "okay";
+};
+&dcpext0_mbox {
+	status = "okay";
+};
+&dpaudio1 {
+	status = "okay";
+};
+&dcpext0 {
+#else
+&dpaudio0 {
+	status = "okay";
+};
+&dcp {
+#endif
+	status = "okay";
+	apple,connector-type = "HDMI-A";
+
+	/* HDMI HPD gpio, used as interrupt */
+	hdmi-hpd-gpios = <&pinctrl_aop 25 GPIO_ACTIVE_HIGH>;
+
+	hdmi-pwren-gpios = <&smc_gpio 23 GPIO_ACTIVE_HIGH>;
+	dp2hdmi-pwren-gpios = <&smc_gpio 25 GPIO_ACTIVE_HIGH>;
+
+	phys = <&lpdptxphy>;
+	phy-names = "dp-phy";
+	apple,dptx-phy = <4>;
+};
+
+&gpu {
+	/* Apple does not do this, but they probably should */
+	apple,perf-base-pstate = <3>;
+};
+
+#include "hwmon-mini.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t6020.dtsi b/arch/arm64/boot/dts/apple/t6020.dtsi
new file mode 100644
index 00000000000000..77affcd3aa0d1c
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6020.dtsi
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T6020 "M2 Pro" SoC
+ *
+ * Other names: H14J, "Rhodes Chop"
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/* This chip is just a cut down version of t6021, so include it and disable the missing parts */
+
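+/* t6020 has half the GPU clusters, so per-cluster values repeat twice, not four times */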
+#define GPU_REPEAT(x) <x x>
+
+#include "t6021.dtsi"
+
+/ {
+	compatible = "apple,t6020", "apple,arm-platform";
+};
+
+/delete-node/ &pmgr_south;
+
+&gpu {
+	compatible = "apple,agx-t6020", "apple,agx-g14x";
+
+	apple,avg-power-filter-tc-ms = <302>;
+	apple,avg-power-ki-only = <2.6375>;
+	apple,avg-power-kp = <0.18>;
+	apple,fast-die0-integral-gain = <1350.0>;
+	apple,ppm-filter-time-constant-ms = <32>;
+	apple,ppm-ki = <28.0>;
+};
diff --git a/arch/arm64/boot/dts/apple/t6021-j414c.dts b/arch/arm64/boot/dts/apple/t6021-j414c.dts
new file mode 100644
index 00000000000000..fab3b03ff3c452
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6021-j414c.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * MacBook Pro (14-inch, M2 Max, 2023)
+ *
+ * target-type: J414c
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6021.dtsi"
+#include "t602x-j414-j416.dtsi"
+
+/ {
+	compatible = "apple,j414c", "apple,t6021", "apple,arm-platform";
+	model = "Apple MacBook Pro (14-inch, M2 Max, 2023)";
+};
+
+&wifi0 {
+	brcm,board-type = "apple,tokara";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,tokara";
+};
+
+&panel {
+	compatible = "apple,panel-j414", "apple,panel-mini-led", "apple,panel";
+	width-mm = <302>;
+	height-mm = <196>;
+	adj-height-mm = <189>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J414";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j414-macaudio", "apple,j314-macaudio", "apple,macaudio";
+	model = "MacBook Pro J414";
+};
+
+&mtp_mt {
+	firmware-name = "apple/tpmtfw-j414c.bin";
+};
diff --git a/arch/arm64/boot/dts/apple/t6021-j416c.dts b/arch/arm64/boot/dts/apple/t6021-j416c.dts
new file mode 100644
index 00000000000000..b476e235639ffc
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6021-j416c.dts
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * MacBook Pro (16-inch, M2 Max, 2023)
+ *
+ * target-type: J416c
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6021.dtsi"
+#include "t602x-j414-j416.dtsi"
+
+/ {
+	compatible = "apple,j416c", "apple,t6021", "apple,arm-platform";
+	model = "Apple MacBook Pro (16-inch, M2 Max, 2023)";
+};
+
+/*
+ * This machine model (only) has two extra boost CPU P-states.
+ *
+ * Disabled: Only the highest CPU bin (38 GPU cores) has this.
+ * Keep this disabled until m1n1 learns how to remove these OPPs
+ * for unsupported machines, otherwise it breaks cpufreq.
+&avalanche_opp {
+	opp18 {
+		opp-hz = /bits/ 64 <3528000000>;
+		opp-level = <18>;
+		clock-latency-ns = <67000>;
+		turbo-mode;
+	};
+	opp19 {
+		opp-hz = /bits/ 64 <3696000000>;
+		opp-level = <19>;
+		clock-latency-ns = <67000>;
+		turbo-mode;
+	};
+};
+*/
+
+&wifi0 {
+	brcm,board-type = "apple,amami";
+};
+
+&bluetooth0 {
+	brcm,board-type = "apple,amami";
+};
+
+&panel {
+	compatible = "apple,panel-j416", "apple,panel-mini-led", "apple,panel";
+	width-mm = <346>;
+	height-mm = <223>;
+	adj-height-mm = <216>;
+};
+
+&aop_audio {
+	apple,chassis-name = "J416";
+	apple,machine-kind = "MacBook Pro";
+};
+
+&sound {
+	compatible = "apple,j416-macaudio", "apple,j316-macaudio", "apple,macaudio";
+	model = "MacBook Pro J416";
+};
+
+&mtp_mt {
+	firmware-name = "apple/tpmtfw-j416c.bin";
+};
diff --git a/arch/arm64/boot/dts/apple/t6021-j475c.dts b/arch/arm64/boot/dts/apple/t6021-j475c.dts
new file mode 100644
index 00000000000000..e8e3f1e8bafd74
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6021-j475c.dts
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Mac Studio (M2 Max, 2023)
+ *
+ * target-type: J475c
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6021.dtsi"
+#include "t602x-j474-j475.dtsi"
+
+/ {
+	compatible = "apple,j475c", "apple,t6021", "apple,arm-platform";
+	model = "Apple Mac Studio (M2 Max, 2023)";
+};
+
+&wifi0 {
+	compatible = "pci14e4,4434";
+	brcm,board-type = "apple,canary";
+};
+
+&bluetooth0 {
+	compatible = "pci14e4,5f72";
+	brcm,board-type = "apple,canary";
+};
+
+&pinctrl_ap {
+	usb_hub_oe-hog {
+		gpio-hog;
+		gpios = <231 0>;
+		input;
+		line-name = "usb-hub-oe";
+	};
+
+	usb_hub_rst-hog {
+		gpio-hog;
+		gpios = <232 GPIO_ACTIVE_LOW>;
+		output-low;
+		line-name = "usb-hub-rst";
+	};
+};
+
+&sound {
+	compatible = "apple,j475-macaudio", "apple,j375-macaudio", "apple,macaudio";
+	model = "Mac Studio J475";
+};
+
+&lpdptxphy {
+	status = "okay";
+};
+
+
+
+#if USE_DCPEXT0
+/ {
+	aliases {
+		dcpext0 = &dcpext0;
+		/delete-property/ dcp;
+	};
+};
+
+&framebuffer0 {
+	power-domains = <&ps_dispext0_cpu0>, <&ps_dptx_phy_ps>;
+};
+
+&dcp {
+	status = "disabled";
+};
+&display {
+	iommus = <&dispext0_dart 0>;
+};
+&dispext0_dart {
+	status = "okay";
+};
+&dcpext0_dart {
+	status = "okay";
+};
+&dcpext0_mbox {
+	status = "okay";
+};
+&dpaudio1 {
+	status = "okay";
+};
+&dcpext0 {
+#else
+&dpaudio0 {
+	status = "okay";
+};
+&dcp {
+#endif
+	status = "okay";
+	apple,connector-type = "HDMI-A";
+
+	/* HDMI HPD gpio, used as interrupt */
+	hdmi-hpd-gpios = <&pinctrl_aop 25 GPIO_ACTIVE_HIGH>;
+
+	hdmi-pwren-gpios = <&smc_gpio 23 GPIO_ACTIVE_HIGH>;
+	dp2hdmi-pwren-gpios = <&smc_gpio 25 GPIO_ACTIVE_HIGH>;
+
+	phys = <&lpdptxphy>;
+	phy-names = "dp-phy";
+	apple,dptx-phy = <4>;
+};
+
+&gpu {
+	apple,idleoff-standby-timer = <3000>;
+	apple,perf-base-pstate = <5>;
+	apple,perf-boost-ce-step = <100>;
+	apple,perf-boost-min-util = <75>;
+	apple,perf-tgt-utilization = <70>;
+};
+
+#include "hwmon-fan-dual.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t6021.dtsi b/arch/arm64/boot/dts/apple/t6021.dtsi
new file mode 100644
index 00000000000000..95298973624f1d
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6021.dtsi
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T6021 "M2 Max" SoC
+ *
+ * Other names: H14J, "Rhodes"
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/spmi/spmi.h>
+
+#include "multi-die-cpp.h"
+
+#ifndef GPU_REPEAT
+# define GPU_REPEAT(x) <x x x x>
+#endif
+#ifndef GPU_DIE_REPEAT
+# define GPU_DIE_REPEAT(x) <x>
+#endif
+
+#include "t602x-common.dtsi"
+
+/ {
+	compatible = "apple,t6001", "apple,arm-platform";
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		ranges;
+		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
+
+		// filled via templated includes at the end of the file
+	};
+};
+
+#define DIE
+#define DIE_NO 0
+
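+/*
+ * &{/soc} extends the soc node (referenced by path) with the templated
+ * per-die includes below.
+ */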
+&{/soc} {
+	#include "t602x-die0.dtsi"
+	#include "t602x-dieX.dtsi"
+	#include "t602x-nvme.dtsi"
+};
+
+#include "t602x-gpio-pins.dtsi"
+#include "t602x-pmgr.dtsi"
+
+#undef DIE
+#undef DIE_NO
+
+
+&aic {
+	affinities {
+		e-core-pmu-affinity {
+			apple,fiq-index = <AIC_CPU_PMU_E>;
+			cpus = <&cpu_e00 &cpu_e01 &cpu_e02 &cpu_e03>;
+		};
+
+		p-core-pmu-affinity {
+			apple,fiq-index = <AIC_CPU_PMU_P>;
+			cpus = <&cpu_p00 &cpu_p01 &cpu_p02 &cpu_p03
+				&cpu_p10 &cpu_p11 &cpu_p12 &cpu_p13>;
+		};
+	};
+};
+
+&gpu {
+	compatible = "apple,agx-t6021", "apple,agx-g14x";
+
+	apple,avg-power-filter-tc-ms = <300>;
+	apple,avg-power-ki-only = <1.5125>;
+	apple,avg-power-kp = <0.38>;
+	apple,fast-die0-integral-gain = <700.0>;
+	apple,ppm-filter-time-constant-ms = <34>;
+	apple,ppm-ki = <18.0>;
+};
diff --git a/arch/arm64/boot/dts/apple/t6022-j180d.dts b/arch/arm64/boot/dts/apple/t6022-j180d.dts
new file mode 100644
index 00000000000000..f36b07d1e0c763
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6022-j180d.dts
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Mac Pro (M2 Ultra, 2023)
+ *
+ * target-type: J180d
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t6022.dtsi"
+#include "t6022-pcie-ge.dtsi"
+#include "t6022-jxxxd.dtsi"
+
+/ {
+	compatible = "apple,j180d", "apple,t6022", "apple,arm-platform";
+	model = "Apple Mac Pro (M2 Ultra, 2023)";
+	aliases {
+		atcphy0 = &atcphy0;
+		atcphy1 = &atcphy1;
+		atcphy2 = &atcphy2;
+		atcphy3 = &atcphy3;
+		atcphy4 = &atcphy0_die1;
+		atcphy5 = &atcphy1_die1;
+		atcphy6 = &atcphy2_die1;
+		atcphy7 = &atcphy3_die1;
+		//bluetooth0 = &bluetooth0; // ADT lacks calibration data
+		dcpext0 = &dcpext0;
+		ethernet0 = &ethernet0;
+		ethernet1 = &ethernet1;
+		nvram = &nvram;
+		serial0 = &serial0;
+		//wifi0 = &wifi0; // ADT lacks calibration data
+	};
+
+	chosen {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		stdout-path = "serial0";
+
+		framebuffer0: framebuffer@0 {
+			compatible = "apple,simple-framebuffer", "simple-framebuffer";
+			reg = <0 0 0 0>; /* To be filled by loader */
+			/* Format properties will be added by loader */
+			status = "disabled";
+			power-domains = <&ps_dispext0_cpu0_die1>, <&ps_dptx_phy_ps_die1>;
+		};
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		/* To be filled by loader */
+	};
+
+	memory@10000000000 {
+		device_type = "memory";
+		reg = <0x100 0 0x2 0>; /* To be filled by loader */
+	};
+};
+
+&serial0 {
+	status = "okay";
+};
+
+&lpdptxphy {
+	status = "okay";
+};
+
+&display {
+	iommus = <&dispext0_dart_die1 0>, <&dispext0_dart 0>;
+};
+
+&dispext0_dart {
+	status = "okay";
+};
+
+&dcpext0_dart {
+	status = "okay";
+};
+
+&dcpext0_mbox {
+	status = "okay";
+};
+
+&dcpext0 {
+	status = "okay";
+	apple,connector-type = "HDMI-A";
+
+	/* HDMI HPD GPIO, used as interrupt */
+	hdmi-hpd-gpios = <&pinctrl_aop 25 GPIO_ACTIVE_HIGH>;
+
+	// shared between dp2hdmi-gpio0 / dp2hdmi-gpio1
+	// hdmi-pwren-gpios = <&smc_gpio 23 GPIO_ACTIVE_HIGH>;
+
+	phys = <&lpdptxphy>;
+	phy-names = "dp-phy";
+	apple,dptx-phy = <4>;
+	apple,dptx-die = <0>;
+};
+
+&dpaudio1 {
+	status = "okay";
+};
+
+/* USB Type C Rear */
+&i2c0 {
+	hpm2: usb-pd@3b {
+		compatible = "apple,cd321x";
+		reg = <0x3b>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec2: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back 1";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec2_con_hs: endpoint {
+						remote-endpoint = <&typec2_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec2_con_ss: endpoint {
+						remote-endpoint = <&typec2_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+
+	hpm3: usb-pd@3c {
+		compatible = "apple,cd321x";
+		reg = <0x3c>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec3: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back 2";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec3_con_hs: endpoint {
+						remote-endpoint = <&typec3_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec3_con_ss: endpoint {
+						remote-endpoint = <&typec3_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+
+	/* hpm4 included from t6022-jxxxd.dtsi */
+
+	/* hpm5 included from t6022-jxxxd.dtsi */
+
+	hpm6: usb-pd@3d {
+		compatible = "apple,cd321x";
+		reg = <0x3d>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec6: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back 5";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec6_con_hs: endpoint {
+						remote-endpoint = <&typec6_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec6_con_ss: endpoint {
+						remote-endpoint = <&typec6_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+
+	hpm7: usb-pd@3e {
+		compatible = "apple,cd321x";
+		reg = <0x3e>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec7: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Back 6";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec7_con_hs: endpoint {
+						remote-endpoint = <&typec7_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec7_con_ss: endpoint {
+						remote-endpoint = <&typec7_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+&hpm4 {
+	label = "USB-C Back 3";
+};
+
+&hpm5 {
+	label = "USB-C Back 4";
+};
+
+/* USB Type C Front */
+&i2c3 {
+	status = "okay";
+
+	hpm0: usb-pd@38 {
+		compatible = "apple,cd321x";
+		reg = <0x38>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <60 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec0: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Top Right";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec0_con_hs: endpoint {
+						remote-endpoint = <&typec0_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec0_con_ss: endpoint {
+						remote-endpoint = <&typec0_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+
+	hpm1: usb-pd@3f {
+		compatible = "apple,cd321x";
+		reg = <0x3f>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <60 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec1: connector {
+			compatible = "usb-c-connector";
+			label = "USB-C Top Left";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec1_con_hs: endpoint {
+						remote-endpoint = <&typec1_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec1_con_ss: endpoint {
+						remote-endpoint = <&typec1_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+/* USB controllers */
+&dwc3_0 {
+	port {
+		typec0_usb_hs: endpoint {
+			remote-endpoint = <&typec0_con_hs>;
+		};
+	};
+};
+
+&dwc3_1 {
+	port {
+		typec1_usb_hs: endpoint {
+			remote-endpoint = <&typec1_con_hs>;
+		};
+	};
+};
+
+&dwc3_2 {
+	port {
+		typec2_usb_hs: endpoint {
+			remote-endpoint = <&typec2_con_hs>;
+		};
+	};
+};
+
+&dwc3_3 {
+	port {
+		typec3_usb_hs: endpoint {
+			remote-endpoint = <&typec3_con_hs>;
+		};
+	};
+};
+
+&dwc3_2_die1 {
+	port {
+		typec6_usb_hs: endpoint {
+			remote-endpoint = <&typec6_con_hs>;
+		};
+	};
+};
+
+&dwc3_3_die1 {
+	port {
+		typec7_usb_hs: endpoint {
+			remote-endpoint = <&typec7_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0 {
+	port {
+		typec0_usb_ss: endpoint {
+			remote-endpoint = <&typec0_con_ss>;
+		};
+	};
+};
+
+&atcphy1 {
+	port {
+		typec1_usb_ss: endpoint {
+			remote-endpoint = <&typec1_con_ss>;
+		};
+	};
+};
+
+&atcphy2 {
+	port {
+		typec2_usb_ss: endpoint {
+			remote-endpoint = <&typec2_con_ss>;
+		};
+	};
+};
+
+&atcphy3 {
+	port {
+		typec3_usb_ss: endpoint {
+			remote-endpoint = <&typec3_con_ss>;
+		};
+	};
+};
+
+&atcphy2_die1 {
+	port {
+		typec6_usb_ss: endpoint {
+			remote-endpoint = <&typec6_con_ss>;
+		};
+	};
+};
+
+&atcphy3_die1 {
+	port {
+		typec7_usb_ss: endpoint {
+			remote-endpoint = <&typec7_con_ss>;
+		};
+	};
+};
+
+/* Audio */
+&i2c1 {
+	status = "okay";
+
+	speaker_tweeter: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		shutdown-gpios = <&pinctrl_ap 57 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Tweeter";
+		interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+	};
+
+	speaker_woofer: codec@39 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x39>;
+		shutdown-gpios = <&pinctrl_ap 57 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Woofer";
+		interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+	};
+};
+
+&i2c2 {
+	status = "okay";
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 8 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 59 IRQ_TYPE_LEVEL_LOW>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&nco_clkref {
+	clock-frequency = <1068000000>;
+};
+
+/ {
+	sound: sound {
+		compatible = "apple,j180-macaudio", "apple,macaudio";
+		model = "Mac Pro J180";
+
+		dai-link@0 {
+			link-name = "Speakers";
+			/*
+			 * DANGER ZONE: You can blow your speakers!
+			 *
+			 * The drivers are not ready, and unless you are careful
+			 * to attenuate the audio stream, you run the risk of
+			 * blowing your speakers.
+			 */
+			status = "disabled";
+			cpu {
+				sound-dai = <&mca 0>;
+			};
+			codec {
+				sound-dai = <&speaker_woofer>, <&speaker_tweeter>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+/* PCIe devices */
+&port_ge00_die1 {
+	bus-range = <0x01 0x09>;
+
+	pci@0,0 {
+		device_type = "pci";
+		reg = <0x10000 0x00 0x00 0x00 0x00>;
+		bus-range = <0x02 0x09>;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
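+		/*
+		 * Legacy PCI interrupt swizzling: INTA..INTD of each
+		 * downstream device are rotated across the four parent
+		 * interrupts according to the device number.
+		 */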
+		interrupt-map-mask = <0xffff00 0x00 0x00 0x07>;
+		interrupt-map = <0x20000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x20000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+				<0x20000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x20000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x20800 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+				<0x20800 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x20800 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x20800 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x21000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x21000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x21000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x21000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+				<0x21800 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x21800 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x21800 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+				<0x21800 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x22000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x22000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+				<0x22000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x22000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x22800 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+				<0x22800 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x22800 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x22800 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x23000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+				<0x23000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x03>,
+				<0x23000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+				<0x23000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x01>;
+
+		/* pci-usba-dsp, internal USB-A port */
+		pci@0,0 {
+			device_type = "pci";
+			reg = <0x20000 0x00 0x00 0x00 0x00>;
+			bus-range = <0x03 0x03>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0x30000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+					<0x30000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+					<0x30000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x02>;
+
+			/* not functional yet */
+			reset-gpios = <&pinctrl_ap 6 GPIO_ACTIVE_LOW>;
+		};
+
+		/* pci-sata-dsp, internal AHCI controller */
+		pci@1,0 {
+			device_type = "pci";
+			reg = <0x20800 0x00 0x00 0x00 0x00>;
+			bus-range = <0x04 0x04>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0x40000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+					<0x40000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+					<0x40000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x02>;
+		};
+
+		/* pci-bio-dsp, I/O board USB-A ports */
+		pci@2,0 {
+			device_type = "pci";
+			reg = <0x21000 0x00 0x00 0x00 0x00>;
+			bus-range = <0x05 0x05>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0x50000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+					<0x50000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+					<0x50000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x00>;
+
+			/* not functional yet */
+			reset-gpios = <&pinctrl_ap 7 GPIO_ACTIVE_LOW>;
+		};
+
+		/* pci-lan-dsp, Aquantia AQC113 10G ethernet controller (0) */
+		pci@3,0 {
+			device_type = "pci";
+			reg = <0x21800 0x00 0x00 0x00 0x00>;
+			bus-range = <0x06 0x06>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0x60000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+					<0x60000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+					<0x60000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x01>;
+
+			ethernet0: ethernet@0,0 {
+				reg = <0x60000 0x0 0x0 0x0 0x0>;
+				/* To be filled by the loader */
+				local-mac-address = [00 10 18 00 00 00];
+			};
+		};
+
+		/* pci-lan-b-dsp, Aquantia AQC113 10G ethernet controller (1) */
+		pci@4,0 {
+			device_type = "pci";
+			reg = <0x22000 0x00 0x00 0x00 0x00>;
+			bus-range = <0x07 0x07>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0x70000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+					<0x70000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+					<0x70000 0x00 0x00 0x04 &port_ge00_die1 0x00 0x00 0x00 0x02>;
+
+			ethernet1: ethernet@0,0 {
+				reg = <0x70000 0x0 0x0 0x0 0x0>;
+				/* To be filled by the loader */
+				local-mac-address = [00 10 18 00 00 00];
+			};
+		};
+
+		/* pci-wifibt-dsp, Broadcom BCM4388 Wlan/BT */
+		pci@5,0 {
+			device_type = "pci";
+			reg = <0x22800 0x00 0x00 0x00 0x00>;
+			bus-range = <0x08 0x08>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0x80000 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+					<0x80000 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+					<0x80000 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x02>,
+					<0x80100 0x00 0x00 0x01 &port_ge00_die1 0x00 0x00 0x00 0x00>,
+					<0x80100 0x00 0x00 0x02 &port_ge00_die1 0x00 0x00 0x00 0x01>,
+					<0x80100 0x00 0x00 0x03 &port_ge00_die1 0x00 0x00 0x00 0x02>;
+
+			/* not functional yet */
+			reset-gpios = <&pinctrl_ap 4 GPIO_ACTIVE_LOW>;
+			pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
+
+			wifi0: wifi@0,0 {
+				reg = <0x80000 0x0 0x0 0x0 0x0>;
+				compatible = "pci14e4,4433";
+				brcm,board-type = "apple,sumatra";
+				apple,antenna-sku = "XX";
+				/* To be filled by the loader */
+				local-mac-address = [00 10 18 00 00 10];
+			};
+
+			bluetooth0: network@0,1 {
+				compatible = "pci14e4,5f71";
+				brcm,board-type = "apple,sumatra";
+				// reg = <0x80100 0x0 0x0 0x0 0x0>;
+				/* To be filled by the loader */
+				local-bd-address = [00 00 00 00 00 00];
+			};
+		};
+
+		/* pci-slot6-dsp, PCIe slot6 */
+		pci@6,0 {
+			device_type = "pci";
+			reg = <0x23000 0x00 0x00 0x00 0x00>;
+			bus-range = <0x09 0x09>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+		};
+	};
+};
+
+&pcie_ge {
+	status = "ok";
+};
+
+&pcie_ge_dart {
+	status = "ok";
+};
+
+&pcie_ge_die1 {
+	status = "ok";
+};
+
+&pcie_ge_dart_die1 {
+	status = "ok";
+};
+
+// delete unused PCIe nodes
+/delete-node/ &pcie0;
+/delete-node/ &pcie0_dart_0;
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t6022-j475d.dts b/arch/arm64/boot/dts/apple/t6022-j475d.dts
new file mode 100644
index 00000000000000..5a60e84fab101c
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6022-j475d.dts
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Mac Studio (M2 Ultra, 2023)
+ *
+ * target-type: J475d
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#define NO_DCP
+
+#include "t6022.dtsi"
+#include "t602x-j474-j475.dtsi"
+#include "t6022-jxxxd.dtsi"
+
+/ {
+	compatible = "apple,j475d", "apple,t6022", "apple,arm-platform";
+	model = "Apple Mac Studio (M2 Ultra, 2023)";
+	aliases {
+		atcphy4 = &atcphy0_die1;
+		atcphy5 = &atcphy1_die1;
+		/delete-property/ dcp;
+		/delete-property/ sio;
+	};
+};
+
+&sio {
+	status = "disabled";
+};
+
+&framebuffer0 {
+	power-domains = <&ps_dispext0_cpu0_die1>, <&ps_dptx_phy_ps_die1>;
+};
+
+&dcpext0_die1 {
+	// J180 lacks "function-dp2hdmi_pwr_en", so the shared t6022-jxxxd.dtsi omits this; set it here for J475d
+	dp2hdmi-pwren-gpios = <&smc_gpio 25 GPIO_ACTIVE_HIGH>;
+};
+
+&typec4 {
+	label = "USB-C Front Right";
+};
+
+&typec5 {
+	label = "USB-C Front Left";
+};
+
+/* delete unused USB nodes on die 1 */
+
+/delete-node/ &dwc3_2_dart_0_die1;
+/delete-node/ &dwc3_2_dart_1_die1;
+/delete-node/ &dwc3_2_die1;
+/delete-node/ &atcphy2_die1;
+/delete-node/ &atcphy2_xbar_die1;
+
+/delete-node/ &dwc3_3_dart_0_die1;
+/delete-node/ &dwc3_3_dart_1_die1;
+/delete-node/ &dwc3_3_die1;
+/delete-node/ &atcphy3_die1;
+/delete-node/ &atcphy3_xbar_die1;
+
+
+/* delete unused always-on power-domains on die 1 */
+
+/delete-node/ &ps_atc2_usb_aon_die1;
+/delete-node/ &ps_atc2_usb_die1;
+
+/delete-node/ &ps_atc3_usb_aon_die1;
+/delete-node/ &ps_atc3_usb_die1;
+
+&wifi0 {
+	compatible = "pci14e4,4434";
+	brcm,board-type = "apple,canary";
+};
+
+&bluetooth0 {
+	compatible = "pci14e4,5f72";
+	brcm,board-type = "apple,canary";
+};
+
+&sound {
+	compatible = "apple,j475-macaudio", "apple,j375-macaudio", "apple,macaudio";
+	model = "Mac Studio J475";
+};
+
+#include "hwmon-fan-dual.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t6022-jxxxd.dtsi b/arch/arm64/boot/dts/apple/t6022-jxxxd.dtsi
new file mode 100644
index 00000000000000..f8d2fcd485d1fc
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6022-jxxxd.dtsi
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Mac Pro (M2 Ultra, 2023) and Mac Studio (M2 Ultra, 2023)
+ *
+ * This file contains the parts common to J180 and J475 devices with t6022.
+ *
+ * target-type: J180d / J475d
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/ {
+	aliases {
+		dcpext4 = &dcpext0_die1;
+		disp0 = &display;
+		sio1 = &sio_die1;
+	};
+};
+
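+/* On these machines the HDMI output is driven by dcpext0 on die 1 */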
+&lpdptxphy_die1 {
+	status = "okay";
+};
+
+&display {
+	iommus = <&dispext0_dart_die1 0>;
+};
+
+&dispext0_dart_die1 {
+	status = "okay";
+};
+
+&dcpext0_dart_die1 {
+	status = "okay";
+};
+
+&dcpext0_mbox_die1 {
+	status = "okay";
+};
+
+&dcpext0_die1 {
+	status = "okay";
+	apple,connector-type = "HDMI-A";
+
+	/* HDMI HPD GPIO, used as interrupt */
+	hdmi-hpd-gpios = <&pinctrl_aop 41 GPIO_ACTIVE_HIGH>;
+
+	hdmi-pwren-gpios = <&smc_gpio 23 GPIO_ACTIVE_HIGH>;
+	// J180 lacks "function-dp2hdmi_pwr_en"; J475d sets dp2hdmi-pwren-gpios in its own .dts
+	// dp2hdmi-pwren-gpios = <&smc_gpio 25 GPIO_ACTIVE_HIGH>;
+
+	phys = <&lpdptxphy_die1>;
+	phy-names = "dp-phy";
+	apple,dptx-phy = <4>;
+	apple,dptx-die = <1>;
+};
+
+&dpaudio1_die1 {
+	status = "okay";
+};
+
+/* delete dcp0/disp0, which are missing on these machines */
+
+/delete-node/ &disp0_dart;
+/delete-node/ &dcp_dart;
+/delete-node/ &dcp_mbox;
+/delete-node/ &dcp;
+/delete-node/ &dpaudio0;
+
+/* delete unused always-on power-domains */
+/delete-node/ &ps_disp0_cpu0;
+/delete-node/ &ps_disp0_fe;
+
+/delete-node/ &ps_disp0_cpu0_die1;
+/delete-node/ &ps_disp0_fe_die1;
+
+
+/* USB Type C */
+&i2c0 {
+	/* front-right */
+	hpm4: usb-pd@39 {
+		compatible = "apple,cd321x";
+		reg = <0x39>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec4: connector {
+			compatible = "usb-c-connector";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec4_con_hs: endpoint {
+						remote-endpoint = <&typec4_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec4_con_ss: endpoint {
+						remote-endpoint = <&typec4_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+
+	/* front-left */
+	hpm5: usb-pd@3a {
+		compatible = "apple,cd321x";
+		reg = <0x3a>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+
+		typec5: connector {
+			compatible = "usb-c-connector";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec5_con_hs: endpoint {
+						remote-endpoint = <&typec5_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec5_con_ss: endpoint {
+						remote-endpoint = <&typec5_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+/* USB controllers on die 1 */
+&dwc3_0_die1 {
+	port {
+		typec4_usb_hs: endpoint {
+			remote-endpoint = <&typec4_con_hs>;
+		};
+	};
+};
+
+&dwc3_1_die1 {
+	port {
+		typec5_usb_hs: endpoint {
+			remote-endpoint = <&typec5_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0_die1 {
+	port {
+		typec4_usb_ss: endpoint {
+			remote-endpoint = <&typec4_con_ss>;
+		};
+	};
+};
+
+&atcphy1_die1 {
+	port {
+		typec5_usb_ss: endpoint {
+			remote-endpoint = <&typec5_con_ss>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t6022-pcie-ge.dtsi b/arch/arm64/boot/dts/apple/t6022-pcie-ge.dtsi
new file mode 100644
index 00000000000000..f78c483c29133f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6022-pcie-ge.dtsi
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Include the PCIe-GE nodes present on both dies of the T6022 (M2 Ultra)
+ * in the Mac Pro (2023).
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
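+/*
+ * t602x-pcie-ge.dtsi is instantiated once per die: DIE provides the node
+ * label suffix (empty for die 0, _die1 for die 1) and DIE_NO the die index.
+ */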
+#define DIE
+#define DIE_NO 0
+
+&die0 {
+	#include "t602x-pcie-ge.dtsi"
+};
+
+#undef DIE
+#undef DIE_NO
+
+#define DIE _die1
+#define DIE_NO 1
+
+&die1 {
+	#include "t602x-pcie-ge.dtsi"
+};
+
+#undef DIE
+#undef DIE_NO
diff --git a/arch/arm64/boot/dts/apple/t6022.dtsi b/arch/arm64/boot/dts/apple/t6022.dtsi
new file mode 100644
index 00000000000000..9f94df45ed7659
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t6022.dtsi
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T6022 "M2 Ultra" SoC
+ *
+ * Other names: H14J, "Rhodes 2C"
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/spmi/spmi.h>
+
+#include "multi-die-cpp.h"
+
+#ifndef GPU_REPEAT
+# define GPU_REPEAT(x) <x x x x x x x x>
+#endif
+#ifndef GPU_DIE_REPEAT
+# define GPU_DIE_REPEAT(x) <x x>
+#endif
+
+#include "t602x-common.dtsi"
+
+/ {
+	compatible = "apple,t6022", "apple,arm-platform";
+
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		cpu-map {
+			cluster3 {
+				core0 {
+					cpu = <&cpu_e10>;
+				};
+				core1 {
+					cpu = <&cpu_e11>;
+				};
+				core2 {
+					cpu = <&cpu_e12>;
+				};
+				core3 {
+					cpu = <&cpu_e13>;
+				};
+			};
+
+			cluster4 {
+				core0 {
+					cpu = <&cpu_p20>;
+				};
+				core1 {
+					cpu = <&cpu_p21>;
+				};
+				core2 {
+					cpu = <&cpu_p22>;
+				};
+				core3 {
+					cpu = <&cpu_p23>;
+				};
+			};
+
+			cluster5 {
+				core0 {
+					cpu = <&cpu_p30>;
+				};
+				core1 {
+					cpu = <&cpu_p31>;
+				};
+				core2 {
+					cpu = <&cpu_p32>;
+				};
+				core3 {
+					cpu = <&cpu_p33>;
+				};
+			};
+		};
+
+		cpu_e10: cpu@800 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x800>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_3>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e_die1>;
+		};
+
+		cpu_e11: cpu@801 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x801>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_3>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e_die1>;
+		};
+
+		cpu_e12: cpu@802 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x802>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_3>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e_die1>;
+		};
+
+		cpu_e13: cpu@803 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x803>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_3>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e_die1>;
+		};
+
+		cpu_p20: cpu@10900 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10900>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_4>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0_die1>;
+		};
+
+		cpu_p21: cpu@10901 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10901>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_4>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0_die1>;
+		};
+
+		cpu_p22: cpu@10902 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10902>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_4>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0_die1>;
+		};
+
+		cpu_p23: cpu@10903 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10903>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_4>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0_die1>;
+		};
+
+		cpu_p30: cpu@10a00 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10a00>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_5>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1_die1>;
+		};
+
+		cpu_p31: cpu@10a01 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10a01>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_5>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1_die1>;
+		};
+
+		cpu_p32: cpu@10a02 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10a02>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_5>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1_die1>;
+		};
+
+		cpu_p33: cpu@10a03 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10a03>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_5>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1_die1>;
+		};
+
+		l2_cache_3: l2-cache-3 {
+			compatible = "cache";
+			cache-level = <2>;
+			cache-unified;
+			cache-size = <0x400000>;
+		};
+
+		l2_cache_4: l2-cache-4 {
+			compatible = "cache";
+			cache-level = <2>;
+			cache-unified;
+			cache-size = <0x1000000>;
+		};
+
+		l2_cache_5: l2-cache-5 {
+			compatible = "cache";
+			cache-level = <2>;
+			cache-unified;
+			cache-size = <0x1000000>;
+		};
+	};
+
+	die0: soc@200000000 {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges = <0x2 0x0 0x2 0x0 0x4 0x0>,
+			 <0x5 0x80000000 0x5 0x80000000 0x1 0x80000000>,
+			 <0x7 0x0 0x7 0x0 0xf 0x80000000>,
+			 <0x16 0x80000000 0x16 0x80000000 0x5 0x80000000>;
+		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
+
+		// filled via templated includes at the end of the file
+	};
+
+	die1: soc@2200000000 {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges = <0x2 0x0 0x22 0x0 0x4 0x0>,
+			 <0x7 0x0 0x27 0x0 0xf 0x80000000>,
+			 <0x16 0x80000000 0x36 0x80000000 0x5 0x80000000>;
+		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
+
+		// filled via templated includes at the end of the file
+	};
+};
+
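+/*
+ * Instantiate the shared t602x nodes once per die: DIE selects the label
+ * suffix (empty for die 0, _die1 for die 1) and DIE_NO the die index used
+ * by the included files.
+ */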
+#define DIE
+#define DIE_NO 0
+
+&die0 {
+	#include "t602x-die0.dtsi"
+	#include "t602x-dieX.dtsi"
+};
+
+#include "t602x-pmgr.dtsi"
+#include "t602x-gpio-pins.dtsi"
+
+#undef DIE
+#undef DIE_NO
+
+#define DIE _die1
+#define DIE_NO 1
+
+&die1 {
+	#include "t602x-dieX.dtsi"
+	#include "t602x-nvme.dtsi"
+};
+
+#include "t602x-pmgr.dtsi"
+
+#undef DIE
+#undef DIE_NO
+
+&aic {
+	affinities {
+		e-core-pmu-affinity {
+			apple,fiq-index = <AIC_CPU_PMU_E>;
+			cpus = <&cpu_e00 &cpu_e01 &cpu_e02 &cpu_e03
+				&cpu_e10 &cpu_e11 &cpu_e12 &cpu_e13>;
+		};
+
+		p-core-pmu-affinity {
+			apple,fiq-index = <AIC_CPU_PMU_P>;
+			cpus = <&cpu_p00 &cpu_p01 &cpu_p02 &cpu_p03
+				&cpu_p10 &cpu_p11 &cpu_p12 &cpu_p13
+				&cpu_p20 &cpu_p21 &cpu_p22 &cpu_p23
+				&cpu_p30 &cpu_p31 &cpu_p32 &cpu_p33>;
+		};
+	};
+};
+
+&dcpext0_die1 {
+	apple,bw-scratch = <&pmgr_dcp 0 4 0x1240>;
+};
+
+&dcpext1_die1 {
+	apple,bw-scratch = <&pmgr_dcp 0 4 0x1248>;
+};
+
+&ps_gfx {
+	// On t6022, the die0 GPU power domain needs both AFR power domains
+	power-domains = <&ps_afr>, <&ps_afr_die1>;
+};
+
+&gpu {
+	compatible = "apple,agx-t6022", "apple,agx-g14x";
+
+	apple,avg-power-filter-tc-ms = <302>;
+	apple,avg-power-ki-only = <1.0125>;
+	apple,avg-power-kp = <0.15>;
+	apple,fast-die0-integral-gain = <9.6>;
+	apple,fast-die0-proportional-gain = <24.0>;
+	apple,idleoff-standby-timer = <3000>;
+	apple,perf-base-pstate = <5>;
+	apple,perf-boost-ce-step = <100>;
+	apple,perf-boost-min-util = <75>;
+	apple,perf-tgt-utilization = <70>;
+	apple,ppm-ki = <11.0>;
+	apple,ppm-kp = <0.15>;
+};
+
+&pinctrl_ap_die1 {
+	pcie_ge_pins_die1: pcie-ge1-pins {
+		pinmux = <APPLE_PINMUX(8, 1)>;
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t602x-common.dtsi b/arch/arm64/boot/dts/apple/t602x-common.dtsi
new file mode 100644
index 00000000000000..48fc173f0ab0c5
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-common.dtsi
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Nodes common to all T602x family SoCs (M2 Pro/Max/Ultra)
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		gpu = &gpu;
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu_e00>;
+				};
+				core1 {
+					cpu = <&cpu_e01>;
+				};
+				core2 {
+					cpu = <&cpu_e02>;
+				};
+				core3 {
+					cpu = <&cpu_e03>;
+				};
+			};
+			cluster1 {
+				core0 {
+					cpu = <&cpu_p00>;
+				};
+				core1 {
+					cpu = <&cpu_p01>;
+				};
+				core2 {
+					cpu = <&cpu_p02>;
+				};
+				core3 {
+					cpu = <&cpu_p03>;
+				};
+			};
+
+			cluster2 {
+				core0 {
+					cpu = <&cpu_p10>;
+				};
+				core1 {
+					cpu = <&cpu_p11>;
+				};
+				core2 {
+					cpu = <&cpu_p12>;
+				};
+				core3 {
+					cpu = <&cpu_p13>;
+				};
+			};
+		};
+
+		cpu_e00: cpu@0 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x0>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_0>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e>;
+		};
+
+		cpu_e01: cpu@1 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x1>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_0>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e>;
+		};
+
+		cpu_e02: cpu@2 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x2>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_0>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e>;
+		};
+
+		cpu_e03: cpu@3 {
+			compatible = "apple,blizzard";
+			device_type = "cpu";
+			reg = <0x0 0x3>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* to be filled by loader */
+			next-level-cache = <&l2_cache_0>;
+			i-cache-size  = <0x20000>;
+			d-cache-size = <0x10000>;
+			operating-points-v2 = <&blizzard_opp>;
+			capacity-dmips-mhz = <756>;
+			performance-domains = <&cpufreq_e>;
+		};
+
+		cpu_p00: cpu@10100 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10100>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_1>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0>;
+		};
+
+		cpu_p01: cpu@10101 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10101>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_1>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0>;
+		};
+
+		cpu_p02: cpu@10102 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10102>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_1>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0>;
+		};
+
+		cpu_p03: cpu@10103 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10103>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_1>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p0>;
+		};
+
+		cpu_p10: cpu@10200 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10200>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_2>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1>;
+		};
+
+		cpu_p11: cpu@10201 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10201>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_2>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1>;
+		};
+
+		cpu_p12: cpu@10202 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10202>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_2>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1>;
+		};
+
+		cpu_p13: cpu@10203 {
+			compatible = "apple,avalanche";
+			device_type = "cpu";
+			reg = <0x0 0x10203>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			next-level-cache = <&l2_cache_2>;
+			i-cache-size = <0x30000>;
+			d-cache-size = <0x20000>;
+			operating-points-v2 = <&avalanche_opp>;
+			capacity-dmips-mhz = <1024>;
+			performance-domains = <&cpufreq_p1>;
+		};
+
+		l2_cache_0: l2-cache-0 {
+			compatible = "cache";
+			cache-level = <2>;
+			cache-unified;
+			cache-size = <0x400000>;
+		};
+
+		l2_cache_1: l2-cache-1 {
+			compatible = "cache";
+			cache-level = <2>;
+			cache-unified;
+			cache-size = <0x1000000>;
+		};
+
+		l2_cache_2: l2-cache-2 {
+			compatible = "cache";
+			cache-level = <2>;
+			cache-unified;
+			cache-size = <0x1000000>;
+		};
+	};
+
+	blizzard_opp: opp-table-0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		/* pstate #1 is a dummy clone of #2 */
+		opp02 {
+			opp-hz = /bits/ 64 <912000000>;
+			opp-level = <2>;
+			clock-latency-ns = <7700>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <1284000000>;
+			opp-level = <3>;
+			clock-latency-ns = <25000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1752000000>;
+			opp-level = <4>;
+			clock-latency-ns = <33000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <2004000000>;
+			opp-level = <5>;
+			clock-latency-ns = <38000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <2256000000>;
+			opp-level = <6>;
+			clock-latency-ns = <44000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <2424000000>;
+			opp-level = <7>;
+			clock-latency-ns = <48000>;
+		};
+	};
+
+	avalanche_opp: opp-table-1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp01 {
+			opp-hz = /bits/ 64 <702000000>;
+			opp-level = <1>;
+			clock-latency-ns = <7400>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <948000000>;
+			opp-level = <2>;
+			clock-latency-ns = <18000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <1188000000>;
+			opp-level = <3>;
+			clock-latency-ns = <21000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1452000000>;
+			opp-level = <4>;
+			clock-latency-ns = <24000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1704000000>;
+			opp-level = <5>;
+			clock-latency-ns = <28000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1968000000>;
+			opp-level = <6>;
+			clock-latency-ns = <31000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <2208000000>;
+			opp-level = <7>;
+			clock-latency-ns = <33000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <2400000000>;
+			opp-level = <8>;
+			clock-latency-ns = <45000>;
+		};
+		opp09 {
+			opp-hz = /bits/ 64 <2568000000>;
+			opp-level = <9>;
+			clock-latency-ns = <47000>;
+		};
+		opp10 {
+			opp-hz = /bits/ 64 <2724000000>;
+			opp-level = <10>;
+			clock-latency-ns = <50000>;
+		};
+		opp11 {
+			opp-hz = /bits/ 64 <2868000000>;
+			opp-level = <11>;
+			clock-latency-ns = <52000>;
+		};
+		opp12 {
+			opp-hz = /bits/ 64 <3000000000>;
+			opp-level = <12>;
+			clock-latency-ns = <57000>;
+		};
+		opp13 {
+			opp-hz = /bits/ 64 <3132000000>;
+			opp-level = <13>;
+			clock-latency-ns = <60000>;
+		};
+		opp14 {
+			opp-hz = /bits/ 64 <3264000000>;
+			opp-level = <14>;
+			clock-latency-ns = <64000>;
+		};
+		opp15 {
+			opp-hz = /bits/ 64 <3360000000>;
+			opp-level = <15>;
+			clock-latency-ns = <64000>;
+			turbo-mode;
+		};
+		opp16 {
+			opp-hz = /bits/ 64 <3408000000>;
+			opp-level = <16>;
+			clock-latency-ns = <64000>;
+			turbo-mode;
+		};
+		opp17 {
+			opp-hz = /bits/ 64 <3504000000>;
+			opp-level = <17>;
+			clock-latency-ns = <64000>;
+			turbo-mode;
+		};
+	};
+
+	gpu_opp: opp-table-gpu {
+		compatible = "operating-points-v2";
+
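+		/*
+		 * GPU_REPEAT() expands the opp-microvolt value into the
+		 * per-SoC number of entries; the count is defined by the SoC
+		 * dtsi including this file (4 entries on t6021, 8 on t6022).
+		 */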
+		/*
+		 * NOTE: The voltage and power values are device-specific and
+		 * must be filled in by the bootloader.
+		 */
+		opp00 {
+			opp-hz = /bits/ 64 <0>;
+			opp-microvolt = GPU_REPEAT(400000);
+			opp-microwatt = <0>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <444000000>;
+			opp-microvolt = GPU_REPEAT(637000);
+			opp-microwatt = <4295000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <612000000>;
+			opp-microvolt = GPU_REPEAT(656000);
+			opp-microwatt = <6251000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <808000000>;
+			opp-microvolt = GPU_REPEAT(687000);
+			opp-microwatt = <8625000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <968000000>;
+			opp-microvolt = GPU_REPEAT(725000);
+			opp-microwatt = <11948000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1110000000>;
+			opp-microvolt = GPU_REPEAT(790000);
+			opp-microwatt = <15071000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1236000000>;
+			opp-microvolt = GPU_REPEAT(843000);
+			opp-microwatt = <18891000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1338000000>;
+			opp-microvolt = GPU_REPEAT(887000);
+			opp-microwatt = <21960000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1398000000>;
+			opp-microvolt = GPU_REPEAT(918000);
+			opp-microwatt = <22800000>;
+		};
+	};
+
+	gpu_cs_opp: opp-table-gpu-cs {
+		compatible = "operating-points-v2";
+
+		/*
+		 * NOTE: The voltage and power values are device-specific and
+		 * must be filled in by the bootloader.
+		 */
+		opp00 {
+			opp-hz = /bits/ 64 <24000000>;
+			opp-microvolt = GPU_DIE_REPEAT(668000);
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <444000000>;
+			opp-microvolt = GPU_DIE_REPEAT(668000);
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <612000000>;
+			opp-microvolt = GPU_DIE_REPEAT(678000);
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <808000000>;
+			opp-microvolt = GPU_DIE_REPEAT(737000);
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1024000000>;
+			opp-microvolt = GPU_DIE_REPEAT(815000);
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1140000000>;
+			opp-microvolt = GPU_DIE_REPEAT(862000);
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1236000000>;
+			opp-microvolt = GPU_DIE_REPEAT(893000);
+		};
+	};
+
+	gpu_afr_opp: opp-table-gpu-afr {
+		compatible = "operating-points-v2";
+
+		/*
+		 * NOTE: The voltage and power values are device-specific and
+		 * must be filled in by the bootloader.
+		 */
+		opp00 {
+			opp-hz = /bits/ 64 <24000000>;
+			opp-microvolt = GPU_DIE_REPEAT(668000);
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = GPU_DIE_REPEAT(668000);
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <552000000>;
+			opp-microvolt = GPU_DIE_REPEAT(678000);
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <760000000>;
+			opp-microvolt = GPU_DIE_REPEAT(737000);
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <980000000>;
+			opp-microvolt = GPU_DIE_REPEAT(815000);
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1098000000>;
+			opp-microvolt = GPU_DIE_REPEAT(862000);
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = GPU_DIE_REPEAT(893000);
+		};
+	};
+
+	pmu-e {
+		compatible = "apple,blizzard-pmu";
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_FIQ 0 AIC_CPU_PMU_E IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	pmu-p {
+		compatible = "apple,avalanche-pmu";
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_FIQ 0 AIC_CPU_PMU_P IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&aic>;
+		interrupt-names = "phys", "virt", "hyp-phys", "hyp-virt";
+		interrupts = <AIC_FIQ 0 AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_FIQ 0 AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_FIQ 0 AIC_TMR_HV_PHYS IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_FIQ 0 AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	clkref: clock-ref {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+		clock-output-names = "clkref";
+	};
+
+	clk_200m: clock-200m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <200000000>;
+		clock-output-names = "clk_200m";
+	};
+
+	clk_disp0: clock-disp0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <257142848>; /* TODO: check */
+		clock-output-names = "clk_disp0";
+	};
+
+	clk_dispext0: clock-dispext0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext0";
+	};
+
+	clk_dispext0_die1: clock-dispext0_die1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext0_die1";
+	};
+
+	clk_dispext1: clock-dispext1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext1";
+	};
+
+	clk_dispext1_die1: clock-dispext1_die1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext1_die1";
+	};
+
+	/*
+	 * This is a made-up representation of the input clock to the NCO,
+	 * since we don't know the true clock tree.
+	 */
+	nco_clkref: clock-ref-nco {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-output-names = "nco_ref";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		uat_handoff: uat-handoff {
+			reg = <0 0 0 0>;
+		};
+
+		uat_pagetables: uat-pagetables {
+			reg = <0 0 0 0>;
+		};
+
+		uat_ttbs: uat-ttbs {
+			reg = <0 0 0 0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t602x-die0.dtsi b/arch/arm64/boot/dts/apple/t602x-die0.dtsi
new file mode 100644
index 00000000000000..064a785a44c6e3
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-die0.dtsi
@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * In anticipation of an M2 Ultra. Inspired by T600x.
+ *
+ * Obviously needs filling out; for now this is just the bare bones
+ * required to boot to a console in the HV.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
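+/*
+ * This file is included from within the soc node of the per-SoC dtsi
+ * (t6021.dtsi, t6022.dtsi), which is why the nodes below appear without an
+ * enclosing node of their own.
+ */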
+	nco: clock-controller@28e03c000 {
+		compatible = "apple,t6020-nco", "apple,nco";
+		reg = <0x2 0x8e03c000 0x0 0x14000>;
+		clocks = <&nco_clkref>;
+		#clock-cells = <1>;
+	};
+
+	aic: interrupt-controller@28e100000 {
+		compatible = "apple,t6020-aic", "apple,aic2";
+		#interrupt-cells = <4>;
+		interrupt-controller;
+		reg = <0x2 0x8e100000 0x0 0xc000>,
+			<0x2 0x8e10c000 0x0 0x1000>;
+		reg-names = "core", "event";
+		power-domains = <&ps_aic>;
+	};
+
+	pmgr_misc: power-management@28e20c000 {
+		compatible = "apple,t6020-pmgr-misc", "apple,t6000-pmgr-misc";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2 0x8e20c000 0 0x400>,
+			<0x2 0x8e20c400 0 0x400>;
+		reg-names = "fabric-ps", "dcs-ps";
+	};
+
+	pmgr_dcp: power-management@28e3d0000 {
+		reg = <0x2 0x8e3d0000 0x0 0x4000>;
+		reg-names = "dcp-fw-pmgr";
+		#apple,bw-scratch-cells = <3>;
+	};
+
+	wdt: watchdog@29e2c4000 {
+		compatible = "apple,t6020-wdt", "apple,wdt";
+		reg = <0x2 0x9e2c4000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 719 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	nub_spmi0: spmi@29e114000 {
+		compatible = "apple,t6020-spmi", "apple,spmi";
+		reg = <0x2 0x9e114000 0x0 0x100>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 256 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 724 IRQ_TYPE_LEVEL_HIGH>;
+
+		pmu1: pmu@f {
+			compatible = "apple,maverick-pmu", "apple,spmi-pmu";
+			reg = <0xb SPMI_USID>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			rtc_nvmem@1400 {
+				compatible = "apple,spmi-pmu-nvmem";
+				reg = <0x1400 0x20>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				pm_setting: pm-setting@5 {
+					reg = <0x5 0x1>;
+				};
+
+				rtc_offset: rtc-offset@11 {
+					reg = <0x11 0x6>;
+				};
+			};
+
+			legacy_nvmem@6000 {
+				compatible = "apple,spmi-pmu-nvmem";
+				reg = <0x6000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				boot_stage: boot-stage@1 {
+					reg = <0x1 0x1>;
+				};
+
+				boot_error_count: boot-error-count@2 {
+					reg = <0x2 0x1>;
+					bits = <0 4>;
+				};
+
+				panic_count: panic-count@2 {
+					reg = <0x2 0x1>;
+					bits = <4 4>;
+				};
+
+				boot_error_stage: boot-error-stage@3 {
+					reg = <0x3 0x1>;
+				};
+
+				shutdown_flag: shutdown-flag@f {
+					reg = <0xf 0x1>;
+					bits = <3 1>;
+				};
+			};
+
+			scrpad_nvmem@8000 {
+				compatible = "apple,spmi-pmu-nvmem";
+				reg = <0x8000 0x1000>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				fault_shadow: fault-shadow@67b {
+					reg = <0x67b 0x10>;
+				};
+
+				socd: socd@b00 {
+					reg = <0xb00 0x400>;
+				};
+			};
+
+		};
+	};
+
+	smc_mbox: mbox@2a2408000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0xa2408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 862 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 863 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 864 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 865 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+	};
+
+	smc: smc@2a2400000 {
+		compatible = "apple,t6020-smc", "apple,smc";
+		reg = <0x2 0xa2400000 0x0 0x4000>,
+			<0x2 0xa3e00000 0x0 0x100000>;
+		reg-names = "smc", "sram";
+		mboxes = <&smc_mbox>;
+
+		smc_gpio: gpio {
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		smc_rtc: rtc {
+			nvmem-cells = <&rtc_offset>;
+			nvmem-cell-names = "rtc_offset";
+		};
+
+		smc_reboot: reboot {
+			nvmem-cells = <&shutdown_flag>, <&boot_stage>,
+				<&boot_error_count>, <&panic_count>, <&pm_setting>;
+			nvmem-cell-names = "shutdown_flag", "boot_stage",
+				"boot_error_count", "panic_count", "pm_setting";
+		};
+	};
+
+	pinctrl_smc: pinctrl@2a2820000 {
+		compatible = "apple,t6020-pinctrl", "apple,pinctrl";
+		reg = <0x2 0xa2820000 0x0 0x4000>;
+
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&pinctrl_smc 0 0 30>;
+		apple,npins = <30>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 851 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 852 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 853 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 854 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 855 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 856 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 857 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	aop_mbox: mbox@2a6408000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0xa6408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 613 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 614 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 615 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 616 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		status = "disabled";
+	};
+
+	aop_dart: iommu@2a6808000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x2 0xa6808000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 628 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+		apple,dma-range = <0x100 0x0 0x300 0x0>;
+	};
+
+	aop_admac: dma-controller@2a6980000 {
+		/*
+		 * Use "admac2" until commit "dmaengine: apple-admac: Avoid
+		 * accessing registers in probe" has been upstream long enough
+		 * (not yet the case as of 2024-12-30).
+		 */
+		// compatible = "apple,t6020-admac", "apple,admac";
+		compatible = "apple,t6020-admac2", "apple,admac2";
+		reg = <0x2 0xa6980000 0x0 0x34000>;
+		#dma-cells = <1>;
+		dma-channels = <16>;
+		interrupts-extended = <0>,
+				      <0>,
+				      <&aic AIC_IRQ 0 631 IRQ_TYPE_LEVEL_HIGH>,
+				      <0>;
+		iommus = <&aop_dart 10>;
+		status = "disabled";
+	};
+
+	aop: aop@2a6c00000 {
+		compatible = "apple,t6020-aop";
+		reg = <0x2 0xa6c00000 0x0 0x250000>,
+		      <0x2 0xa6400000 0x0 0x6c000>;
+		mboxes = <&aop_mbox>;
+		mbox-names = "mbox";
+		iommus = <&aop_dart 0>;
+
+		/* HACK: ensure probe order */
+		dmas = <&aop_admac 1023>;
+		dma-names = "invalid-order-only";
+
+		status = "disabled";
+
+		aop_audio: audio {
+			dmas = <&aop_admac 1>;
+			dma-names = "dma";
+		};
+
+		aop_als: als {
+			// intentionally empty
+		};
+	};
+
+	mtp: mtp@2a9400000 {
+		compatible = "apple,t6020-mtp", "apple,t6020-rtk-helper-asc4", "apple,mtp", "apple,rtk-helper-asc4";
+		reg = <0x2 0xa9400000 0x0 0x4000>,
+			<0x2 0xa9c00000 0x0 0x100000>;
+		reg-names = "asc", "sram";
+		mboxes = <&mtp_mbox>;
+		iommus = <&mtp_dart 1>;
+		#helper-cells = <0>;
+
+		status = "disabled";
+	};
+
+	mtp_mbox: mbox@2a9408000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0xa9408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 693 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 694 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 695 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 696 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+
+		status = "disabled";
+	};
+
+	mtp_dart: iommu@2a9808000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x2 0xa9808000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 676 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+
+		apple,dma-range = <0x100 0x0 0x1 0x0>;
+
+		status = "disabled";
+	};
+
+	mtp_dockchannel: fifo@2a9b14000 {
+		compatible = "apple,t6020-dockchannel", "apple,dockchannel";
+		reg = <0x2 0xa9b14000 0x0 0x4000>;
+		reg-names = "irq";
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 677 IRQ_TYPE_LEVEL_HIGH>;
+
+		ranges = <0 0x2 0xa9b28000 0x20000>;
+		nonposted-mmio;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		status = "disabled";
+
+		mtp_hid: input@8000 {
+			compatible = "apple,dockchannel-hid";
+			reg = <0x8000 0x4000>,
+				<0xc000 0x4000>,
+				<0x0000 0x4000>,
+				<0x4000 0x4000>;
+			reg-names = "config", "data",
+				"rmt-config", "rmt-data";
+			iommus = <&mtp_dart 1>;
+			interrupt-parent = <&mtp_dockchannel>;
+			interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
+				<3 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+
+			apple,fifo-size = <0x800>;
+			apple,helper-cpu = <&mtp>;
+		};
+
+	};
+
+	isp_dart0: iommu@3860e8000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x860e8000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 574 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>;
+
+		apple,dma-range = <0x100 0x0 0x1 0x0>;
+		status = "disabled";
+	};
+
+	isp_dart1: iommu@3860f4000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x860f4000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 574 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>;
+
+		apple,dma-range = <0x100 0x0 0x1 0x0>;
+		status = "disabled";
+	};
+
+	isp_dart2: iommu@3860fc000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x860fc000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 574 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_sys>;
+
+		apple,dma-range = <0x100 0x0 0x1 0x0>;
+		status = "disabled";
+	};
+
+	isp: isp@384000000 {
+		compatible = "apple,t6020-isp", "apple,isp";
+		iommus = <&isp_dart0 0>, <&isp_dart1 0>, <&isp_dart2 0>;
+		reg-names = "coproc", "mbox", "gpio", "mbox2";
+		reg = <0x3 0x84000000 0x0 0x2000000>,
+			<0x3 0x86104000 0x0 0x100>,
+			<0x3 0x86104170 0x0 0x100>,
+			<0x3 0x861043f0 0x0 0x100>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 569 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_isp_cpu>, <&ps_isp_fe>,
+			<&ps_dprx>, <&ps_isp_vis>, <&ps_isp_be>,
+			<&ps_isp_clr>, <&ps_isp_raw>;
+		apple,dart-vm-size = <0x0 0xa0000000>;
+
+		status = "disabled";
+	};
+
+	disp0_dart: iommu@389304000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x89304000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 911 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+		power-domains = <&ps_disp0_cpu0>;
+		apple,dma-range = <0x100 0x0 0x10 0x0>;
+	};
+
+	dcp_dart: iommu@38930c000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x8930c000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 911 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_disp0_cpu0>;
+		apple,dma-range = <0x100 0x0 0x10 0x0>;
+	};
+
+	dcp_mbox: mbox@389c08000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x89c08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 932 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 933 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 934 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 935 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&ps_disp0_cpu0>;
+	};
+
+	dcp: dcp@389c00000 {
+		compatible = "apple,t6020-dcp", "apple,dcp";
+		mboxes = <&dcp_mbox>;
+		mbox-names = "mbox";
+		iommus = <&dcp_dart 5>;
+
+		reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+		reg = <0x3 0x89c00000 0x0 0x4000>, // check?
+			<0x3 0x88000000 0x0 0x61c000>,
+			<0x3 0x89320000 0x0 0x4000>,
+			<0x3 0x89344000 0x0 0x4000>,
+			<0x3 0x89800000 0x0 0x800000>;
+		apple,bw-scratch = <&pmgr_dcp 0 4 0x1208>;
+		power-domains = <&ps_disp0_cpu0>;
+		resets = <&ps_disp0_cpu0>;
+		clocks = <&clk_disp0>;
+		phandle = <&dcp>;
+		// required bus properties for 'piodma' subdevice
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		disp0_piodma: piodma {
+			iommus = <&disp0_dart 4>;
+			phandle = <&disp0_piodma>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				dcp_audio: endpoint {
+					remote-endpoint = <&dpaudio0_dcp>;
+				};
+			};
+		};
+	};
+
+	display: display-subsystem {
+		compatible = "apple,display-subsystem";
+		iommus = <&disp0_dart 0>;
+		/* generate phandle explicitly for use in loader */
+		phandle = <&display>;
+	};
+
+	sep_dart: iommu@394ac0000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x94ac0000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 582 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	sep: sep@396400000 {
+		compatible = "apple,sep";
+		reg = <0x3 0x96400000 0x0 0x6C000>;
+		mboxes = <&sep_mbox>;
+		mbox-names = "mbox";
+		iommus = <&sep_dart 0>;
+		power-domains = <&ps_sep>;
+		status = "disabled";
+	};
+
+	sep_mbox: mbox@396408000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x96408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 576 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 577 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 578 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 579 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+	};
+
+	fpwm0: pwm@39b030000 {
+		compatible = "apple,t6020-fpwm", "apple,s5l-fpwm";
+		reg = <0x3 0x9b030000 0x0 0x4000>;
+		power-domains = <&ps_fpwm0>;
+		clocks = <&clkref>;
+		#pwm-cells = <2>;
+		status = "disabled";
+	};
+
+	i2c0: i2c@39b040000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b040000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1219 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c0_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c0>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+	};
+
+	i2c1: i2c@39b044000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b044000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1220 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c1_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c1>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c2: i2c@39b048000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b048000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1221 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c2_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c2>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c3: i2c@39b04c000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b04c000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1222 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c3_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c3>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c4: i2c@39b050000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b050000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1223 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c4_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c4>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c5: i2c@39b054000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b054000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1224 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c5_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c5>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c6: i2c@39b054000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b054000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1225 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c6_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c6>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c7: i2c@39b054000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b054000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1226 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c7_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c7>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	i2c8: i2c@39b054000 {
+		compatible = "apple,t6020-i2c", "apple,i2c";
+		reg = <0x3 0x9b054000 0x0 0x4000>;
+		clocks = <&clkref>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1227 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-0 = <&i2c8_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_i2c8>;
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		status = "disabled";
+	};
+
+	spi1: spi@39b104000 {
+		compatible = "apple,t6020-spi", "apple,spi";
+		reg = <0x3 0x9b104000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1206 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clk_200m>;
+		pinctrl-0 = <&spi1_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_spi1>;
+		status = "disabled";
+	};
+
+	spi2: spi@39b108000 {
+		compatible = "apple,t6020-spi", "apple,spi";
+		reg = <0x3 0x9b108000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1207 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clkref>;
+		pinctrl-0 = <&spi2_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_spi2>;
+		status = "disabled";
+	};
+
+	spi4: spi@39b110000 {
+		compatible = "apple,t6020-spi", "apple,spi";
+		reg = <0x3 0x9b110000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1209 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clkref>;
+		pinctrl-0 = <&spi4_pins>;
+		pinctrl-names = "default";
+		power-domains = <&ps_spi4>;
+		status = "disabled";
+	};
+
+	serial0: serial@39b200000 {
+		compatible = "apple,s5l-uart";
+		reg = <0x3 0x9b200000 0x0 0x4000>;
+		reg-io-width = <4>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1198 IRQ_TYPE_LEVEL_HIGH>;
+		/*
+		 * TODO: figure out the clocking properly, there may
+		 * be a third selectable clock.
+		 */
+		clocks = <&clkref>, <&clkref>;
+		clock-names = "uart", "clk_uart_baud0";
+		power-domains = <&ps_uart0>;
+		status = "disabled";
+	};
+
+	admac: dma-controller@39b400000 {
+		compatible = "apple,t6020-admac", "apple,admac";
+		reg = <0x3 0x9b400000 0x0 0x34000>;
+		#dma-cells = <1>;
+		dma-channels = <16>;
+		interrupts-extended = <0>,
+				      <&aic AIC_IRQ 0 1218 IRQ_TYPE_LEVEL_HIGH>,
+				      <0>,
+				      <0>;
+		iommus = <&sio_dart 2>;
+		power-domains = <&ps_sio_adma>;
+		resets = <&ps_audio_p>;
+	};
+
+	dpaudio0: audio-controller@39b500000 {
+		compatible = "apple,t6020-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b500000 0x0 0x4000>;
+		dmas = <&sio 0x64>;
+		dma-names = "tx";
+		power-domains = <&ps_dpa0>;
+		reset-domains = <&ps_dpa0>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				dpaudio0_dcp: endpoint {
+					remote-endpoint = <&dcp_audio>;
+				};
+			};
+		};
+	};
+
+	mca: mca@39b600000 {
+		compatible = "apple,t6020-mca", "apple,mca";
+		reg = <0x3 0x9b600000 0x0 0x10000>,
+		      <0x3 0x9b500000 0x0 0x20000>;
+		clocks = <&nco 0>, <&nco 1>, <&nco 2>, <&nco 3>;
+		dmas = <&admac 0>, <&admac 1>, <&admac 2>, <&admac 3>,
+		       <&admac 4>, <&admac 5>, <&admac 6>, <&admac 7>,
+		       <&admac 8>, <&admac 9>, <&admac 10>, <&admac 11>,
+		       <&admac 12>, <&admac 13>, <&admac 14>, <&admac 15>;
+		dma-names = "tx0a", "rx0a", "tx0b", "rx0b",
+			    "tx1a", "rx1a", "tx1b", "rx1b",
+			    "tx2a", "rx2a", "tx2b", "rx2b",
+			    "tx3a", "rx3a", "tx3b", "rx3b";
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1211 IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_IRQ 0 1212 IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_IRQ 0 1213 IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_IRQ 0 1214 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_audio_p>, <&ps_mca0>, <&ps_mca1>,
+				<&ps_mca2>, <&ps_mca3>;
+		resets = <&ps_audio_p>;
+		#sound-dai-cells = <1>;
+	};
+
+	gpu: gpu@406400000 {
+		compatible = "apple,agx-g14x";
+		reg = <0x4 0x6400000 0 0x40000>,
+			<0x4 0x4000000 0 0x1000000>;
+		reg-names = "asc", "sgx";
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1127 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1128 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1129 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1130 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1147 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1149 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1142 IRQ_TYPE_LEVEL_HIGH>;
+		mboxes = <&agx_mbox>;
+		power-domains = <&ps_gfx>;
+		memory-region = <&uat_ttbs>, <&uat_pagetables>, <&uat_handoff>;
+		memory-region-names = "ttbs", "pagetables", "handoff";
+
+		apple,firmware-version = <0 0 0>;
+		apple,firmware-compat = <0 0 0>;
+
+		operating-points-v2 = <&gpu_opp>;
+		apple,cs-opp = <&gpu_cs_opp>;
+		apple,afr-opp = <&gpu_afr_opp>;
+
+		apple,min-sram-microvolt = <790000>;
+		apple,csafr-min-sram-microvolt = <812000>;
+		apple,perf-base-pstate = <1>;
+
+		apple,avg-power-min-duty-cycle = <40>;
+		apple,avg-power-target-filter-tc = <1>;
+		apple,fast-die0-proportional-gain = <34.0>;
+		apple,perf-boost-ce-step = <50>;
+		apple,perf-boost-min-util = <90>;
+		apple,perf-filter-drop-threshold = <0>;
+		apple,perf-filter-time-constant = <5>;
+		apple,perf-filter-time-constant2 = <200>;
+		apple,perf-integral-gain = <1.62>;
+		apple,perf-integral-gain2 = <1.62>;
+		apple,perf-integral-min-clamp = <0>;
+		apple,perf-proportional-gain2 = <5.4>;
+		apple,perf-proportional-gain = <5.4>;
+		apple,perf-tgt-utilization = <85>;
+		apple,power-sample-period = <8>;
+		apple,ppm-filter-time-constant-ms = <34>;
+		apple,ppm-ki = <18.0>;
+		apple,ppm-kp = <0.1>;
+		apple,pwr-filter-time-constant = <313>;
+		apple,pwr-integral-gain = <0.0202129>;
+		apple,pwr-integral-min-clamp = <0>;
+		apple,pwr-min-duty-cycle = <40>;
+		apple,pwr-proportional-gain = <5.2831855>;
+		apple,pwr-sample-period-aic-clks = <200000>;
+		apple,se-engagement-criteria = <700>;
+		apple,se-filter-time-constant = <9>;
+		apple,se-filter-time-constant-1 = <3>;
+		apple,se-inactive-threshold = <2500>;
+		apple,se-ki = <-50.0>;
+		apple,se-ki-1 = <-100.0>;
+		apple,se-kp = <-5.0>;
+		apple,se-kp-1 = <-10.0>;
+		apple,se-reset-criteria = <50>;
+
+		apple,core-leak-coef = GPU_REPEAT(1200.0);
+		apple,sram-leak-coef = GPU_REPEAT(20.0);
+		apple,cs-leak-coef = GPU_DIE_REPEAT(400.0);
+		apple,afr-leak-coef = GPU_DIE_REPEAT(200.0);
+	};
+
+	agx_mbox: mbox@406408000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x4 0x6408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1143 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1144 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1145 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 0 1146 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+	};
+
+	pcie0: pcie@580000000 {
+		compatible = "apple,t6020-pcie";
+		device_type = "pci";
+
+		reg = <0x5 0x80000000 0x0 0x1000000>,	/* config */
+			<0x5 0x91000000 0x0 0x4000>,	/* rc */
+			<0x5 0x94008000 0x0 0x4000>,	/* port0 */
+			<0x5 0x95008000 0x0 0x4000>,	/* port1 */
+			<0x5 0x96008000 0x0 0x4000>,	/* port2 */
+			<0x5 0x97008000 0x0 0x4000>,	/* port3 */
+			<0x5 0x9e00c000 0x0 0x4000>,	/* phy0 */
+			<0x5 0x9e010000 0x0 0x4000>,	/* phy1 */
+			<0x5 0x9e014000 0x0 0x4000>,	/* phy2 */
+			<0x5 0x9e018000 0x0 0x4000>,	/* phy3 */
+			<0x5 0x9401c000 0x0 0x1000>,	/* ltssm0 */
+			<0x5 0x9501c000 0x0 0x1000>,	/* ltssm1 */
+			<0x5 0x9601c000 0x0 0x1000>,	/* ltssm2 */
+			<0x5 0x9701c000 0x0 0x1000>;	/* ltssm3 */
+		reg-names = "config", "rc",
+			"port0", "port1", "port2", "port3",
+			"phy0", "phy1", "phy2", "phy3",
+			"ltssm0", "ltssm1", "ltssm2", "ltssm3";
+
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1340 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 1344 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 1348 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 0 1352 IRQ_TYPE_LEVEL_HIGH>;
+
+		msi-controller;
+		msi-parent = <&pcie0>;
+		msi-ranges = <&aic AIC_IRQ 0 1672 IRQ_TYPE_EDGE_RISING 32>;
+
+		iommu-map = <0x100 &pcie0_dart_0 1 1>,
+				<0x200 &pcie0_dart_1 1 1>,
+				<0x300 &pcie0_dart_2 1 1>,
+				<0x400 &pcie0_dart_3 1 1>;
+		iommu-map-mask = <0xff00>;
+
+		bus-range = <0 4>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x43000000 0x5 0xa0000000 0x5 0xa0000000 0x0 0x20000000>,
+				<0x02000000 0x0 0xc0000000 0x5 0xc0000000 0x0 0x40000000>;
+
+		power-domains = <&ps_apcie_gp_sys>;
+		pinctrl-0 = <&pcie_pins>;
+		pinctrl-names = "default";
+
+		dma-coherent;
+
+		port00: pci@0,0 {
+			device_type = "pci";
+			reg = <0x0 0x0 0x0 0x0 0x0>;
+			reset-gpios = <&pinctrl_ap 4 GPIO_ACTIVE_LOW>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &port00 0 0 0 0>,
+					<0 0 0 2 &port00 0 0 0 1>,
+					<0 0 0 3 &port00 0 0 0 2>,
+					<0 0 0 4 &port00 0 0 0 3>;
+		};
+
+		port01: pci@1,0 {
+			device_type = "pci";
+			reg = <0x800 0x0 0x0 0x0 0x0>;
+			reset-gpios = <&pinctrl_ap 5 GPIO_ACTIVE_LOW>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &port01 0 0 0 0>,
+					<0 0 0 2 &port01 0 0 0 1>,
+					<0 0 0 3 &port01 0 0 0 2>,
+					<0 0 0 4 &port01 0 0 0 3>;
+			status = "disabled";
+		};
+
+		port02: pci@2,0 {
+			device_type = "pci";
+			reg = <0x1000 0x0 0x0 0x0 0x0>;
+			reset-gpios = <&pinctrl_ap 6 GPIO_ACTIVE_LOW>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &port02 0 0 0 0>,
+					<0 0 0 2 &port02 0 0 0 1>,
+					<0 0 0 3 &port02 0 0 0 2>,
+					<0 0 0 4 &port02 0 0 0 3>;
+			status = "disabled";
+		};
+
+		port03: pci@3,0 {
+			device_type = "pci";
+			reg = <0x1800 0x0 0x0 0x0 0x0>;
+			reset-gpios = <&pinctrl_ap 7 GPIO_ACTIVE_LOW>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &port03 0 0 0 0>,
+					<0 0 0 2 &port03 0 0 0 1>,
+					<0 0 0 3 &port03 0 0 0 2>,
+					<0 0 0 4 &port03 0 0 0 3>;
+			status = "disabled";
+		};
+	};
+
+	pcie0_dart_0: iommu@594000000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x5 0x94000000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1341 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_apcie_gp_sys>;
+	};
+
+	pcie0_dart_1: iommu@595000000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x5 0x95000000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1345 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_apcie_gp_sys>;
+		status = "disabled";
+	};
+
+	pcie0_dart_2: iommu@596000000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x5 0x96000000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1349 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_apcie_gp_sys>;
+		status = "disabled";
+	};
+
+	pcie0_dart_3: iommu@597000000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x5 0x97000000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ 0 1353 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&ps_apcie_gp_sys>;
+		status = "disabled";
+	};
diff --git a/arch/arm64/boot/dts/apple/t602x-dieX.dtsi b/arch/arm64/boot/dts/apple/t602x-dieX.dtsi
new file mode 100644
index 00000000000000..10c29e8e417aaf
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-dieX.dtsi
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Nodes present on both dies of a T6022 (M2 Ultra) and also present on the
+ * single-die T6020/T6021 (M2 Pro/Max).
+ *
+ * Copyright The Asahi Linux Contributors
+ */
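+
+/*
+ * Usage sketch (an assumption, following the t600x multi-die include pattern):
+ * the including .dts is expected to define DIE and DIE_NO and to provide the
+ * DIE_NODE()/DIE_LABEL() concatenation helpers (multi-die-cpp.h for t600x)
+ * before pulling this file in once per die, roughly:
+ *
+ *   #define DIE
+ *   #define DIE_NO 0
+ *
+ *   &{/soc} {
+ *     #include "t602x-dieX.dtsi"
+ *   };
+ *
+ *   #undef DIE
+ *   #undef DIE_NO
+ *
+ *   #define DIE _die1
+ *   #define DIE_NO 1
+ *
+ *   &{/soc} {
+ *     #include "t602x-dieX.dtsi"
+ *   };
+ */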
+
+	DIE_NODE(cpufreq_e): cpufreq@210e20000 {
+		compatible = "apple,t6020-cluster-cpufreq", "apple,t8112-cluster-cpufreq", "apple,cluster-cpufreq";
+		reg = <0x2 0x10e20000 0 0x1000>;
+		#performance-domain-cells = <0>;
+	};
+
+	DIE_NODE(cpufreq_p0): cpufreq@211e20000 {
+		compatible = "apple,t6020-cluster-cpufreq", "apple,t8112-cluster-cpufreq", "apple,cluster-cpufreq";
+		reg = <0x2 0x11e20000 0 0x1000>;
+		#performance-domain-cells = <0>;
+	};
+
+	DIE_NODE(cpufreq_p1): cpufreq@212e20000 {
+		compatible = "apple,t6020-cluster-cpufreq", "apple,t8112-cluster-cpufreq", "apple,cluster-cpufreq";
+		reg = <0x2 0x12e20000 0 0x1000>;
+		#performance-domain-cells = <0>;
+	};
+
+	DIE_NODE(dispext0_dart): iommu@289304000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x2 0x89304000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 950 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		apple,dma-range = <0x100 0x0 0x10 0x0>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext0_dart): iommu@28930c000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x2 0x8930c000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 950 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		apple,dma-range = <0x100 0x0 0x10 0x0>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext0_mbox): mbox@289c08000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x2 0x89c08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 971 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 972 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 973 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 974 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext0_cpu0)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext0): dcp@289c00000 {
+		compatible = "apple,t6020-dcpext", "apple,dcpext";
+		mboxes = <&DIE_NODE(dcpext0_mbox)>;
+		mbox-names = "mbox";
+		iommus = <&DIE_NODE(dcpext0_dart) 5>;
+
+		reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+		reg = <0x2 0x89c00000 0x0 0x4000>,
+			<0x2 0x88000000 0x0 0x4000000>,
+			<0x2 0x89320000 0x0 0x4000>,
+			<0x2 0x89344000 0x0 0x4000>,
+			<0x2 0x89800000 0x0 0x800000>;
+		apple,bw-scratch = <&pmgr_dcp 0 4 0x1210>;
+		power-domains = <&DIE_NODE(ps_dispext0_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext0_cpu0)>;
+		clocks = <&DIE_NODE(clk_dispext0)>;
+		phandle = <&DIE_NODE(dcpext0)>;
+		apple,dcp-index = <1>;
+		status = "disabled";
+		// required bus properties for 'piodma' subdevice
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		piodma {
+			iommus = <&DIE_NODE(dispext0_dart) 4>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dcpext0_audio): endpoint {
+					remote-endpoint = <&DIE_NODE(dpaudio1_dcp)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(pmgr): power-management@28e080000 {
+		compatible = "apple,t6020-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2 0x8e080000 0 0x8000>;
+	};
+
+	DIE_NODE(pmgr_south): power-management@28e680000 {
+		compatible = "apple,t6020-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2 0x8e680000 0 0x8000>;
+	};
+
+	DIE_NODE(pmgr_east): power-management@290280000 {
+		compatible = "apple,t6020-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2 0x90280000 0 0xc000>;
+	};
+
+	DIE_NODE(pinctrl_nub): pinctrl@29e1f0000 {
+		compatible = "apple,t6000-pinctrl", "apple,pinctrl";
+		reg = <0x2 0x9e1f0000 0x0 0x4000>;
+		power-domains = <&DIE_NODE(ps_nub_gpio)>;
+
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&DIE_NODE(pinctrl_nub) 0 0 30>;
+		apple,npins = <30>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 711 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 712 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 713 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 714 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 715 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 716 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 717 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	DIE_NODE(pmgr_mini): power-management@29e280000 {
+		compatible = "apple,t6000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2 0x9e280000 0 0x4000>;
+	};
+
+	DIE_NODE(efuse): efuse@29e2cc000 {
+		compatible = "apple,t6020-efuses", "apple,efuses";
+		reg = <0x2 0x9e2cc000 0x0 0x2000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+
+	DIE_NODE(pinctrl_aop): pinctrl@2a6820000 {
+		compatible = "apple,t6020-pinctrl", "apple,pinctrl";
+		reg = <0x2 0xa6820000 0x0 0x4000>;
+
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&DIE_NODE(pinctrl_aop) 0 0 72>;
+		apple,npins = <72>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 598 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 599 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 600 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 601 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 602 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 603 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 604 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	DIE_NODE(dispext1_dart): iommu@315304000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x15304000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 986 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		apple,dma-range = <0x100 0x0 0x10 0x0>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext1_dart): iommu@31530c000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x1530c000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 986 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		apple,dma-range = <0x100 0x0 0x10 0x0>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext1_mbox): mbox@315c08000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x15c08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1007 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1008 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1009 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1010 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext1_cpu0)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dcpext1): dcp@315c00000 {
+		compatible = "apple,t6020-dcpext", "apple,dcpext";
+		mboxes = <&DIE_NODE(dcpext1_mbox)>;
+		mbox-names = "mbox";
+		iommus = <&DIE_NODE(dcpext1_dart) 5>;
+
+		reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+		reg = <0x3 0x15c00000 0x0 0x4000>,
+			<0x3 0x14000000 0x0 0x4000000>,
+			<0x3 0x15320000 0x0 0x4000>,
+			<0x3 0x15344000 0x0 0x4000>,
+			<0x3 0x15800000 0x0 0x800000>;
+		apple,bw-scratch = <&pmgr_dcp 0 4 0x1218>;
+		power-domains = <&DIE_NODE(ps_dispext1_cpu0)>;
+		resets = <&DIE_NODE(ps_dispext1_cpu0)>;
+		clocks = <&DIE_NODE(clk_dispext1)>;
+		phandle = <&DIE_NODE(dcpext1)>;
+		apple,dcp-index = <2>;
+		status = "disabled";
+		// required bus properties for 'piodma' subdevice
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		piodma {
+			iommus = <&DIE_NODE(dispext1_dart) 4>;
+		};
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dcpext1_audio): endpoint {
+					remote-endpoint = <&DIE_NODE(dpaudio2_dcp)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(sio_dart): iommu@39b008000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x3 0x9b008000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1231 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		power-domains = <&DIE_NODE(ps_sio)>;
+		//apple,dma-range = <0x100 0x0001c000 0x2ff 0xfffe4000>;
+	};
+
+	DIE_NODE(pinctrl_ap): pinctrl@39b028000 {
+		compatible = "apple,t6020-pinctrl", "apple,pinctrl";
+		reg = <0x3 0x9b028000 0x0 0x4000>;
+
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 458 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 459 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 460 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 461 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 462 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 463 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ DIE_NO 464 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&clkref>;
+		power-domains = <&DIE_NODE(ps_gpio)>;
+
+		gpio-controller;
+		#gpio-cells = <2>;
+		gpio-ranges = <&DIE_NODE(pinctrl_ap) 0 0 255>;
+		apple,npins = <255>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	DIE_NODE(sio_mbox): mbox@39bc08000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x9bc08000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1248 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1249 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1250 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1251 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		#mbox-cells = <0>;
+		power-domains = <&DIE_NODE(ps_sio_cpu)>;
+	};
+
+	DIE_NODE(sio): sio@39bc00000 {
+		compatible = "apple,t6020-sio", "apple,sio";
+		reg = <0x3 0x9bc00000 0x0 0x8000>;
+		dma-channels = <128>;
+		#dma-cells = <1>;
+		mboxes = <&DIE_NODE(sio_mbox)>;
+		iommus = <&DIE_NODE(sio_dart) 0>;
+		power-domains = <&DIE_NODE(ps_sio_cpu)>;
+		resets = <&DIE_NODE(ps_sio_cpu)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dpaudio1): audio-controller@39b504000 {
+		compatible = "apple,t6020-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b540000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x66>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa1)>;
+		reset-domains = <&DIE_NODE(ps_dpa1)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio1_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext0_audio)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(dpaudio2): audio-controller@39b508000 {
+		compatible = "apple,t6020-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b580000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x68>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa2)>;
+		reset-domains = <&DIE_NODE(ps_dpa2)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio2_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext1_audio)>;
+				};
+			};
+		};
+	};
+
+	/*
+	 * omit dpaudio3 / 4 as long as the linked dcpext nodes don't exist
+	 *
+	DIE_NODE(dpaudio3): audio-controller@39b50c000 {
+		compatible = "apple,t6020-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b5c0000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x6a>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa3)>;
+		reset-domains = <&DIE_NODE(ps_dpa3)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio3_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext2_audio)>;
+				};
+			};
+		};
+	};
+
+	DIE_NODE(dpaudio4): audio-controller@39b510000 {
+		compatible = "apple,t6020-dpaudio", "apple,dpaudio";
+		reg = <0x3 0x9b500000 0x0 0x4000>;
+		dmas = <&DIE_NODE(sio) 0x6c>;
+		dma-names = "tx";
+		power-domains = <&DIE_NODE(ps_dpa4)>;
+		reset-domains = <&DIE_NODE(ps_dpa4)>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				DIE_NODE(dpaudio4_dcp): endpoint {
+					remote-endpoint = <&DIE_NODE(dcpext3_audio)>;
+				};
+			};
+		};
+	};
+	*/
+
+	DIE_NODE(lpdptxphy): phy@39c000000 {
+		compatible = "apple,t6020-dptx-phy", "apple,dptx-phy";
+		reg = <0x3 0x9c000000 0x0 0x4000>,
+			<0x3 0x9c040000 0x0 0xc000>;
+		reg-names = "core", "dptx";
+		power-domains = <&DIE_NODE(ps_dptx_phy_ps)>;
+		#phy-cells = <0>;
+		#reset-cells = <0>;
+		status = "disabled"; /* only exposed on desktop devices */
+	};
+
+	DIE_NODE(pmgr_gfx): power-management@404e80000 {
+		compatible = "apple,t6020-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		reg = <0x4 0x4e80000 0 0x4000>;
+	};
+
+	DIE_NODE(dwc3_0_dart_0): iommu@702f00000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x7 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1260 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_0_dart_1): iommu@702f80000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x7 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1260 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_0): usb@702280000 {
+		compatible = "apple,t6020-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0x7 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1256 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_0_dart_0) 0>,
+			<&DIE_NODE(dwc3_0_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy0)>;
+		phys = <&DIE_NODE(atcphy0) PHY_TYPE_USB2>, <&DIE_NODE(atcphy0) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy0): phy@703000000 {
+		compatible = "apple,t6020-atcphy", "apple,t8103-atcphy";
+		reg = <0x7 0x03000000 0x0 0x4c000>,
+			<0x7 0x03050000 0x0 0x8000>,
+			<0x7 0x00000000 0x0 0x4000>,
+			<0x7 0x02a90000 0x0 0x4000>,
+			<0x7 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+	};
+
+	DIE_NODE(atcphy0_xbar): mux@70304c000 {
+		compatible = "apple,t6020-display-crossbar";
+		reg = <0x7 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc0_usb)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dwc3_1_dart_0): iommu@b02f00000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0xb 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1278 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_1_dart_1): iommu@b02f80000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0xb 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1278 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_1): usb@b02280000 {
+		compatible = "apple,t6020-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0xb 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1274 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_1_dart_0) 0>,
+			<&DIE_NODE(dwc3_1_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy1)>;
+		phys = <&DIE_NODE(atcphy1) PHY_TYPE_USB2>, <&DIE_NODE(atcphy1) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy1): phy@b03000000 {
+		compatible = "apple,t6020-atcphy", "apple,t8103-atcphy";
+		reg = <0xb 0x03000000 0x0 0x4c000>,
+			<0xb 0x03050000 0x0 0x8000>,
+			<0xb 0x00000000 0x0 0x4000>,
+			<0xb 0x02a90000 0x0 0x4000>,
+			<0xb 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+	};
+
+	DIE_NODE(atcphy1_xbar): mux@b0304c000 {
+		compatible = "apple,t6020-display-crossbar";
+		reg = <0xb 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc1_usb)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dwc3_2_dart_0): iommu@f02f00000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0xf 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1296 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_2_dart_1): iommu@f02f80000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0xf 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1296 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_2): usb@f02280000 {
+		compatible = "apple,t6020-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0xf 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1292 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_2_dart_0) 0>,
+			<&DIE_NODE(dwc3_2_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy2)>;
+		phys = <&DIE_NODE(atcphy2) PHY_TYPE_USB2>, <&DIE_NODE(atcphy2) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy2): phy@f03000000 {
+		compatible = "apple,t6020-atcphy", "apple,t8103-atcphy";
+		reg = <0xf 0x03000000 0x0 0x4c000>,
+			<0xf 0x03050000 0x0 0x8000>,
+			<0xf 0x00000000 0x0 0x4000>,
+			<0xf 0x02a90000 0x0 0x4000>,
+			<0xf 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+	};
+
+	DIE_NODE(atcphy2_xbar): mux@f0304c000 {
+		compatible = "apple,t6020-display-crossbar";
+		reg = <0xf 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc2_usb)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(dwc3_3_dart_0): iommu@1302f00000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x13 0x02f00000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1314 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_3_dart_1): iommu@1302f80000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x13 0x02f80000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1314 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		#iommu-cells = <1>;
+	};
+
+	DIE_NODE(dwc3_3): usb@1302280000 {
+		compatible = "apple,t6020-dwc3", "apple,dwc3", "snps,dwc3";
+		reg = <0x13 0x02280000 0x0 0x100000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1310 IRQ_TYPE_LEVEL_HIGH>;
+		dr_mode = "otg";
+		usb-role-switch;
+		role-switch-default-mode = "host";
+		iommus = <&DIE_NODE(dwc3_3_dart_0) 0>,
+			<&DIE_NODE(dwc3_3_dart_1) 1>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		dma-coherent;
+		resets = <&DIE_NODE(atcphy3)>;
+		phys = <&DIE_NODE(atcphy3) PHY_TYPE_USB2>, <&DIE_NODE(atcphy3) PHY_TYPE_USB3>;
+		phy-names = "usb2-phy", "usb3-phy";
+	};
+
+	DIE_NODE(atcphy3): phy@1303000000 {
+		compatible = "apple,t6020-atcphy", "apple,t8103-atcphy";
+		reg = <0x13 0x03000000 0x0 0x4c000>,
+			<0x13 0x03050000 0x0 0x8000>,
+			<0x13 0x00000000 0x0 0x4000>,
+			<0x13 0x02a90000 0x0 0x4000>,
+			<0x13 0x02a84000 0x0 0x4000>;
+		reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+			"pipehandler";
+
+		#phy-cells = <1>;
+		#reset-cells = <0>;
+
+		orientation-switch;
+		mode-switch;
+		svid = <0xff01>, <0x8087>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+	};
+
+	DIE_NODE(atcphy3_xbar): mux@130304c000 {
+		compatible = "apple,t6020-display-crossbar";
+		reg = <0x13 0x0304c000 0x0 0x4000>;
+		#mux-control-cells = <1>;
+		power-domains = <&DIE_NODE(ps_atc3_usb)>;
+		status = "disabled";
+	};
diff --git a/arch/arm64/boot/dts/apple/t602x-gpio-pins.dtsi b/arch/arm64/boot/dts/apple/t602x-gpio-pins.dtsi
new file mode 100644
index 00000000000000..9b24832ba26abe
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-gpio-pins.dtsi
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * GPIO pin mappings for Apple T602x SoCs.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+&pinctrl_ap {
+	i2c0_pins: i2c0-pins {
+		pinmux = <APPLE_PINMUX(63, 1)>,
+			<APPLE_PINMUX(64, 1)>;
+	};
+
+	i2c1_pins: i2c1-pins {
+		pinmux = <APPLE_PINMUX(65, 1)>,
+			<APPLE_PINMUX(66, 1)>;
+	};
+
+	i2c2_pins: i2c2-pins {
+		pinmux = <APPLE_PINMUX(67, 1)>,
+			<APPLE_PINMUX(68, 1)>;
+	};
+
+	i2c3_pins: i2c3-pins {
+		pinmux = <APPLE_PINMUX(69, 1)>,
+			<APPLE_PINMUX(70, 1)>;
+	};
+
+	i2c4_pins: i2c4-pins {
+		pinmux = <APPLE_PINMUX(71, 1)>,
+			<APPLE_PINMUX(72, 1)>;
+	};
+
+	i2c5_pins: i2c5-pins {
+		pinmux = <APPLE_PINMUX(73, 1)>,
+			<APPLE_PINMUX(74, 1)>;
+	};
+
+	i2c6_pins: i2c6-pins {
+		pinmux = <APPLE_PINMUX(75, 1)>,
+			<APPLE_PINMUX(76, 1)>;
+	};
+
+	i2c7_pins: i2c7-pins {
+		pinmux = <APPLE_PINMUX(77, 1)>,
+			<APPLE_PINMUX(78, 1)>;
+	};
+
+	i2c8_pins: i2c8-pins {
+		pinmux = <APPLE_PINMUX(79, 1)>,
+			<APPLE_PINMUX(80, 1)>;
+	};
+
+	spi1_pins: spi1-pins {
+		pinmux = <APPLE_PINMUX(155, 1)>, /* SDI */
+			<APPLE_PINMUX(156, 1)>,  /* SDO */
+			<APPLE_PINMUX(157, 1)>,  /* SCK */
+			<APPLE_PINMUX(158, 1)>;  /* CS */
+	};
+
+	spi2_pins: spi2-pins {
+		pinmux = <APPLE_PINMUX(159, 1)>, /* SDI */
+			<APPLE_PINMUX(160, 1)>,  /* SDO */
+			<APPLE_PINMUX(161, 1)>,  /* SCK */
+			<APPLE_PINMUX(162, 1)>;  /* CS */
+	};
+
+	spi4_pins: spi4-pins {
+		pinmux = <APPLE_PINMUX(167, 1)>, /* SDI */
+			<APPLE_PINMUX(168, 1)>,  /* SDO */
+			<APPLE_PINMUX(169, 1)>,  /* SCK */
+			<APPLE_PINMUX(170, 1)>;  /* CS */
+	};
+
+	pcie_pins: pcie-pins {
+		pinmux = <APPLE_PINMUX(0, 1)>,
+				<APPLE_PINMUX(1, 1)>,
+				<APPLE_PINMUX(2, 1)>,
+				<APPLE_PINMUX(3, 1)>;
+	};
+
+	pcie_ge_pins: pcie-ge-pins {
+		pinmux = <APPLE_PINMUX(8, 1)>;
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t602x-j414-j416.dtsi b/arch/arm64/boot/dts/apple/t602x-j414-j416.dtsi
new file mode 100644
index 00000000000000..6e8df7750d2a43
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-j414-j416.dtsi
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * MacBook Pro (14/16-inch, 2023)
+ *
+ * This file contains the parts common to J414 and J416 devices with both t6020 and t6021.
+ *
+ * target-type: J414s / J414c / J416s / J416c
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/*
+ * These models are essentially identical to the previous generation, other than
+ * the GPIO indices.
+ */
+
+#define NO_SPI_TRACKPAD
+#include "t600x-j314-j316.dtsi"
+
+/ {
+	aliases {
+		keyboard = &keyboard;
+	};
+};
+
+/*
+ * HACK: keep the dptx_phy_ps power domain always-on; it is unclear how to
+ * sequence it with dcp for the integrated display.
+ */
+&ps_dptx_phy_ps {
+	apple,always-on;
+};
+
+&dcpext0 {
+	/* HDMI HPD GPIO, used as an interrupt */
+	hdmi-hpd-gpios = <&pinctrl_aop 25 GPIO_ACTIVE_HIGH>;
+};
+
+&hpm0 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&hpm1 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&hpm2 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&hpm5 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+/* Redefine GPIO for SDZ */
+&speaker_sdz {
+	gpios = <&pinctrl_ap 57 GPIO_ACTIVE_HIGH>;
+};
+
+&speaker_left_tweet {
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&speaker_left_woof1 {
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&speaker_left_woof2 {
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&speaker_right_tweet {
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&speaker_right_woof1 {
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&speaker_right_woof2 {
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&jack_codec {
+	reset-gpios = <&pinctrl_nub 8 GPIO_ACTIVE_HIGH>;
+	interrupts-extended = <&pinctrl_ap 59 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&wifi0 {
+	compatible = "pci14e4,4434";
+};
+
+&bluetooth0 {
+	compatible = "pci14e4,5f72";
+};
+
+&port01 {
+	pwren-gpios = <&smc_gpio 22 GPIO_ACTIVE_HIGH>;
+};
+
+&ps_mtp_fabric {
+	status = "okay";
+};
+
+&mtp {
+	status = "okay";
+};
+
+&mtp_mbox {
+	status = "okay";
+};
+
+&mtp_dart {
+	status = "okay";
+};
+
+&mtp_dockchannel {
+	status = "okay";
+};
+
+&mtp_hid {
+	apple,afe-reset-gpios = <&smc_gpio 25 GPIO_ACTIVE_LOW>;
+	apple,stm-reset-gpios = <&smc_gpio 26 GPIO_ACTIVE_LOW>;
+
+	mtp_mt: multi-touch {
+	};
+
+	keyboard: keyboard {
+		hid-country-code = <0>;
+		apple,keyboard-layout-id = <0>;
+	};
+
+	stm {
+	};
+
+	actuator {
+	};
+
+	tp_accel {
+	};
+};
+
+&isp {
+	apple,platform-id = <7>;
+	/delete-node/ sensor-presets; /* j31[46] presets are overridden below */
+};
+
+#include "isp-imx558-cfg0.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t602x-j474-j475.dtsi b/arch/arm64/boot/dts/apple/t602x-j474-j475.dtsi
new file mode 100644
index 00000000000000..c0c6eff3159839
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-j474-j475.dtsi
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Mac mini (M2 Pro, 2023) and Mac Studio (2023)
+ *
+ * This file contains the parts common to J474 and J475 devices with t6020,
+ * t6021 and t6022.
+ *
+ * target-type: J474s / J475c / J475d
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/*
+ * These models are very similar to the previous generation Mac Studio, other than
+ * the GPIO indices.
+ */
+
+#include "t600x-j375.dtsi"
+
+&framebuffer0 {
+	power-domains = <&ps_disp0_cpu0>, <&ps_dptx_phy_ps>;
+};
+
+&hpm0 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&hpm1 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&hpm2 {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&hpm3  {
+	interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+};
+
+/* PCIe devices */
+&port00 {
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
+};
+
+#ifndef NO_PCIE_SDHC
+&port01 {
+	pwren-gpios = <&smc_gpio 22 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&pcie0_dart_1 {
+	status = "okay";
+};
+#endif
+
+&port03 {
+	/* USB xHCI */
+	pwren-gpios = <&smc_gpio 19 GPIO_ACTIVE_HIGH>;
+};
+
+&speaker {
+	shutdown-gpios = <&pinctrl_ap 57 GPIO_ACTIVE_HIGH>;
+	interrupts-extended = <&pinctrl_ap 58 IRQ_TYPE_LEVEL_LOW>;
+};
+
+&jack_codec {
+	reset-gpios = <&pinctrl_nub 8 GPIO_ACTIVE_HIGH>;
+	interrupts-extended = <&pinctrl_ap 59 IRQ_TYPE_LEVEL_LOW>;
+};
diff --git a/arch/arm64/boot/dts/apple/t602x-nvme.dtsi b/arch/arm64/boot/dts/apple/t602x-nvme.dtsi
new file mode 100644
index 00000000000000..756a971bde48ae
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-nvme.dtsi
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * NVMe related devices for Apple T602x SoCs.
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+	DIE_NODE(ans_mbox): mbox@347408000 {
+		compatible = "apple,t6020-asc-mailbox", "apple,asc-mailbox-v4";
+		reg = <0x3 0x47408000 0x0 0x4000>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1169 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1170 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1171 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ DIE_NO 1172 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+		power-domains = <&DIE_NODE(ps_ans2)>;
+		#mbox-cells = <0>;
+	};
+
+	DIE_NODE(sart): sart@34bc50000 {
+		compatible = "apple,t6020-sart", "apple,t6000-sart";
+		reg = <0x3 0x4bc50000 0x0 0x10000>;
+		power-domains = <&DIE_NODE(ps_ans2)>;
+	};
+
+	DIE_NODE(nvme): nvme@34bcc0000 {
+		compatible = "apple,t6020-nvme-ans2", "apple,nvme-ans2";
+		reg = <0x3 0x4bcc0000 0x0 0x40000>, <0x3 0x47400000 0x0 0x4000>;
+		reg-names = "nvme", "ans";
+		interrupt-parent = <&aic>;
+		/* The NVME interrupt is always routed to die 0 */
+		interrupts = <AIC_IRQ 0 1832 IRQ_TYPE_LEVEL_HIGH>;
+		mboxes = <&DIE_NODE(ans_mbox)>;
+		apple,sart = <&DIE_NODE(sart)>;
+		power-domains = <&DIE_NODE(ps_ans2)>,
+			<&DIE_NODE(ps_apcie_st_sys)>,
+			<&DIE_NODE(ps_apcie_st1_sys)>;
+		power-domain-names = "ans", "apcie0", "apcie1";
+		resets = <&DIE_NODE(ps_ans2)>;
+	};
diff --git a/arch/arm64/boot/dts/apple/t602x-pcie-ge.dtsi b/arch/arm64/boot/dts/apple/t602x-pcie-ge.dtsi
new file mode 100644
index 00000000000000..2fc0afaf54c741
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-pcie-ge.dtsi
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PCIe-GE nodes present on both dies of a T6022 (M2 Ultra) and on M2 Pro/Max,
+ * but only used on the T6022 in the Mac Pro (2023).
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+	DIE_NODE(pcie_ge): pcie@1680000000 {
+		compatible = "apple,t6020-pcie-ge", "apple,t6020-pcie";
+		device_type = "pci";
+
+		reg = <0x16 0x80000000 0x0 0x1000000>,	/* config */
+			<0x16 0x91000000 0x0 0x4000>,	/* rc */
+			<0x16 0x94008000 0x0 0x4000>,	/* port0 */
+			<0x16 0x9e01c000 0x0 0x4000>,	/* phy0 */
+			<0x16 0x9401c000 0x0 0x1000>;	/* ltssm0 */
+		reg-names = "config", "rc", "port0", "phy0", "ltssm0";
+
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1356 IRQ_TYPE_LEVEL_HIGH>;
+
+		msi-controller;
+		msi-parent = <&DIE_NODE(pcie_ge)>;
+		msi-ranges = <&aic AIC_IRQ DIE_NO 1672 IRQ_TYPE_EDGE_RISING 128>;
+
+		iommu-map = <0x000 &DIE_NODE(pcie_ge_dart) 0 0>,
+			    <0x100 &DIE_NODE(pcie_ge_dart) 1 1>,
+			    <0x200 &DIE_NODE(pcie_ge_dart) 2 2>,
+			    <0x300 &DIE_NODE(pcie_ge_dart) 3 3>,
+			    <0x400 &DIE_NODE(pcie_ge_dart) 4 4>,
+			    <0x500 &DIE_NODE(pcie_ge_dart) 5 5>,
+			    <0x600 &DIE_NODE(pcie_ge_dart) 6 6>,
+			    <0x700 &DIE_NODE(pcie_ge_dart) 7 7>,
+			    <0x800 &DIE_NODE(pcie_ge_dart) 8 8>,
+			    <0x900 &DIE_NODE(pcie_ge_dart) 9 9>,
+			    <0xa00 &DIE_NODE(pcie_ge_dart) 10 10>,
+			    <0xb00 &DIE_NODE(pcie_ge_dart) 11 11>,
+			    <0xc00 &DIE_NODE(pcie_ge_dart) 12 12>,
+			    <0xd00 &DIE_NODE(pcie_ge_dart) 13 13>,
+			    <0xe00 &DIE_NODE(pcie_ge_dart) 14 14>;
+		iommu-map-mask = <0xff00>;
+
+		bus-range = <0 15>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x43000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>,
+				<0x02000000 0x0 0x80000000 0x17 0x80000000 0x0 0x80000000>;
+
+		power-domains = <&DIE_NODE(ps_apcie_ge_sys)>;
+		pinctrl-0 = <&DIE_NODE(pcie_ge_pins)>;
+		pinctrl-names = "default";
+
+		dma-coherent;
+
+		status = "disabled";
+
+		DIE_NODE(port_ge00): pci@0,0 {
+			device_type = "pci";
+			reg = <0x0 0x0 0x0 0x0 0x0>;
+			reset-gpios = <&DIE_NODE(pinctrl_ap) 9 GPIO_ACTIVE_LOW>;
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &DIE_NODE(port_ge00) 0 0 0 0>,
+					<0 0 0 2 &DIE_NODE(port_ge00) 0 0 0 1>,
+					<0 0 0 3 &DIE_NODE(port_ge00) 0 0 0 2>,
+					<0 0 0 4 &DIE_NODE(port_ge00) 0 0 0 3>;
+		};
+	};
+
+	DIE_NODE(pcie_ge_dart): iommu@1694000000 {
+		compatible = "apple,t6020-dart", "apple,t8110-dart";
+		reg = <0x16 0x94000000 0x0 0x4000>;
+		#iommu-cells = <1>;
+		interrupt-parent = <&aic>;
+		interrupts = <AIC_IRQ DIE_NO 1357 IRQ_TYPE_LEVEL_HIGH>;
+		power-domains = <&DIE_NODE(ps_apcie_ge_sys)>;
+		status = "disabled";
+	};
diff --git a/arch/arm64/boot/dts/apple/t602x-pmgr.dtsi b/arch/arm64/boot/dts/apple/t602x-pmgr.dtsi
new file mode 100644
index 00000000000000..d97287833f1bf3
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t602x-pmgr.dtsi
@@ -0,0 +1,2274 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T602x "M2 Pro/Max/Ultra" SoCs
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+&DIE_NODE(pmgr) {
+	DIE_NODE(ps_afi): power-controller@100 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afi);
+		apple,always-on; /* Apple Fabric, CPU interface is here */
+	};
+
+	DIE_NODE(ps_aic): power-controller@108 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(aic);
+		apple,always-on; /* Core device */
+	};
+
+	DIE_NODE(ps_dwi): power-controller@110 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dwi);
+	};
+
+	DIE_NODE(ps_pms): power-controller@118 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(pms);
+		apple,always-on; /* Core device */
+	};
+
+	DIE_NODE(ps_gpio): power-controller@120 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(gpio);
+		power-domains = <&DIE_NODE(ps_sio)>, <&DIE_NODE(ps_pms)>;
+	};
+
+	DIE_NODE(ps_soc_dpe): power-controller@128 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(soc_dpe);
+		apple,always-on; /* Core device */
+	};
+
+	DIE_NODE(ps_pms_c1ppt): power-controller@130 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(pms_c1ppt);
+		apple,always-on; /* Core device */
+	};
+
+	DIE_NODE(ps_pmgr_soc_ocla): power-controller@138 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(pmgr_soc_ocla);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_amcc0): power-controller@168 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc0);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_amcc2): power-controller@170 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc2);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_dcs_00): power-controller@178 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_00);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_01): power-controller@180 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_01);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_02): power-controller@188 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_02);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_03): power-controller@190 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_03);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_08): power-controller@198 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_08);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_09): power-controller@1a0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_09);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_10): power-controller@1a8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_10);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_11): power-controller@1b0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_11);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_afnc1_ioa): power-controller@1b8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc1_ioa);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afi)>;
+	};
+
+	DIE_NODE(ps_afc): power-controller@1d0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afc);
+		apple,always-on; /* Apple Fabric, CPU interface is here */
+	};
+
+	DIE_NODE(ps_afnc0_ioa): power-controller@1e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc0_ioa);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afi)>;
+	};
+
+	DIE_NODE(ps_afnc1_ls): power-controller@1f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc1_ls);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc1_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc0_ls): power-controller@1f8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc0_ls);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc0_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc1_lw0): power-controller@200 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc1_lw0);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc1_ls)>;
+	};
+
+	DIE_NODE(ps_afnc1_lw1): power-controller@208 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc1_lw1);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc1_ls)>;
+	};
+
+	DIE_NODE(ps_afnc1_lw2): power-controller@210 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc1_lw2);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc1_ls)>;
+	};
+
+	DIE_NODE(ps_afnc0_lw0): power-controller@218 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc0_lw0);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc0_ls)>;
+	};
+
+	DIE_NODE(ps_scodec): power-controller@220 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(scodec);
+		power-domains = <&DIE_NODE(ps_afnc1_lw0)>;
+	};
+
+	DIE_NODE(ps_atc0_common): power-controller@228 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_common);
+		power-domains = <&DIE_NODE(ps_afnc1_lw1)>;
+	};
+
+	DIE_NODE(ps_atc1_common): power-controller@230 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_common);
+		power-domains = <&DIE_NODE(ps_afnc1_lw1)>;
+	};
+
+	DIE_NODE(ps_atc2_common): power-controller@238 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_common);
+		power-domains = <&DIE_NODE(ps_afnc1_lw1)>;
+	};
+
+	DIE_NODE(ps_atc3_common): power-controller@240 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_common);
+		power-domains = <&DIE_NODE(ps_afnc1_lw1)>;
+	};
+
+	DIE_NODE(ps_dispext1_sys): power-controller@248 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext1_sys);
+		power-domains = <&DIE_NODE(ps_afnc1_lw2)>;
+	};
+
+	DIE_NODE(ps_pms_bridge): power-controller@250 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(pms_bridge);
+		apple,always-on; /* Core device */
+		power-domains = <&DIE_NODE(ps_afnc0_lw0)>;
+	};
+
+	DIE_NODE(ps_dispext0_sys): power-controller@258 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext0_sys);
+		power-domains = <&DIE_NODE(ps_afnc0_lw0)>, <&DIE_NODE(ps_afr)>;
+	};
+
+	DIE_NODE(ps_ane_sys): power-controller@260 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_sys);
+		power-domains = <&DIE_NODE(ps_afnc0_lw0)>;
+	};
+
+	DIE_NODE(ps_avd_sys): power-controller@268 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(avd_sys);
+		power-domains = <&DIE_NODE(ps_afnc0_lw0)>;
+	};
+
+	DIE_NODE(ps_atc0_cio): power-controller@270 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_cio);
+		power-domains = <&DIE_NODE(ps_atc0_common)>;
+	};
+
+	DIE_NODE(ps_atc0_pcie): power-controller@278 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_pcie);
+		power-domains = <&DIE_NODE(ps_atc0_common)>;
+	};
+
+	DIE_NODE(ps_atc1_cio): power-controller@280 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_cio);
+		power-domains = <&DIE_NODE(ps_atc1_common)>;
+	};
+
+	DIE_NODE(ps_atc1_pcie): power-controller@288 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_pcie);
+		power-domains = <&DIE_NODE(ps_atc1_common)>;
+	};
+
+	DIE_NODE(ps_atc2_cio): power-controller@290 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_cio);
+		power-domains = <&DIE_NODE(ps_atc2_common)>;
+	};
+
+	DIE_NODE(ps_atc2_pcie): power-controller@298 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_pcie);
+		power-domains = <&DIE_NODE(ps_atc2_common)>;
+	};
+
+	DIE_NODE(ps_atc3_cio): power-controller@2a0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_cio);
+		power-domains = <&DIE_NODE(ps_atc3_common)>;
+	};
+
+	DIE_NODE(ps_atc3_pcie): power-controller@2a8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_pcie);
+		power-domains = <&DIE_NODE(ps_atc3_common)>;
+	};
+
+	DIE_NODE(ps_dispext1_fe): power-controller@2b0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext1_fe);
+		power-domains = <&DIE_NODE(ps_dispext1_sys)>;
+	};
+
+	DIE_NODE(ps_dispext1_cpu0): power-controller@2b8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext1_cpu0);
+		power-domains = <&DIE_NODE(ps_dispext1_fe)>;
+		apple,min-state = <4>;
+	};
+
+	DIE_NODE(ps_dispext0_fe): power-controller@2c0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext0_fe);
+		power-domains = <&DIE_NODE(ps_dispext0_sys)>;
+	};
+
+#if DIE_NO == 0
+	/* PMP is only present on die 0 of the M2 Ultra */
+	DIE_NODE(ps_pmp): power-controller@2c8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(pmp);
+	};
+#endif
+
+	DIE_NODE(ps_pms_sram): power-controller@2d0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(pms_sram);
+	};
+
+	DIE_NODE(ps_dispext0_cpu0): power-controller@2d8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext0_cpu0);
+		power-domains = <&DIE_NODE(ps_dispext0_fe)>;
+		apple,min-state = <4>;
+	};
+
+	DIE_NODE(ps_ane_cpu): power-controller@2e0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_cpu);
+		power-domains = <&DIE_NODE(ps_ane_sys)>;
+	};
+
+	DIE_NODE(ps_atc0_cio_pcie): power-controller@2e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_cio_pcie);
+		power-domains = <&DIE_NODE(ps_atc0_cio)>;
+	};
+
+	DIE_NODE(ps_atc0_cio_usb): power-controller@2f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_cio_usb);
+		power-domains = <&DIE_NODE(ps_atc0_cio)>;
+	};
+
+	DIE_NODE(ps_atc1_cio_pcie): power-controller@2f8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_cio_pcie);
+		power-domains = <&DIE_NODE(ps_atc1_cio)>;
+	};
+
+	DIE_NODE(ps_atc1_cio_usb): power-controller@300 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_cio_usb);
+		power-domains = <&DIE_NODE(ps_atc1_cio)>;
+	};
+
+	DIE_NODE(ps_atc2_cio_pcie): power-controller@308 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_cio_pcie);
+		power-domains = <&DIE_NODE(ps_atc2_cio)>;
+	};
+
+	DIE_NODE(ps_atc2_cio_usb): power-controller@310 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_cio_usb);
+		power-domains = <&DIE_NODE(ps_atc2_cio)>;
+	};
+
+	DIE_NODE(ps_atc3_cio_pcie): power-controller@318 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_cio_pcie);
+		power-domains = <&DIE_NODE(ps_atc3_cio)>;
+	};
+
+	DIE_NODE(ps_atc3_cio_usb): power-controller@320 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_cio_usb);
+		power-domains = <&DIE_NODE(ps_atc3_cio)>;
+	};
+
+	DIE_NODE(ps_trace_fab): power-controller@390 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x390 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(trace_fab);
+	};
+
+	DIE_NODE(ps_ane_sys_mpm): power-controller@4000 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_sys_mpm);
+		power-domains = <&DIE_NODE(ps_ane_sys)>;
+	};
+
+	DIE_NODE(ps_ane_td): power-controller@4008 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_td);
+		power-domains = <&DIE_NODE(ps_ane_sys)>;
+	};
+
+	DIE_NODE(ps_ane_base): power-controller@4010 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_base);
+		power-domains = <&DIE_NODE(ps_ane_td)>;
+	};
+
+	DIE_NODE(ps_ane_set1): power-controller@4018 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_set1);
+		power-domains = <&DIE_NODE(ps_ane_base)>;
+	};
+
+	DIE_NODE(ps_ane_set2): power-controller@4020 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_set2);
+		power-domains = <&DIE_NODE(ps_ane_set1)>;
+	};
+
+	DIE_NODE(ps_ane_set3): power-controller@4028 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_set3);
+		power-domains = <&DIE_NODE(ps_ane_set2)>;
+	};
+
+	DIE_NODE(ps_ane_set4): power-controller@4030 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ane_set4);
+		power-domains = <&DIE_NODE(ps_ane_set3)>;
+	};
+};
+
+&DIE_NODE(pmgr_south) {
+	DIE_NODE(ps_amcc4): power-controller@100 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc4);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_amcc5): power-controller@108 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc5);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_amcc6): power-controller@110 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc6);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_amcc7): power-controller@118 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc7);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_dcs_16): power-controller@120 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_16);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_17): power-controller@128 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_17);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_18): power-controller@130 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_18);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_19): power-controller@138 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_19);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_20): power-controller@140 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_20);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_21): power-controller@148 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_21);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_22): power-controller@150 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_22);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_23): power-controller@158 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_23);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_24): power-controller@160 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_24);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_25): power-controller@168 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_25);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_26): power-controller@170 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_26);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_27): power-controller@178 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_27);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_28): power-controller@180 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_28);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_29): power-controller@188 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_29);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_30): power-controller@190 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_30);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_31): power-controller@198 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_31);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_afnc4_ioa): power-controller@1a0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc4_ioa);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afi)>;
+	};
+
+	DIE_NODE(ps_afnc4_ls): power-controller@1a8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc4_ls);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc4_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc4_lw0): power-controller@1b0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc4_lw0);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc4_ls)>;
+	};
+
+	DIE_NODE(ps_afnc5_ioa): power-controller@1b8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc5_ioa);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afi)>;
+	};
+
+	DIE_NODE(ps_afnc5_ls): power-controller@1c0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc5_ls);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc5_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc5_lw0): power-controller@1c8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc5_lw0);
+		apple,always-on; /* Apple Fabric */
+		power-domains = <&DIE_NODE(ps_afnc5_ls)>;
+	};
+
+	DIE_NODE(ps_dispext2_sys): power-controller@1d0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext2_sys);
+	};
+
+	DIE_NODE(ps_msr1): power-controller@1d8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(msr1);
+	};
+
+	DIE_NODE(ps_dispext2_fe): power-controller@1e0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext2_fe);
+		power-domains = <&DIE_NODE(ps_dispext2_sys)>;
+	};
+
+	DIE_NODE(ps_dispext2_cpu0): power-controller@1e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext2_cpu0);
+		power-domains = <&DIE_NODE(ps_dispext2_fe)>;
+		apple,min-state = <4>;
+	};
+
+	DIE_NODE(ps_msr1_ase_core): power-controller@1f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(msr1_ase_core);
+		power-domains = <&DIE_NODE(ps_msr1)>;
+	};
+
+	DIE_NODE(ps_dispext3_sys): power-controller@220 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext3_sys);
+	};
+
+	DIE_NODE(ps_venc1_sys): power-controller@228 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc1_sys);
+	};
+
+	DIE_NODE(ps_dispext3_fe): power-controller@230 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext3_fe);
+		power-domains = <&DIE_NODE(ps_dispext3_sys)>;
+	};
+
+	DIE_NODE(ps_dispext3_cpu0): power-controller@238 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dispext3_cpu0);
+		power-domains = <&DIE_NODE(ps_dispext3_fe)>;
+		apple,min-state = <4>;
+	};
+
+	DIE_NODE(ps_venc1_dma): power-controller@4000 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc1_dma);
+		power-domains = <&DIE_NODE(ps_venc1_sys)>;
+	};
+
+	DIE_NODE(ps_venc1_pipe4): power-controller@4008 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc1_pipe4);
+		power-domains = <&DIE_NODE(ps_venc1_dma)>;
+	};
+
+	DIE_NODE(ps_venc1_pipe5): power-controller@4010 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc1_pipe5);
+		power-domains = <&DIE_NODE(ps_venc1_dma)>;
+	};
+
+	DIE_NODE(ps_venc1_me0): power-controller@4018 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc1_me0);
+		power-domains = <&DIE_NODE(ps_venc1_pipe5)>, <&DIE_NODE(ps_venc1_pipe4)>;
+	};
+
+	DIE_NODE(ps_venc1_me1): power-controller@4020 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc1_me1);
+		power-domains = <&DIE_NODE(ps_venc1_me0)>;
+	};
+};
+
+&DIE_NODE(pmgr_east) {
+	DIE_NODE(ps_clvr_spmi0): power-controller@100 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(clvr_spmi0);
+		apple,always-on; /* PCPU voltage regulator interface (used by SMC) */
+	};
+
+	DIE_NODE(ps_clvr_spmi1): power-controller@108 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(clvr_spmi1);
+		apple,always-on; /* GPU voltage regulator interface (used by SMC) */
+	};
+
+	DIE_NODE(ps_clvr_spmi2): power-controller@110 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(clvr_spmi2);
+		apple,always-on; /* ANE, fabric, AFR voltage regulator interface (used by SMC) */
+	};
+
+	DIE_NODE(ps_clvr_spmi3): power-controller@118 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(clvr_spmi3);
+		apple,always-on; /* Additional voltage regulator, probably used on T6021 (SMC) */
+	};
+
+	DIE_NODE(ps_clvr_spmi4): power-controller@120 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(clvr_spmi4);
+		apple,always-on; /* Additional voltage regulator, probably used on T6021 (SMC) */
+	};
+
+	DIE_NODE(ps_ispsens0): power-controller@128 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ispsens0);
+	};
+
+	DIE_NODE(ps_ispsens1): power-controller@130 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ispsens1);
+	};
+
+	DIE_NODE(ps_ispsens2): power-controller@138 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ispsens2);
+	};
+
+	DIE_NODE(ps_ispsens3): power-controller@140 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ispsens3);
+	};
+
+	DIE_NODE(ps_afnc6_ioa): power-controller@148 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc6_ioa);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afi)>;
+	};
+
+	DIE_NODE(ps_afnc6_ls): power-controller@150 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc6_ls);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc6_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc6_lw0): power-controller@158 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc6_lw0);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc6_ls)>;
+	};
+
+	DIE_NODE(ps_afnc2_ioa): power-controller@160 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc2_ioa);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_dcs_10)>;
+	};
+
+	DIE_NODE(ps_afnc2_ls): power-controller@168 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc2_ls);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc2_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc2_lw0): power-controller@170 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc2_lw0);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc2_ls)>;
+	};
+
+	DIE_NODE(ps_afnc2_lw1): power-controller@178 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc2_lw1);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc2_ls)>;
+	};
+
+	DIE_NODE(ps_afnc3_ioa): power-controller@180 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc3_ioa);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afi)>;
+	};
+
+	DIE_NODE(ps_afnc3_ls): power-controller@188 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc3_ls);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc3_ioa)>;
+	};
+
+	DIE_NODE(ps_afnc3_lw0): power-controller@190 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afnc3_lw0);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_afnc3_ls)>;
+	};
+
+	DIE_NODE(ps_apcie_gp): power-controller@198 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_gp);
+		power-domains = <&DIE_NODE(ps_afnc6_lw0)>;
+	};
+
+	DIE_NODE(ps_apcie_st): power-controller@1a0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_st);
+		power-domains = <&DIE_NODE(ps_afnc6_lw0)>;
+	};
+
+	DIE_NODE(ps_ans2): power-controller@1a8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(ans2);
+		power-domains = <&DIE_NODE(ps_afnc6_lw0)>;
+	};
+
+	DIE_NODE(ps_disp0_sys): power-controller@1b0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(disp0_sys);
+		power-domains = <&DIE_NODE(ps_afnc2_lw0)>;
+	};
+
+	DIE_NODE(ps_jpg): power-controller@1b8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(jpg);
+		power-domains = <&DIE_NODE(ps_afnc2_lw0)>;
+	};
+
+	DIE_NODE(ps_sio): power-controller@1c0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sio);
+		power-domains = <&DIE_NODE(ps_afnc2_lw1)>;
+	};
+
+	DIE_NODE(ps_isp_sys): power-controller@1c8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_sys);
+		power-domains = <&DIE_NODE(ps_afnc2_lw1)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(ps_disp0_fe): power-controller@1d0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(disp0_fe);
+		power-domains = <&DIE_NODE(ps_disp0_sys)>;
+	};
+
+	DIE_NODE(ps_disp0_cpu0): power-controller@1d8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(disp0_cpu0);
+		power-domains = <&DIE_NODE(ps_disp0_fe)>;
+		apple,min-state = <4>;
+	};
+
+	DIE_NODE(ps_sio_cpu): power-controller@1e0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sio_cpu);
+		power-domains = <&DIE_NODE(ps_sio)>, <&DIE_NODE(ps_uart_p)>, <&DIE_NODE(ps_spi_p)>, <&DIE_NODE(ps_audio_p)>;
+	};
+
+	DIE_NODE(ps_fpwm0): power-controller@1e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(fpwm0);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_fpwm1): power-controller@1f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(fpwm1);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_fpwm2): power-controller@1f8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x1f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(fpwm2);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c0): power-controller@200 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c0);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c1): power-controller@208 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c1);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c2): power-controller@210 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c2);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c3): power-controller@218 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c3);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c4): power-controller@220 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c4);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c5): power-controller@228 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c5);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c6): power-controller@230 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c6);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c7): power-controller@238 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c7);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_i2c8): power-controller@240 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(i2c8);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_spi_p): power-controller@248 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi_p);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_sio_spmi0): power-controller@250 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sio_spmi0);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_sio_spmi1): power-controller@258 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sio_spmi1);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_sio_spmi2): power-controller@260 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sio_spmi2);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_uart_p): power-controller@268 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart_p);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_audio_p): power-controller@270 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(audio_p);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_sio_adma): power-controller@278 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sio_adma);
+		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_aes): power-controller@280 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(aes);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_dptx_phy_ps): power-controller@288 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dptx_phy_ps);
+		power-domains = <&DIE_NODE(ps_sio)>;
+	};
+
+	DIE_NODE(ps_spi0): power-controller@2d8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi0);
+		power-domains = <&DIE_NODE(ps_spi_p)>;
+	};
+
+	DIE_NODE(ps_spi1): power-controller@2e0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi1);
+		power-domains = <&DIE_NODE(ps_spi_p)>;
+	};
+
+	DIE_NODE(ps_spi2): power-controller@2e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi2);
+		power-domains = <&DIE_NODE(ps_spi_p)>;
+	};
+
+	DIE_NODE(ps_spi3): power-controller@2f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi3);
+		power-domains = <&DIE_NODE(ps_spi_p)>;
+	};
+
+	DIE_NODE(ps_spi4): power-controller@2f8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x2f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi4);
+		power-domains = <&DIE_NODE(ps_spi_p)>;
+	};
+
+	DIE_NODE(ps_spi5): power-controller@300 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(spi5);
+		power-domains = <&DIE_NODE(ps_spi_p)>;
+	};
+
+	DIE_NODE(ps_uart_n): power-controller@308 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart_n);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_uart0): power-controller@310 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart0);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_amcc1): power-controller@318 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc1);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_amcc3): power-controller@320 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(amcc3);
+		apple,always-on; /* Memory controller */
+	};
+
+	DIE_NODE(ps_dcs_04): power-controller@328 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_04);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_05): power-controller@330 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x330 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_05);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_06): power-controller@338 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x338 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_06);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_07): power-controller@340 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x340 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_07);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_12): power-controller@348 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x348 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_12);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_13): power-controller@350 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x350 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_13);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_14): power-controller@358 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x358 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_14);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_dcs_15): power-controller@360 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x360 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dcs_15);
+		apple,always-on; /* LPDDR5 interface */
+	};
+
+	DIE_NODE(ps_uart1): power-controller@368 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x368 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart1);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_uart2): power-controller@370 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x370 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart2);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_uart3): power-controller@378 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x378 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart3);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_uart4): power-controller@380 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x380 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart4);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_uart5): power-controller@388 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x388 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart5);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_uart6): power-controller@390 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x390 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(uart6);
+		power-domains = <&DIE_NODE(ps_uart_p)>;
+	};
+
+	DIE_NODE(ps_mca0): power-controller@398 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x398 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mca0);
+		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
+	};
+
+	DIE_NODE(ps_mca1): power-controller@3a0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mca1);
+		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
+	};
+
+	DIE_NODE(ps_mca2): power-controller@3a8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mca2);
+		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
+	};
+
+	DIE_NODE(ps_mca3): power-controller@3b0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mca3);
+		power-domains = <&DIE_NODE(ps_audio_p)>, <&DIE_NODE(ps_sio_adma)>;
+		apple,externally-clocked;
+	};
+
+	DIE_NODE(ps_dpa0): power-controller@3b8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dpa0);
+		power-domains = <&DIE_NODE(ps_audio_p)>;
+	};
+
+	DIE_NODE(ps_dpa1): power-controller@3c0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dpa1);
+		power-domains = <&DIE_NODE(ps_audio_p)>;
+	};
+
+	DIE_NODE(ps_dpa2): power-controller@3c8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dpa2);
+		power-domains = <&DIE_NODE(ps_audio_p)>;
+	};
+
+	DIE_NODE(ps_dpa3): power-controller@3d0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dpa3);
+		power-domains = <&DIE_NODE(ps_audio_p)>;
+	};
+
+	DIE_NODE(ps_msr0): power-controller@3d8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(msr0);
+	};
+
+	DIE_NODE(ps_venc_sys): power-controller@3e0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc_sys);
+	};
+
+	DIE_NODE(ps_dpa4): power-controller@3e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dpa4);
+		power-domains = <&DIE_NODE(ps_audio_p)>;
+	};
+
+	DIE_NODE(ps_msr0_ase_core): power-controller@3f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(msr0_ase_core);
+		power-domains = <&DIE_NODE(ps_msr0)>;
+	};
+
+	DIE_NODE(ps_apcie_gpshr_sys): power-controller@3f8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x3f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_gpshr_sys);
+		power-domains = <&DIE_NODE(ps_apcie_gp)>;
+	};
+
+	DIE_NODE(ps_apcie_st_sys): power-controller@408 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x408 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_st_sys);
+		power-domains = <&DIE_NODE(ps_apcie_st)>, <&DIE_NODE(ps_ans2)>;
+	};
+
+	DIE_NODE(ps_apcie_st1_sys): power-controller@410 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x410 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_st1_sys);
+		power-domains = <&DIE_NODE(ps_apcie_st_sys)>;
+	};
+
+	DIE_NODE(ps_apcie_gp_sys): power-controller@418 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x418 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_gp_sys);
+		power-domains = <&DIE_NODE(ps_apcie_gpshr_sys)>;
+		apple,always-on; /* Breaks things if shut down */
+	};
+
+	DIE_NODE(ps_apcie_ge_sys): power-controller@420 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x420 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_ge_sys);
+		power-domains = <&DIE_NODE(ps_apcie_gpshr_sys)>;
+	};
+
+	DIE_NODE(ps_apcie_phy_sw): power-controller@428 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x428 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(apcie_phy_sw);
+		apple,always-on; /* macOS does not turn this off */
+	};
+
+	DIE_NODE(ps_sep): power-controller@c00 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xc00 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(sep);
+		apple,always-on; /* Locked on */
+	};
+
+	/*
+	 * There is a dependency tree involved with these PDs,
+	 * but we do not express it here since the ISP driver
+	 * is supposed to sequence them in the right order anyway.
+	 *
+	 * This also works around spurious parent PD activation
+	 * on machines with ISP disabled (desktops), so we don't
+	 * have to enable/disable everything in the per-model DTs.
+	 */
+	DIE_NODE(ps_isp_cpu): power-controller@4000 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_cpu);
+		/* power-domains = <&DIE_NODE(ps_isp_sys)>; */
+		apple,force-disable;
+	};
+
+	DIE_NODE(ps_isp_fe): power-controller@4008 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_fe);
+		/* power-domains = <&DIE_NODE(ps_isp_sys)>; */
+	};
+
+	DIE_NODE(ps_dprx): power-controller@4010 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(dprx);
+		/* power-domains = <&DIE_NODE(ps_isp_sys)>; */
+	};
+
+	DIE_NODE(ps_isp_vis): power-controller@4018 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_vis);
+		/* power-domains = <&DIE_NODE(ps_isp_fe)>; */
+	};
+
+	DIE_NODE(ps_isp_be): power-controller@4020 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_be);
+		/* power-domains = <&DIE_NODE(ps_isp_fe)>; */
+	};
+
+	DIE_NODE(ps_isp_raw): power-controller@4028 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_raw);
+		/* power-domains = <&DIE_NODE(ps_isp_fe)>; */
+	};
+
+	DIE_NODE(ps_isp_clr): power-controller@4030 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(isp_clr);
+		/* power-domains = <&DIE_NODE(ps_isp_be)>; */
+	};
+
+	DIE_NODE(ps_venc_dma): power-controller@8000 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x8000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc_dma);
+		power-domains = <&DIE_NODE(ps_venc_sys)>;
+	};
+
+	DIE_NODE(ps_venc_pipe4): power-controller@8008 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x8008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc_pipe4);
+		power-domains = <&DIE_NODE(ps_venc_dma)>;
+	};
+
+	DIE_NODE(ps_venc_pipe5): power-controller@8010 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x8010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc_pipe5);
+		power-domains = <&DIE_NODE(ps_venc_dma)>;
+	};
+
+	DIE_NODE(ps_venc_me0): power-controller@8018 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x8018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc_me0);
+		power-domains = <&DIE_NODE(ps_venc_pipe5)>, <&DIE_NODE(ps_venc_pipe4)>;
+	};
+
+	DIE_NODE(ps_venc_me1): power-controller@8020 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x8020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(venc_me1);
+		power-domains = <&DIE_NODE(ps_venc_me0)>;
+	};
+
+	DIE_NODE(ps_prores): power-controller@c000 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xc000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(prores);
+		power-domains = <&DIE_NODE(ps_afnc3_lw0)>;
+	};
+};
+
+&DIE_NODE(pmgr_mini) {
+	DIE_NODE(ps_debug): power-controller@58 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x58 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(debug);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_nub_spmi0): power-controller@60 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x60 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(nub_spmi0);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_nub_spmi1): power-controller@68 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x68 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(nub_spmi1);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_nub_aon): power-controller@70 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x70 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(nub_aon);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_msg): power-controller@78 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x78 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(msg);
+		apple,always-on; /* Core AON device? */
+	};
+
+	DIE_NODE(ps_nub_gpio): power-controller@80 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(nub_gpio);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_nub_fabric): power-controller@88 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(nub_fabric);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_atc0_usb_aon): power-controller@90 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x90 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_usb_aon);
+		apple,always-on; /* Needs to stay on for dwc3 to work */
+	};
+
+	DIE_NODE(ps_atc1_usb_aon): power-controller@98 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x98 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_usb_aon);
+		apple,always-on; /* Needs to stay on for dwc3 to work */
+	};
+
+	DIE_NODE(ps_atc2_usb_aon): power-controller@a0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xa0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_usb_aon);
+		apple,always-on; /* Needs to stay on for dwc3 to work */
+	};
+
+	DIE_NODE(ps_atc3_usb_aon): power-controller@a8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xa8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_usb_aon);
+		apple,always-on; /* Needs to stay on for dwc3 to work */
+	};
+
+	DIE_NODE(ps_mtp_fabric): power-controller@b0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xb0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_fabric);
+		apple,always-on;
+		power-domains = <&DIE_NODE(ps_nub_fabric)>;
+		status = "disabled";
+	};
+
+	DIE_NODE(ps_nub_sram): power-controller@b8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xb8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(nub_sram);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_debug_switch): power-controller@c0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xc0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(debug_switch);
+		apple,always-on; /* Core AON device */
+	};
+
+	DIE_NODE(ps_atc0_usb): power-controller@c8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xc8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc0_usb);
+		power-domains = <&DIE_NODE(ps_atc0_common)>;
+	};
+
+	DIE_NODE(ps_atc1_usb): power-controller@d0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xd0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc1_usb);
+		power-domains = <&DIE_NODE(ps_atc1_common)>;
+	};
+
+	DIE_NODE(ps_atc2_usb): power-controller@d8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xd8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc2_usb);
+		power-domains = <&DIE_NODE(ps_atc2_common)>;
+	};
+
+	DIE_NODE(ps_atc3_usb): power-controller@e0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xe0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(atc3_usb);
+		power-domains = <&DIE_NODE(ps_atc3_common)>;
+	};
+
+#if 0
+	/* MTP stuff is self-managed */
+	DIE_NODE(ps_mtp_gpio): power-controller@e8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xe8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_gpio);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_base): power-controller@f0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xf0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_base);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_periph): power-controller@f8 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0xf8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_periph);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_spi0): power-controller@100 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_spi0);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_i2cm0): power-controller@108 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_i2cm0);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_uart0): power-controller@110 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_uart0);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_cpu): power-controller@118 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_cpu);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_fabric)>;
+	};
+
+	DIE_NODE(ps_mtp_scm_fabric): power-controller@120 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_scm_fabric);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_periph)>;
+	};
+
+	DIE_NODE(ps_mtp_sram): power-controller@128 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_sram);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_scm_fabric)>, <&DIE_NODE(ps_mtp_cpu)>;
+	};
+
+	DIE_NODE(ps_mtp_dma): power-controller@130 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(mtp_dma);
+		apple,always-on; /* MTP always stays on */
+		power-domains = <&DIE_NODE(ps_mtp_sram)>;
+	};
+#endif
+};
+
+&DIE_NODE(pmgr_gfx) {
+	DIE_NODE(ps_gpx): power-controller@0 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(gpx);
+		apple,min-state = <4>;
+		apple,always-on;
+	};
+
+	DIE_NODE(ps_afr): power-controller@100 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(afr);
+		/* Apple Fabric, media stuff: this can power down */
+		apple,min-state = <4>;
+	};
+
+	DIE_NODE(ps_gfx): power-controller@108 {
+		compatible = "apple,t6020-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = DIE_LABEL(gfx);
+		power-domains = <&DIE_NODE(ps_afr)>, <&DIE_NODE(ps_gpx)>;
+	};
+};
+
diff --git a/arch/arm64/boot/dts/apple/t7000-6.dtsi b/arch/arm64/boot/dts/apple/t7000-6.dtsi
index f60ea4a4a38716..7048d7383982cd 100644
--- a/arch/arm64/boot/dts/apple/t7000-6.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000-6.dtsi
@@ -48,3 +48,11 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
+
+&typhoon_opp06 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-handheld.dtsi b/arch/arm64/boot/dts/apple/t7000-handheld.dtsi
index 8984c9ec6cc8e3..7b58aa648b53da 100644
--- a/arch/arm64/boot/dts/apple/t7000-handheld.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000-handheld.dtsi
@@ -22,6 +22,10 @@
 	};
 };
 
+&dwi_bl {
+	status = "okay";
+};
+
 &serial0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/apple/t7000-j42d.dts b/arch/arm64/boot/dts/apple/t7000-j42d.dts
index 2231db6a739d48..2ec9e06cc63fae 100644
--- a/arch/arm64/boot/dts/apple/t7000-j42d.dts
+++ b/arch/arm64/boot/dts/apple/t7000-j42d.dts
@@ -20,6 +20,7 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0 &ps_dp>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
@@ -29,3 +30,7 @@
 &serial6 {
 	status = "okay";
 };
+
+&typhoon_opp06 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-mini4.dtsi b/arch/arm64/boot/dts/apple/t7000-mini4.dtsi
index c64ddc402fda25..cc235c5a0c438f 100644
--- a/arch/arm64/boot/dts/apple/t7000-mini4.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000-mini4.dtsi
@@ -49,3 +49,15 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_dp>;
+};
+
+&typhoon_opp06 {
+	status = "okay";
+};
+
+&typhoon_opp07 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-n102.dts b/arch/arm64/boot/dts/apple/t7000-n102.dts
index 9c55d339ba4e14..99eb8a2b8c7340 100644
--- a/arch/arm64/boot/dts/apple/t7000-n102.dts
+++ b/arch/arm64/boot/dts/apple/t7000-n102.dts
@@ -46,3 +46,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-pmgr.dtsi b/arch/arm64/boot/dts/apple/t7000-pmgr.dtsi
new file mode 100644
index 00000000000000..5948fa7afffc90
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t7000-pmgr.dtsi
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T7000 "A8" SoC
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+&pmgr {
+	ps_cpu0: power-controller@20000 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@20008 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@20040 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_p: power-controller@201f8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+	};
+
+	ps_lio: power-controller@20100 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "lio";
+		apple,always-on; /* Core device */
+	};
+
+	ps_iomux: power-controller@20108 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "iomux";
+		apple,always-on; /* Core device */
+	};
+
+	ps_aic: power-controller@20110 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_debug: power-controller@20118 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_dwi: power-controller@20120 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@20128 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_mca0: power-controller@20130 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@20138 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@20140 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@20148 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@20150 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@20158 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@20160 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@20168 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@20170 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@20178 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@20180 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@20188 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@20190 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@20198 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@201a0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@201a8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@201b0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@201b8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@201c0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@201c8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@201d0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@201d8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@201e0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_aes0: power-controller@201e8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aes0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@201f0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_usb: power-controller@20248 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@20250 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@20258 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@20268 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host2: power-controller@20278 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host2";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_disp_busmux: power-controller@202a8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp_busmux";
+	};
+
+	ps_media: power-controller@202d8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp: power-controller@202d0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp";
+	};
+
+	ps_msr: power-controller@202e0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@202e8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0: power-controller@202b0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_disp1: power-controller@202c8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp1";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_pcie_ref: power-controller@20220 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_hsic0_phy: power-controller@20200 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_hsic1_phy: power-controller@20208 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic1_phy";
+		power-domains = <&ps_usb2host2>;
+	};
+
+	ps_ispsens0: power-controller@20210 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens0";
+	};
+
+	ps_ispsens1: power-controller@20218 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens1";
+	};
+
+	ps_mcc: power-controller@20230 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_mcu: power-controller@20238 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcu";
+		apple,always-on; /* Core device */
+	};
+
+	ps_amp: power-controller@20240 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "amp";
+		apple,always-on; /* Core device */
+	};
+
+	ps_usb2host0_ohci: power-controller@20260 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usbotg: power-controller@20288 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@20290 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple Fabric, critical block */
+	};
+
+	ps_sf: power-controller@20298 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple Fabric, critical block */
+	};
+
+	ps_cp: power-controller@202a0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cp";
+		apple,always-on; /* Core device */
+	};
+
+	ps_mipi_dsi: power-controller@202b8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mipi_dsi";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_dp: power-controller@202c0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0>;
+	};
+
+	ps_vdec: power-controller@202f0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec";
+		power-domains = <&ps_media>;
+	};
+
+	ps_ans: power-controller@20318 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ans";
+	};
+
+	ps_venc: power-controller@20300 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pcie: power-controller@20308 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_pcie_aux: power-controller@20310 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_gfx: power-controller@20320 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_sep: power-controller@20400 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_venc_pipe: power-controller@21000 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x21000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe";
+		power-domains = <&ps_venc>;
+	};
+
+	ps_venc_me0: power-controller@21008 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x21008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+		power-domains = <&ps_venc>;
+	};
+
+	ps_venc_me1: power-controller@21010 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x21010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+		power-domains = <&ps_venc>;
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t7000.dtsi b/arch/arm64/boot/dts/apple/t7000.dtsi
index a7cc29e84c8410..85a34dc7bc0108 100644
--- a/arch/arm64/boot/dts/apple/t7000.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000.dtsi
@@ -33,6 +33,8 @@
 			compatible = "apple,typhoon";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			performance-domains = <&cpufreq>;
+			operating-points-v2 = <&typhoon_opp>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -41,11 +43,55 @@
 			compatible = "apple,typhoon";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			performance-domains = <&cpufreq>;
+			operating-points-v2 = <&typhoon_opp>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
 	};
 
+	typhoon_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <300>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-level = <2>;
+			clock-latency-ns = <50000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-level = <3>;
+			clock-latency-ns = <29000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <840000000>;
+			opp-level = <4>;
+			clock-latency-ns = <29000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1128000000>;
+			opp-level = <5>;
+			clock-latency-ns = <36000>;
+		};
+		typhoon_opp06: opp06 {
+			opp-hz = /bits/ 64 <1392000000>;
+			opp-level = <6>;
+			clock-latency-ns = <42000>;
+			status = "disabled"; /* Not available on N102 */
+		};
+		typhoon_opp07: opp07 {
+			opp-hz = /bits/ 64 <1512000000>;
+			opp-level = <7>;
+			clock-latency-ns = <49000>;
+			status = "disabled"; /* J96 and J97 only */
+		};
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -53,6 +99,12 @@
 		nonposted-mmio;
 		ranges;
 
+		cpufreq: performance-controller@202220000 {
+			compatible = "apple,t7000-cluster-cpufreq", "apple,s5l8960x-cluster-cpufreq";
+			reg = <0x2 0x02220000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@20a0c0000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -62,6 +114,7 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
@@ -74,9 +127,18 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart6>;
 			status = "disabled";
 		};
 
+		pmgr: power-management@20e000000 {
+			compatible = "apple,t7000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x24000>;
+		};
+
 		wdt: watchdog@20e027000 {
 			compatible = "apple,t7000-wdt", "apple,wdt";
 			reg = <0x2 0x0e027000 0x0 0x1000>;
@@ -90,11 +152,20 @@
 			reg = <0x2 0x0e100000 0x0 0x100000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
+		};
+
+		dwi_bl: backlight@20e200010 {
+			compatible = "apple,t7000-dwi-bl", "apple,dwi-bl";
+			reg = <0x2 0x0e200010 0x0 0x8>;
+			power-domains = <&ps_dwi>;
+			status = "disabled";
 		};
 
 		pinctrl: pinctrl@20e300000 {
 			compatible = "apple,t7000-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x0e300000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -123,3 +194,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "t7000-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t7001-air2.dtsi b/arch/arm64/boot/dts/apple/t7001-air2.dtsi
index 19fabd425c5280..e4ec8c1977dea5 100644
--- a/arch/arm64/boot/dts/apple/t7001-air2.dtsi
+++ b/arch/arm64/boot/dts/apple/t7001-air2.dtsi
@@ -20,6 +20,7 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0 &ps_dp>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/apple/t7001-pmgr.dtsi b/arch/arm64/boot/dts/apple/t7001-pmgr.dtsi
new file mode 100644
index 00000000000000..7321cfdcd18965
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t7001-pmgr.dtsi
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T7001 "A8X" SoC
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@20000 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@20008 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu2: power-controller@20010 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu2";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@20040 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_p: power-controller@201f8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+	};
+
+	ps_lio: power-controller@20100 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "lio";
+		apple,always-on; /* Core device */
+	};
+
+	ps_iomux: power-controller@20108 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "iomux";
+		apple,always-on; /* Core device */
+	};
+
+	ps_aic: power-controller@20110 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_debug: power-controller@20118 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_dwi: power-controller@20120 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@20128 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_mca0: power-controller@20130 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@20138 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@20140 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@20148 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@20150 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@20158 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@20160 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@20168 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@20170 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@20178 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@20180 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@20188 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@20190 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@20198 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@201a0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@201a8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@201b0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@201b8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@201c0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@201c8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@201d0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@201d8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@201e0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_aes0: power-controller@201e8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aes0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@201f0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x201f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_usb: power-controller@20248 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@20250 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@20258 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@20268 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host2: power-controller@20278 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host2";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_disp_busmux: power-controller@202a8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp_busmux";
+	};
+
+	ps_disp1_busmux: power-controller@202c0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp1_busmux";
+	};
+
+	ps_media: power-controller@202d8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp: power-controller@202d0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp";
+	};
+
+	ps_msr: power-controller@202e0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@202e8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0: power-controller@202b0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0";
+		power-domains = <&ps_disp_busmux>;
+	};
+
+	ps_disp1: power-controller@202c8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp1";
+		power-domains = <&ps_disp1_busmux>;
+	};
+
+	ps_pcie_ref: power-controller@20220 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_hsic0_phy: power-controller@20200 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_hsic1_phy: power-controller@20208 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic1_phy";
+		power-domains = <&ps_usb2host2>;
+	};
+
+	ps_ispsens0: power-controller@20210 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens0";
+	};
+
+	ps_ispsens1: power-controller@20218 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens1";
+	};
+
+	ps_mcc: power-controller@20230 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_mcu: power-controller@20238 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcu";
+		apple,always-on; /* Core device */
+	};
+
+	ps_amp: power-controller@20240 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "amp";
+		apple,always-on; /* Core device */
+	};
+
+	ps_usb2host0_ohci: power-controller@20260 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usbotg: power-controller@20288 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@20290 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@20298 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_cp: power-controller@202a0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cp";
+		apple,always-on; /* Core device */
+	};
+
+	ps_dp: power-controller@202b8 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0>;
+	};
+
+	ps_vdec: power-controller@202f0 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x202f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec";
+		power-domains = <&ps_media>;
+	};
+
+	ps_ans: power-controller@20318 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ans";
+	};
+
+	ps_venc: power-controller@20300 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pcie: power-controller@20308 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_pcie_aux: power-controller@20310 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_gfx: power-controller@20320 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_sep: power-controller@20400 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x20400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_venc_pipe: power-controller@21000 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x21000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe";
+		power-domains = <&ps_venc>;
+	};
+
+	ps_venc_me0: power-controller@21008 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x21008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+		power-domains = <&ps_venc>;
+	};
+
+	ps_venc_me1: power-controller@21010 {
+		compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x21010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+		power-domains = <&ps_venc>;
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t7001.dtsi b/arch/arm64/boot/dts/apple/t7001.dtsi
index a76e034c85e346..8e2c67e19c4167 100644
--- a/arch/arm64/boot/dts/apple/t7001.dtsi
+++ b/arch/arm64/boot/dts/apple/t7001.dtsi
@@ -35,6 +35,8 @@
 			compatible = "apple,typhoon";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			performance-domains = <&cpufreq>;
+			operating-points-v2 = <&typhoon_opp>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -43,6 +45,8 @@
 			compatible = "apple,typhoon";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled in by loader */
+			performance-domains = <&cpufreq>;
+			operating-points-v2 = <&typhoon_opp>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -51,11 +55,53 @@
 			compatible = "apple,typhoon";
 			reg = <0x0 0x2>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq>;
+			operating-points-v2 = <&typhoon_opp>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
 	};
 
+	typhoon_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <300>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-level = <2>;
+			clock-latency-ns = <49000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-level = <3>;
+			clock-latency-ns = <31000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <840000000>;
+			opp-level = <4>;
+			clock-latency-ns = <32000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1128000000>;
+			opp-level = <5>;
+			clock-latency-ns = <32000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1392000000>;
+			opp-level = <6>;
+			clock-latency-ns = <37000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1512000000>;
+			opp-level = <7>;
+			clock-latency-ns = <41000>;
+		};
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -63,6 +109,12 @@
 		nonposted-mmio;
 		ranges;
 
+		cpufreq: performance-controller@202220000 {
+			compatible = "apple,t7000-cluster-cpufreq", "apple,s5l8960x-cluster-cpufreq";
+			reg = <0x2 0x02220000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@20a0c0000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -72,9 +124,18 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
+		pmgr: power-management@20e000000 {
+			compatible = "apple,t7000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x24000>;
+		};
+
 		wdt: watchdog@20e027000 {
 			compatible = "apple,t7000-wdt", "apple,wdt";
 			reg = <0x2 0x0e027000 0x0 0x1000>;
@@ -88,11 +149,13 @@
 			reg = <0x2 0x0e100000 0x0 0x100000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
 		};
 
 		pinctrl: pinctrl@20e300000 {
 			compatible = "apple,t7000-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x0e300000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -121,3 +184,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "t7001-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8010-7.dtsi b/arch/arm64/boot/dts/apple/t8010-7.dtsi
index 1332fd73f50f08..1913b7b2c1febc 100644
--- a/arch/arm64/boot/dts/apple/t8010-7.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010-7.dtsi
@@ -41,3 +41,15 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0_fe &ps_disp0_be &ps_mipi_dsi>;
+};
+
+&hurricane_opp09 {
+	status = "okay";
+};
+
+&hurricane_opp10 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8010-common.dtsi b/arch/arm64/boot/dts/apple/t8010-common.dtsi
index 6613fb57c92fff..44dc968638b138 100644
--- a/arch/arm64/boot/dts/apple/t8010-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010-common.dtsi
@@ -43,6 +43,10 @@
 	};
 };
 
+&dwi_bl {
+	status = "okay";
+};
+
 &serial0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi b/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi
index 81696c6e302c61..1e46e4a3a7f4ad 100644
--- a/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi
@@ -42,3 +42,15 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0_fe &ps_disp0_be &ps_dp>;
+};
+
+&hurricane_opp09 {
+	status = "okay";
+};
+
+&hurricane_opp10 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8010-n112.dts b/arch/arm64/boot/dts/apple/t8010-n112.dts
index 6e71c3cb5d92b7..48fdbedf74da5c 100644
--- a/arch/arm64/boot/dts/apple/t8010-n112.dts
+++ b/arch/arm64/boot/dts/apple/t8010-n112.dts
@@ -45,3 +45,7 @@
 		};
 	};
 };
+
+&framebuffer0 {
+	power-domains = <&ps_disp0_fe &ps_disp0_be &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/t8010-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8010-pmgr.dtsi
new file mode 100644
index 00000000000000..6d451088616a97
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8010-pmgr.dtsi
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8010 "A10" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@80000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@80008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@80040 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_busif: power-controller@80160 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_busif";
+	};
+
+	ps_sio_p: power-controller@80168 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+		power-domains = <&ps_sio_busif>;
+	};
+
+	ps_sbr: power-controller@80100 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sbr";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_aic: power-controller@80108 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_dwi: power-controller@80110 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@80118 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_pms: power-controller@80120 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms";
+		apple,always-on; /* Core device */
+	};
+
+	ps_pcie_ref: power-controller@80148 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_socuvd: power-controller@80150 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "socuvd";
+	};
+
+	ps_mca0: power-controller@80178 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@80180 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@80188 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@80190 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@80198 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@801a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@801a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@801b0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@801b8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@801c0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@801c8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@801d0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@801d8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@801e0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@801e8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@801f0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@801f8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@80170 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_hsic0_phy: power-controller@80128 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_isp_sens0: power-controller@80130 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens0";
+	};
+
+	ps_isp_sens1: power-controller@80138 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens1";
+	};
+
+	ps_isp_sens2: power-controller@80140 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens2";
+	};
+
+	ps_usb: power-controller@80268 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@80270 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@80278 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@80288 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_rtmux: power-controller@802a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "rtmux";
+	};
+
+	ps_media: power-controller@802d8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp_sys: power-controller@802d0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sys";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_msr: power-controller@802e8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@802e0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0_fe: power-controller@802b0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_fe";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_disp0_be: power-controller@802b8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_be";
+		power-domains = <&ps_disp0_fe>;
+	};
+
+	ps_pmp: power-controller@802f0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pmp";
+	};
+
+	ps_pms_sram: power-controller@802f8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms_sram";
+	};
+
+	ps_uart3: power-controller@80200 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@80208 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@80210 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@80218 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@80220 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@80228 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_hfd0: power-controller@80238 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hfd0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mcc: power-controller@80240 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_dcs0: power-controller@80248 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs0";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs1: power-controller@80250 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs1";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs2: power-controller@80258 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs2";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs3: power-controller@80260 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs3";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_usb2host0_ohci: power-controller@80280 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usbotg: power-controller@80290 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@80298 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@802a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_mipi_dsi: power-controller@802c0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mipi_dsi";
+		power-domains = <&ps_disp0_be>;
+	};
+
+	ps_dp: power-controller@802c8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0_be>;
+	};
+
+	ps_venc_sys: power-controller@80310 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_sys";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pcie: power-controller@80318 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_pcie_aux: power-controller@80320 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_vdec0: power-controller@80300 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec0";
+		power-domains = <&ps_media>;
+	};
+
+	ps_gfx: power-controller@80328 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_sep: power-controller@80400 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_isp_rsts0: power-controller@84000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts0";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_rsts1: power-controller@84008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts1";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_vis: power-controller@84010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_vis";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_be: power-controller@84018 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_be";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_pearl: power-controller@84020 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_pearl";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_dprx: power-controller@84028 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dprx";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_venc_pipe4: power-controller@88000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe4";
+		power-domains = <&ps_venc_sys>;
+	};
+
+	ps_venc_pipe5: power-controller@88008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe5";
+		power-domains = <&ps_venc_sys>;
+	};
+
+	ps_venc_me0: power-controller@88010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+	};
+
+	ps_venc_me1: power-controller@88018 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+	};
+};
+
+&pmgr_mini {
+	ps_aop: power-controller@80000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop";
+		power-domains = <&ps_aop_cpu &ps_aop_busif &ps_aop_filter>;
+		apple,always-on; /* Always on processor */
+	};
+
+	ps_debug: power-controller@80008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_aop_gpio: power-controller@80010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_gpio";
+	};
+
+	ps_aop_cpu: power-controller@80048 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_cpu";
+	};
+
+	ps_aop_filter: power-controller@80050 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_filter";
+	};
+
+	ps_aop_busif: power-controller@80058 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_busif";
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t8010.dtsi b/arch/arm64/boot/dts/apple/t8010.dtsi
index e3d6a835410384..17e294bd7c44c7 100644
--- a/arch/arm64/boot/dts/apple/t8010.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010.dtsi
@@ -32,6 +32,8 @@
 			compatible = "apple,hurricane-zephyr";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -40,11 +42,89 @@
 			compatible = "apple,hurricane-zephyr";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
 	};
 
+	fusion_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		/*
+		 * Apple Fusion Architecture: Hardware big.LITTLE switcher
+		 * that uses p-state transitions to switch between cores.
+		 * Only one type of core can be active at a given time.
+		 *
+		 * The E-core frequencies are adjusted so performance scales
+		 * linearly with reported clock speed.
+		 */
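+		/*
+		 * For example, opp04 below is listed as 637 MHz but
+		 * corresponds to the E-core running at 1092 MHz, i.e.
+		 * roughly a 0.58 ratio; opp01-opp03 are scaled the same way.
+		 */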
+
+		opp01 {
+			opp-hz = /bits/ 64 <172000000>; /* 300 MHz, E-core */
+			opp-level = <1>;
+			clock-latency-ns = <11000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <230000000>; /* 396 MHz, E-core */
+			opp-level = <2>;
+			clock-latency-ns = <49000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <425000000>; /* 732 MHz, E-core */
+			opp-level = <3>;
+			clock-latency-ns = <13000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <637000000>; /* 1092 MHz, E-core */
+			opp-level = <4>;
+			clock-latency-ns = <18000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <756000000>;
+			opp-level = <5>;
+			clock-latency-ns = <35000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1056000000>;
+			opp-level = <6>;
+			clock-latency-ns = <31000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1356000000>;
+			opp-level = <7>;
+			clock-latency-ns = <37000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1644000000>;
+			opp-level = <8>;
+			clock-latency-ns = <39500>;
+		};
+		hurricane_opp09: opp09 {
+			opp-hz = /bits/ 64 <1944000000>;
+			opp-level = <9>;
+			clock-latency-ns = <46000>;
+			status = "disabled"; /* Not available on N112 */
+		};
+		hurricane_opp10: opp10 {
+			opp-hz = /bits/ 64 <2244000000>;
+			opp-level = <10>;
+			clock-latency-ns = <56000>;
+			status = "disabled"; /* Not available on N112 */
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		hurricane_opp11: opp11 {
+			opp-hz = /bits/ 64 <2340000000>;
+			opp-level = <11>;
+			clock-latency-ns = <56000>;
+			turbo-mode;
+			status = "disabled"; /* Not available on N112 */
+		};
+#endif
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -52,6 +132,12 @@
 		nonposted-mmio;
 		ranges;
 
+		cpufreq: performance-controller@202f20000 {
+			compatible = "apple,t8010-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x02f20000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@20a0c0000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -61,19 +147,37 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
+		pmgr: power-management@20e000000 {
+			compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x8c000>;
+		};
+
 		aic: interrupt-controller@20e100000 {
 			compatible = "apple,t8010-aic", "apple,aic";
 			reg = <0x2 0x0e100000 0x0 0x100000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
+		};
+
+		dwi_bl: backlight@20e200080 {
+			compatible = "apple,t8010-dwi-bl", "apple,dwi-bl";
+			reg = <0x2 0x0e200080 0x0 0x8>;
+			power-domains = <&ps_dwi>;
+			status = "disabled";
 		};
 
 		pinctrl_ap: pinctrl@20f100000 {
 			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x0f100000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -95,6 +199,7 @@
 		pinctrl_aop: pinctrl@2100f0000 {
 			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x100f0000 0x0 0x100000>;
+			power-domains = <&ps_aop_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -113,6 +218,14 @@
 				     <AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		pmgr_mini: power-management@210200000 {
+			compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x10200000 0 0x84000>;
+		};
+
 		wdt: watchdog@2102b0000 {
 			compatible = "apple,t8010-wdt", "apple,wdt";
 			reg = <0x2 0x102b0000 0x0 0x4000>;
@@ -131,3 +244,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "t8010-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8011-common.dtsi b/arch/arm64/boot/dts/apple/t8011-common.dtsi
index 44a0d0ea2ee36e..2010b56246f143 100644
--- a/arch/arm64/boot/dts/apple/t8011-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011-common.dtsi
@@ -22,6 +22,7 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0_fe &ps_disp0_be &ps_dp>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/apple/t8011-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8011-pmgr.dtsi
new file mode 100644
index 00000000000000..c44e3f9d708711
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8011-pmgr.dtsi
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8011 "A10X" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@80000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@80008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu2: power-controller@80010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu2";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@80040 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_busif: power-controller@80158 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_busif";
+	};
+
+	ps_sio_p: power-controller@80160 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+		power-domains = <&ps_sio_busif>;
+	};
+
+	ps_sbr: power-controller@80100 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sbr";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_aic: power-controller@80108 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_dwi: power-controller@80110 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@80118 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_pms: power-controller@80120 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms";
+		apple,always-on; /* Core device */
+	};
+
+	ps_pcie_ref: power-controller@80148 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_mca0: power-controller@80170 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@80178 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@80180 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@80188 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@80190 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@80198 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@801a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@801a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@801b0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@801b8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@801c0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@801c8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@801d0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@801d8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@801e0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@801e8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@801f0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@801f8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@80168 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_hsic0_phy: power-controller@80128 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsic0_phy";
+		power-domains = <&ps_usb3host>;
+	};
+
+	ps_isp_sens0: power-controller@80130 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens0";
+	};
+
+	ps_isp_sens1: power-controller@80138 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens1";
+	};
+
+	ps_isp_sens2: power-controller@80140 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens2";
+	};
+
+	ps_usb: power-controller@80288 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@80290 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host: power-controller@80298 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2dev: power-controller@802a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2dev";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb3host: power-controller@802a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb3host";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb3dev: power-controller@802b0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb3dev";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_media: power-controller@802e8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp_sys: power-controller@802e0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sys";
+	};
+
+	ps_msr: power-controller@802f8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@802f0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0_fe: power-controller@802c8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_fe";
+	};
+
+	ps_disp0_be: power-controller@802d0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_be";
+		power-domains = <&ps_disp0_fe>;
+	};
+
+	ps_dpa: power-controller@80230 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dpa";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@80200 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@80208 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@80210 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@80218 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@80220 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_hfd0: power-controller@80238 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hfd0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mcc: power-controller@80240 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_dcs0: power-controller@80248 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs0";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs1: power-controller@80250 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs1";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs2: power-controller@80258 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs2";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs3: power-controller@80260 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs3";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs4: power-controller@80268 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs4";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs5: power-controller@80270 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs5";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs6: power-controller@80278 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs6";
+	};
+
+	ps_dcs7: power-controller@80280 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs7";
+	};
+
+	ps_smx: power-controller@802b8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@802c0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_dp: power-controller@802d8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0_be>;
+	};
+
+	ps_venc_sys: power-controller@80320 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_sys";
+		power-domains = <&ps_media>;
+	};
+
+	ps_srs: power-controller@80390 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80390 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "srs";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pms_sram: power-controller@80308 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms_sram";
+	};
+
+	ps_pmp: power-controller@80300 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pmp";
+	};
+
+	ps_pcie: power-controller@80328 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_pcie_aux: power-controller@80330 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80330 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_vdec0: power-controller@80310 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec0";
+		power-domains = <&ps_media>;
+	};
+
+	ps_gfx: power-controller@80338 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80338 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_sep: power-controller@80400 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_isp_rsts0: power-controller@84000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts0";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_rsts1: power-controller@84008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts1";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_vis: power-controller@84010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_vis";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_be: power-controller@84018 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_be";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_pearl: power-controller@84020 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_pearl";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_dprx: power-controller@84028 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dprx";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_venc_pipe4: power-controller@88000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe4";
+		power-domains = <&ps_venc_sys>;
+	};
+
+	ps_venc_pipe5: power-controller@88008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe5";
+		power-domains = <&ps_venc_sys>;
+	};
+
+	ps_venc_me0: power-controller@88010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+	};
+
+	ps_venc_me1: power-controller@88018 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+	};
+};
+
+&pmgr_mini {
+	ps_aop: power-controller@80000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop";
+		power-domains = <&ps_aop_cpu &ps_aop_filter &ps_aop_busif>;
+		apple,always-on; /* Always on processor */
+	};
+
+	ps_debug: power-controller@80008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_aop_gpio: power-controller@80010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_gpio";
+	};
+
+	ps_aop_cpu: power-controller@80048 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_cpu";
+	};
+
+	ps_aop_filter: power-controller@80050 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_filter";
+	};
+
+	ps_aop_busif: power-controller@80058 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_busif";
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t8011-pro2.dtsi b/arch/arm64/boot/dts/apple/t8011-pro2.dtsi
index f4e7074150036c..5eaa0a73350f59 100644
--- a/arch/arm64/boot/dts/apple/t8011-pro2.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011-pro2.dtsi
@@ -40,3 +40,11 @@
 		};
 	};
 };
+
+&ps_dcs6 {
+	apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs7 {
+	apple,always-on; /* LPDDR4 interface */
+};
diff --git a/arch/arm64/boot/dts/apple/t8011.dtsi b/arch/arm64/boot/dts/apple/t8011.dtsi
index 6c4ed9dc4a504d..5b280c896b760d 100644
--- a/arch/arm64/boot/dts/apple/t8011.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011.dtsi
@@ -32,6 +32,8 @@
 			compatible = "apple,hurricane-zephyr";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -40,6 +42,8 @@
 			compatible = "apple,hurricane-zephyr";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -48,11 +52,80 @@
 			compatible = "apple,hurricane-zephyr";
 			reg = <0x0 0x2>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
 	};
 
+	fusion_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		/*
+		 * Apple Fusion Architecture: Hardware big.LITTLE switcher
+		 * that uses p-state transitions to switch between cores.
+		 *
+		 * The E-core frequencies are adjusted so performance scales
+		 * linearly with reported clock speed.
+		 */
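+		/*
+		 * For example, opp03 below is listed as 448 MHz but
+		 * corresponds to the E-core running at 768 MHz, roughly the
+		 * same 0.58 ratio as the other E-core entries.
+		 */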
+
+		opp01 {
+			opp-hz = /bits/ 64 <172000000>; /* 300 MHz, E-core */
+			opp-level = <1>;
+			clock-latency-ns = <12000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <230000000>; /* 396 MHz, E-core */
+			opp-level = <2>;
+			clock-latency-ns = <135000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <448000000>; /* 768 MHz, E-core */
+			opp-level = <3>;
+			clock-latency-ns = <105000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <662000000>; /* 1152 MHz, E-core */
+			opp-level = <4>;
+			clock-latency-ns = <115000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <804000000>;
+			opp-level = <5>;
+			clock-latency-ns = <122000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1140000000>;
+			opp-level = <6>;
+			clock-latency-ns = <120000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1548000000>;
+			opp-level = <7>;
+			clock-latency-ns = <125000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1956000000>;
+			opp-level = <8>;
+			clock-latency-ns = <135000>;
+		};
+		opp09 {
+			opp-hz = /bits/ 64 <2316000000>;
+			opp-level = <9>;
+			clock-latency-ns = <140000>;
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp10 {
+			opp-hz = /bits/ 64 <2400000000>;
+			opp-level = <10>;
+			clock-latency-ns = <140000>;
+			turbo-mode;
+		};
+#endif
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -60,6 +133,12 @@
 		nonposted-mmio;
 		ranges;
 
+		cpufreq: performance-controller@202f20000 {
+			compatible = "apple,t8010-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x02f20000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@20a0c0000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -69,19 +148,30 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
+		pmgr: power-management@20e000000 {
+			compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x8c000>;
+		};
+
 		aic: interrupt-controller@20e100000 {
 			compatible = "apple,t8010-aic", "apple,aic";
 			reg = <0x2 0x0e100000 0x0 0x100000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
 		};
 
 		pinctrl_ap: pinctrl@20f100000 {
 			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x0f100000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -103,6 +193,7 @@
 		pinctrl_aop: pinctrl@2100f0000 {
 			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x100f0000 0x0 0x100000>;
+			power-domains = <&ps_aop_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -121,6 +212,14 @@
 				     <AIC_IRQ 131 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		pmgr_mini: power-management@210200000 {
+			compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x10200000 0 0x84000>;
+		};
+
 		wdt: watchdog@2102b0000 {
 			compatible = "apple,t8010-wdt", "apple,wdt";
 			reg = <0x2 0x102b0000 0x0 0x4000>;
@@ -139,3 +238,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "t8011-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8012-j132.dts b/arch/arm64/boot/dts/apple/t8012-j132.dts
new file mode 100644
index 00000000000000..778a69be18dd81
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j132.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,2 (j132), J132, iBridge2,4
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro15,2 (j132)";
+	compatible = "apple,j132", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j137.dts b/arch/arm64/boot/dts/apple/t8012-j137.dts
new file mode 100644
index 00000000000000..dbde1ad7ce14a0
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j137.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 iMacPro1,1 (j137), J137, iBridge2,1
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 iMacPro1,1 (j137)";
+	compatible = "apple,j137", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j140a.dts b/arch/arm64/boot/dts/apple/t8012-j140a.dts
new file mode 100644
index 00000000000000..5df1ff74d2df72
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j140a.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookAir8,2 (j140a), J140a, iBridge2,12
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 MacBookAir8,2 (j140a)";
+	compatible = "apple,j140a", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j140k.dts b/arch/arm64/boot/dts/apple/t8012-j140k.dts
new file mode 100644
index 00000000000000..a0ef1585e5c24e
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j140k.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookAir8,1 (j140k), J140k, iBridge2,8
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 MacBookAir8,1 (j140k)";
+	compatible = "apple,j140k", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j152f.dts b/arch/arm64/boot/dts/apple/t8012-j152f.dts
new file mode 100644
index 00000000000000..261416eaf97e08
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j152f.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,1 (j152f), J152f, iBridge2,14
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro16,1 (j152f)";
+	compatible = "apple,j152f", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j160.dts b/arch/arm64/boot/dts/apple/t8012-j160.dts
new file mode 100644
index 00000000000000..fbcc0604f4a071
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j160.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacPro7,1 (j160), J160, iBridge2,6
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 MacPro7,1 (j160)";
+	compatible = "apple,j160", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j174.dts b/arch/arm64/boot/dts/apple/t8012-j174.dts
new file mode 100644
index 00000000000000..d11c70f84a71d7
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j174.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 Macmini8,1 (j174), J174, iBridge2,5
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 Macmini8,1 (j174)";
+	compatible = "apple,j174", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j185.dts b/arch/arm64/boot/dts/apple/t8012-j185.dts
new file mode 100644
index 00000000000000..33492f5db46df4
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j185.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 iMac20,1 (j185), J185, iBridge2,19
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 iMac20,1 (j185)";
+	compatible = "apple,j185", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j185f.dts b/arch/arm64/boot/dts/apple/t8012-j185f.dts
new file mode 100644
index 00000000000000..3a4abdd8f7d7af
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j185f.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 iMac20,2 (j185f), J185f, iBridge2,20
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 iMac20,2 (j185f)";
+	compatible = "apple,j185f", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j213.dts b/arch/arm64/boot/dts/apple/t8012-j213.dts
new file mode 100644
index 00000000000000..8270812b9a686f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j213.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,4 (j213), J213, iBridge2,10
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro15,4 (j213)";
+	compatible = "apple,j213", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j214k.dts b/arch/arm64/boot/dts/apple/t8012-j214k.dts
new file mode 100644
index 00000000000000..5b8e425120609e
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j214k.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,2 (j214k), J214k, iBridge2,16
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro16,2 (j214k)";
+	compatible = "apple,j214k", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j215.dts b/arch/arm64/boot/dts/apple/t8012-j215.dts
new file mode 100644
index 00000000000000..ad574fbf7f92bf
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j215.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,4 (j215), J215, iBridge2,22
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro16,4 (j215)";
+	compatible = "apple,j215", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j223.dts b/arch/arm64/boot/dts/apple/t8012-j223.dts
new file mode 100644
index 00000000000000..de75d775aac59d
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j223.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,3 (j223), J223, iBridge2,21
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro16,3 (j223)";
+	compatible = "apple,j223", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j230k.dts b/arch/arm64/boot/dts/apple/t8012-j230k.dts
new file mode 100644
index 00000000000000..4b19bc70ab0f60
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j230k.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookAir9,1 (j230k), J230k, iBridge2,15
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+	model = "Apple T2 MacBookAir9,1 (j230k)";
+	compatible = "apple,j230k", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j680.dts b/arch/arm64/boot/dts/apple/t8012-j680.dts
new file mode 100644
index 00000000000000..aa5a72e07d3fd1
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j680.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,1 (j680), J680, iBridge2,3
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro15,1 (j680)";
+	compatible = "apple,j680", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j780.dts b/arch/arm64/boot/dts/apple/t8012-j780.dts
new file mode 100644
index 00000000000000..9cee891cb16d78
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j780.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,3 (j780), J780, iBridge2,7
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+	model = "Apple T2 MacBookPro15,3 (j780)";
+	compatible = "apple,j780", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8012-jxxx.dtsi
new file mode 100644
index 00000000000000..36e82633bc521a
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-jxxx.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Common Device Tree for all T2 devices
+ *
+ * target-type: J132, J137, J140a, J140k, J152f, J160, J174, J185, J185f
+ * J213, J214k, J215, J223, J230k, J680, J780
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+#include "t8012.dtsi"
+
+/ {
+	chassis-type = "embedded";
+
+	aliases {
+		serial0 = &serial0;
+	};
+
+	chosen {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		stdout-path = "serial0";
+	};
+
+	memory@800000000 {
+		device_type = "memory";
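+		/*
+		 * Two address cells and two size cells: <0x8 0 0 0> encodes
+		 * base 0x8_0000_0000 with a zero-size placeholder that the
+		 * loader replaces with the real DRAM size.
+		 */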
+		reg = <0x8 0 0 0>; /* To be filled by loader */
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* To be filled by loader */
+	};
+};
+
+&serial0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8012-pmgr.dtsi
new file mode 100644
index 00000000000000..35a462edd4af71
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-pmgr.dtsi
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8012 "T2" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@80000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@80008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@80040 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_busif: power-controller@80158 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_busif";
+	};
+
+	ps_sio_p: power-controller@80160 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+		power-domains = <&ps_sio_busif>;
+	};
+
+	ps_iomux: power-controller@80150 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80150 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "iomux";
+	};
+
+	ps_sbr: power-controller@80100 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sbr";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_aic: power-controller@80108 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_gpio: power-controller@80110 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_pcie_down_ref: power-controller@80138 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_down_ref";
+	};
+
+	ps_pcie_stg0_ref: power-controller@80140 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_stg0_ref";
+	};
+
+	ps_pcie_stg1_ref: power-controller@80148 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_stg1_ref";
+	};
+
+	ps_mca0: power-controller@80170 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@80178 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@80180 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@80188 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@80190 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca5: power-controller@80198 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@801a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@801b0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@801b8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@801c0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@801e0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@801e8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@801f0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@801f8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@801a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@80168 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_isp_sens0: power-controller@80120 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens0";
+	};
+
+	ps_isp_sens1: power-controller@80128 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens1";
+	};
+
+	ps_isp_sens2: power-controller@80130 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sens2";
+	};
+
+	ps_pms: power-controller@80118 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms";
+		apple,always-on; /* Core device */
+	};
+
+	ps_i2c4: power-controller@801c8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c5: power-controller@801d0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c6: power-controller@801d8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_usb: power-controller@80268 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctrl: power-controller@80270 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctrl";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@80278 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_usb2host1: power-controller@80288 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_rtmux: power-controller@802a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "rtmux";
+	};
+
+	ps_media: power-controller@802d8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_isp_sys: power-controller@802d0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sys";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_msr: power-controller@802e8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_jpg: power-controller@802e0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0_fe: power-controller@802b0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_fe";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_disp0_be: power-controller@802b8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_be";
+		power-domains = <&ps_disp0_fe>;
+	};
+
+	ps_uart0: power-controller@80200 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@80208 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@80210 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart3: power-controller@80218 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@80220 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_dpa: power-controller@80228 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dpa";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_hfd0: power-controller@80230 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hfd0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mcc: power-controller@80240 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80240 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_dcs0: power-controller@80248 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs0";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs1: power-controller@80250 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs1";
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs2: power-controller@80258 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs2";
+		/* Not used on some devices, to be disabled by loader */
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_dcs3: power-controller@80260 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs3";
+		/* Not used on some devices, to be disabled by loader */
+		apple,always-on; /* LPDDR4 interface */
+	};
+
+	ps_usb2host0_ohci: power-controller@80280 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usbotg: power-controller@80290 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbotg";
+		power-domains = <&ps_usbctrl>;
+	};
+
+	ps_smx: power-controller@80298 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@802a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_mipi_dsi: power-controller@802c8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mipi_dsi";
+		power-domains = <&ps_disp0_be>;
+	};
+
+	ps_pmp: power-controller@802f0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pmp";
+	};
+
+	ps_pms_sram: power-controller@802f8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms_sram";
+	};
+
+	ps_pcie_up_af: power-controller@80320 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_up_af";
+		power-domains = <&ps_iomux>;
+	};
+
+	ps_pcie_up: power-controller@80328 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_up";
+		power-domains = <&ps_pcie_up_af>;
+	};
+
+	ps_venc_sys: power-controller@80300 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_sys";
+		power-domains = <&ps_media>;
+	};
+
+	ps_ans2: power-controller@80308 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ans2";
+		power-domains = <&ps_iomux>;
+	};
+
+	ps_pcie_down: power-controller@80310 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_down";
+		power-domains = <&ps_iomux>;
+	};
+
+	ps_pcie_down_aux: power-controller@80318 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_down_aux";
+	};
+
+	ps_pcie_up_aux: power-controller@80330 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80330 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_up_aux";
+		power-domains = <&ps_pcie_up>;
+	};
+
+	ps_pcie_stg0: power-controller@80338 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80338 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_stg0";
+		power-domains = <&ps_ans2>;
+	};
+
+	ps_pcie_stg0_aux: power-controller@80340 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80340 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_stg0_aux";
+	};
+
+	ps_pcie_stg1: power-controller@80348 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80348 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_stg1";
+		power-domains = <&ps_ans2>;
+	};
+
+	ps_pcie_stg1_aux: power-controller@80350 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80350 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_stg1_aux";
+	};
+
+	ps_sep: power-controller@80400 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_isp_rsts0: power-controller@84000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts0";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_rsts1: power-controller@84008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts1";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_vis: power-controller@84010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_vis";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_be: power-controller@84018 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_be";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_pearl: power-controller@84020 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_pearl";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_venc_pipe4: power-controller@88000 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe4";
+	};
+
+	ps_venc_pipe5: power-controller@88008 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe5";
+	};
+
+	ps_venc_me0: power-controller@88010 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+	};
+
+	ps_venc_me1: power-controller@88018 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+	};
+};
+
+&pmgr_mini {
+	ps_spmi: power-controller@80058 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spmi";
+		apple,always-on; /* Core AON device */
+	};
+
+	ps_nub_aon: power-controller@80060 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80060 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "nub_aon";
+		apple,always-on; /* Core AON device */
+	};
+
+	ps_smc_fabric: power-controller@80030 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smc_fabric";
+		apple,always-on; /* Core AON device */
+	};
+
+	ps_smc_aon: power-controller@80088 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80088 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smc_aon";
+		apple,always-on; /* Core AON device */
+	};
+
+	ps_debug: power-controller@80050 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_nub_sram: power-controller@801a0 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "nub_sram";
+		apple,always-on; /* Core AON device */
+	};
+
+	ps_nub_fabric: power-controller@80198 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "nub_fabric";
+		apple,always-on; /* Core AON device */
+	};
+
+	ps_smc_cpu: power-controller@801a8 {
+		compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smc_cpu";
+		power-domains = <&ps_smc_fabric &ps_smc_aon>;
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-touchbar.dtsi b/arch/arm64/boot/dts/apple/t8012-touchbar.dtsi
new file mode 100644
index 00000000000000..fc4a80d0c787f5
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-touchbar.dtsi
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Common Device Tree for T2 devices with a Touch Bar
+ *
+ * target-type: J152f, J213, J214k, J215, J223, J680, J780
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/ {
+	chosen {
+		framebuffer0: framebuffer@0 {
+			compatible = "apple,simple-framebuffer", "simple-framebuffer";
+			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0_fe &ps_disp0_be &ps_mipi_dsi>;
+			/* Format properties will be added by loader */
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t8012.dtsi b/arch/arm64/boot/dts/apple/t8012.dtsi
new file mode 100644
index 00000000000000..42df2f51ad7be4
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012.dtsi
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T8012 "T2" SoC
+ *
+ * Other names: H9M, "Gibraltar"
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/apple.h>
+
+/ {
+	interrupt-parent = <&aic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	clkref: clock-ref {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+		clock-output-names = "clkref";
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu0: cpu@10000 {
+			compatible = "apple,hurricane-zephyr";
+			reg = <0x0 0x10000>;
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
+			enable-method = "spin-table";
+			device_type = "cpu";
+		};
+
+		cpu1: cpu@10001 {
+			compatible = "apple,hurricane-zephyr";
+			reg = <0x0 0x10001>;
+			cpu-release-addr = <0 0>; /* To be filled by loader */
+			operating-points-v2 = <&fusion_opp>;
+			performance-domains = <&cpufreq>;
+			enable-method = "spin-table";
+			device_type = "cpu";
+		};
+	};
+
+	fusion_opp: opp-table {
+		compatible = "operating-points-v2";
+
+		/*
+		 * Apple Fusion Architecture: Hardware big.LITTLE switcher
+		 * that uses p-state transitions to switch between cores.
+		 * Only one type of core can be active at a given time.
+		 *
+		 * The E-core frequencies are adjusted so performance scales
+		 * linearly with reported clock speed.
+		 */
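+
+		/*
+		 * Worked example (derived from the entries below, not a
+		 * hardware-reported value): opp01 lists 172 MHz while the
+		 * E-cores actually run at 300 MHz, a ratio of roughly 0.57,
+		 * so the reported rate approximates P-core-equivalent
+		 * performance at that level.
+		 */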
+
+		opp01 {
+			opp-hz = /bits/ 64 <172000000>; /* 300 MHz, E-core */
+			opp-level = <1>;
+			clock-latency-ns = <11000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <230000000>; /* 396 MHz, E-core */
+			opp-level = <2>;
+			clock-latency-ns = <140000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <425000000>; /* 732 MHz, E-core */
+			opp-level = <3>;
+			clock-latency-ns = <110000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <637000000>; /* 1092 MHz, E-core */
+			opp-level = <4>;
+			clock-latency-ns = <130000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <756000000>;
+			opp-level = <5>;
+			clock-latency-ns = <130000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1056000000>;
+			opp-level = <6>;
+			clock-latency-ns = <130000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1356000000>;
+			opp-level = <7>;
+			clock-latency-ns = <130000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1644000000>;
+			opp-level = <8>;
+			clock-latency-ns = <135000>;
+		};
+		opp09 {
+			opp-hz = /bits/ 64 <1944000000>;
+			opp-level = <9>;
+			clock-latency-ns = <140000>;
+		};
+		opp10 {
+			opp-hz = /bits/ 64 <2244000000>;
+			opp-level = <10>;
+			clock-latency-ns = <150000>;
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp11 {
+			opp-hz = /bits/ 64 <2340000000>;
+			opp-level = <11>;
+			clock-latency-ns = <150000>;
+			turbo-mode;
+		};
+#endif
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		nonposted-mmio;
+		ranges;
+
+		cpufreq: performance-controller@202f20000 {
+			compatible = "apple,t8010-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x02f20000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
+		serial0: serial@20a600000 {
+			compatible = "apple,s5l-uart";
+			reg = <0x2 0x0a600000 0x0 0x4000>;
+			reg-io-width = <4>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 271 IRQ_TYPE_LEVEL_HIGH>;
+			/* Use the bootloader-enabled clocks for now. */
+			clocks = <&clkref>, <&clkref>;
+			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
+			status = "disabled";
+		};
+
+		pmgr: power-management@20e000000 {
+			compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0xe000000 0 0x8c000>;
+		};
+
+		aic: interrupt-controller@20e100000 {
+			compatible = "apple,t8010-aic", "apple,aic";
+			reg = <0x2 0x0e100000 0x0 0x100000>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			power-domains = <&ps_aic>;
+		};
+
+		pinctrl_ap: pinctrl@20f100000 {
+			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+			reg = <0x2 0x0f100000 0x0 0x100000>;
+			power-domains = <&ps_gpio>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_ap 0 0 221>;
+			apple,npins = <221>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 49 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 50 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 51 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_aop: pinctrl@2100f0000 {
+			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+			reg = <0x2 0x100f0000 0x0 0x10000>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_aop 0 0 41>;
+			apple,npins = <41>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 131 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 132 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 133 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 135 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 136 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 137 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_nub: pinctrl@2111f0000 {
+			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+			reg = <0x2 0x111f0000 0x0 0x1000>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_nub 0 0 19>;
+			apple,npins = <19>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 164 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 165 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 166 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pmgr_mini: power-management@211200000 {
+			compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x11200000 0 0x84000>;
+		};
+
+		wdt: watchdog@2112b0000 {
+			compatible = "apple,t8010-wdt", "apple,wdt";
+			reg = <0x2 0x112b0000 0x0 0x4000>;
+			clocks = <&clkref>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 168 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_smc: pinctrl@212024000 {
+			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+			reg = <0x2 0x12024000 0x0 0x1000>;
+			power-domains = <&ps_smc_cpu>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_smc 0 0 81>;
+			apple,npins = <81>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 195 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 196 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 198 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 201 IRQ_TYPE_LEVEL_HIGH>;
+			/*
+			 * SMC is not yet supported and accessing this pinctrl while SMC is
+			 * suspended results in a hang.
+			 */
+			status = "disabled";
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&aic>;
+		interrupt-names = "phys", "virt";
+		/* Note that T2 doesn't actually have a hypervisor (EL2 is not implemented). */
+		interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
+			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
+
+#include "t8012-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8015-8.dtsi b/arch/arm64/boot/dts/apple/t8015-8.dtsi
index b6505b5185bdd7..0300ee1a2ffb7d 100644
--- a/arch/arm64/boot/dts/apple/t8015-8.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015-8.dtsi
@@ -11,3 +11,7 @@
 / {
 	chassis-type = "handset";
 };
+
+&dwi_bl {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8015-common.dtsi b/arch/arm64/boot/dts/apple/t8015-common.dtsi
index 69258a33ea5008..498f58fb9715d1 100644
--- a/arch/arm64/boot/dts/apple/t8015-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015-common.dtsi
@@ -24,6 +24,7 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0_be &ps_mipi_dsi &ps_disp0_hilo &ps_disp0_ppp>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
diff --git a/arch/arm64/boot/dts/apple/t8015-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8015-pmgr.dtsi
new file mode 100644
index 00000000000000..e238c2d2732f79
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8015-pmgr.dtsi
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8015 "A11" SoC
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+	ps_cpu0: power-controller@80000 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu0";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu1: power-controller@80008 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu1";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu2: power-controller@80010 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu2";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu3: power-controller@80018 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu3";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu4: power-controller@80020 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu4";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpu5: power-controller@80028 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpu5";
+		apple,always-on; /* Core device */
+	};
+
+	ps_cpm: power-controller@80040 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "cpm";
+		apple,always-on; /* Core device */
+	};
+
+	ps_sio_busif: power-controller@80158 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80158 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_busif";
+	};
+
+	ps_sio_p: power-controller@80160 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80160 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio_p";
+		power-domains = <&ps_sio_busif>;
+	};
+
+	ps_sbr: power-controller@80100 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80100 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sbr";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_aic: power-controller@80108 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80108 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aic";
+		apple,always-on; /* Core device */
+	};
+
+	ps_dwi: power-controller@80110 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80110 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dwi";
+	};
+
+	ps_gpio: power-controller@80118 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80118 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gpio";
+	};
+
+	ps_pms: power-controller@80120 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80120 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms";
+		apple,always-on; /* Core device */
+	};
+
+	ps_pcie_ref: power-controller@80148 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80148 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_ref";
+	};
+
+	ps_mca0: power-controller@80170 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80170 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca1: power-controller@80178 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80178 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca2: power-controller@80180 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80180 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca3: power-controller@80188 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80188 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mca4: power-controller@80190 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80190 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_pwm0: power-controller@801a0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pwm0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c0: power-controller@801a8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c1: power-controller@801b0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c2: power-controller@801b8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_i2c3: power-controller@801c0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "i2c3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi0: power-controller@801c8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi1: power-controller@801d0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi2: power-controller@801d8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_spi3: power-controller@801e0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart0: power-controller@801e8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801e8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart1: power-controller@801f0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart1";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart2: power-controller@801f8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x801f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart2";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_sio: power-controller@80168 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80168 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sio";
+		power-domains = <&ps_sio_p>;
+		apple,always-on; /* Core device */
+	};
+
+	ps_hsicphy: power-controller@80128 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80128 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hsicphy";
+		power-domains = <&ps_usb2host1>;
+	};
+
+	ps_ispsens0: power-controller@80130 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80130 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens0";
+	};
+
+	ps_ispsens1: power-controller@80138 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80138 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens1";
+	};
+
+	ps_ispsens2: power-controller@80140 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ispsens2";
+	};
+
+	ps_mca5: power-controller@80198 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80198 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mca5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_usb: power-controller@80270 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80270 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb";
+	};
+
+	ps_usbctlreg: power-controller@80278 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80278 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usbctlreg";
+		power-domains = <&ps_usb>;
+	};
+
+	ps_usb2host0: power-controller@80280 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80280 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0";
+		power-domains = <&ps_usbctlreg>;
+	};
+
+	ps_usb2host1: power-controller@80290 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80290 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host1";
+		power-domains = <&ps_usbctlreg>;
+	};
+
+	ps_rtmux: power-controller@802b0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "rtmux";
+	};
+
+	ps_media: power-controller@802f0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "media";
+	};
+
+	ps_jpg: power-controller@802f8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802f8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "jpg";
+		power-domains = <&ps_media>;
+	};
+
+	ps_disp0_fe: power-controller@802b8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802b8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_fe";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_disp0_be: power-controller@802c0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_be";
+		power-domains = <&ps_disp0_fe>;
+	};
+
+	ps_disp0_gp: power-controller@802c8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802c8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_gp";
+		power-domains = <&ps_disp0_be>;
+		status = "disabled";
+	};
+
+	ps_uart3: power-controller@80200 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80200 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart3";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart4: power-controller@80208 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80208 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart4";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart5: power-controller@80210 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80210 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart5";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart6: power-controller@80218 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80218 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart6";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart7: power-controller@80220 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80220 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart7";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_uart8: power-controller@80228 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80228 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "uart8";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_hfd0: power-controller@80238 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80238 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "hfd0";
+		power-domains = <&ps_sio_p>;
+	};
+
+	ps_mcc: power-controller@80248 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80248 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mcc";
+		apple,always-on; /* Memory cache controller */
+	};
+
+	ps_dcs0: power-controller@80250 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80250 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs0";
+		apple,always-on; /* LPDDR4X interface */
+	};
+
+	ps_dcs1: power-controller@80258 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80258 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs1";
+		apple,always-on; /* LPDDR4X interface */
+	};
+
+	ps_dcs2: power-controller@80260 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs2";
+		apple,always-on; /* LPDDR4X interface */
+	};
+
+	ps_dcs3: power-controller@80268 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80268 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dcs3";
+		apple,always-on; /* LPDDR4X interface */
+	};
+
+	ps_usb2host0_ohci: power-controller@80288 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80288 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2host0_ohci";
+		power-domains = <&ps_usb2host0>;
+	};
+
+	ps_usb2dev: power-controller@80298 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80298 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "usb2dev";
+		power-domains = <&ps_usbctlreg>;
+	};
+
+	ps_smx: power-controller@802a0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smx";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_sf: power-controller@802a8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sf";
+		apple,always-on; /* Apple fabric, critical block */
+	};
+
+	ps_mipi_dsi: power-controller@802d8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "mipi_dsi";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_dp: power-controller@802e0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802e0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dp";
+		power-domains = <&ps_disp0_be>;
+	};
+
+	ps_dpa: power-controller@80230 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80230 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dpa";
+	};
+
+	ps_disp0_be_2x: power-controller@802d0 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x802d0 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_be_2x";
+		power-domains = <&ps_disp0_be>;
+	};
+
+	ps_isp_sys: power-controller@80350 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80350 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_sys";
+		power-domains = <&ps_rtmux>;
+	};
+
+	ps_msr: power-controller@80300 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80300 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "msr";
+		power-domains = <&ps_media>;
+	};
+
+	ps_venc_sys: power-controller@80398 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80398 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_sys";
+		power-domains = <&ps_media>;
+	};
+
+	ps_pmp: power-controller@80308 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80308 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pmp";
+	};
+
+	ps_pms_sram: power-controller@80310 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80310 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pms_sram";
+	};
+
+	ps_pcie: power-controller@80318 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80318 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie";
+	};
+
+	ps_pcie_aux: power-controller@80320 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80320 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_aux";
+	};
+
+	ps_vdec0: power-controller@80388 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80388 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "vdec0";
+		power-domains = <&ps_media>;
+	};
+
+	ps_gfx: power-controller@80338 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80338 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "gfx";
+	};
+
+	ps_ans2: power-controller@80328 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80328 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "ans2";
+		apple,always-on;
+	};
+
+	ps_pcie_direct: power-controller@80330 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80330 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "pcie_direct";
+		apple,always-on;
+	};
+
+	ps_avd_sys: power-controller@803a8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x803a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "avd_sys";
+		power-domains = <&ps_media>;
+	};
+
+	ps_sep: power-controller@80400 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80400 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "sep";
+		apple,always-on; /* Locked on */
+	};
+
+	ps_disp0_gp0: power-controller@80830 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80830 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_gp0";
+		power-domains = <&ps_disp0_gp>;
+		status = "disabled";
+	};
+
+	ps_disp0_gp1: power-controller@80838 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80838 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_gp1";
+		status = "disabled";
+	};
+
+	ps_disp0_ppp: power-controller@80840 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80840 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_ppp";
+	};
+
+	ps_disp0_hilo: power-controller@80848 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80848 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "disp0_hilo";
+	};
+
+	ps_isp_rsts0: power-controller@84000 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts0";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_rsts1: power-controller@84008 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_rsts1";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_vis: power-controller@84010 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_vis";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_be: power-controller@84018 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_be";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_pearl: power-controller@84020 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_pearl";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_dprx: power-controller@84028 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "dprx";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_isp_cnv: power-controller@84030 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x84030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_cnv";
+		power-domains = <&ps_isp_sys>;
+	};
+
+	ps_venc_dma: power-controller@88000 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_dma";
+	};
+
+	ps_venc_pipe4: power-controller@88010 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe4";
+	};
+
+	ps_venc_pipe5: power-controller@88018 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_pipe5";
+	};
+
+	ps_venc_me0: power-controller@88020 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me0";
+	};
+
+	ps_venc_me1: power-controller@88028 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x88028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "venc_me1";
+	};
+};
+
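+/*
+ * Power states in the second ("mini") PMGR instance, which hosts the AOP,
+ * SPMI and SMC related domains.
+ */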
+&pmgr_mini {
+	ps_aop_base: power-controller@80008 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_base";
+		power-domains = <&ps_aop_cpu &ps_aop_filter>;
+		apple,always-on; /* Always on processor */
+	};
+
+	ps_debug: power-controller@80050 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "debug";
+	};
+
+	ps_aop_cpu: power-controller@80020 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_cpu";
+	};
+
+	ps_aop_filter: power-controller@80000 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "aop_filter";
+	};
+
+	ps_spmi: power-controller@80058 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spmi";
+		apple,always-on; /* System Power Management Interface */
+	};
+
+	ps_smc_i2cm1: power-controller@800a8 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x800a8 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smc_i2cm1";
+	};
+
+	ps_smc_fabric: power-controller@80030 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smc_fabric";
+	};
+
+	ps_smc_cpu: power-controller@80140 {
+		compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x80140 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "smc_cpu";
+		power-domains = <&ps_smc_fabric &ps_smc_i2cm1>;
+	};
+};
diff --git a/arch/arm64/boot/dts/apple/t8015.dtsi b/arch/arm64/boot/dts/apple/t8015.dtsi
index 8828d830e5be6f..4d54afcecd50b5 100644
--- a/arch/arm64/boot/dts/apple/t8015.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015.dtsi
@@ -58,6 +58,9 @@
 			compatible = "apple,mistral";
 			reg = <0x0 0x0>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq_e>;
+			operating-points-v2 = <&mistral_opp>;
+			capacity-dmips-mhz = <633>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -66,6 +69,9 @@
 			compatible = "apple,mistral";
 			reg = <0x0 0x1>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq_e>;
+			operating-points-v2 = <&mistral_opp>;
+			capacity-dmips-mhz = <633>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -74,6 +80,9 @@
 			compatible = "apple,mistral";
 			reg = <0x0 0x2>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq_e>;
+			operating-points-v2 = <&mistral_opp>;
+			capacity-dmips-mhz = <633>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -82,6 +91,9 @@
 			compatible = "apple,mistral";
 			reg = <0x0 0x3>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq_e>;
+			operating-points-v2 = <&mistral_opp>;
+			capacity-dmips-mhz = <633>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -90,6 +102,9 @@
 			compatible = "apple,monsoon";
 			reg = <0x0 0x10004>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq_p>;
+			operating-points-v2 = <&monsoon_opp>;
+			capacity-dmips-mhz = <1024>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
@@ -98,11 +113,107 @@
 			compatible = "apple,monsoon";
 			reg = <0x0 0x10005>;
 			cpu-release-addr = <0 0>; /* To be filled by loader */
+			performance-domains = <&cpufreq_p>;
+			operating-points-v2 = <&monsoon_opp>;
+			capacity-dmips-mhz = <1024>;
 			enable-method = "spin-table";
 			device_type = "cpu";
 		};
 	};
 
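+	/*
+	 * CPU operating points, referenced by the CPU nodes above via
+	 * operating-points-v2. The opp-level values correspond to the
+	 * p-states programmed through the cluster cpufreq controllers.
+	 */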
+	mistral_opp: opp-table-0 {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <1800>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <453000000>;
+			opp-level = <2>;
+			clock-latency-ns = <140000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <672000000>;
+			opp-level = <3>;
+			clock-latency-ns = <105000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <972000000>;
+			opp-level = <4>;
+			clock-latency-ns = <115000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1272000000>;
+			opp-level = <5>;
+			clock-latency-ns = <125000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1572000000>;
+			opp-level = <6>;
+			clock-latency-ns = <135000>;
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp07 {
+			opp-hz = /bits/ 64 <1680000000>;
+			opp-level = <7>;
+			clock-latency-ns = <135000>;
+			turbo-mode;
+		};
+#endif
+	};
+
+	monsoon_opp: opp-table-1 {
+		compatible = "operating-points-v2";
+
+		opp01 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-level = <1>;
+			clock-latency-ns = <1400>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <453000000>;
+			opp-level = <2>;
+			clock-latency-ns = <140000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <853000000>;
+			opp-level = <3>;
+			clock-latency-ns = <110000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1332000000>;
+			opp-level = <4>;
+			clock-latency-ns = <110000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1812000000>;
+			opp-level = <5>;
+			clock-latency-ns = <125000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <2064000000>;
+			opp-level = <6>;
+			clock-latency-ns = <130000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <2304000000>;
+			opp-level = <7>;
+			clock-latency-ns = <140000>;
+		};
+#if 0
+		/* Not available until CPU deep sleep is implemented */
+		opp08 {
+			opp-hz = /bits/ 64 <2376000000>;
+			opp-level = <8>;
+			clock-latency-ns = <140000>;
+			turbo-mode;
+		};
+#endif
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -110,6 +221,18 @@
 		nonposted-mmio;
 		ranges;
 
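+		/*
+		 * Per-cluster cpufreq (DVFS) controllers: cpufreq_e drives the
+		 * efficiency (Mistral) cluster and cpufreq_p the performance
+		 * (Monsoon) cluster, as referenced by the CPU nodes'
+		 * performance-domains.
+		 */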
+		cpufreq_e: performance-controller@208e20000 {
+			compatible = "apple,t8015-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x08e20000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
+		cpufreq_p: performance-controller@208ea0000 {
+			compatible = "apple,t8015-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+			reg = <0x2 0x08ea0000 0 0x1000>;
+			#performance-domain-cells = <0>;
+		};
+
 		serial0: serial@22e600000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x2e600000 0x0 0x4000>;
@@ -119,6 +242,7 @@
 			/* Use the bootloader-enabled clocks for now. */
 			clocks = <&clkref>, <&clkref>;
 			clock-names = "uart", "clk_uart_baud0";
+			power-domains = <&ps_uart0>;
 			status = "disabled";
 		};
 
@@ -127,11 +251,28 @@
 			reg = <0x2 0x32100000 0x0 0x8000>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			power-domains = <&ps_aic>;
+		};
+
+		pmgr: power-management@232000000 {
+			compatible = "apple,t8015-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x32000000 0 0x8c000>;
+		};
+
+		dwi_bl: backlight@232200080 {
+			compatible = "apple,t8015-dwi-bl", "apple,dwi-bl";
+			reg = <0x2 0x32200080 0x0 0x8>;
+			power-domains = <&ps_dwi>;
+			status = "disabled";
 		};
 
 		pinctrl_ap: pinctrl@233100000 {
 			compatible = "apple,t8015-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x33100000 0x0 0x1000>;
+			power-domains = <&ps_gpio>;
 
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -188,6 +329,14 @@
 				     <AIC_IRQ 170 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		pmgr_mini: power-management@235200000 {
+			compatible = "apple,t8015-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			reg = <0x2 0x35200000 0 0x84000>;
+		};
+
 		wdt: watchdog@2352b0000 {
 			compatible = "apple,t8015-wdt", "apple,wdt";
 			reg = <0x2 0x352b0000 0x0 0x4000>;
@@ -232,3 +381,5 @@
 			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
+
+#include "t8015-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
index 1c3e37f86d46d7..d52a0b4525c041 100644
--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -18,9 +18,23 @@
 
 	aliases {
 		ethernet0 = &ethernet0;
+		sio = &sio;
 	};
 };
 
+&dcp {
+	apple,connector-type = "HDMI-A";
+};
+
+/* Remove this override once m1n1 enables the SIO nodes after setup */
+&sio {
+	status = "okay";
+};
+
+&dpaudio0 {
+	status = "okay";
+};
+
 &bluetooth0 {
 	brcm,board-type = "apple,atlantisb";
 };
@@ -29,6 +43,18 @@
 	brcm,board-type = "apple,atlantisb";
 };
 
+/*
+ * Provide labels for the USB Type-C ports.
+ */
+
+&typec0 {
+	label = "USB-C Back-left";
+};
+
+&typec1 {
+	label = "USB-C Back-right";
+};
+
 /*
  * Force the bus number assignments so that we can declare some of the
  * on-board devices and properties that are populated by the bootloader
@@ -58,6 +84,65 @@
 	status = "okay";
 };
 
+&i2c1 {
+	speaker_amp: codec@31 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x31>;
+		shutdown-gpios = <&pinctrl_ap 181 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,sdout-zero-fill;
+	};
+};
+
 &i2c2 {
 	status = "okay";
+
+	jack_codec: codec@48 {
+		compatible = "cirrus,cs42l83";
+		reg = <0x48>;
+		reset-gpios = <&pinctrl_nub 11 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <183 IRQ_TYPE_LEVEL_LOW>;
+		#sound-dai-cells = <0>;
+		cirrus,ts-inv = <1>;
+		sound-name-prefix = "Jack";
+	};
 };
+
+/ {
+	sound {
+		compatible = "apple,j274-macaudio", "apple,macaudio";
+		model = "Mac mini J274";
+
+		dai-link@0 {
+			link-name = "Speaker";
+
+			cpu {
+				sound-dai = <&mca 0>;
+			};
+			codec {
+				sound-dai = <&speaker_amp>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&gpu {
+	apple,perf-base-pstate = <3>;
+};
+
+#include "hwmon-mini.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
index 56b0c67bfcda32..f7654875668112 100644
--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
@@ -17,6 +17,15 @@
 	compatible = "apple,j293", "apple,t8103", "apple,arm-platform";
 	model = "Apple MacBook Pro (13-inch, M1, 2020)";
 
+	/*
+	 * All of those are used by the bootloader to pass calibration
+	 * blobs and other device-specific properties
+	 */
+	aliases {
+		touchbar0 = &touchbar0;
+		sep = &sep;
+	};
+
 	led-controller {
 		compatible = "pwm-leds";
 		led-0 {
@@ -30,6 +39,19 @@
 	};
 };
 
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j293", "apple,panel";
+		width-mm = <286>;
+		height-mm = <179>;
+		apple,max-brightness = <525>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
 &bluetooth0 {
 	brcm,board-type = "apple,honshu";
 };
@@ -38,8 +60,117 @@
 	brcm,board-type = "apple,honshu";
 };
 
+/*
+ * Provide labels for the USB Type-C ports.
+ */
+
+&typec0 {
+	label = "USB-C Left-back";
+};
+
+&typec1 {
+	label = "USB-C Left-front";
+};
+
+&spi3 {
+	status = "okay";
+
+	hid-transport@0 {
+		compatible = "apple,spi-hid-transport";
+		reg = <0>;
+		spi-max-frequency = <8000000>;
+		/*
+		 * Apple's ADT specifies 20us CS change delays, and the
+		 * SPI HID interface metadata specifies 45us. Neither value
+		 * alone seems to be reliable, but the sum of both works, so
+		 * the best guess is that they are cumulative.
+		 */
+		spi-cs-setup-delay-ns = <65000>;
+		spi-cs-hold-delay-ns = <65000>;
+		spi-cs-inactive-delay-ns = <250000>;
+		spien-gpios = <&pinctrl_ap 195 0>;
+		interrupts-extended = <&pinctrl_nub 13 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+/* Virtual regulator representing the shared shutdown GPIO */
+/ {
+	speaker_sdz: fixed-regulator-tas5770-sdz {
+		compatible = "regulator-fixed";
+		regulator-name = "tas5770-sdz";
+		startup-delay-us = <5000>;
+		gpios = <&pinctrl_ap 181 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&i2c1 {
+	speaker_left_rear: codec@31 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x31>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Rear";
+		interrupts-extended = <&pinctrl_ap 182 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <8>;
+		ti,vmon-slot-no = <10>;
+		ti,pdm-slot-no = <12>;
+	};
+
+	speaker_left_front: codec@32 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x32>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Front";
+		interrupts-extended = <&pinctrl_ap 182 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,pdm-slot-no = <4>;
+		ti,sdout-pull-down;
+	};
+};
+
 &i2c2 {
 	status = "okay";
+
+	jack_codec: codec@48 {
+		compatible = "cirrus,cs42l83";
+		reg = <0x48>;
+		reset-gpios = <&pinctrl_nub 11 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <183 IRQ_TYPE_LEVEL_LOW>;
+		#sound-dai-cells = <0>;
+		cirrus,ts-inv = <1>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&i2c3 {
+	speaker_right_rear: codec@34 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x34>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Rear";
+		interrupts-extended = <&pinctrl_ap 182 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <12>;
+		ti,vmon-slot-no = <14>;
+		ti,pdm-slot-no = <16>;
+	};
+
+	speaker_right_front: codec@35 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x35>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Front";
+		interrupts-extended = <&pinctrl_ap 182 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+		ti,pdm-slot-no = <8>;
+		ti,sdout-pull-down;
+	};
 };
 
 &i2c4 {
@@ -49,3 +180,127 @@
 &fpwm1 {
 	status = "okay";
 };
+
+&spi0 {
+	cs-gpios = <&pinctrl_ap 109 GPIO_ACTIVE_LOW>;
+	status = "okay";
+
+	touchbar0: touchbar@0 {
+		compatible = "apple,j293-touchbar";
+		reg = <0>;
+		spi-max-frequency = <11500000>;
+		spi-cs-setup-delay-ns = <2000>;
+		spi-cs-hold-delay-ns = <2000>;
+		reset-gpios = <&pinctrl_ap 139 GPIO_ACTIVE_LOW>;
+		interrupts-extended = <&pinctrl_ap 194 IRQ_TYPE_EDGE_FALLING>;
+		firmware-name = "apple/dfrmtfw-j293.bin";
+		touchscreen-size-x = <23045>;
+		touchscreen-size-y = <640>;
+		touchscreen-inverted-y;
+	};
+};
+
+/*
+ * The driver depends on bootloader-initialized state, which is lost when this
+ * power domain is powered off. That happens on suspend or when the driver is
+ * missing during boot. Mark the domain as always-on until the driver can
+ * handle this.
+ */
+&ps_dispdfr_be {
+	apple,always-on;
+};
+
+&display_dfr {
+	status = "okay";
+};
+
+&dfr_mipi_out {
+	dfr_mipi_out_panel: endpoint@0 {
+		reg = <0>;
+		remote-endpoint = <&dfr_panel_in>;
+	};
+};
+
+&displaydfr_mipi {
+	status = "okay";
+
+	dfr_panel: panel@0 {
+		compatible = "apple,j293-summit", "apple,summit";
+		reg = <0>;
+		max-brightness = <255>;
+
+		port {
+			dfr_panel_in: endpoint {
+				remote-endpoint = <&dfr_mipi_out_panel>;
+			};
+		};
+	};
+};
+
+&displaydfr_dart {
+	status = "okay";
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&sep {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J293";
+	apple,machine-kind = "MacBook Pro";
+};
+
+/ {
+	sound {
+		compatible = "apple,j293-macaudio", "apple,macaudio";
+		model = "MacBook Pro J293";
+
+		dai-link@0 {
+			link-name = "Speakers";
+
+			cpu {
+				sound-dai = <&mca 0>, <&mca 1>;
+			};
+			codec {
+				sound-dai = <&speaker_left_front>, <&speaker_left_rear>,
+					    <&speaker_right_front>, <&speaker_right_rear>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+#include "isp-imx248.dtsi"
+
+&isp {
+	apple,platform-id = <1>;
+};
+
+#include "hwmon-fan.dtsi"
+#include "hwmon-laptop.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-j313.dts b/arch/arm64/boot/dts/apple/t8103-j313.dts
index 97a4344d8dca68..5713c06b7d483d 100644
--- a/arch/arm64/boot/dts/apple/t8103-j313.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j313.dts
@@ -17,6 +17,10 @@
 	compatible = "apple,j313", "apple,t8103", "apple,arm-platform";
 	model = "Apple MacBook Air (M1, 2020)";
 
+	aliases {
+		sep = &sep;
+	};
+
 	led-controller {
 		compatible = "pwm-leds";
 		led-0 {
@@ -30,6 +34,19 @@
 	};
 };
 
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j313", "apple,panel";
+		width-mm = <286>;
+		height-mm = <179>;
+		apple,max-brightness = <420>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
 &bluetooth0 {
 	brcm,board-type = "apple,shikoku";
 };
@@ -41,3 +58,148 @@
 &fpwm1 {
 	status = "okay";
 };
+
+/*
+ * Provide labels for the USB Type-C ports.
+ */
+
+&typec0 {
+	label = "USB-C Left-back";
+};
+
+&typec1 {
+	label = "USB-C Left-front";
+};
+
+&spi3 {
+	status = "okay";
+
+	hid-transport@0 {
+		compatible = "apple,spi-hid-transport";
+		reg = <0>;
+		spi-max-frequency = <8000000>;
+		/*
+		 * Apple's ADT specifies 20us CS change delays, and the
+		 * SPI HID interface metadata specifies 45us. Neither value
+		 * alone seems to be reliable, but the sum of both works, so
+		 * the best guess is that they are cumulative.
+		 */
+		spi-cs-setup-delay-ns = <65000>;
+		spi-cs-hold-delay-ns = <65000>;
+		spi-cs-inactive-delay-ns = <250000>;
+		spien-gpios = <&pinctrl_ap 195 0>;
+		interrupts-extended = <&pinctrl_nub 13 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+/* Virtual regulator representing the shared shutdown GPIO */
+/ {
+	speaker_sdz: fixed-regulator-tas5770-sdz {
+		compatible = "regulator-fixed";
+		regulator-name = "tas5770-sdz";
+		startup-delay-us = <5000>;
+		gpios = <&pinctrl_ap 181 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&i2c1 {
+	speaker_left: codec@31 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x31>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left";
+		interrupts-extended = <&pinctrl_ap 182 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,sdout-zero-fill;
+	};
+};
+
+&i2c3 {
+	speaker_right: codec@34 {
+		compatible = "ti,tas5770l", "ti,tas2770";
+		reg = <0x34>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right";
+		interrupts-extended = <&pinctrl_ap 182 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+		ti,sdout-zero-fill;
+	};
+
+	jack_codec: codec@48 {
+		compatible = "cirrus,cs42l83";
+		reg = <0x48>;
+		reset-gpios = <&pinctrl_nub 11 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <183 IRQ_TYPE_LEVEL_LOW>;
+		#sound-dai-cells = <0>;
+		cirrus,ts-inv = <1>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&sep {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J313";
+	apple,machine-kind = "MacBook Air";
+};
+
+/ {
+	sound {
+		compatible = "apple,j313-macaudio", "apple,macaudio";
+		model = "MacBook Air J313";
+
+		dai-link@0 {
+			link-name = "Speakers";
+
+			cpu {
+				sound-dai = <&mca 0>, <&mca 1>;
+			};
+			codec {
+				sound-dai = <&speaker_left>, <&speaker_right>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+#include "isp-imx248.dtsi"
+
+&isp {
+	apple,platform-id = <1>;
+};
+
+#include "hwmon-laptop.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-j456.dts b/arch/arm64/boot/dts/apple/t8103-j456.dts
index 58c8e43789b486..5df88a922f4053 100644
--- a/arch/arm64/boot/dts/apple/t8103-j456.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j456.dts
@@ -21,6 +21,19 @@
 	};
 };
 
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j456", "apple,panel";
+		width-mm = <522>;
+		height-mm = <294>;
+		apple,max-brightness = <525>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
 &bluetooth0 {
 	brcm,board-type = "apple,capri";
 };
@@ -47,6 +60,18 @@
 	};
 };
 
+/*
+ * Provide labels for the USB Type-C ports.
+ */
+
+&typec0 {
+	label = "USB-C Back-right";
+};
+
+&typec1 {
+	label = "USB-C Back-right-middle";
+};
+
 /*
  * Force the bus number assignments so that we can declare some of the
  * on-board devices and properties that are populated by the bootloader
@@ -75,3 +100,72 @@
 &pcie0_dart_2 {
 	status = "okay";
 };
+
+&i2c1 {
+	jack_codec: codec@48 {
+		compatible = "cirrus,cs42l83";
+		reg = <0x48>;
+		reset-gpios = <&pinctrl_nub 11 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <183 IRQ_TYPE_LEVEL_LOW>;
+		#sound-dai-cells = <0>;
+		cirrus,ts-inv = <1>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&sep {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J456";
+	apple,machine-kind = "iMac";
+	apple,no-beamforming;
+};
+
+/ {
+	sound {
+		compatible = "apple,j456-macaudio", "apple,macaudio";
+		model = "iMac J456";
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&gpu {
+	apple,perf-base-pstate = <3>;
+};
+
+#include "isp-imx364.dtsi"
+
+&isp {
+	apple,platform-id = <2>;
+};
+
+#include "hwmon-fan-dual.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
index 152f95fd49a211..d83ba3b9cf4856 100644
--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
@@ -21,6 +21,19 @@
 	};
 };
 
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j457", "apple,panel";
+		width-mm = <522>;
+		height-mm = <294>;
+		apple,max-brightness = <525>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
 &bluetooth0 {
 	brcm,board-type = "apple,santorini";
 };
@@ -29,12 +42,34 @@
 	brcm,board-type = "apple,santorini";
 };
 
+/*
+ * Provide labels for the USB Type-C ports.
+ */
+
+&typec0 {
+	label = "USB-C Back-right";
+};
+
+&typec1 {
+	label = "USB-C Back-left";
+};
+
 /*
  * Force the bus number assignments so that we can declare some of the
  * on-board devices and properties that are populated by the bootloader
  * (such as MAC addresses).
  */
 
+&port01 {
+	/*
+	 * TODO: do not enable this port without a device attached. Enabling it
+	 * works around a Linux bug that results in mismatched iommus when
+	 * there are gaps in the PCI(e) port / bus numbering.
+	 */
+	bus-range = <2 2>;
+	status = "okay";
+};
+
 &port02 {
 	bus-range = <3 3>;
 	status = "okay";
@@ -45,6 +80,79 @@
 	};
 };
 
+&pcie0_dart_1 {
+	status = "okay";
+};
+
 &pcie0_dart_2 {
 	status = "okay";
 };
+
+&i2c1 {
+	jack_codec: codec@48 {
+		compatible = "cirrus,cs42l83";
+		reg = <0x48>;
+		reset-gpios = <&pinctrl_nub 11 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <183 IRQ_TYPE_LEVEL_LOW>;
+		#sound-dai-cells = <0>;
+		cirrus,ts-inv = <1>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&sep {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J457";
+	apple,machine-kind = "iMac";
+	apple,no-beamforming;
+};
+
+/ {
+	sound {
+		compatible = "apple,j457-macaudio", "apple,macaudio";
+		model = "iMac J457";
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&gpu {
+	apple,perf-base-pstate = <3>;
+};
+
+#include "isp-imx364.dtsi"
+
+&isp {
+	apple,platform-id = <2>;
+};
+
+#include "hwmon-fan.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
index 5988a4eb6efaa0..a1fa0d6eecf7f9 100644
--- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
@@ -12,9 +12,15 @@
 / {
 	aliases {
 		bluetooth0 = &bluetooth0;
+		dcp = &dcp;
+		disp0 = &display;
+		disp0_piodma = &disp0_piodma;
+		nvram = &nvram;
 		serial0 = &serial0;
 		serial2 = &serial2;
 		wifi0 = &wifi0;
+		atcphy0 = &atcphy0;
+		atcphy1 = &atcphy1;
 	};
 
 	chosen {
@@ -27,11 +33,19 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0_cpu0>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		/* To be filled by loader */
+	};
+
 	memory@800000000 {
 		device_type = "memory";
 		reg = <0x8 0 0x2 0>; /* To be filled by loader */
@@ -53,6 +67,29 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <106 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
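+		/*
+		 * USB Type-C connector graph: port 0 carries the USB 2.0
+		 * (high-speed) signals to the dwc3 controller, port 1 the
+		 * SuperSpeed lanes to the Type-C PHY (see the endpoints below).
+		 */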
+		typec0: connector {
+			compatible = "usb-c-connector";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec0_con_hs: endpoint {
+						remote-endpoint = <&typec0_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec0_con_ss: endpoint {
+						remote-endpoint = <&typec0_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm1: usb-pd@3f {
@@ -61,6 +98,63 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <106 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec1: connector {
+			compatible = "usb-c-connector";
+			power-role = "dual";
+			data-role = "dual";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec1_con_hs: endpoint {
+						remote-endpoint = <&typec1_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec1_con_ss: endpoint {
+						remote-endpoint = <&typec1_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+/* USB controllers */
+&dwc3_0 {
+	port {
+		typec0_usb_hs: endpoint {
+			remote-endpoint = <&typec0_con_hs>;
+		};
+	};
+};
+
+&dwc3_1 {
+	port {
+		typec1_usb_hs: endpoint {
+			remote-endpoint = <&typec1_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0 {
+	port {
+		typec0_usb_ss: endpoint {
+			remote-endpoint = <&typec0_con_ss>;
+		};
+	};
+};
+
+&atcphy1 {
+	port {
+		typec1_usb_ss: endpoint {
+			remote-endpoint = <&typec1_con_ss>;
+		};
 	};
 };
 
@@ -71,6 +165,7 @@
  */
 &port00 {
 	bus-range = <1 1>;
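+	/* Power enable for the module behind this port, driven via an SMC GPIO */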
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
 	wifi0: network@0,0 {
 		compatible = "pci14e4,4425";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
@@ -90,3 +185,7 @@
 &nco_clkref {
 	clock-frequency = <900000000>;
 };
+
+#include "hwmon-common.dtsi"
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
index 9645861a858c1a..5d3846d44e3578 100644
--- a/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
@@ -234,7 +234,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "sio_cpu";
-		power-domains = <&ps_sio>;
+		power-domains = <&ps_sio &ps_uart_p &ps_spi_p &ps_dpa0>;
 	};
 
 	ps_fpwm0: power-controller@1d8 {
@@ -387,6 +387,15 @@
 		power-domains = <&ps_sio>, <&ps_spi_p>;
 	};
 
+	ps_spi4: power-controller@260 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x260 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "spi4";
+		power-domains = <&ps_sio>, <&ps_spi_p>;
+	};
+
 	ps_uart_n: power-controller@268 {
 		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
 		reg = <0x268 4>;
@@ -484,6 +493,7 @@
 		#reset-cells = <0>;
 		label = "mca0";
 		power-domains = <&ps_audio_p>, <&ps_sio_adma>;
+		apple,externally-clocked;
 	};
 
 	ps_mca1: power-controller@2c0 {
@@ -493,6 +503,7 @@
 		#reset-cells = <0>;
 		label = "mca1";
 		power-domains = <&ps_audio_p>, <&ps_sio_adma>;
+		apple,externally-clocked;
 	};
 
 	ps_mca2: power-controller@2c8 {
@@ -502,6 +513,7 @@
 		#reset-cells = <0>;
 		label = "mca2";
 		power-domains = <&ps_audio_p>, <&ps_sio_adma>;
+		apple,externally-clocked;
 	};
 
 	ps_mca3: power-controller@2d0 {
@@ -511,6 +523,7 @@
 		#reset-cells = <0>;
 		label = "mca3";
 		power-domains = <&ps_audio_p>, <&ps_sio_adma>;
+		apple,externally-clocked;
 	};
 
 	ps_mca4: power-controller@2d8 {
@@ -520,6 +533,7 @@
 		#reset-cells = <0>;
 		label = "mca4";
 		power-domains = <&ps_audio_p>, <&ps_sio_adma>;
+		apple,externally-clocked;
 	};
 
 	ps_mca5: power-controller@2e0 {
@@ -529,6 +543,7 @@
 		#reset-cells = <0>;
 		label = "mca5";
 		power-domains = <&ps_audio_p>, <&ps_sio_adma>;
+		apple,externally-clocked;
 	};
 
 	ps_dpa0: power-controller@2e8 {
@@ -558,15 +573,6 @@
 		apple,always-on; /* Memory controller */
 	};
 
-	ps_spi4: power-controller@260 {
-		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
-		reg = <0x260 4>;
-		#power-domain-cells = <0>;
-		#reset-cells = <0>;
-		label = "spi4";
-		power-domains = <&ps_sio>, <&ps_spi_p>;
-	};
-
 	ps_dcs0: power-controller@300 {
 		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
 		reg = <0x300 4>;
@@ -645,8 +651,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "disp0_fe";
-		power-domains = <&ps_rmx>;
-		apple,always-on; /* TODO: figure out if we can enable PM here */
+		power-domains = <&ps_rmx>, <&ps_pmp>;
 	};
 
 	ps_dispext_fe: power-controller@368 {
@@ -655,7 +660,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "dispext_fe";
-		power-domains = <&ps_rmx>;
+		power-domains = <&ps_rmx>, <&ps_pmp>;
 	};
 
 	ps_dispext_cpu0: power-controller@378 {
@@ -717,6 +722,7 @@
 		#reset-cells = <0>;
 		label = "apcie_gp";
 		power-domains = <&ps_apcie>;
+		apple,always-on; /* Breaks things if shut down */
 	};
 
 	ps_ans2: power-controller@3f0 {
@@ -733,6 +739,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "gfx";
+		power-domains = <&ps_pmp>;
 	};
 
 	ps_dcs4: power-controller@320 {
@@ -805,6 +812,7 @@
 		#reset-cells = <0>;
 		label = "isp_sys";
 		power-domains = <&ps_rmx>;
+		status = "disabled";
 	};
 
 	ps_venc_sys: power-controller@408 {
@@ -1000,9 +1008,125 @@
 		#reset-cells = <0>;
 		label = "disp0_cpu0";
 		power-domains = <&ps_disp0_fe>;
-		apple,always-on; /* TODO: figure out if we can enable PM here */
 		apple,min-state = <4>;
 	};
+
+	/*
+	 * There is a dependency tree involved with these PDs,
+	 * but we do not express it here since the ISP driver
+	 * is supposed to sequence them in the right order anyway
+	 * (and we do not know the exact tree structure).
+	 *
+	 * This also works around spurious parent PD activation
+	 * on machines with ISP disabled (desktops).
+	 */
+	ps_isp_set0: power-controller@4000 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set0";
+		apple,force-disable;
+	};
+
+	ps_isp_set1: power-controller@4008 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set1";
+		apple,force-disable;
+		apple,force-reset;
+	};
+
+	ps_isp_set2: power-controller@4010 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set2";
+		apple,force-disable;
+		apple,force-reset;
+	};
+
+	ps_isp_fe: power-controller@4018 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_fe";
+	};
+
+	ps_isp_set4: power-controller@4020 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set4";
+	};
+
+	ps_isp_set5: power-controller@4028 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set5";
+	};
+
+	ps_isp_set6: power-controller@4030 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set6";
+	};
+
+	ps_isp_set7: power-controller@4038 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4038 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set7";
+	};
+
+	ps_isp_set8: power-controller@4040 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set8";
+	};
+
+	ps_isp_set9: power-controller@4048 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set9";
+	};
+
+	ps_isp_set10: power-controller@4050 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set10";
+	};
+
+	ps_isp_set11: power-controller@4058 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set11";
+	};
+
+	ps_isp_set12: power-controller@4060 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4060 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set12";
+	};
 };
 
 &pmgr_mini {
@@ -1095,6 +1219,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "msg";
+		apple,always-on; /* Core AON device? */
 	};
 
 	ps_atc0_usb_aon: power-controller@88 {
@@ -1103,6 +1228,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "atc0_usb_aon";
+		apple,always-on; /* Needs to stay on for dwc3 to work */
 	};
 
 	ps_atc1_usb_aon: power-controller@90 {
@@ -1111,6 +1237,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "atc1_usb_aon";
+		apple,always-on; /* Needs to stay on for dwc3 to work */
 	};
 
 	ps_atc0_usb: power-controller@98 {
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index 9b0dad6b618444..9d03c55c9c5ff3 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -11,6 +11,8 @@
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/phy/phy.h>
 
 / {
 	compatible = "apple,t8103", "apple,arm-platform";
@@ -18,6 +20,10 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		gpu = &gpu;
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -188,26 +194,31 @@
 			opp-hz = /bits/ 64 <600000000>;
 			opp-level = <1>;
 			clock-latency-ns = <7500>;
+			opp-microwatt = <47296>;
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <972000000>;
 			opp-level = <2>;
 			clock-latency-ns = <22000>;
+			opp-microwatt = <99715>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1332000000>;
 			opp-level = <3>;
 			clock-latency-ns = <27000>;
+			opp-microwatt = <188860>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1704000000>;
 			opp-level = <4>;
 			clock-latency-ns = <33000>;
+			opp-microwatt = <288891>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <2064000000>;
 			opp-level = <5>;
 			clock-latency-ns = <50000>;
+			opp-microwatt = <412979>;
 		};
 	};
 
@@ -218,83 +229,140 @@
 			opp-hz = /bits/ 64 <600000000>;
 			opp-level = <1>;
 			clock-latency-ns = <8000>;
+			opp-microwatt = <290230>;
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <828000000>;
 			opp-level = <2>;
 			clock-latency-ns = <19000>;
+			opp-microwatt = <449013>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1056000000>;
 			opp-level = <3>;
 			clock-latency-ns = <21000>;
+			opp-microwatt = <647097>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1284000000>;
 			opp-level = <4>;
 			clock-latency-ns = <23000>;
+			opp-microwatt = <865620>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <1500000000>;
 			opp-level = <5>;
 			clock-latency-ns = <24000>;
+			opp-microwatt = <1112838>;
 		};
 		opp06 {
 			opp-hz = /bits/ 64 <1728000000>;
 			opp-level = <6>;
 			clock-latency-ns = <29000>;
+			opp-microwatt = <1453271>;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <1956000000>;
 			opp-level = <7>;
 			clock-latency-ns = <31000>;
+			opp-microwatt = <1776667>;
 		};
 		opp08 {
 			opp-hz = /bits/ 64 <2184000000>;
 			opp-level = <8>;
 			clock-latency-ns = <34000>;
+			opp-microwatt = <2366690>;
 		};
 		opp09 {
 			opp-hz = /bits/ 64 <2388000000>;
 			opp-level = <9>;
 			clock-latency-ns = <36000>;
+			opp-microwatt = <2892193>;
 		};
 		opp10 {
 			opp-hz = /bits/ 64 <2592000000>;
 			opp-level = <10>;
 			clock-latency-ns = <51000>;
+			opp-microwatt = <3475417>;
 		};
 		opp11 {
 			opp-hz = /bits/ 64 <2772000000>;
 			opp-level = <11>;
 			clock-latency-ns = <54000>;
+			opp-microwatt = <3959410>;
 		};
 		opp12 {
 			opp-hz = /bits/ 64 <2988000000>;
 			opp-level = <12>;
 			clock-latency-ns = <55000>;
+			opp-microwatt = <4540620>;
 		};
-#if 0
 		/* Not available until CPU deep sleep is implemented */
 		opp13 {
 			opp-hz = /bits/ 64 <3096000000>;
 			opp-level = <13>;
 			clock-latency-ns = <55000>;
+			opp-microwatt = <4745031>;
 			turbo-mode;
 		};
 		opp14 {
 			opp-hz = /bits/ 64 <3144000000>;
 			opp-level = <14>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <4822390>;
 			turbo-mode;
 		};
 		opp15 {
 			opp-hz = /bits/ 64 <3204000000>;
 			opp-level = <15>;
 			clock-latency-ns = <56000>;
+			opp-microwatt = <4951324>;
 			turbo-mode;
 		};
-#endif
+	};
+
+	gpu_opp: opp-table-gpu {
+		compatible = "operating-points-v2";
+
+		/*
+		 * NOTE: The voltage and power values are device-specific and
+		 * must be filled in by the bootloader.
+		 */
+		opp00 {
+			opp-hz = /bits/ 64 <0>;
+			opp-microvolt = <400000>;
+			opp-microwatt = <0>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <396000000>;
+			opp-microvolt = <603000>;
+			opp-microwatt = <3714690>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <528000000>;
+			opp-microvolt = <640000>;
+			opp-microwatt = <5083260>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <720000000>;
+			opp-microvolt = <690000>;
+			opp-microwatt = <7429380>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <924000000>;
+			opp-microvolt = <784000>;
+			opp-microwatt = <11730600>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1128000000>;
+			opp-microvolt = <862000>;
+			opp-microwatt = <17009370>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1278000000>;
+			opp-microvolt = <931000>;
+			opp-microwatt = <19551000>;
+		};
 	};
 
 	timer {
@@ -326,6 +394,36 @@
 		clock-output-names = "clkref";
 	};
 
+	clk_120m: clock-120m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <120000000>;
+		clock-output-names = "clk_120m";
+	};
+
+	clk_200m: clock-200m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <200000000>;
+		clock-output-names = "clk_200m";
+	};
+
+	/* Pixel clock? Frequency in Hz (compare: 533.25 MHz pixel clock for 4K@60) */
+	clk_disp0: clock-disp0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <533333328>;
+		clock-output-names = "clk_disp0";
+	};
+
+	/* Pixel clock? Frequency in Hz (compare: 533.25 MHz pixel clock for 4K@60) */
+	clk_dispext0: clock-dispext0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext0";
+	};
+
 	/*
 	 * This is a fabulated representation of the input clock
 	 * to NCO since we don't know the true clock tree.
@@ -336,6 +434,24 @@
 		clock-output-names = "nco_ref";
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
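+		/*
+		 * GPU UAT carveouts (TTBs, page tables, handoff region); the
+		 * placeholder reg values are expected to be filled in by the
+		 * loader.
+		 */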
+		uat_handoff: uat-handoff {
+			reg = <0 0 0 0>;
+		};
+
+		uat_pagetables: uat-pagetables {
+			reg = <0 0 0 0>;
+		};
+
+		uat_ttbs: uat-ttbs {
+			reg = <0 0 0 0>;
+		};
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -343,6 +459,72 @@
 
 		ranges;
 		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
+
+		gpu: gpu@206400000 {
+			compatible = "apple,agx-t8103", "apple,agx-g13g";
+			reg = <0x2 0x6400000 0 0x40000>,
+				<0x2 0x4000000 0 0x1000000>;
+			reg-names = "asc", "sgx";
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 563 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 564 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 565 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 566 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 579 IRQ_TYPE_LEVEL_HIGH>;
+			mboxes = <&agx_mbox>;
+			power-domains = <&ps_gfx>;
+			memory-region = <&uat_ttbs>, <&uat_pagetables>, <&uat_handoff>;
+			memory-region-names = "ttbs", "pagetables", "handoff";
+
+			apple,firmware-version = <12 3 0>;
+			apple,firmware-compat = <12 3 0>;
+
+			operating-points-v2 = <&gpu_opp>;
+			apple,perf-base-pstate = <1>;
+			apple,min-sram-microvolt = <850000>;
+			apple,avg-power-filter-tc-ms = <1000>;
+			apple,avg-power-ki-only = <7.5>;
+			apple,avg-power-kp = <4.0>;
+			apple,avg-power-min-duty-cycle = <40>;
+			apple,avg-power-target-filter-tc = <125>;
+			apple,fast-die0-integral-gain = <200.0>;
+			apple,fast-die0-proportional-gain = <5.0>;
+			apple,perf-filter-drop-threshold = <0>;
+			apple,perf-filter-time-constant = <5>;
+			apple,perf-filter-time-constant2 = <50>;
+			apple,perf-integral-gain2 = <0.197392>;
+			apple,perf-integral-min-clamp = <0>;
+			apple,perf-proportional-gain2 = <6.853981>;
+			apple,perf-tgt-utilization = <85>;
+			apple,power-sample-period = <8>;
+			apple,power-zones = <30000 100 6875>;
+			apple,ppm-filter-time-constant-ms = <100>;
+			apple,ppm-ki = <91.5>;
+			apple,ppm-kp = <6.9>;
+			apple,pwr-filter-time-constant = <313>;
+			apple,pwr-integral-gain = <0.0202129>;
+			apple,pwr-integral-min-clamp = <0>;
+			apple,pwr-min-duty-cycle = <40>;
+			apple,pwr-proportional-gain = <5.2831855>;
+
+			apple,core-leak-coef = <1000.0>;
+			apple,sram-leak-coef = <45.0>;
+		};
+
+		agx_mbox: mbox@206408000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x6408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 575 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 576 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 577 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 578 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+		};
 
 		cpufreq_e: performance-controller@210e20000 {
 			compatible = "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
@@ -356,6 +538,207 @@
 			#performance-domain-cells = <0>;
 		};
 
+		display_dfr: display-pipe@228200000 {
+			compatible = "apple,t8103-display-pipe", "apple,h7-display-pipe";
+			reg = <0x2 0x28200000 0x0 0xc000>,
+			      <0x2 0x28400000 0x0 0x4000>;
+			reg-names = "be", "fe";
+			power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 502 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 506 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "be", "fe";
+			iommus = <&displaydfr_dart 0>;
+			status = "disabled";
+
+			port {
+				dfr_adp_out_mipi: endpoint {
+					remote-endpoint = <&dfr_mipi_in_adp>;
+				};
+			};
+		};
+
+		displaydfr_dart: iommu@228304000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x28304000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 504 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_dispdfr_fe>;
+			status = "disabled";
+		};
+
+		displaydfr_mipi: dsi@228600000 {
+			compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi";
+			reg = <0x2 0x28600000 0x0 0x100000>;
+			power-domains = <&ps_mipi_dsi>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				dfr_mipi_in: port@0 {
+					reg = <0>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					dfr_mipi_in_adp: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&dfr_adp_out_mipi>;
+					};
+				};
+
+				dfr_mipi_out: port@1 {
+					reg = <1>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+				};
+			};
+		};
+
+		disp0_dart: iommu@231304000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x31304000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 445 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_disp0_cpu0>;
+			apple,dma-range = <0x0 0x0 0x0 0xfc000000>;
+			status = "disabled";
+		};
+
+		dcp_dart: iommu@23130c000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x3130c000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 445 IRQ_TYPE_LEVEL_HIGH>;
+			apple,dma-range = <0xf 0x00000000 0x0 0xfc000000>;
+			power-domains = <&ps_disp0_cpu0>;
+		};
+
+		dcp_mbox: mbox@231c08000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x31c08000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 427 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 428 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 429 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 430 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			power-domains = <&ps_disp0_cpu0>;
+			resets = <&ps_disp0_cpu0>;
+		};
+
+		dcp: dcp@231c00000 {
+			compatible = "apple,t8103-dcp", "apple,dcp";
+			mboxes = <&dcp_mbox>;
+			mbox-names = "mbox";
+			iommus = <&dcp_dart 0>;
+
+			reg-names = "coproc", "disp-0", "disp-1", "disp-2",
+				"disp-3", "disp-4";
+			reg = <0x2 0x31c00000 0x0 0x4000>,
+				<0x2 0x30000000 0x0 0x3e8000>,
+				<0x2 0x31320000 0x0 0x4000>,
+				<0x2 0x31344000 0x0 0x4000>,
+				<0x2 0x31800000 0x0 0x800000>,
+				<0x2 0x3b3d0000 0x0 0x4000>;
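+			/*
+			 * Scratch and doorbell registers for display bandwidth
+			 * requests; they live in the pmgr_dcp block declared
+			 * further down.
+			 */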
+			apple,bw-scratch = <&pmgr_dcp 0 5 0x14>;
+			apple,bw-doorbell = <&pmgr_dcp 1 6>;
+			power-domains = <&ps_disp0_cpu0>;
+			resets = <&ps_disp0_cpu0>;
+			clocks = <&clk_disp0>;
+			phandle = <&dcp>;
+			/* Required bus properties for the 'piodma' subdevice */
+			#address-cells = <2>;
+			#size-cells = <2>;
+
+			disp0_piodma: piodma {
+				iommus = <&disp0_dart 4>;
+				phandle = <&disp0_piodma>;
+			};
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dcp_audio: endpoint {
+						remote-endpoint = <&dpaudio0_dcp>;
+					};
+				};
+			};
+		};
+
+		display: display-subsystem {
+			compatible = "apple,display-subsystem";
+			/* disp0_dart must be first since it is locked */
+			iommus = <&disp0_dart 0>;
+			/* generate phandle explicitly for use in loader */
+			phandle = <&display>;
+		};
+
+		isp_dart0: iommu@22c0e8000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x2c0e8000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 251 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>;
+
+			status = "disabled";
+		};
+
+		isp_dart1: iommu@22c0f4000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x2c0f4000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 251 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>;
+
+			status = "disabled";
+		};
+
+		isp_dart2: iommu@22c0fc000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x2c0fc000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 251 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>;
+
+			status = "disabled";
+		};
+
+		isp: isp@22a000000 {
+			compatible = "apple,t8103-isp", "apple,isp";
+			iommus = <&isp_dart0 0>, <&isp_dart1 0>, <&isp_dart2 0>;
+			reg-names = "coproc", "mbox", "gpio", "mbox2";
+			reg = <0x2 0x2a000000 0x0 0x2000000>,
+				<0x2 0x2c104000 0x0 0x100>,
+				<0x2 0x2c104170 0x0 0x100>,
+				<0x2 0x2c1043f0 0x0 0x100>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 246 IRQ_TYPE_LEVEL_HIGH>;
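+			/*
+			 * The ISP driver is expected to sequence these power
+			 * domains itself; see the note in t8103-pmgr.dtsi.
+			 */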
+			power-domains = <&ps_isp_sys>, <&ps_isp_set0>,
+				<&ps_isp_set1>, <&ps_isp_set2>, <&ps_isp_fe>,
+				<&ps_isp_set4>, <&ps_isp_set5>, <&ps_isp_set6>,
+				<&ps_isp_set7>, <&ps_isp_set8>, <&ps_isp_set9>,
+				<&ps_isp_set10>, <&ps_isp_set11>,
+				<&ps_isp_set12>;
+
+			apple,dart-vm-size = <0x0 0xa0000000>;
+
+			status = "disabled";
+		};
+
 		sio_dart: iommu@235004000 {
 			compatible = "apple,t8103-dart";
 			reg = <0x2 0x35004000 0x0 0x4000>;
@@ -441,6 +824,48 @@
 			status = "disabled";
 		};
 
+		spi0: spi@235100000 {
+			compatible = "apple,t8103-spi", "apple,spi";
+			reg = <0x2 0x35100000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 614 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_200m>;
+			pinctrl-0 = <&spi0_pins>;
+			pinctrl-names = "default";
+			power-domains = <&ps_spi0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		spi1: spi@235104000 {
+			compatible = "apple,t8103-spi", "apple,spi";
+			reg = <0x2 0x35104000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 615 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_200m>;
+			pinctrl-0 = <&spi1_pins>;
+			pinctrl-names = "default";
+			power-domains = <&ps_spi1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		spi3: spi@23510c000 {
+			compatible = "apple,t8103-spi", "apple,spi";
+			reg = <0x2 0x3510c000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 617 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_120m>;
+			pinctrl-0 = <&spi3_pins>;
+			pinctrl-names = "default";
+			power-domains = <&ps_spi3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
 		serial0: serial@235200000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x35200000 0x0 0x1000>;
@@ -469,6 +894,32 @@
 			status = "disabled";
 		};
 
+		sio_mbox: mbox@236408000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x36408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 640 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 641 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 642 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 643 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			power-domains = <&ps_sio>;
+		};
+
+		sio: sio@236400000 {
+			compatible = "apple,t8103-sio", "apple,sio";
+			reg = <0x2 0x36400000 0x0 0x8000>;
+			dma-channels = <128>;
+			#dma-cells = <1>;
+			mboxes = <&sio_mbox>;
+			iommus = <&sio_dart 0>;
+			power-domains = <&ps_sio_cpu>;
+			resets = <&ps_sio>; /* TODO: verify reset does something */
+			status = "disabled";
+		};
+
 		admac: dma-controller@238200000 {
 			compatible = "apple,t8103-admac", "apple,admac";
 			reg = <0x2 0x38200000 0x0 0x34000>;
@@ -483,6 +934,48 @@
 			resets = <&ps_audio_p>;
 		};
 
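+		/*
+		 * DisplayPort audio controllers: samples are pushed through
+		 * SIO DMA channels to the DCP audio endpoints (see the port
+		 * graphs below).
+		 */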
+		dpaudio0: audio-controller@238330000 {
+			compatible = "apple,t8103-dpaudio", "apple,dpaudio";
+			reg = <0x2 0x38330000 0x0 0x4000>;
+			dmas = <&sio 0x64>;
+			dma-names = "tx";
+			power-domains = <&ps_dpa0>;
+			reset-domains = <&ps_dpa0>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dpaudio0_dcp: endpoint {
+						remote-endpoint = <&dcp_audio>;
+					};
+				};
+			};
+		};
+
+		dpaudio1: audio-controller@238334000 {
+			compatible = "apple,t8103-dpaudio", "apple,dpaudio";
+			reg = <0x2 0x38334000 0x0 0x4000>;
+			dmas = <&sio 0x66>;
+			dma-names = "tx";
+			power-domains = <&ps_dpa1>;
+			reset-domains = <&ps_dpa1>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dpaudio1_dcp: endpoint {
+						remote-endpoint = <&dcpext_audio>;
+					};
+				};
+			};
+		};
+
 		mca: i2s@238400000 {
 			compatible = "apple,t8103-mca", "apple,mca";
 			reg = <0x2 0x38400000 0x0 0x18000>,
@@ -551,6 +1044,14 @@
 			reg = <0x2 0x3b700000 0 0x14000>;
 		};
 
+		pmgr_dcp: power-management@23b738000 {
+			reg = <0x2 0x3b738000 0x0 0x1000>,
+				<0x2 0x3bc3c000 0x0 0x1000>;
+			reg-names = "dcp-bw-scratch", "dcp-bw-doorbell";
+			#apple,bw-scratch-cells = <3>;
+			#apple,bw-doorbell-cells = <2>;
+		};
+
 		pinctrl_ap: pinctrl@23c100000 {
 			compatible = "apple,t8103-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x3c100000 0x0 0x100000>;
@@ -597,6 +1098,26 @@
 					 <APPLE_PINMUX(134, 1)>;
 			};
 
+			spi0_pins: spi0-pins {
+				pinmux = <APPLE_PINMUX(67, 1)>, /* CLK */
+					<APPLE_PINMUX(68, 1)>,  /* MOSI */
+					<APPLE_PINMUX(69, 1)>;  /* MISO */
+			};
+
+			spi1_pins: spi1-pins {
+				pinmux = <APPLE_PINMUX(42, 1)>,
+					<APPLE_PINMUX(43, 1)>,
+					<APPLE_PINMUX(44, 1)>,
+					<APPLE_PINMUX(45, 1)>;
+			};
+
+			spi3_pins: spi3-pins {
+				pinmux = <APPLE_PINMUX(46, 1)>,
+					<APPLE_PINMUX(47, 1)>,
+					<APPLE_PINMUX(48, 1)>,
+					<APPLE_PINMUX(49, 1)>;
+			};
+
 			pcie_pins: pcie-pins {
 				pinmux = <APPLE_PINMUX(150, 1)>,
 					 <APPLE_PINMUX(151, 1)>,
@@ -604,6 +1125,81 @@
 			};
 		};
 
+		nub_spmi: spmi@23d0d9300 {
+			compatible = "apple,t8103-spmi", "apple,spmi";
+			reg = <0x2 0x3d0d9300 0x0 0x100>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+
+			pmu1: pmu@f {
+				compatible = "apple,sera-pmu", "apple,spmi-pmu";
+				reg = <0xf SPMI_USID>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				rtc_nvmem@d000 {
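+				/*
+				 * PMU NVMEM regions; the cells defined here
+				 * are consumed by the SMC rtc and reboot
+				 * sub-nodes further down.
+				 */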
+					compatible = "apple,spmi-pmu-nvmem";
+					reg = <0xd000 0x300>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					pm_setting: pm-setting@1 {
+						reg = <0x1 0x1>;
+					};
+
+					rtc_offset: rtc-offset@100 {
+						reg = <0x100 0x6>;
+					};
+				};
+
+				legacy_nvmem@9f00 {
+					compatible = "apple,spmi-pmu-nvmem";
+					reg = <0x9f00 0x20>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					boot_stage: boot-stage@1 {
+						reg = <0x1 0x1>;
+					};
+
+					boot_error_count: boot-error-count@2 {
+						reg = <0x2 0x1>;
+						bits = <0 4>;
+					};
+
+					panic_count: panic-count@2 {
+						reg = <0x2 0x1>;
+						bits = <4 4>;
+					};
+
+					boot_error_stage: boot-error-stage@3 {
+						reg = <0x3 0x1>;
+					};
+
+					shutdown_flag: shutdown-flag@f {
+						reg = <0xf 0x1>;
+						bits = <3 1>;
+					};
+				};
+
+				scrpad_nvmem@a000 {
+					compatible = "apple,spmi-pmu-nvmem";
+					reg = <0xa000 0x1000>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					fault_shadow: fault-shadow@67b {
+						reg = <0x67b 0x10>;
+					};
+
+					socd: socd@b00 {
+						reg = <0xb00 0x400>;
+					};
+				};
+			};
+		};
+
 		pinctrl_nub: pinctrl@23d1f0000 {
 			compatible = "apple,t8103-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x3d1f0000 0x0 0x4000>;
@@ -641,6 +1237,44 @@
 			interrupts = <AIC_IRQ 338 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		smc_mbox: mbox@23e408000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x3e408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 400 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 401 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 402 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 403 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+		};
+
+		smc: smc@23e400000 {
+			compatible = "apple,t8103-smc", "apple,smc";
+			reg = <0x2 0x3e400000 0x0 0x4000>,
+				<0x2 0x3fe00000 0x0 0x100000>;
+			reg-names = "smc", "sram";
+			mboxes = <&smc_mbox>;
+
+			smc_gpio: gpio {
+				gpio-controller;
+				#gpio-cells = <2>;
+			};
+
+			smc_rtc: rtc {
+				nvmem-cells = <&rtc_offset>;
+				nvmem-cell-names = "rtc_offset";
+			};
+
+			smc_reboot: reboot {
+				nvmem-cells = <&shutdown_flag>, <&boot_stage>,
+					<&boot_error_count>, <&panic_count>, <&pm_setting>;
+				nvmem-cell-names = "shutdown_flag", "boot_stage",
+					"boot_error_count", "panic_count", "pm_setting";
+			};
+		};
+
 		pinctrl_smc: pinctrl@23e820000 {
 			compatible = "apple,t8103-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x3e820000 0x0 0x4000>;
@@ -662,6 +1296,36 @@
 				     <AIC_IRQ 397 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		sep_dart: iommu@2412c0000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x412c0000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 259 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		sep: sep@242400000 {
+			compatible = "apple,sep";
+			reg = <0x2 0x42400000 0x0 0x6C000>;
+			mboxes = <&sep_mbox>;
+			mbox-names = "mbox";
+			iommus = <&sep_dart 0>;
+			status = "disabled";
+		};
+
+		sep_mbox: mbox@242408000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x42408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 253 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 254 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 255 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 256 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+		};
+
 		pinctrl_aop: pinctrl@24a820000 {
 			compatible = "apple,t8103-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x4a820000 0x0 0x4000>;
@@ -683,6 +1347,150 @@
 				     <AIC_IRQ 274 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		aop_mbox: mbox@24a408000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x4a408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 285 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 286 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 287 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 288 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			status = "disabled";
+		};
+
+		aop_dart: iommu@24a808000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x4a808000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 300 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		aop_admac: dma-controller@24a980000 {
+			/*
+			 * Use "admac2" until commit "dmaengine: apple-admac:
+			 * Avoid accessing registers in probe" has been upstream
+			 * long enough (not yet as of 2024-12-30).
+			 */
+			// compatible = "apple,t8103-admac", "apple,admac";
+			compatible = "apple,t8103-admac2", "apple,admac2";
+			reg = <0x2 0x4a980000 0x0 0x34000>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
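+			/*
+			 * Only the third of the four interrupt outputs appears
+			 * to be used; the 0 entries mark the unconnected ones.
+			 */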
+			interrupts-extended = <0>,
+					      <0>,
+					      <&aic AIC_IRQ 321 IRQ_TYPE_LEVEL_HIGH>,
+					      <0>;
+			iommus = <&aop_dart 7>;
+			status = "disabled";
+		};
+
+		aop: aop@24ac00000 {
+			compatible = "apple,t8103-aop";
+			reg = <0x2 0x4ac00000 0x0 0x1e0000>,
+			      <0x2 0x4a400000 0x0 0x6c000>;
+			mboxes = <&aop_mbox>;
+			mbox-names = "mbox";
+			iommus = <&aop_dart 0>;
+
+			/* HACK: ensure probe order */
+			dmas = <&aop_admac 1023>;
+			dma-names = "invalid-order-only";
+
+			status = "disabled";
+
+			aop_audio: audio {
+				dmas = <&aop_admac 1>;
+				dma-names = "dma";
+			};
+
+			aop_als: als {
+				// intentionally empty
+			};
+		};
+
+		dispext0_dart: iommu@271304000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x71304000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 481 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_dispext_cpu0>;
+			status = "disabled";
+		};
+
+		dcpext_dart: iommu@27130c000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x2 0x7130c000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 481 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_dispext_cpu0>;
+			status = "disabled";
+		};
+
+		dcpext_mbox: mbox@271c08000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x71c08000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 466 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 467 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 468 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 469 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			power-domains = <&ps_dispext_cpu0>;
+			resets = <&ps_dispext_cpu0>;
+			status = "disabled";
+		};
+
+		dcpext: dcp@271c00000 {
+			compatible = "apple,t8103-dcpext", "apple,dcpext";
+			mboxes = <&dcpext_mbox>;
+			mbox-names = "mbox";
+			iommus = <&dcpext_dart 0>;
+			phandle = <&dcpext>;
+
+			reg-names = "coproc", "disp-0", "disp-1", "disp-2",
+				"disp-3", "disp-4";
+			reg = <0x2 0x71c00000 0x0 0x4000>,
+			      <0x2 0x70000000 0x0 0x118000>,
+			      <0x2 0x71320000 0x0 0x4000>,
+			      <0x2 0x71344000 0x0 0x4000>,
+			      <0x2 0x71800000 0x0 0x800000>,
+			      <0x2 0x3b3d0000 0x0 0x4000>;
+			apple,bw-scratch = <&pmgr_dcp 0 5 0x18>;
+			apple,bw-doorbell = <&pmgr_dcp 1 6>;
+			power-domains = <&ps_dispext_cpu0>;
+			resets = <&ps_dispext_cpu0>;
+			clocks = <&clk_dispext0>;
+			apple,asc-dram-mask = <0xf 0x00000000>;
+			status = "disabled";
+			// required bus properties for 'piodma' subdevice
+			#address-cells = <2>;
+			#size-cells = <2>;
+
+			piodma {
+				iommus = <&dispext0_dart 4>;
+			};
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dcpext_audio: endpoint {
+						remote-endpoint = <&dpaudio1_dcp>;
+					};
+				};
+			};
+		};
+
 		ans_mbox: mbox@277408000 {
 			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
 			reg = <0x2 0x77408000 0x0 0x4000>;
@@ -717,6 +1525,251 @@
 			resets = <&ps_ans2>;
 		};
 
+		efuse@23d2bc000 {
+			compatible = "apple,t8103-efuses", "apple,efuses";
+			reg = <0x2 0x3d2bc000 0x0 0x1000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
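+
+			/*
+			 * Fuse cell node names encode <byte offset>,<first bit>
+			 * of each field, matching the reg/bits properties below.
+			 */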
+			atcphy0_auspll_rodco_bias_adjust: efuse@430,26 {
+				reg = <0x430 4>;
+				bits = <26 3>;
+			};
+
+			atcphy0_auspll_rodco_encap: efuse@430,29 {
+				reg = <0x430 4>;
+				bits = <29 2>;
+			};
+
+			atcphy0_auspll_dtc_vreg_adjust: efuse@430,31 {
+				reg = <0x430 8>;
+				bits = <31 3>;
+			};
+
+			atcphy0_auspll_fracn_dll_start_capcode: efuse@434,2 {
+				reg = <0x434 4>;
+				bits = <2 2>;
+			};
+
+			atcphy0_aus_cmn_shm_vreg_trim: efuse@434,4 {
+				reg = <0x434 4>;
+				bits = <4 5>;
+			};
+
+			atcphy0_cio3pll_dco_coarsebin0: efuse@434,9 {
+				reg = <0x434 4>;
+				bits = <9 6>;
+			};
+
+			atcphy0_cio3pll_dco_coarsebin1: efuse@434,15 {
+				reg = <0x434 4>;
+				bits = <15 6>;
+			};
+
+			atcphy0_cio3pll_dll_start_capcode: efuse@434,21 {
+				reg = <0x434 4>;
+				bits = <21 2>;
+			};
+
+			atcphy0_cio3pll_dtc_vreg_adjust: efuse@434,23 {
+				reg = <0x434 0x4>;
+				bits = <23 3>;
+			};
+
+			atcphy1_auspll_rodco_bias_adjust: efuse@438,4 {
+				reg = <0x438 4>;
+				bits = <4 3>;
+			};
+
+			atcphy1_auspll_rodco_encap: efuse@438,7 {
+				reg = <0x438 4>;
+				bits = <7 2>;
+			};
+
+			atcphy1_auspll_dtc_vreg_adjust: efuse@438,9 {
+				reg = <0x438 4>;
+				bits = <9 3>;
+			};
+
+			atcphy1_auspll_fracn_dll_start_capcode: efuse@438,12 {
+				reg = <0x438 4>;
+				bits = <12 2>;
+			};
+
+			atcphy1_aus_cmn_shm_vreg_trim: efuse@438,14 {
+				reg = <0x438 4>;
+				bits = <14 5>;
+			};
+
+			atcphy1_cio3pll_dco_coarsebin0: efuse@438,19 {
+				reg = <0x438 4>;
+				bits = <19 6>;
+			};
+
+			atcphy1_cio3pll_dco_coarsebin1: efuse@438,25 {
+				reg = <0x438 4>;
+				bits = <25 6>;
+			};
+
+			atcphy1_cio3pll_dll_start_capcode: efuse@438,31 {
+				reg = <0x438 4>;
+				bits = <31 1>;
+			};
+
+			atcphy1_cio3pll_dll_start_capcode_workaround: efuse@43c,0 {
+				reg = <0x43c 0x4>;
+				bits = <0 1>;
+			};
+
+			atcphy1_cio3pll_dtc_vreg_adjust: efuse@43c,1 {
+				reg = <0x43c 0x4>;
+				bits = <1 3>;
+			};
+		};
+
+		dwc3_0: usb@382280000 {
+			compatible = "apple,t8103-dwc3", "apple,dwc3", "snps,dwc3";
+			reg = <0x3 0x82280000 0x0 0x100000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 777 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "otg";
+			usb-role-switch;
+			role-switch-default-mode = "host";
+			iommus = <&dwc3_0_dart_0 0>, <&dwc3_0_dart_1 1>;
+			power-domains = <&ps_atc0_usb>;
+			resets = <&atcphy0>;
+			phys = <&atcphy0 PHY_TYPE_USB2>, <&atcphy0 PHY_TYPE_USB3>;
+			phy-names = "usb2-phy", "usb3-phy";
+		};
+
+		dwc3_0_dart_0: iommu@382f00000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x3 0x82f00000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 781 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc0_usb>;
+		};
+
+		dwc3_0_dart_1: iommu@382f80000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x3 0x82f80000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 781 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc0_usb>;
+		};
+
+		atcphy0: phy@383000000 {
+			compatible = "apple,t8103-atcphy";
+			reg = <0x3 0x83000000 0x0 0x4c000>,
+				<0x3 0x83050000 0x0 0x8000>,
+				<0x3 0x80000000 0x0 0x4000>,
+				<0x3 0x82a90000 0x0 0x4000>,
+				<0x3 0x82a84000 0x0 0x4000>;
+			reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+				"pipehandler";
+
+			#phy-cells = <1>;
+			#reset-cells = <0>;
+
+			nvmem-cells = <&atcphy0_aus_cmn_shm_vreg_trim>,
+				<&atcphy0_auspll_rodco_encap>,
+				<&atcphy0_auspll_rodco_bias_adjust>,
+				<&atcphy0_auspll_fracn_dll_start_capcode>,
+				<&atcphy0_auspll_dtc_vreg_adjust>,
+				<&atcphy0_cio3pll_dco_coarsebin0>,
+				<&atcphy0_cio3pll_dco_coarsebin1>,
+				<&atcphy0_cio3pll_dll_start_capcode>,
+				<&atcphy0_cio3pll_dtc_vreg_adjust>;
+			nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+				"auspll_rodco_encap",
+				"auspll_rodco_bias_adjust",
+				"auspll_fracn_dll_start_capcode",
+				"auspll_dtc_vreg_adjust",
+				"cio3pll_dco_coarsebin0",
+				"cio3pll_dco_coarsebin1",
+				"cio3pll_dll_start_capcode",
+				"cio3pll_dtc_vreg_adjust";
+
+			orientation-switch;
+			mode-switch;
+			svid = <0xff01>, <0x8087>;
+			power-domains = <&ps_atc0_usb>;
+		};
+
+		dwc3_1: usb@502280000 {
+			compatible = "apple,t8103-dwc3", "apple,dwc3", "snps,dwc3";
+			reg = <0x5 0x02280000 0x0 0x100000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 857 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "otg";
+			usb-role-switch;
+			role-switch-default-mode = "host";
+			iommus = <&dwc3_1_dart_0 0>, <&dwc3_1_dart_1 1>;
+			power-domains = <&ps_atc1_usb>;
+			resets = <&atcphy1>;
+			phys = <&atcphy1 PHY_TYPE_USB2>, <&atcphy1 PHY_TYPE_USB3>;
+			phy-names = "usb2-phy", "usb3-phy";
+		};
+
+		dwc3_1_dart_0: iommu@502f00000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x5 0x02f00000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 861 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc1_usb>;
+		};
+
+		dwc3_1_dart_1: iommu@502f80000 {
+			compatible = "apple,t8103-dart";
+			reg = <0x5 0x02f80000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 861 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc1_usb>;
+		};
+
+		atcphy1: phy@503000000 {
+			compatible = "apple,t8103-atcphy";
+			reg = <0x5 0x03000000 0x0 0x4c000>,
+				<0x5 0x03050000 0x0 0x8000>,
+				<0x5 0x0 0x0 0x4000>,
+				<0x5 0x02a90000 0x0 0x4000>,
+				<0x5 0x02a84000 0x0 0x4000>;
+			reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+				"pipehandler";
+
+			nvmem-cells = <&atcphy1_aus_cmn_shm_vreg_trim>,
+				<&atcphy1_auspll_rodco_encap>,
+				<&atcphy1_auspll_rodco_bias_adjust>,
+				<&atcphy1_auspll_fracn_dll_start_capcode>,
+				<&atcphy1_auspll_dtc_vreg_adjust>,
+				<&atcphy1_cio3pll_dco_coarsebin0>,
+				<&atcphy1_cio3pll_dco_coarsebin1>,
+				<&atcphy1_cio3pll_dll_start_capcode>,
+				<&atcphy1_cio3pll_dtc_vreg_adjust>,
+				<&atcphy1_cio3pll_dll_start_capcode_workaround>;
+			nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+				"auspll_rodco_encap",
+				"auspll_rodco_bias_adjust",
+				"auspll_fracn_dll_start_capcode",
+				"auspll_dtc_vreg_adjust",
+				"cio3pll_dco_coarsebin0",
+				"cio3pll_dco_coarsebin1",
+				"cio3pll_dll_start_capcode",
+				"cio3pll_dtc_vreg_adjust",
+				"cio3pll_dll_start_capcode_workaround";
+
+			#phy-cells = <1>;
+			#reset-cells = <0>;
+
+			orientation-switch;
+			mode-switch;
+			svid = <0xff01>, <0x8087>;
+			power-domains = <&ps_atc1_usb>;
+		};
+
 		pcie0_dart_0: iommu@681008000 {
 			compatible = "apple,t8103-dart";
 			reg = <0x6 0x81008000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/apple/t8112-j413.dts b/arch/arm64/boot/dts/apple/t8112-j413.dts
index 6f69658623bf89..1f8b0eecbd5e72 100644
--- a/arch/arm64/boot/dts/apple/t8112-j413.dts
+++ b/arch/arm64/boot/dts/apple/t8112-j413.dts
@@ -20,6 +20,7 @@
 	aliases {
 		bluetooth0 = &bluetooth0;
 		wifi0 = &wifi0;
+		keyboard = &keyboard;
 	};
 
 	led-controller {
@@ -35,6 +36,20 @@
 	};
 };
 
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j413", "apple,panel";
+		width-mm = <290>;
+		height-mm = <189>;
+		adj-height-mm = <181>;
+		apple,max-brightness = <525>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
 /*
  * Force the bus number assignments so that we can declare some of the
  * on-board devices and properties that are populated by the bootloader
@@ -42,6 +57,7 @@
  */
 &port00 {
 	bus-range = <1 1>;
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
 	wifi0: wifi@0,0 {
 		compatible = "pci14e4,4433";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
@@ -60,6 +76,18 @@
 	};
 };
 
+/*
+ * Provide labels for the USB type C ports.
+ */
+
+&typec0 {
+	label = "USB-C Left-back";
+};
+
+&typec1 {
+	label = "USB-C Left-front";
+};
+
 &i2c0 {
 	/* MagSafe port */
 	hpm5: usb-pd@3a {
@@ -71,6 +99,76 @@
 	};
 };
 
+/* Virtual regulator representing the shared shutdown GPIO */
+/ {
+	speaker_sdz: fixed-regulator-sn012776-sdz {
+		compatible = "regulator-fixed";
+		regulator-name = "sn012776-sdz";
+		startup-delay-us = <5000>;
+		gpios = <&pinctrl_ap 88 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&i2c1 {
+	speaker_left_woof: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Woofer";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,sdout-force-zero-mask = <0xf0f0>;
+	};
+
+	speaker_left_tweet: codec@39 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x39>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Tweeter";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <8>;
+		ti,vmon-slot-no = <10>;
+	};
+};
+
+&i2c3 {
+	speaker_right_woof: codec@3b {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3b>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Woofer";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+		ti,sdout-force-zero-mask = <0x0f0f>;
+	};
+
+	speaker_right_tweet: codec@3c {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3c>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Tweeter";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <12>;
+		ti,vmon-slot-no = <14>;
+	};
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 12 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 149 IRQ_TYPE_LEVEL_LOW>;
+		sound-name-prefix = "Jack";
+	};
+};
+
 &i2c4 {
 	status = "okay";
 };
@@ -78,3 +176,98 @@
 &fpwm1 {
 	status = "okay";
 };
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J413";
+	apple,machine-kind = "MacBook Air";
+};
+
+/ {
+	sound {
+		compatible = "apple,j413-macaudio", "apple,macaudio";
+		model = "MacBook Air J413";
+
+		dai-link@0 {
+			link-name = "Speakers";
+
+			cpu {
+				sound-dai = <&mca 0>, <&mca 1>;
+			};
+			codec {
+				sound-dai = <&speaker_left_woof>, <&speaker_left_tweet>,
+					    <&speaker_right_woof>, <&speaker_right_tweet>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&mtp {
+	status = "okay";
+};
+&mtp_mbox {
+	status = "okay";
+};
+&mtp_dart {
+	status = "okay";
+};
+&mtp_dockchannel {
+	status = "okay";
+};
+&mtp_hid {
+	apple,afe-reset-gpios = <&smc_gpio 8 GPIO_ACTIVE_LOW>;
+	apple,stm-reset-gpios = <&smc_gpio 24 GPIO_ACTIVE_LOW>;
+
+	multi-touch {
+		firmware-name = "apple/tpmtfw-j413.bin";
+	};
+
+	keyboard: keyboard {
+		hid-country-code = <0>;
+		apple,keyboard-layout-id = <0>;
+	};
+
+	stm {
+	};
+
+	actuator {
+	};
+
+	tp_accel {
+	};
+};
+
+#include "isp-imx558-cfg0.dtsi"
+
+&isp {
+	apple,platform-id = <14>;
+	apple,temporal-filter = <1>;
+};
+
+#include "hwmon-laptop.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8112-j415.dts b/arch/arm64/boot/dts/apple/t8112-j415.dts
new file mode 100644
index 00000000000000..42c11517436afa
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8112-j415.dts
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple MacBook Air (15-inch, M2, 2023)
+ *
+ * target-type: J415
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t8112.dtsi"
+#include "t8112-jxxx.dtsi"
+#include <dt-bindings/leds/common.h>
+
+/ {
+	compatible = "apple,j415", "apple,t8112", "apple,arm-platform";
+	model = "Apple MacBook Air (15-inch, M2, 2023)";
+
+	aliases {
+		bluetooth0 = &bluetooth0;
+		wifi0 = &wifi0;
+		keyboard = &keyboard;
+	};
+
+	led-controller {
+		compatible = "pwm-leds";
+		led-0 {
+			pwms = <&fpwm1 0 40000>;
+			label = "kbd_backlight";
+			function = LED_FUNCTION_KBD_BACKLIGHT;
+			color = <LED_COLOR_ID_WHITE>;
+			max-brightness = <255>;
+			default-state = "keep";
+		};
+	};
+};
+
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j415", "apple,panel";
+		width-mm = <327>;
+		height-mm = <211>;
+		adj-height-mm = <204>;
+		apple,max-brightness = <500>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
+/*
+ * Force the bus number assignments so that we can declare some of the
+ * on-board devices and properties that are populated by the bootloader
+ * (such as MAC addresses).
+ */
+&port00 {
+	bus-range = <1 1>;
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
+	wifi0: wifi@0,0 {
+		compatible = "pci14e4,4433";
+		reg = <0x10000 0x0 0x0 0x0 0x0>;
+		/* To be filled by the loader */
+		local-mac-address = [00 10 18 00 00 10];
+		apple,antenna-sku = "XX";
+		brcm,board-type = "apple,snake";
+	};
+
+	bluetooth0: bluetooth@0,1 {
+		compatible = "pci14e4,5f71";
+		reg = <0x10100 0x0 0x0 0x0 0x0>;
+		/* To be filled by the loader */
+		local-bd-address = [00 00 00 00 00 00];
+		brcm,board-type = "apple,snake";
+	};
+};
+
+/*
+ * Provide labels for the USB type C ports.
+ */
+
+&typec0 {
+	label = "USB-C Left-back";
+};
+
+&typec1 {
+	label = "USB-C Left-front";
+};
+
+&i2c0 {
+	/* MagSafe port */
+	hpm5: usb-pd@3a {
+		compatible = "apple,cd321x";
+		reg = <0x3a>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "irq";
+	};
+};
+
+/* Virtual regulator representing the shared shutdown GPIO */
+/ {
+	speaker_sdz: fixed-regulator-sn012776-sdz {
+		compatible = "regulator-fixed";
+		regulator-name = "sn012776-sdz";
+		startup-delay-us = <5000>;
+		gpios = <&pinctrl_ap 88 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&i2c1 {
+	speaker_left_woof1: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Woofer 1";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,sdout-force-zero-mask = <0xf0f0f0>;
+	};
+
+	speaker_left_tweet: codec@39 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x39>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Tweeter";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <8>;
+		ti,vmon-slot-no = <10>;
+	};
+
+	speaker_left_woof2: codec@3a {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3a>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Woofer 2";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <16>;
+		ti,vmon-slot-no = <18>;
+	};
+};
+
+&i2c3 {
+	speaker_right_woof1: codec@3b {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3b>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Woofer 1";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+		ti,sdout-force-zero-mask = <0x0f0f0f>;
+	};
+
+	speaker_right_tweet: codec@3c {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3c>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Tweeter";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <12>;
+		ti,vmon-slot-no = <14>;
+	};
+
+	speaker_right_woof2: codec@3d {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3d>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Woofer 2";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <20>;
+		ti,vmon-slot-no = <22>;
+	};
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 12 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 149 IRQ_TYPE_LEVEL_LOW>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+&fpwm1 {
+	status = "okay";
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J415";
+	apple,machine-kind = "MacBook Air";
+};
+
+/ {
+	sound {
+		compatible = "apple,j415-macaudio", "apple,macaudio";
+		model = "MacBook Air J415";
+
+		dai-link@0 {
+			link-name = "Speakers";
+
+			cpu {
+				sound-dai = <&mca 0>, <&mca 1>;
+			};
+			codec {
+				sound-dai = <&speaker_left_woof1>,
+					    <&speaker_left_tweet>,
+					    <&speaker_left_woof2>,
+					    <&speaker_right_woof1>,
+					    <&speaker_right_tweet>,
+					    <&speaker_right_woof2>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&mtp {
+	status = "okay";
+};
+&mtp_mbox {
+	status = "okay";
+};
+&mtp_dart {
+	status = "okay";
+};
+&mtp_dockchannel {
+	status = "okay";
+};
+&mtp_hid {
+	apple,afe-reset-gpios = <&smc_gpio 8 GPIO_ACTIVE_LOW>;
+	apple,stm-reset-gpios = <&smc_gpio 24 GPIO_ACTIVE_LOW>;
+
+	multi-touch {
+		firmware-name = "apple/tpmtfw-j415.bin";
+	};
+
+	keyboard: keyboard {
+		hid-country-code = <0>;
+		apple,keyboard-layout-id = <0>;
+	};
+
+	stm {
+	};
+
+	actuator {
+	};
+
+	tp_accel {
+	};
+};
+
+#include "isp-imx558-cfg0.dtsi"
+
+&isp {
+	apple,platform-id = <15>;
+	apple,temporal-filter = <1>;
+};
+
+#include "hwmon-laptop.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8112-j473.dts b/arch/arm64/boot/dts/apple/t8112-j473.dts
index 06fe257f08be49..0640843b378cfb 100644
--- a/arch/arm64/boot/dts/apple/t8112-j473.dts
+++ b/arch/arm64/boot/dts/apple/t8112-j473.dts
@@ -17,10 +17,75 @@
 	model = "Apple Mac mini (M2, 2023)";
 
 	aliases {
+		bluetooth0 = &bluetooth0;
+		/delete-property/ dcp;
+		dcpext = &dcpext;
 		ethernet0 = &ethernet0;
+		sio = &sio;
+		wifi0 = &wifi0;
 	};
 };
 
+&framebuffer0 {
+	power-domains = <&ps_dispext_cpu0>, <&ps_dptx_ext_phy>;
+};
+
+&dptxphy {
+	status = "okay";
+};
+
+&dcp {
+	status = "disabled";
+};
+
+&display {
+	iommus = <&dispext0_dart 0>;
+};
+&dispext0_dart {
+	status = "okay";
+};
+&dcpext_dart {
+	status = "okay";
+};
+&dcpext_mbox {
+	status = "okay";
+};
+&dcpext {
+	status = "okay";
+	apple,connector-type = "HDMI-A";
+
+	/* HDMI HPD gpio, used as interrupt */
+	hdmi-hpd-gpios = <&pinctrl_aop 49 GPIO_ACTIVE_HIGH>;
+
+	hdmi-pwren-gpios = <&smc_gpio 21 GPIO_ACTIVE_HIGH>;
+	dp2hdmi-pwren-gpios = <&smc_gpio 22 GPIO_ACTIVE_HIGH>;
+
+	phys = <&dptxphy>;
+	phy-names = "dp-phy";
+	apple,dptx-phy = <5>;
+};
+
+/* remove once m1n1 enables sio nodes after setup */
+&sio {
+	status = "okay";
+};
+
+&dpaudio1 {
+	status = "okay";
+};
+
+/*
+ * Provide labels for the USB type C ports.
+ */
+
+&typec0 {
+	label = "USB-C Back-left";
+};
+
+&typec1 {
+	label = "USB-C Back-right";
+};
+
 /*
  * Force the bus number assignments so that we can declare some of the
  * on-board devices and properties that are populated by the bootloader
@@ -28,10 +93,28 @@
  */
 &port00 {
 	bus-range = <1 1>;
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
+	wifi0: wifi@0,0 {
+		compatible = "pci14e4,4434";
+		reg = <0x10000 0x0 0x0 0x0 0x0>;
+		/* To be filled by the loader */
+		local-mac-address = [00 10 18 00 00 10];
+		apple,antenna-sku = "XX";
+		brcm,board-type = "apple,miyake";
+	};
+
+	bluetooth0: bluetooth@0,1 {
+		compatible = "pci14e4,5f72";
+		reg = <0x10100 0x0 0x0 0x0 0x0>;
+		/* To be filled by the loader */
+		local-bd-address = [00 00 00 00 00 00];
+		brcm,board-type = "apple,miyake";
+	};
 };
 
 &port01 {
 	bus-range = <2 2>;
+	pwren-gpios = <&smc_gpio 24 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -52,3 +135,63 @@
 &pcie2_dart {
 	status = "okay";
 };
+
+&i2c1 {
+	speaker_amp: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		shutdown-gpios = <&pinctrl_ap 88 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+	};
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 12 GPIO_ACTIVE_HIGH>;
+		interrupt-parent = <&pinctrl_ap>;
+		interrupts = <149 IRQ_TYPE_LEVEL_LOW>;
+		#sound-dai-cells = <0>;
+		cirrus,ts-inv = <1>;
+		sound-name-prefix = "Jack";
+	};
+};
+
+/ {
+	sound {
+		compatible = "apple,j473-macaudio", "apple,macaudio";
+		model = "Mac mini J473";
+
+		dai-link@0 {
+			link-name = "Speaker";
+
+			cpu {
+				sound-dai = <&mca 0>;
+			};
+			codec {
+				sound-dai = <&speaker_amp>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&gpu {
+	apple,perf-base-pstate = <3>;
+};
+
+#include "hwmon-mini.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8112-j493.dts b/arch/arm64/boot/dts/apple/t8112-j493.dts
index 0ad908349f5540..ecfa4b1a4483fe 100644
--- a/arch/arm64/boot/dts/apple/t8112-j493.dts
+++ b/arch/arm64/boot/dts/apple/t8112-j493.dts
@@ -17,9 +17,16 @@
 	compatible = "apple,j493", "apple,t8112", "apple,arm-platform";
 	model = "Apple MacBook Pro (13-inch, M2, 2022)";
 
+	/*
+	 * All of these are used by the bootloader to pass calibration
+	 * blobs and other device-specific properties.
+	 */
 	aliases {
 		bluetooth0 = &bluetooth0;
 		wifi0 = &wifi0;
+		keyboard = &keyboard;
+		touchbar0 = &touchbar0;
 	};
 
 	led-controller {
@@ -35,6 +42,60 @@
 	};
 };
 
+&dcp {
+	panel: panel {
+		compatible = "apple,panel-j493", "apple,panel";
+		width-mm = <286>;
+		height-mm = <179>;
+		apple,max-brightness = <525>;
+	};
+};
+
+&framebuffer0 {
+	panel-dimensions = <&panel>;
+};
+
+/*
+ * The driver depends on boot loader initialized state which resets when this
+ * power-domain is powered off. This happens on suspend or when the driver is
+ * missing during boot. Mark the domain as always on until the driver can
+ * handle this.
+ */
+&ps_dispdfr_be {
+	apple,always-on;
+};
+
+&display_dfr {
+	status = "okay";
+};
+
+&dfr_mipi_out {
+	dfr_mipi_out_panel: endpoint@0 {
+		reg = <0>;
+		remote-endpoint = <&dfr_panel_in>;
+	};
+};
+
+&displaydfr_mipi {
+	status = "okay";
+
+	dfr_panel: panel@0 {
+		compatible = "apple,j493-summit", "apple,summit";
+		reg = <0>;
+		max-brightness = <255>;
+
+		port {
+			dfr_panel_in: endpoint {
+				remote-endpoint = <&dfr_mipi_out_panel>;
+			};
+		};
+	};
+};
+
+&displaydfr_dart {
+	status = "okay";
+};
+
 /*
  * Force the bus number assignments so that we can declare some of the
  * on-board devices and properties that are populated by the bootloader
@@ -42,6 +103,7 @@
  */
 &port00 {
 	bus-range = <1 1>;
+	pwren-gpios = <&smc_gpio 13 GPIO_ACTIVE_HIGH>;
 	wifi0: wifi@0,0 {
 		compatible = "pci14e4,4425";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
@@ -60,6 +122,88 @@
 	};
 };
 
+/*
+ * Provide labels for the USB type C ports.
+ */
+
+&typec0 {
+	label = "USB-C Left-back";
+};
+
+&typec1 {
+	label = "USB-C Left-front";
+};
+
+/* Virtual regulator representing the shared shutdown GPIO */
+/ {
+	speaker_sdz: fixed-regulator-sn012776-sdz {
+		compatible = "regulator-fixed";
+		regulator-name = "sn012776-sdz";
+		startup-delay-us = <5000>;
+		gpios = <&pinctrl_ap 88 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&i2c1 {
+	speaker_left_rear: codec@38 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x38>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Rear";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <8>;
+		ti,vmon-slot-no = <10>;
+	};
+
+	speaker_left_front: codec@39 {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x39>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Left Front";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <0>;
+		ti,vmon-slot-no = <2>;
+		ti,sdout-force-zero-mask = <0xf0f0>;
+	};
+};
+
+&i2c3 {
+	speaker_right_rear: codec@3b {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3b>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Rear";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <12>;
+		ti,vmon-slot-no = <14>;
+	};
+
+	speaker_right_front: codec@3c {
+		compatible = "ti,sn012776", "ti,tas2764";
+		reg = <0x3c>;
+		SDZ-supply = <&speaker_sdz>;
+		#sound-dai-cells = <0>;
+		sound-name-prefix = "Right Front";
+		interrupts-extended = <&pinctrl_ap 11 IRQ_TYPE_LEVEL_LOW>;
+		ti,imon-slot-no = <4>;
+		ti,vmon-slot-no = <6>;
+		ti,sdout-force-zero-mask = <0x0f0f>;
+	};
+
+	jack_codec: codec@4b {
+		compatible = "cirrus,cs42l84";
+		reg = <0x4b>;
+		reset-gpios = <&pinctrl_nub 12 GPIO_ACTIVE_HIGH>;
+		#sound-dai-cells = <0>;
+		interrupts-extended = <&pinctrl_ap 149 IRQ_TYPE_LEVEL_LOW>;
+		sound-name-prefix = "Jack";
+	};
+};
+
 &i2c4 {
 	status = "okay";
 };
@@ -67,3 +211,135 @@
 &fpwm1 {
 	status = "okay";
 };
+
+&spi3 {
+	status = "okay";
+
+	touchbar0: touchbar@0 {
+		compatible = "apple,j493-touchbar", "apple,z2-touchbar",
+			"apple,z2-multitouch";
+		reg = <0>;
+		label = "Mac14,7 Touch Bar";
+		spi-max-frequency = <8000000>;
+		spi-cs-setup-delay-ns = <2000>;
+		spi-cs-hold-delay-ns = <2000>;
+		reset-gpios = <&pinctrl_ap 170 GPIO_ACTIVE_LOW>;
+		interrupts-extended = <&pinctrl_ap 174 IRQ_TYPE_EDGE_FALLING>;
+		firmware-name = "apple/dfrmtfw-j493.bin";
+		touchscreen-size-x = <23045>;
+		touchscreen-size-y = <640>;
+		touchscreen-inverted-y;
+	};
+};
+
+&aop_mbox {
+	status = "okay";
+};
+
+&aop_dart {
+	status = "okay";
+};
+
+&aop_admac {
+	status = "okay";
+};
+
+&aop {
+	status = "okay";
+};
+
+&aop_audio {
+	apple,chassis-name = "J493";
+	apple,machine-kind = "MacBook Pro";
+};
+
+/ {
+	sound {
+		compatible = "apple,j493-macaudio", "apple,macaudio";
+		model = "MacBook Pro J493";
+
+		dai-link@0 {
+			link-name = "Speakers";
+
+			cpu {
+				sound-dai = <&mca 0>, <&mca 1>;
+			};
+			codec {
+				sound-dai = <&speaker_left_front>, <&speaker_left_rear>,
+					    <&speaker_right_front>, <&speaker_right_rear>;
+			};
+		};
+
+		dai-link@1 {
+			link-name = "Headphone Jack";
+
+			cpu {
+				sound-dai = <&mca 2>;
+			};
+			codec {
+				sound-dai = <&jack_codec>;
+			};
+		};
+	};
+};
+
+&mtp {
+	status = "okay";
+};
+&mtp_mbox {
+	status = "okay";
+};
+&mtp_dart {
+	status = "okay";
+};
+&mtp_dockchannel {
+	status = "okay";
+};
+&mtp_hid {
+	apple,afe-reset-gpios = <&smc_gpio 8 GPIO_ACTIVE_LOW>;
+	apple,stm-reset-gpios = <&smc_gpio 24 GPIO_ACTIVE_LOW>;
+
+	multi-touch {
+		firmware-name = "apple/tpmtfw-j493.bin";
+	};
+
+	keyboard: keyboard {
+		hid-country-code = <0>;
+		apple,keyboard-layout-id = <0>;
+	};
+
+	stm {
+	};
+
+	actuator {
+	};
+
+	tp_accel {
+	};
+};
+
+#include "isp-imx248.dtsi"
+
+&isp {
+	apple,platform-id = <6>;
+};
+
+#include "hwmon-fan.dtsi"
+#include "hwmon-laptop.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi
index f5edf61113e7aa..5e0742c1fb4450 100644
--- a/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi
+++ b/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi
@@ -11,6 +11,12 @@
 
 / {
 	aliases {
+		atcphy0 = &atcphy0;
+		atcphy1 = &atcphy1;
+		dcp = &dcp;
+		disp0 = &display;
+		disp0_piodma = &disp0_piodma;
+		nvram = &nvram;
 		serial0 = &serial0;
 		serial2 = &serial2;
 	};
@@ -25,11 +31,19 @@
 		framebuffer0: framebuffer@0 {
 			compatible = "apple,simple-framebuffer", "simple-framebuffer";
 			reg = <0 0 0 0>; /* To be filled by loader */
+			power-domains = <&ps_disp0_cpu0>;
 			/* Format properties will be added by loader */
 			status = "disabled";
 		};
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		/* To be filled by loader */
+	};
+
 	memory@800000000 {
 		device_type = "memory";
 		reg = <0x8 0 0x2 0>; /* To be filled by loader */
@@ -53,6 +67,29 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec0: connector {
+			compatible = "usb-c-connector";
+			power-role = "dual";
+			data-role = "dual";
+
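+			/* port@0: USB2 HS to dwc3_0, port@1: USB3 SS to atcphy0 */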
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec0_con_hs: endpoint {
+						remote-endpoint = <&typec0_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec0_con_ss: endpoint {
+						remote-endpoint = <&typec0_usb_ss>;
+					};
+				};
+			};
+		};
 	};
 
 	hpm1: usb-pd@3f {
@@ -61,6 +98,63 @@
 		interrupt-parent = <&pinctrl_ap>;
 		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
 		interrupt-names = "irq";
+
+		typec1: connector {
+			compatible = "usb-c-connector";
+			power-role = "dual";
+			data-role = "dual";
+
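+			/* port@0: USB2 HS to dwc3_1, port@1: USB3 SS to atcphy1 */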
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					typec1_con_hs: endpoint {
+						remote-endpoint = <&typec1_usb_hs>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+					typec1_con_ss: endpoint {
+						remote-endpoint = <&typec1_usb_ss>;
+					};
+				};
+			};
+		};
+	};
+};
+
+/* USB controllers */
+&dwc3_0 {
+	port {
+		typec0_usb_hs: endpoint {
+			remote-endpoint = <&typec0_con_hs>;
+		};
+	};
+};
+
+&dwc3_1 {
+	port {
+		typec1_usb_hs: endpoint {
+			remote-endpoint = <&typec1_con_hs>;
+		};
+	};
+};
+
+/* Type-C PHYs */
+&atcphy0 {
+	port {
+		typec0_usb_ss: endpoint {
+			remote-endpoint = <&typec0_con_ss>;
+		};
+	};
+};
+
+&atcphy1 {
+	port {
+		typec1_usb_ss: endpoint {
+			remote-endpoint = <&typec1_con_ss>;
+		};
 	};
 };
 
@@ -79,3 +173,7 @@
 &nco_clkref {
 	clock-frequency = <900000000>;
 };
+
+#include "hwmon-common.dtsi"
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8112-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8112-pmgr.dtsi
index 7c050c6f2707a1..ab8ec9bd4e4401 100644
--- a/arch/arm64/boot/dts/apple/t8112-pmgr.dtsi
+++ b/arch/arm64/boot/dts/apple/t8112-pmgr.dtsi
@@ -176,7 +176,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "sio_cpu";
-		power-domains = <&ps_sio>;
+		power-domains = <&ps_sio>, <&ps_uart_p>, <&ps_spi_p>, <&ps_dpa0>;
 	};
 
 	ps_fpwm0: power-controller@1c8 {
@@ -465,6 +465,7 @@
 		#reset-cells = <0>;
 		label = "mca0";
 		power-domains = <&ps_sio_adma>, <&ps_audio_p>;
+		apple,externally-clocked;
 	};
 
 	ps_mca1: power-controller@2c8 {
@@ -474,6 +475,7 @@
 		#reset-cells = <0>;
 		label = "mca1";
 		power-domains = <&ps_sio_adma>, <&ps_audio_p>;
+		apple,externally-clocked;
 	};
 
 	ps_mca2: power-controller@2d0 {
@@ -483,6 +485,7 @@
 		#reset-cells = <0>;
 		label = "mca2";
 		power-domains = <&ps_sio_adma>, <&ps_audio_p>;
+		apple,externally-clocked;
 	};
 
 	ps_mca3: power-controller@2d8 {
@@ -492,6 +495,7 @@
 		#reset-cells = <0>;
 		label = "mca3";
 		power-domains = <&ps_sio_adma>, <&ps_audio_p>;
+		apple,externally-clocked;
 	};
 
 	ps_mca4: power-controller@2e0 {
@@ -501,6 +505,7 @@
 		#reset-cells = <0>;
 		label = "mca4";
 		power-domains = <&ps_sio_adma>, <&ps_audio_p>;
+		apple,externally-clocked;
 	};
 
 	ps_mca5: power-controller@2e8 {
@@ -510,6 +515,7 @@
 		#reset-cells = <0>;
 		label = "mca5";
 		power-domains = <&ps_sio_adma>, <&ps_audio_p>;
+		apple,externally-clocked;
 	};
 
 	ps_mcc: power-controller@2f0 {
@@ -663,7 +669,6 @@
 		#reset-cells = <0>;
 		label = "disp0_sys";
 		power-domains = <&ps_rmx1>;
-		apple,always-on; /* TODO: figure out if we can enable PM here */
 	};
 
 	ps_disp0_fe: power-controller@378 {
@@ -672,8 +677,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "disp0_fe";
-		power-domains = <&ps_disp0_sys>;
-		apple,always-on; /* TODO: figure out if we can enable PM here */
+		power-domains = <&ps_disp0_sys>, <&ps_pmp>;
 	};
 
 	ps_dispext_sys: power-controller@380 {
@@ -691,7 +695,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "dispext_fe";
-		power-domains = <&ps_dispext_sys>;
+		power-domains = <&ps_dispext_sys>, <&ps_pmp>;
 	};
 
 	ps_dispext_cpu0: power-controller@3c8 {
@@ -773,7 +777,6 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "pmp";
-		apple,always-on;
 	};
 
 	ps_pms_sram: power-controller@418 {
@@ -818,6 +821,7 @@
 		#reset-cells = <0>;
 		label = "isp_sys";
 		power-domains = <&ps_rmx1>;
+		status = "disabled";
 	};
 
 	ps_venc_sys: power-controller@440 {
@@ -964,6 +968,123 @@
 		apple,always-on;
 	};
 
+	/*
+	 * There is a dependency tree involved with these PDs, but we do not
+	 * express it here since the ISP driver is supposed to sequence them
+	 * in the right order anyway (and we do not know the exact tree
+	 * structure).
+	 *
+	 * This also works around spurious parent PD activation on machines
+	 * with the ISP disabled (desktops).
+	 */
+	ps_isp_set0: power-controller@4000 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4000 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set0";
+		apple,force-disable;
+	};
+
+	ps_isp_set1: power-controller@4008 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4008 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set1";
+		apple,force-disable;
+		apple,force-reset;
+	};
+
+	ps_isp_set2: power-controller@4010 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4010 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set2";
+		apple,force-disable;
+		apple,force-reset;
+	};
+
+	ps_isp_fe: power-controller@4018 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4018 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_fe";
+	};
+
+	ps_isp_set4: power-controller@4020 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4020 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set4";
+	};
+
+	ps_isp_set5: power-controller@4028 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4028 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set5";
+	};
+
+	ps_isp_set6: power-controller@4030 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4030 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set6";
+	};
+
+	ps_isp_set7: power-controller@4038 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4038 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set7";
+	};
+
+	ps_isp_set8: power-controller@4040 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4040 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set8";
+	};
+
+	ps_isp_set9: power-controller@4048 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4048 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set9";
+	};
+
+	ps_isp_set12: power-controller@4050 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4050 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set10";
+	};
+
+	ps_isp_set10: power-controller@4058 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4058 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set11";
+	};
+
+	ps_isp_set11: power-controller@4060 {
+		compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+		reg = <0x4060 4>;
+		#power-domain-cells = <0>;
+		#reset-cells = <0>;
+		label = "isp_set12";
+	};
+
 	ps_venc_dma: power-controller@8000 {
 		compatible = "apple,t8112-pmgr-pwrstate", "apple,pmgr-pwrstate";
 		reg = <0x8000 4>;
@@ -1064,6 +1185,7 @@
 		#power-domain-cells = <0>;
 		#reset-cells = <0>;
 		label = "msg";
+		apple,always-on; /* Core AON device? */
 	};
 
 	ps_nub_gpio: power-controller@80 {
diff --git a/arch/arm64/boot/dts/apple/t8112.dtsi b/arch/arm64/boot/dts/apple/t8112.dtsi
index 1666e6ab250bc0..9e03c1c6f45ecf 100644
--- a/arch/arm64/boot/dts/apple/t8112.dtsi
+++ b/arch/arm64/boot/dts/apple/t8112.dtsi
@@ -11,6 +11,7 @@
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
+#include <dt-bindings/phy/phy.h>
 #include <dt-bindings/spmi/spmi.h>
 
 / {
@@ -19,6 +20,10 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		gpu = &gpu;
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -190,36 +195,43 @@
 			opp-hz = /bits/ 64 <600000000>;
 			opp-level = <1>;
 			clock-latency-ns = <7500>;
+			opp-microwatt = <26000>;
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <912000000>;
 			opp-level = <2>;
 			clock-latency-ns = <20000>;
+			opp-microwatt = <56000>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1284000000>;
 			opp-level = <3>;
 			clock-latency-ns = <22000>;
+			opp-microwatt = <88000>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1752000000>;
 			opp-level = <4>;
 			clock-latency-ns = <30000>;
+			opp-microwatt = <155000>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <2004000000>;
 			opp-level = <5>;
 			clock-latency-ns = <35000>;
+			opp-microwatt = <231000>;
 		};
 		opp06 {
 			opp-hz = /bits/ 64 <2256000000>;
 			opp-level = <6>;
 			clock-latency-ns = <39000>;
+			opp-microwatt = <254000>;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <2424000000>;
 			opp-level = <7>;
 			clock-latency-ns = <53000>;
+			opp-microwatt = <351000>;
 		};
 	};
 
@@ -231,93 +243,161 @@
 			opp-hz = /bits/ 64 <660000000>;
 			opp-level = <1>;
 			clock-latency-ns = <9000>;
+			opp-microwatt = <133000>;
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <924000000>;
 			opp-level = <2>;
 			clock-latency-ns = <19000>;
+			opp-microwatt = <212000>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1188000000>;
 			opp-level = <3>;
 			clock-latency-ns = <22000>;
+			opp-microwatt = <261000>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1452000000>;
 			opp-level = <4>;
 			clock-latency-ns = <24000>;
+			opp-microwatt = <345000>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <1704000000>;
 			opp-level = <5>;
 			clock-latency-ns = <26000>;
+			opp-microwatt = <441000>;
 		};
 		opp06 {
 			opp-hz = /bits/ 64 <1968000000>;
 			opp-level = <6>;
 			clock-latency-ns = <28000>;
+			opp-microwatt = <619000>;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <2208000000>;
 			opp-level = <7>;
 			clock-latency-ns = <30000>;
+			opp-microwatt = <740000>;
 		};
 		opp08 {
 			opp-hz = /bits/ 64 <2400000000>;
 			opp-level = <8>;
 			clock-latency-ns = <33000>;
+			opp-microwatt = <855000>;
 		};
 		opp09 {
 			opp-hz = /bits/ 64 <2568000000>;
 			opp-level = <9>;
 			clock-latency-ns = <34000>;
+			opp-microwatt = <1006000>;
 		};
 		opp10 {
 			opp-hz = /bits/ 64 <2724000000>;
 			opp-level = <10>;
 			clock-latency-ns = <36000>;
+			opp-microwatt = <1217000>;
 		};
 		opp11 {
 			opp-hz = /bits/ 64 <2868000000>;
 			opp-level = <11>;
 			clock-latency-ns = <41000>;
+			opp-microwatt = <1534000>;
 		};
 		opp12 {
 			opp-hz = /bits/ 64 <2988000000>;
 			opp-level = <12>;
 			clock-latency-ns = <42000>;
+			opp-microwatt = <1714000>;
 		};
 		opp13 {
 			opp-hz = /bits/ 64 <3096000000>;
 			opp-level = <13>;
 			clock-latency-ns = <44000>;
+			opp-microwatt = <1877000>;
 		};
 		opp14 {
 			opp-hz = /bits/ 64 <3204000000>;
 			opp-level = <14>;
 			clock-latency-ns = <46000>;
+			opp-microwatt = <2159000>;
 		};
-		/* Not available until CPU deep sleep is implemented */
-#if 0
 		opp15 {
 			opp-hz = /bits/ 64 <3324000000>;
 			opp-level = <15>;
 			clock-latency-ns = <62000>;
+			opp-microwatt = <2393000>;
 			turbo-mode;
 		};
 		opp16 {
 			opp-hz = /bits/ 64 <3408000000>;
 			opp-level = <16>;
 			clock-latency-ns = <62000>;
+			opp-microwatt = <2497000>;
 			turbo-mode;
 		};
 		opp17 {
 			opp-hz = /bits/ 64 <3504000000>;
 			opp-level = <17>;
 			clock-latency-ns = <62000>;
+			opp-microwatt = <2648000>;
 			turbo-mode;
 		};
-#endif
+	};
+
+	gpu_opp: opp-table-gpu {
+		compatible = "operating-points-v2";
+
+		/*
+		 * NOTE: The voltage and power values are device-specific and
+		 * must be filled in by the bootloader.
+		 */
+		opp00 {
+			opp-hz = /bits/ 64 <0>;
+			opp-microvolt = <400000>;
+			opp-microwatt = <0>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <444000000>;
+			opp-microvolt = <603000>;
+			opp-microwatt = <4295000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <612000000>;
+			opp-microvolt = <675000>;
+			opp-microwatt = <6251000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <808000000>;
+			opp-microvolt = <710000>;
+			opp-microwatt = <8625000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <968000000>;
+			opp-microvolt = <775000>;
+			opp-microwatt = <11948000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1110000000>;
+			opp-microvolt = <820000>;
+			opp-microwatt = <15071000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1236000000>;
+			opp-microvolt = <875000>;
+			opp-microwatt = <18891000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1338000000>;
+			opp-microvolt = <915000>;
+			opp-microwatt = <21960000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <1398000000>;
+			opp-microvolt = <950000>;
+			opp-microwatt = <22800000>;
+		};
 	};
 
 	timer {
@@ -349,6 +429,13 @@
 		clock-output-names = "clkref";
 	};
 
+	clk_200m: clock-200m {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <200000000>;
+		clock-output-names = "clk_200m";
+	};
+
 	/*
 	 * This is a fabulated representation of the input clock
 	 * to NCO since we don't know the true clock tree.
@@ -359,6 +446,40 @@
 		clock-output-names = "nco_ref";
 	};
 
+	/* Pixel clock? frequency in Hz (compare: 4K@60 VGA clock 533.250 MHz) */
+	clk_disp0: clock-disp0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <533333328>;
+		clock-output-names = "clk_disp0";
+	};
+
+	/* Pixel clock? frequency in Hz (compare: 4K@60 VGA clock 533.250 MHz) */
+	clk_dispext0: clock-dispext0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "clk_dispext0";
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
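+		/*
+		 * GPU UAT handoff and page-table carveouts referenced by the
+		 * gpu node; the zero reg entries are placeholders expected to
+		 * be filled in by the loader.
+		 */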
+		uat_handoff: uat-handoff {
+			reg = <0x0 0 0 0>;
+		};
+
+		uat_pagetables: uat-pagetables {
+			reg = <0x0 0 0 0>;
+		};
+
+		uat_ttbs: uat-ttbs {
+			reg = <0x0 0 0 0>;
+		};
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
@@ -366,6 +487,70 @@
 
 		ranges;
 		nonposted-mmio;
+		/* Required to get >32-bit DMA via DARTs */
+		dma-ranges = <0 0 0 0 0xffffffff 0xffffc000>;
+
+		gpu: gpu@206400000 {
+			compatible = "apple,agx-t8112", "apple,agx-g14g";
+			reg = <0x2 0x6400000 0 0x40000>,
+				<0x2 0x4000000 0 0x1000000>;
+			reg-names = "asc", "sgx";
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 697 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 698 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 699 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 700 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 713 IRQ_TYPE_LEVEL_HIGH>;
+			mboxes = <&agx_mbox>;
+			power-domains = <&ps_gfx>;
+			memory-region = <&uat_ttbs>, <&uat_pagetables>, <&uat_handoff>;
+			memory-region-names = "ttbs", "pagetables", "handoff";
+
+			apple,firmware-version = <12 4 0>;
+			apple,firmware-compat = <12 4 0>;
+
+			operating-points-v2 = <&gpu_opp>;
+			apple,perf-base-pstate = <1>;
+			apple,min-sram-microvolt = <780000>;
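+
+			/*
+			 * GPU power/performance controller tuning constants
+			 * (gains, filter time constants, leakage coefficients)
+			 * consumed by the driver.
+			 */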
+			apple,avg-power-filter-tc-ms = <300>;
+			apple,avg-power-ki-only = <9.375>;
+			apple,avg-power-kp = <3.22>;
+			apple,avg-power-min-duty-cycle = <40>;
+			apple,avg-power-target-filter-tc = <1>;
+			apple,fast-die0-integral-gain = <200.0>;
+			apple,fast-die0-proportional-gain = <5.0>;
+			apple,perf-boost-ce-step = <50>;
+			apple,perf-boost-min-util = <90>;
+			apple,perf-filter-drop-threshold = <0>;
+			apple,perf-filter-time-constant = <5>;
+			apple,perf-filter-time-constant2 = <200>;
+			apple,perf-integral-gain = <5.94>;
+			apple,perf-integral-gain2 = <5.94>;
+			apple,perf-integral-min-clamp = <0>;
+			apple,perf-proportional-gain = <14.85>;
+			apple,perf-proportional-gain2 = <14.85>;
+			apple,perf-tgt-utilization = <85>;
+			apple,power-sample-period = <8>;
+			apple,ppm-filter-time-constant-ms = <34>;
+			apple,ppm-ki = <205.0>;
+			apple,ppm-kp = <0.75>;
+			apple,pwr-min-duty-cycle = <40>;
+			apple,core-leak-coef = <1920.0>;
+			apple,sram-leak-coef = <74.0>;
+		};
+
+		agx_mbox: mbox@206408000 {
+			compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x6408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 709 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 710 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 711 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 712 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+		};
 
 		cpufreq_e: cpufreq@210e20000 {
 			compatible = "apple,t8112-cluster-cpufreq", "apple,cluster-cpufreq";
@@ -379,6 +564,201 @@
 			#performance-domain-cells = <0>;
 		};
 
+		display_dfr: display-pipe@228200000 {
+			compatible = "apple,t8112-display-pipe", "apple,h7-display-pipe";
+			reg = <0x2 0x28200000 0x0 0xc000>,
+			      <0x2 0x28400000 0x0 0x4000>;
+			reg-names = "be", "fe";
+			power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 614 IRQ_TYPE_LEVEL_HIGH>,
+				     <AIC_IRQ 618 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "be", "fe";
+			iommus = <&displaydfr_dart 0>;
+			status = "disabled";
+
+			port {
+				dfr_adp_out_mipi: endpoint {
+					remote-endpoint = <&dfr_mipi_in_adp>;
+				};
+			};
+		};
+
+		displaydfr_dart: iommu@228304000 {
+			compatible = "apple,t8110-dart";
+			reg = <0x2 0x28304000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 616 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_dispdfr_fe>;
+			status = "disabled";
+		};
+
+		displaydfr_mipi: dsi@228600000 {
+			compatible = "apple,t8112-display-pipe-mipi", "apple,h7-display-pipe-mipi";
+			reg = <0x2 0x28600000 0x0 0x100000>;
+			power-domains = <&ps_mipi_dsi>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				dfr_mipi_in: port@0 {
+					reg = <0>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					dfr_mipi_in_adp: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&dfr_adp_out_mipi>;
+					};
+				};
+
+				dfr_mipi_out: port@1 {
+					reg = <1>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+				};
+			};
+		};
+
+		isp_dart0: iommu@22c4a8000 {
+			compatible = "apple,t8110-dart";
+			reg = <0x2 0x2c4a8000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 274 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>;
+			status = "disabled";
+		};
+
+		isp_dart1: iommu@22c4b4000 {
+			compatible = "apple,t8110-dart";
+			reg = <0x2 0x2c4b4000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 274 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>;
+			status = "disabled";
+		};
+
+		isp_dart2: iommu@22c4bc000 {
+			compatible = "apple,t8110-dart";
+			reg = <0x2 0x2c4bc000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 274 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>;
+			status = "disabled";
+		};
+
+		isp: isp@22a000000 {
+			compatible = "apple,t8112-isp", "apple,isp";
+			iommus = <&isp_dart0 0>, <&isp_dart1 0>, <&isp_dart2 0>;
+			reg-names = "coproc", "mbox", "gpio", "mbox2";
+			reg = <0x2 0x2a000000 0x0 0x2000000>,
+				<0x2 0x2c4c4000 0x0 0x100>,
+				<0x2 0x2c4c41b0 0x0 0x100>,
+				<0x2 0x2c4c4430 0x0 0x100>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 269 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_isp_sys>, <&ps_isp_set0>,
+				<&ps_isp_set1>, <&ps_isp_set2>, <&ps_isp_fe>,
+				<&ps_isp_set4>, <&ps_isp_set5>, <&ps_isp_set6>,
+				<&ps_isp_set7>, <&ps_isp_set8>, <&ps_isp_set9>,
+				<&ps_isp_set10>, <&ps_isp_set11>,
+				<&ps_isp_set12>;
+
+			apple,dart-vm-size = <0x0 0xa0000000>;
+			status = "disabled";
+		};
+
+		disp0_dart: iommu@231304000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x31304000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 553 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_disp0_cpu0>;
+			apple,dma-range = <0x0 0x0 0xf 0xffff0000>;
+			status = "disabled";
+		};
+
+		dcp_dart: iommu@23130c000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x3130c000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 553 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_disp0_cpu0>;
+			apple,dma-range = <0x8 0x00000000 0x7 0xffff0000>;
+		};
+
+		dcp_mbox: mbox@231c08000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x31c08000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 535 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 536 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 537 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 538 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			power-domains = <&ps_disp0_cpu0>;
+			resets = <&ps_disp0_cpu0>;
+		};
+
+		dcp: dcp@231c00000 {
+			compatible = "apple,t8112-dcp", "apple,dcp";
+			mboxes = <&dcp_mbox>;
+			mbox-names = "mbox";
+			iommus = <&dcp_dart 5>;
+
+			/* the ADT has 2 additional regs which seem to be unused */
+			reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+			reg = <0x2 0x31c00000 0x0 0x4000>,
+				<0x2 0x30000000 0x0 0x61c000>,
+				<0x2 0x31320000 0x0 0x4000>,
+				<0x2 0x31344000 0x0 0x4000>,
+				<0x2 0x31800000 0x0 0x800000>;
+			apple,bw-scratch = <&pmgr_dcp 0 4 0x5d8>;
+			power-domains = <&ps_disp0_cpu0>;
+			resets = <&ps_disp0_cpu0>;
+			clocks = <&clk_disp0>;
+			phandle = <&dcp>;
+			// required bus properties for 'piodma' subdevice
+			#address-cells = <2>;
+			#size-cells = <2>;
+
+			disp0_piodma: piodma {
+				iommus = <&disp0_dart 4>;
+				phandle = <&disp0_piodma>;
+			};
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dcp_audio: endpoint {
+						remote-endpoint = <&dpaudio0_dcp>;
+					};
+				};
+			};
+		};
+
+		display: display-subsystem {
+			compatible = "apple,display-subsystem";
+			/* disp0_dart must be 1st since it is locked */
+			iommus = <&disp0_dart 0>;
+			/* generate phandle explicitly for use in loader */
+			phandle = <&display>;
+		};
+
 		sio_dart: iommu@235004000 {
 			compatible = "apple,t8110-dart";
 			reg = <0x2 0x35004000 0x0 0x4000>;
@@ -467,6 +847,34 @@
 			status = "disabled";
 		};
 
+		spi1: spi@235104000 {
+			compatible = "apple,t8112-spi", "apple,spi";
+			reg = <0x2 0x35104000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 749 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_200m>;
+			pinctrl-0 = <&spi1_pins>;
+			pinctrl-names = "default";
+			power-domains = <&ps_spi1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		spi3: spi@23510c000 {
+			compatible = "apple,t8112-spi", "apple,spi";
+			reg = <0x2 0x3510c000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 751 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clkref>;
+			pinctrl-0 = <&spi3_pins>;
+			pinctrl-names = "default";
+			power-domains = <&ps_spi3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled"; /* only used in J493 */
+		};
+
 		serial0: serial@235200000 {
 			compatible = "apple,s5l-uart";
 			reg = <0x2 0x35200000 0x0 0x1000>;
@@ -495,6 +903,32 @@
 			status = "disabled";
 		};
 
+		sio_mbox: mbox@236408000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x36408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 774 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 775 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 776 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 777 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			power-domains = <&ps_sio_cpu>;
+		};
+
+		sio: sio@236400000 {
+			compatible = "apple,t8112-sio", "apple,sio";
+			reg = <0x2 0x36400000 0x0 0x8000>;
+			dma-channels = <128>;
+			#dma-cells = <1>;
+			mboxes = <&sio_mbox>;
+			iommus = <&sio_dart 0>;
+			power-domains = <&ps_sio_cpu>;
+			resets = <&ps_sio>; /* TODO: verify reset does something */
+			status = "disabled";
+		};
+
 		admac: dma-controller@238200000 {
 			compatible = "apple,t8112-admac", "apple,admac";
 			reg = <0x2 0x38200000 0x0 0x34000>;
@@ -509,6 +943,48 @@
 			resets = <&ps_audio_p>;
 		};
 
+		dpaudio0: audio-controller@238330000 {
+			compatible = "apple,t8112-dpaudio", "apple,dpaudio";
+			reg = <0x2 0x38330000 0x0 0x4000>;
+			dmas = <&sio 0x64>;
+			dma-names = "tx";
+			power-domains = <&ps_dpa0>;
+			reset-domains = <&ps_dpa0>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dpaudio0_dcp: endpoint {
+						remote-endpoint = <&dcp_audio>;
+					};
+				};
+			};
+		};
+
+		dpaudio1: audio-controller@238334000 {
+			compatible = "apple,t8112-dpaudio", "apple,dpaudio";
+			reg = <0x2 0x38334000 0x0 0x4000>;
+			dmas = <&sio 0x66>;
+			dma-names = "tx";
+			power-domains = <&ps_dpa1>;
+			reset-domains = <&ps_dpa1>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dpaudio1_dcp: endpoint {
+						remote-endpoint = <&dcpext_audio>;
+					};
+				};
+			};
+		};
+
 		mca: i2s@238400000 {
 			compatible = "apple,t8112-mca", "apple,mca";
 			reg = <0x2 0x38400000 0x0 0x18000>,
@@ -580,6 +1056,12 @@
 			/* child nodes are added in t8103-pmgr.dtsi */
 		};
 
+		pmgr_dcp: power-management@23b3d0000 {
+			reg = <0x2 0x3b3d0000 0x0 0x4000>;
+			reg-names = "dcp-bw-scratch";
+			#apple,bw-scratch-cells = <3>;
+		};
+
 		pinctrl_ap: pinctrl@23c100000 {
 			compatible = "apple,t8112-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x3c100000 0x0 0x100000>;
@@ -626,13 +1108,20 @@
 					 <APPLE_PINMUX(130, 1)>;
 			};
 
-			spi3_pins: spi3-pins {
+			spi1_pins: spi1-pins {
 				pinmux = <APPLE_PINMUX(46, 1)>,
 					<APPLE_PINMUX(47, 1)>,
 					<APPLE_PINMUX(48, 1)>,
 					<APPLE_PINMUX(49, 1)>;
 			};
 
+			spi3_pins: spi3-pins {
+				pinmux = <APPLE_PINMUX(93, 1)>,
+					<APPLE_PINMUX(94, 1)>,
+					<APPLE_PINMUX(95, 1)>,
+					<APPLE_PINMUX(96, 1)>;
+			};
+
 			pcie_pins: pcie-pins {
 				pinmux = <APPLE_PINMUX(162, 1)>,
 					 <APPLE_PINMUX(163, 1)>,
@@ -641,6 +1130,95 @@
 			};
 		};
 
+		dptxphy: phy@23c500000 {
+			compatible = "apple,t8112-dptx-phy", "apple,dptx-phy";
+			reg = <0x2 0x3c500000 0x0 0x4000>,
+				<0x2 0x3c540000 0x0 0xc000>;
+			reg-names = "core", "dptx";
+			power-domains = <&ps_dptx_ext_phy>;
+			#phy-cells = <0>;
+			#reset-cells = <0>;
+			status = "disabled"; /* only used on j473 */
+		};
+
+		nub_spmi: spmi@23d714000 {
+			compatible = "apple,t8112-spmi", "apple,spmi";
+			reg = <0x2 0x3d714000 0x0 0x100>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 256 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 384 IRQ_TYPE_LEVEL_HIGH>;
+
+			pmu1: pmu@e {
+				compatible = "apple,stowe-pmu", "apple,spmi-pmu";
+				reg = <0xe SPMI_USID>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				rtc_nvmem@f800 {
+					compatible = "apple,spmi-pmu-nvmem";
+					reg = <0xf800 0x300>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					pm_setting: pm-setting@1 {
+						reg = <0x1 0x1>;
+					};
+
+					rtc_offset: rtc-offset@100 {
+						reg = <0x100 0x6>;
+					};
+				};
+
+				legacy_nvmem@f700 {
+					compatible = "apple,spmi-pmu-nvmem";
+					reg = <0xf700 0x20>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					boot_stage: boot-stage@1 {
+						reg = <0x1 0x1>;
+					};
+
+					boot_error_count: boot-error-count@2 {
+						reg = <0x2 0x1>;
+						bits = <0 4>;
+					};
+
+					panic_count: panic-count@2 {
+						reg = <0x2 0x1>;
+						bits = <4 4>;
+					};
+
+					boot_error_stage: boot-error-stage@3 {
+						reg = <0x3 0x1>;
+					};
+
+					shutdown_flag: shutdown-flag@f {
+						reg = <0xf 0x1>;
+						bits = <3 1>;
+					};
+				};
+
+				scrpad_nvmem@8000 {
+					compatible = "apple,spmi-pmu-nvmem";
+					reg = <0x8000 0x2800>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					fault_shadow: fault-shadow@67b {
+						reg = <0x67b 0x10>;
+					};
+
+					socd: socd@b00 {
+						reg = <0xb00 0x400>;
+					};
+				};
+			};
+		};
+
 		pinctrl_nub: pinctrl@23d1f0000 {
 			compatible = "apple,t8112-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x3d1f0000 0x0 0x4000>;
@@ -679,6 +1257,141 @@
 			interrupts = <AIC_IRQ 379 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		efuse@23d2c8000 {
+			compatible = "apple,t8112-efuses", "apple,efuses";
+			reg = <0x2 0x3d2c8000 0x0 0x1000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			atcphy0_auspll_rodco_bias_adjust: efuse@480,20 {
+				reg = <0x480 4>;
+				bits = <20 3>;
+			};
+
+			atcphy0_auspll_rodco_encap: efuse@480,23 {
+				reg = <0x480 4>;
+				bits = <23 2>;
+			};
+
+			atcphy0_auspll_dtc_vreg_adjust: efuse@480,25 {
+				reg = <0x480 4>;
+				bits = <25 3>;
+			};
+
+			atcphy0_auspll_fracn_dll_start_capcode: efuse@480,28 {
+				reg = <0x480 4>;
+				bits = <28 2>;
+			};
+
+			atcphy0_aus_cmn_shm_vreg_trim: efuse@480,30 {
+				reg = <0x480 8>;
+				bits = <30 5>;
+			};
+
+			atcphy0_cio3pll_dco_coarsebin0: efuse@484,3 {
+				reg = <0x484 4>;
+				bits = <3 6>;
+			};
+
+			atcphy0_cio3pll_dco_coarsebin1: efuse@484,9 {
+				reg = <0x484 4>;
+				bits = <9 6>;
+			};
+
+			atcphy0_cio3pll_dll_start_capcode: efuse@484,15 {
+				reg = <0x484 4>;
+				bits = <15 2>;
+			};
+
+			atcphy0_cio3pll_dtc_vreg_adjust: efuse@484,17 {
+				reg = <0x484 0x4>;
+				bits = <17 3>;
+			};
+
+			atcphy1_auspll_rodco_bias_adjust: efuse@484,30 {
+				reg = <0x484 8>;
+				bits = <30 3>;
+			};
+
+			atcphy1_auspll_rodco_encap: efuse@488,1 {
+				reg = <0x488 8>;
+				bits = <1 2>;
+			};
+
+			atcphy1_auspll_dtc_vreg_adjust: efuse@488,3 {
+				reg = <0x488 4>;
+				bits = <3 3>;
+			};
+
+			atcphy1_auspll_fracn_dll_start_capcode: efuse@488,6 {
+				reg = <0x488 4>;
+				bits = <6 2>;
+			};
+
+			atcphy1_aus_cmn_shm_vreg_trim: efuse@488,8 {
+				reg = <0x488 4>;
+				bits = <8 5>;
+			};
+
+			atcphy1_cio3pll_dco_coarsebin0: efuse@488,13 {
+				reg = <0x488 4>;
+				bits = <13 6>;
+			};
+
+			atcphy1_cio3pll_dco_coarsebin1: efuse@488,19 {
+				reg = <0x488 4>;
+				bits = <19 6>;
+			};
+
+			atcphy1_cio3pll_dll_start_capcode: efuse@488,25 {
+				reg = <0x488 4>;
+				bits = <25 2>;
+			};
+
+			atcphy1_cio3pll_dtc_vreg_adjust: efuse@488,27 {
+				reg = <0x488 0x4>;
+				bits = <27 3>;
+			};
+		};
+
+		smc_mbox: mbox@23e408000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x3e408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 499 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 500 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 501 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 502 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+		};
+
+		smc: smc@23e400000 {
+			compatible = "apple,t8112-smc", "apple,smc";
+			reg = <0x2 0x3e400000 0x0 0x4000>,
+				<0x2 0x3fe00000 0x0 0x100000>;
+			reg-names = "smc", "sram";
+			mboxes = <&smc_mbox>;
+
+			smc_gpio: gpio {
+				gpio-controller;
+				#gpio-cells = <2>;
+			};
+
+			smc_rtc: rtc {
+				nvmem-cells = <&rtc_offset>;
+				nvmem-cell-names = "rtc_offset";
+			};
+
+			smc_reboot: reboot {
+				nvmem-cells = <&shutdown_flag>, <&boot_stage>,
+					<&boot_error_count>, <&panic_count>, <&pm_setting>;
+				nvmem-cell-names = "shutdown_flag", "boot_stage",
+					"boot_error_count", "panic_count", "pm_setting";
+			};
+		};
+
 		pinctrl_smc: pinctrl@23e820000 {
 			compatible = "apple,t8112-pinctrl", "apple,pinctrl";
 			reg = <0x2 0x3e820000 0x0 0x4000>;
@@ -721,6 +1434,254 @@
 				     <AIC_IRQ 307 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		aop_mbox: mbox@24a408000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x4a408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 318 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 319 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 320 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 321 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			status = "disabled";
+		};
+
+		aop_dart: iommu@24a808000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x4a808000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 338 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		aop_admac: dma-controller@24a980000 {
+			/*
+			 * Use "admac2" until commit "dmaengine: apple-admac:
+			 * Avoid accessing registers in probe" has been upstream
+			 * long enough (not yet as of 2024-12-30)
+			 */
+			// compatible = "apple,t8112-admac", "apple,admac";
+			compatible = "apple,t8112-admac2", "apple,admac2";
+			reg = <0x2 0x4a980000 0x0 0x34000>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
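+			/* only the third of the four interrupt outputs is connected */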
+			interrupts-extended = <0>,
+					      <0>,
+					      <&aic AIC_IRQ 359 IRQ_TYPE_LEVEL_HIGH>,
+					      <0>;
+			iommus = <&aop_dart 10>;
+			status = "disabled";
+		};
+
+		aop: aop@24ac00000 {
+			compatible = "apple,t8112-aop";
+			reg = <0x2 0x4ac00000 0x0 0x1e0000>,
+			      <0x2 0x4a400000 0x0 0x6c000>;
+			mboxes = <&aop_mbox>;
+			mbox-names = "mbox";
+			iommus = <&aop_dart 0>;
+
+			/* HACK: ensure probe order */
+			dmas = <&aop_admac 1023>;
+			dma-names = "invalid-order-only";
+
+			status = "disabled";
+
+			aop_audio: audio {
+				dmas = <&aop_admac 1>;
+				dma-names = "dma";
+			};
+
+			aop_als: als {
+				// intentionally empty
+			};
+		};
+
+		mtp: mtp@24e400000 {
+			compatible = "apple,t8112-mtp", "apple,t8112-rtk-helper-asc4", "apple,mtp", "apple,rtk-helper-asc4";
+			reg = <0x2 0x4e400000 0x0 0x4000>,
+				<0x2 0x4ec00000 0x0 0x100000>;
+			reg-names = "asc", "sram";
+			mboxes = <&mtp_mbox>;
+			iommus = <&mtp_dart 1>;
+			#helper-cells = <0>;
+
+			status = "disabled";
+		};
+
+		mtp_mbox: mbox@24e408000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x4e408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 864 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 865 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 866 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 867 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+
+			status = "disabled";
+		};
+
+		mtp_dart: iommu@24e808000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x4e808000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 848 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+
+			status = "disabled";
+		};
+
+		mtp_dockchannel: fifo@24eb14000 {
+			compatible = "apple,t8112-dockchannel", "apple,dockchannel";
+			reg = <0x2 0x4eb14000 0x0 0x4000>;
+			reg-names = "irq";
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 850 IRQ_TYPE_LEVEL_HIGH>;
+
+			ranges = <0 0x2 0x4eb28000 0x20000>;
+			nonposted-mmio;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			status = "disabled";
+
+			mtp_hid: input@8000 {
+				compatible = "apple,dockchannel-hid";
+				reg = <0x8000 0x4000>,
+					<0xc000 0x4000>,
+					<0x0000 0x4000>,
+					<0x4000 0x4000>;
+				reg-names = "config", "data",
+					"rmt-config", "rmt-data";
+				iommus = <&mtp_dart 1>;
+				interrupt-parent = <&mtp_dockchannel>;
+				interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
+					<3 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "tx", "rx";
+
+				apple,fifo-size = <0x800>;
+				apple,helper-cpu = <&mtp>;
+			};
+		};
+
+		sep_dart: iommu@25d2c0000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x5d2c0000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 282 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		sep: sep@25e400000 {
+			compatible = "apple,sep";
+			reg = <0x2 0x5e400000 0x0 0x6C000>;
+			mboxes = <&sep_mbox>;
+			mbox-names = "mbox";
+			iommus = <&sep_dart 0>;
+			status = "disabled";
+		};
+
+		sep_mbox: mbox@25e408000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x5e408000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 276 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 277 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 278 IRQ_TYPE_LEVEL_HIGH>,
+				<AIC_IRQ 279 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+				"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+		};
+
+		dispext0_dart: iommu@271304000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x71304000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			apple,dma-range = <0x0 0x0 0xf 0xffff0000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 593 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_dispext_cpu0>;
+			status = "disabled";
+		};
+
+		dcpext_dart: iommu@27130c000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x2 0x7130c000 0x0 0x4000>;
+			#iommu-cells = <1>;
+			apple,dma-range = <0x8 0x0 0x7 0xffff0000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 593 IRQ_TYPE_LEVEL_HIGH>;
+			power-domains = <&ps_dispext_cpu0>;
+			status = "disabled";
+		};
+
+		dcpext_mbox: mbox@271c08000 {
+			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
+			reg = <0x2 0x71c08000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 578 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 579 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 580 IRQ_TYPE_LEVEL_HIGH>,
+			<AIC_IRQ 581 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "send-empty", "send-not-empty",
+			"recv-empty", "recv-not-empty";
+			#mbox-cells = <0>;
+			power-domains = <&ps_dispext_cpu0>;
+			resets = <&ps_dispext_cpu0>;
+			status = "disabled";
+		};
+
+		dcpext: dcp@271c00000 {
+			compatible = "apple,t8112-dcpext", "apple,dcpext";
+			mboxes = <&dcpext_mbox>;
+			mbox-names = "mbox";
+			iommus = <&dcpext_dart 5>;
+			phandle = <&dcpext>;
+
+			/* the ADT has 2 additional regs which seem to be unused */
+			reg-names = "coproc", "disp-0", "disp-1", "disp-2", "disp-3";
+			reg = <0x2 0x71c00000 0x0 0x4000>,
+			      <0x2 0x70000000 0x0 0x61C000>,
+			      <0x2 0x71320000 0x0 0x4000>,
+			      <0x2 0x71344000 0x0 0x4000>,
+			      <0x2 0x71800000 0x0 0x800000>;
+			apple,bw-scratch = <&pmgr_dcp 0 4 0x5e0>;
+			power-domains = <&ps_dispext_cpu0>;
+			resets = <&ps_dispext_cpu0>;
+			clocks = <&clk_dispext0>;
+			apple,dcp-index = <1>;
+			status = "disabled";
+			// required bus properties for 'piodma' subdevice
+			#address-cells = <2>;
+			#size-cells = <2>;
+
+			piodma {
+				iommus = <&dispext0_dart 4>;
+			};
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dcpext_audio: endpoint {
+						remote-endpoint = <&dpaudio1_dcp>;
+					};
+				};
+			};
+		};
+
 		ans_mbox: mbox@277408000 {
 			compatible = "apple,t8112-asc-mailbox", "apple,asc-mailbox-v4";
 			reg = <0x2 0x77408000 0x0 0x4000>;
@@ -755,6 +1716,148 @@
 			resets = <&ps_ans>;
 		};
 
+		dwc3_0: usb@382280000 {
+			compatible = "apple,t8112-dwc3", "apple,dwc3", "snps,dwc3";
+			reg = <0x3 0x82280000 0x0 0x100000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 1031 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "otg";
+			usb-role-switch;
+			role-switch-default-mode = "host";
+			iommus = <&dwc3_0_dart_0 0>, <&dwc3_0_dart_1 1>;
+			power-domains = <&ps_atc0_usb>;
+			resets = <&atcphy0>;
+			phys = <&atcphy0 PHY_TYPE_USB2>, <&atcphy0 PHY_TYPE_USB3>;
+			phy-names = "usb2-phy", "usb3-phy";
+		};
+
+		dwc3_0_dart_0: iommu@382f00000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x3 0x82f00000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 1035 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc0_usb>;
+		};
+
+		dwc3_0_dart_1: iommu@382f80000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x3 0x82f80000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 1035 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc0_usb>;
+		};
+
+		atcphy0: phy@383000000 {
+			compatible = "apple,t8112-atcphy", "apple,t8103-atcphy";
+			reg = <0x3 0x83000000 0x0 0x4c000>,
+				<0x3 0x83050000 0x0 0x8000>,
+				<0x3 0x80000000 0x0 0x4000>,
+				<0x3 0x82a90000 0x0 0x4000>,
+				<0x3 0x82a84000 0x0 0x4000>;
+			reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+				"pipehandler";
+
+			#phy-cells = <1>;
+			#reset-cells = <0>;
+
+			nvmem-cells = <&atcphy0_aus_cmn_shm_vreg_trim>,
+				<&atcphy0_auspll_rodco_encap>,
+				<&atcphy0_auspll_rodco_bias_adjust>,
+				<&atcphy0_auspll_fracn_dll_start_capcode>,
+				<&atcphy0_auspll_dtc_vreg_adjust>,
+				<&atcphy0_cio3pll_dco_coarsebin0>,
+				<&atcphy0_cio3pll_dco_coarsebin1>,
+				<&atcphy0_cio3pll_dll_start_capcode>,
+				<&atcphy0_cio3pll_dtc_vreg_adjust>;
+			nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+				"auspll_rodco_encap",
+				"auspll_rodco_bias_adjust",
+				"auspll_fracn_dll_start_capcode",
+				"auspll_dtc_vreg_adjust",
+				"cio3pll_dco_coarsebin0",
+				"cio3pll_dco_coarsebin1",
+				"cio3pll_dll_start_capcode",
+				"cio3pll_dtc_vreg_adjust";
+
+			orientation-switch;
+			mode-switch;
+			svid = <0xff01>, <0x8087>;
+			power-domains = <&ps_atc0_usb>;
+		};
+
+		dwc3_1: usb@502280000 {
+			compatible = "apple,t8112-dwc3", "apple,dwc3", "snps,dwc3";
+			reg = <0x5 0x02280000 0x0 0x100000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 1112 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "otg";
+			usb-role-switch;
+			role-switch-default-mode = "host";
+			iommus = <&dwc3_1_dart_0 0>, <&dwc3_1_dart_1 1>;
+			power-domains = <&ps_atc1_usb>;
+			resets = <&atcphy1>;
+			phys = <&atcphy1 PHY_TYPE_USB2>, <&atcphy1 PHY_TYPE_USB3>;
+			phy-names = "usb2-phy", "usb3-phy";
+		};
+
+		dwc3_1_dart_0: iommu@502f00000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x5 0x02f00000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 1116 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc1_usb>;
+		};
+
+		dwc3_1_dart_1: iommu@502f80000 {
+			compatible = "apple,t8112-dart", "apple,t8110-dart";
+			reg = <0x5 0x02f80000 0x0 0x4000>;
+			interrupt-parent = <&aic>;
+			interrupts = <AIC_IRQ 1116 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <1>;
+			power-domains = <&ps_atc1_usb>;
+		};
+
+		atcphy1: phy@503000000 {
+			compatible = "apple,t8112-atcphy", "apple,t8103-atcphy";
+			reg = <0x5 0x03000000 0x0 0x4c000>,
+				<0x5 0x03050000 0x0 0x8000>,
+				<0x5 0x0 0x0 0x4000>,
+				<0x5 0x02a90000 0x0 0x4000>,
+				<0x5 0x02a84000 0x0 0x4000>;
+			reg-names = "core", "lpdptx", "axi2af", "usb2phy",
+				"pipehandler";
+
+			nvmem-cells = <&atcphy1_aus_cmn_shm_vreg_trim>,
+				<&atcphy1_auspll_rodco_encap>,
+				<&atcphy1_auspll_rodco_bias_adjust>,
+				<&atcphy1_auspll_fracn_dll_start_capcode>,
+				<&atcphy1_auspll_dtc_vreg_adjust>,
+				<&atcphy1_cio3pll_dco_coarsebin0>,
+				<&atcphy1_cio3pll_dco_coarsebin1>,
+				<&atcphy1_cio3pll_dll_start_capcode>,
+				<&atcphy1_cio3pll_dtc_vreg_adjust>;
+			nvmem-cell-names = "aus_cmn_shm_vreg_trim",
+				"auspll_rodco_encap",
+				"auspll_rodco_bias_adjust",
+				"auspll_fracn_dll_start_capcode",
+				"auspll_dtc_vreg_adjust",
+				"cio3pll_dco_coarsebin0",
+				"cio3pll_dco_coarsebin1",
+				"cio3pll_dll_start_capcode",
+				"cio3pll_dtc_vreg_adjust";
+
+			#phy-cells = <1>;
+			#reset-cells = <0>;
+
+			orientation-switch;
+			mode-switch;
+			svid = <0xff01>, <0x8087>;
+			power-domains = <&ps_atc1_usb>;
+		};
+
 		pcie0_dart: iommu@681008000 {
 			compatible = "apple,t8110-dart";
 			reg = <0x6 0x81008000 0x0 0x4000>;
diff --git a/arch/arm64/include/asm/apple_cpufeature.h b/arch/arm64/include/asm/apple_cpufeature.h
new file mode 100644
index 00000000000000..4370d91ffa3ec9
--- /dev/null
+++ b/arch/arm64/include/asm/apple_cpufeature.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_APPLE_CPUFEATURES_H
+#define __ASM_APPLE_CPUFEATURES_H
+
+#include <linux/bits.h>
+#include <asm/sysreg.h>
+
+#define AIDR_APPLE_TSO_SHIFT	9
+#define AIDR_APPLE_TSO		BIT(9)
+
+#define ACTLR_APPLE_TSO_SHIFT	1
+#define ACTLR_APPLE_TSO		BIT(1)
+
+#endif
diff --git a/arch/arm64/include/asm/apple_m1_pmu.h b/arch/arm64/include/asm/apple_m1_pmu.h
index 99483b19b99fca..02e05d05851f73 100644
--- a/arch/arm64/include/asm/apple_m1_pmu.h
+++ b/arch/arm64/include/asm/apple_m1_pmu.h
@@ -37,6 +37,7 @@
 #define PMCR0_PMI_ENABLE_8_9	GENMASK(45, 44)
 
 #define SYS_IMP_APL_PMCR1_EL1	sys_reg(3, 1, 15, 1, 0)
+#define SYS_IMP_APL_PMCR1_EL12	sys_reg(3, 1, 15, 7, 2)
 #define PMCR1_COUNT_A64_EL0_0_7	GENMASK(15, 8)
 #define PMCR1_COUNT_A64_EL1_0_7	GENMASK(23, 16)
 #define PMCR1_COUNT_A64_EL0_8_9	GENMASK(41, 40)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 0b5ca6e0eb0932..9d769291a3067b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
 		 * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
 		*/
 		return true;
+	case ARM64_HAS_PMUV3:
+		return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
 	}
 
 	return true;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e0e4478f5fb52d..970ed9f10cf864 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -525,29 +525,6 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
 }
 
-/*
- * Fields that identify the version of the Performance Monitors Extension do
- * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
- * "Alternative ID scheme used for the Performance Monitors Extension version".
- */
-static inline u64 __attribute_const__
-cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
-{
-	u64 val = cpuid_feature_extract_unsigned_field(features, field);
-	u64 mask = GENMASK_ULL(field + 3, field);
-
-	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-		val = 0;
-
-	if (val > cap) {
-		features &= ~mask;
-		features |= (cap << field) & mask;
-	}
-
-	return features;
-}
-
 static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
 {
 	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -866,6 +843,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
 	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
 }
 
+static inline bool system_supports_pmuv3(void)
+{
+	return cpus_have_final_cap(ARM64_HAS_PMUV3);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 
@@ -943,6 +925,12 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 	return 8;
 }
 
+static __always_inline bool system_has_actlr_state(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_ACTLR_STATE) &&
+		alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE);
+}
+
 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
 struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
 
@@ -1066,6 +1054,10 @@ static inline bool cpu_has_lpa2(void)
 #endif
 }
 
+void __init init_cpucap_indirect_list_impdef(void);
+void __init init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps);
+bool cpufeature_matches(u64 reg, const struct arm64_cpu_capabilities *entry);
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 78ec1ef2cfe82a..a30704228584fa 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -80,6 +80,11 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	if (!vcpu_has_run_once(vcpu))
 		vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
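+	/*
+	 * When the CPU can virtualize ACTLR_EL1 (the guest's value is
+	 * context-switched via ACTLR_EL12 in the sysreg save/restore code),
+	 * there is no need to trap the guest's ACTLR accesses.
+	 */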
+	if (IS_ENABLED(CONFIG_ARM64_ACTLR_STATE) && (
+			alternative_has_cap_unlikely(ARM64_HAS_ACTLR_VIRT) ||
+			alternative_has_cap_unlikely(ARM64_HAS_ACTLR_VIRT_APPLE)
+		))
+		vcpu->arch.hcr_el2 &= ~HCR_TACR;
 
 	/*
 	 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 717829df294eaf..c82b524b50158e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -112,7 +112,7 @@
 
 #define DIRECT_MAP_PHYSMEM_END	__pa(PAGE_END - 1)
 
-#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
+#define MIN_THREAD_SHIFT	(15 + KASAN_THREAD_SHIFT)
 
 /*
  * VMAP'd stacks are allocated at page granularity, so we must ensure that such
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 1bf1a3b16e8864..dd8acb0d2f3f61 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -192,6 +192,9 @@ struct thread_struct {
 	u64			gcs_base;
 	u64			gcs_size;
 #endif
+#ifdef CONFIG_ARM64_ACTLR_STATE
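+	/* per-thread IMPDEF ACTLR_EL1 state (e.g. the Apple TSO bit) */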
+	u64			actlr;
+#endif
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 71c29a2a2f190f..cd59419330d9fa 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -34,6 +34,7 @@ obj-y			:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
 			   syscall.o proton-pack.o idle.o patching.o pi/	\
+			   cpufeature_impdef.o					\
 			   rsi.o
 
 obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o			\
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7ce55586289518..8d479d0ea50a3e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -194,6 +194,43 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
+static const struct midr_range impdef_pmuv3_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
+	{},
+};
+
+static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	unsigned int pmuver;
+
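+	/* The IMPDEF traps are only usable when running at EL2 (VHE) */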
+	if (!is_kernel_in_hyp_mode())
+		return false;
+
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+		return false;
+
+	return is_midr_in_range_list(read_cpuid_id(), impdef_pmuv3_cpus);
+}
+
+static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
+{
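+	/*
+	 * IMPDEF HACR_EL2 bit that makes the guest's PMUv3 sysreg accesses
+	 * trap to EL2; see kvm_hyp_handle_impdef() for how the resulting
+	 * traps are decoded via AFSR1_EL2.
+	 */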
+	sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
+}
+
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
 static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
@@ -786,6 +823,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
 	},
 #endif
+	{
+		.desc = "Apple IMPDEF PMUv3 Traps",
+		.capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = has_impdef_pmuv3,
+		.cpu_enable = cpu_enable_impdef_pmuv3_traps,
+	},
 	{
 		.desc = "Broken CNTVOFF_EL2",
 		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 59e9dca1595d3f..761148f1d2d0f2 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1070,7 +1070,7 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
 extern const struct arm64_cpu_capabilities arm64_errata[];
 static const struct arm64_cpu_capabilities arm64_features[];
 
-static void __init
+void __init
 init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
 {
 	for (; caps->matches; caps++) {
@@ -1577,8 +1577,8 @@ has_always(const struct arm64_cpu_capabilities *entry, int scope)
 	return true;
 }
 
-static bool
-feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
+bool
+cpufeature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
 	int val, min, max;
 	u64 tmp;
@@ -1631,14 +1631,14 @@ has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
 	if (!mask)
 		return false;
 
-	return feature_matches(val, entry);
+	return cpufeature_matches(val, entry);
 }
 
 static bool
 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	u64 val = read_scoped_sysreg(entry, scope);
-	return feature_matches(val, entry);
+	return cpufeature_matches(val, entry);
 }
 
 const struct cpumask *system_32bit_el0_cpumask(void)
@@ -1905,6 +1905,26 @@ static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
 }
 #endif
 
+static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	unsigned int pmuver;
+
+	/*
+	 * PMUVer follows the standard ID scheme for an unsigned field with the
+	 * exception of 0xF (IMP_DEF) which is treated specially and implies
+	 * FEAT_PMUv3 is not implemented.
+	 *
+	 * See DDI0487L.a D24.1.3.2 for more details.
+	 */
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+		return false;
+
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
+}
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define KPTI_NG_TEMP_VA		(-(1UL << PMD_SHIFT))
 
@@ -3005,6 +3025,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, GCS, IMP)
 	},
+#endif
+#ifdef CONFIG_HW_PERF_EVENTS
+	{
+		.desc = "PMUv3",
+		.capability = ARM64_HAS_PMUV3,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_pmuv3,
+	},
 #endif
 	{},
 };
@@ -3315,10 +3343,38 @@ static void update_cpu_capabilities(u16 scope_mask)
 
 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
 	for (i = 0; i < ARM64_NCAPS; i++) {
+		bool matches;
+
 		caps = cpucap_ptrs[i];
-		if (!caps || !(caps->type & scope_mask) ||
-		    cpus_have_cap(caps->capability) ||
-		    !caps->matches(caps, cpucap_default_scope(caps)))
+		if (!caps || !(caps->type & scope_mask))
+			continue;
+
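+		/*
+		 * Caps that are already set are only re-evaluated for the
+		 * local CPU scope, so that a secondary CPU missing the
+		 * feature can have it cleared below.
+		 */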
+		if (!(scope_mask & SCOPE_LOCAL_CPU) && cpus_have_cap(caps->capability))
+			continue;
+
+		matches = caps->matches(caps, cpucap_default_scope(caps));
+
+		if (matches == cpus_have_cap(caps->capability))
+			continue;
+
+		if (!matches) {
+			/*
+			 * Cap detected on boot CPU but not this CPU,
+			 * disable it if not optional.
+			 */
+			if (!cpucap_late_cpu_optional(caps)) {
+				__clear_bit(caps->capability, system_cpucaps);
+				pr_info("missing on secondary: %s\n", caps->desc);
+			}
+			continue;
+		}
+
+		if (!(scope_mask & (SCOPE_BOOT_CPU | SCOPE_SYSTEM)) &&
+		    cpucap_late_cpu_permitted(caps))
+			/*
+			 * Cap detected on this CPU but not boot CPU,
+			 * skip it if permitted for late CPUs.
+			 */
 			continue;
 
 		if (caps->desc && !caps->cpus)
@@ -3704,6 +3760,7 @@ void __init setup_boot_cpu_features(void)
 	 * handle the boot CPU.
 	 */
 	init_cpucap_indirect_list();
+	init_cpucap_indirect_list_impdef();
 
 	/*
 	 * Detect broken pseudo-NMI. Must be called _before_ the call to
diff --git a/arch/arm64/kernel/cpufeature_impdef.c b/arch/arm64/kernel/cpufeature_impdef.c
new file mode 100644
index 00000000000000..3407034b7878eb
--- /dev/null
+++ b/arch/arm64/kernel/cpufeature_impdef.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Contains implementation-defined CPU feature definitions.
+ */
+
+#define pr_fmt(fmt) "CPU features: " fmt
+
+#include <asm/cpufeature.h>
+#include <asm/apple_cpufeature.h>
+#include <linux/irqflags.h>
+#include <linux/preempt.h>
+#include <linux/printk.h>
+
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+static bool has_apple_feature(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 val;
+
+	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+
+	if (read_cpuid_implementor() != ARM_CPU_IMP_APPLE)
+		return false;
+
+	val = read_sysreg(aidr_el1);
+	return cpufeature_matches(val, entry);
+}
+
+static bool has_apple_tso(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 val;
+
+	if (!has_apple_feature(entry, scope))
+		return false;
+
+	/*
+	 * KVM and old versions of the macOS hypervisor will advertise TSO in
+	 * AIDR_EL1, but then ignore writes to ACTLR_EL1. Test that the bit is
+	 * actually writable before enabling TSO.
+	 */
+
+	val = read_sysreg(actlr_el1);
+	write_sysreg(val ^ ACTLR_APPLE_TSO, actlr_el1);
+	if (!((val ^ read_sysreg(actlr_el1)) & ACTLR_APPLE_TSO)) {
+		pr_info_once("CPU advertises Apple TSO but it is broken, ignoring\n");
+		return false;
+	}
+
+	write_sysreg(val, actlr_el1);
+	return true;
+}
+
+static bool has_tso_fixed(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	/* List of CPUs that always use the TSO memory model */
+	static const struct midr_range fixed_tso_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
+		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+		MIDR_ALL_VERSIONS(MIDR_FUJITSU_A64FX),
+		{ /* sentinel */ }
+	};
+
+	return is_midr_in_range_list(read_cpuid_id(), fixed_tso_list);
+}
+#endif
+
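+/*
+ * M1-generation cores only provide an IMPDEF encoding of ACTLR_EL12, while
+ * M2 and later appear to implement the architectural one; see the ACTLR
+ * save/restore in the KVM sysreg code.
+ */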
+static bool has_apple_actlr_virt_impdef(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 midr = read_cpuid_id() & MIDR_CPU_MODEL_MASK;
+
+	return midr >= MIDR_APPLE_M1_ICESTORM && midr <= MIDR_APPLE_M1_FIRESTORM_MAX;
+}
+
+static bool has_apple_actlr_virt(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 midr = read_cpuid_id() & MIDR_CPU_MODEL_MASK;
+
+	return midr >= MIDR_APPLE_M2_BLIZZARD && midr <= MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, 0xfff);
+}
+
+static const struct arm64_cpu_capabilities arm64_impdef_features[] = {
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+	{
+		.desc = "TSO memory model (Apple)",
+		.capability = ARM64_HAS_TSO_APPLE,
+		.type = SCOPE_LOCAL_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU,
+		.matches = has_apple_tso,
+		.field_pos = AIDR_APPLE_TSO_SHIFT,
+		.field_width = 1,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+		.max_field_value = 1,
+	},
+	{
+		.desc = "TSO memory model (Fixed)",
+		.capability = ARM64_HAS_TSO_FIXED,
+		.type = SCOPE_LOCAL_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU,
+		.matches = has_tso_fixed,
+	},
+#endif
+	{
+		.desc = "ACTLR virtualization (IMPDEF, Apple)",
+		.capability = ARM64_HAS_ACTLR_VIRT_APPLE,
+		.type = SCOPE_LOCAL_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU,
+		.matches = has_apple_actlr_virt_impdef,
+	},
+	{
+		.desc = "ACTLR virtualization (architectural?)",
+		.capability = ARM64_HAS_ACTLR_VIRT,
+		.type = SCOPE_LOCAL_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU,
+		.matches = has_apple_actlr_virt,
+	},
+	{},
+};
+
+void __init init_cpucap_indirect_list_impdef(void)
+{
+	init_cpucap_indirect_list_from_array(arm64_impdef_features);
+}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index ef3a69cc398e51..e705c64138ce31 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -112,11 +112,6 @@ KVM_NVHE_ALIAS(broken_cntvoff_key);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 
-/* PMU available static key */
-#ifdef CONFIG_HW_PERF_EVENTS
-KVM_NVHE_ALIAS(kvm_arm_pmu_available);
-#endif
-
 /* Position-independent library routines */
 KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
 KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 42faebb7b71232..5a65f6b068ba3d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -41,8 +41,10 @@
 #include <linux/thread_info.h>
 #include <linux/prctl.h>
 #include <linux/stacktrace.h>
+#include <linux/memory_ordering_model.h>
 
 #include <asm/alternative.h>
+#include <asm/apple_cpufeature.h>
 #include <asm/arch_timer.h>
 #include <asm/compat.h>
 #include <asm/cpufeature.h>
@@ -433,6 +435,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		if (system_supports_poe())
 			p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
 
+#ifdef CONFIG_ARM64_ACTLR_STATE
+		if (system_has_actlr_state())
+			p->thread.actlr = read_sysreg(actlr_el1);
+#endif
+
 		if (stack_start) {
 			if (is_compat_thread(task_thread_info(p)))
 				childregs->compat_sp = stack_start;
@@ -659,6 +666,65 @@ void update_sctlr_el1(u64 sctlr)
 	isb();
 }
 
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+int arch_prctl_mem_model_get(struct task_struct *t)
+{
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE) &&
+		t->thread.actlr & ACTLR_APPLE_TSO)
+		return PR_SET_MEM_MODEL_TSO;
+
+	return PR_SET_MEM_MODEL_DEFAULT;
+}
+
+int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
+{
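+	/* Fixed-TSO CPUs always run with TSO, so the request trivially succeeds */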
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_FIXED) &&
+	    val == PR_SET_MEM_MODEL_TSO)
+		return 0;
+
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE)) {
+		WARN_ON(!system_has_actlr_state());
+
+		switch (val) {
+		case PR_SET_MEM_MODEL_TSO:
+			t->thread.actlr |= ACTLR_APPLE_TSO;
+			break;
+		case PR_SET_MEM_MODEL_DEFAULT:
+			t->thread.actlr &= ~ACTLR_APPLE_TSO;
+			break;
+		default:
+			return -EINVAL;
+		}
+		write_sysreg(t->thread.actlr, actlr_el1);
+		return 0;
+	}
+
+	if (val == PR_SET_MEM_MODEL_DEFAULT)
+		return 0;
+
+	return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_ARM64_ACTLR_STATE
+/*
+ * IMPDEF control register ACTLR_EL1 handling. Some CPUs use this to
+ * expose features that can be controlled by userspace.
+ */
+static void actlr_thread_switch(struct task_struct *next)
+{
+	if (!system_has_actlr_state())
+		return;
+
+	current->thread.actlr = read_sysreg(actlr_el1);
+	write_sysreg(next->thread.actlr, actlr_el1);
+}
+#else
+static inline void actlr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
 /*
  * Thread switching.
  */
@@ -678,6 +744,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	ptrauth_thread_switch_user(next);
 	permission_overlay_switch(next);
 	gcs_thread_switch(next);
+	actlr_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -799,6 +866,10 @@ void arch_setup_new_exec(void)
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 					 PR_SPEC_ENABLE);
 	}
+
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
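+	/* A freshly exec'd task starts out with the default memory model */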
+	arch_prctl_mem_model_set(current, PR_SET_MEM_MODEL_DEFAULT);
+#endif
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 85104587f849df..3d28d929a9b4ab 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -368,6 +368,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 */
 	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 #endif
+#ifdef CONFIG_ARM64_ACTLR_STATE
+	/*
+	 * Store the boot CPU ACTLR_EL1 value as the default. It will only
+	 * actually be restored during context switching if the platform is
+	 * known to use ACTLR_EL1 for exposable features and its layout is
+	 * known to be the same on all CPUs.
+	 */
+	init_task.thread.actlr = read_sysreg(actlr_el1);
+#endif
 
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
 		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 1a479df5d78eed..1f20c4fc3b68ef 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -366,7 +366,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = get_num_wrps();
 		break;
 	case KVM_CAP_ARM_PMU_V3:
-		r = kvm_arm_support_pmu_v3();
+		r = kvm_supports_guest_pmuv3();
 		break;
 	case KVM_CAP_ARM_INJECT_SERROR_ESR:
 		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
@@ -1390,7 +1390,7 @@ static unsigned long system_supported_vcpu_features(void)
 	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
 		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
 
-	if (!kvm_arm_support_pmu_v3())
+	if (!kvm_supports_guest_pmuv3())
 		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
 
 	if (!system_supports_sve())
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 23bbe28eaaf95d..b741ea6aefa58f 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -244,7 +244,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
 	 * EL1 instead of being trapped to EL2.
 	 */
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;
 
 		write_sysreg(0, pmselr_el0);
@@ -281,7 +281,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	write_sysreg(0, hstr_el2);
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;
 
 		hctxt = host_data_ptr(host_ctxt);
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 76ff095c6b6ebf..95698a2ccd8711 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,6 +16,9 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
+#define SYS_IMP_APL_ACTLR_EL12		sys_reg(3, 6, 15, 14, 6)
+#define SYS_ACTLR_EL12			sys_reg(3, 5, 1, 0, 1)
+
 static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
 
 static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
@@ -136,6 +139,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 	ctxt_sys_reg(ctxt, SP_EL1)	= read_sysreg(sp_el1);
 	ctxt_sys_reg(ctxt, ELR_EL1)	= read_sysreg_el1(SYS_ELR);
 	ctxt_sys_reg(ctxt, SPSR_EL1)	= read_sysreg_el1(SYS_SPSR);
+	if (IS_ENABLED(CONFIG_ARM64_ACTLR_STATE)) {
+		if (alternative_has_cap_unlikely(ARM64_HAS_ACTLR_VIRT))
+			ctxt_sys_reg(ctxt, ACTLR_EL1)	= read_sysreg_s(SYS_ACTLR_EL12);
+		else if (alternative_has_cap_unlikely(ARM64_HAS_ACTLR_VIRT_APPLE))
+			ctxt_sys_reg(ctxt, ACTLR_EL1)	= read_sysreg_s(SYS_IMP_APL_ACTLR_EL12);
+	}
 }
 
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
@@ -214,6 +223,13 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
 	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1),	par_el1);
 	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1),	tpidr_el1);
 
+	if (IS_ENABLED(CONFIG_ARM64_ACTLR_STATE)) {
+		if (alternative_has_cap_unlikely(ARM64_HAS_ACTLR_VIRT))
+			write_sysreg_s(ctxt_sys_reg(ctxt, ACTLR_EL1), SYS_ACTLR_EL12);
+		else if (alternative_has_cap_unlikely(ARM64_HAS_ACTLR_VIRT_APPLE))
+			write_sysreg_s(ctxt_sys_reg(ctxt, ACTLR_EL1), SYS_IMP_APL_ACTLR_EL12);
+	}
+
 	if (ctxt_has_mte(ctxt)) {
 		write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
 		write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 647737d6e8d0b5..731a0378ed1328 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -527,6 +527,25 @@ static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return kvm_hyp_handle_sysreg(vcpu, exit_code);
 }
 
+static bool kvm_hyp_handle_impdef(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	u64 iss;
+
+	if (!cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
+		return false;
+
+	/*
+	 * Compute a synthetic ESR for a sysreg trap. Conveniently, AFSR1_EL2
+	 * is populated with a correct ISS for a sysreg trap. These fruity
+	 * parts are 64bit only, so unconditionally set IL.
+	 */
+	iss = ESR_ELx_ISS(read_sysreg_s(SYS_AFSR1_EL2));
+	vcpu->arch.fault.esr_el2 = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SYS64) |
+				   FIELD_PREP(ESR_ELx_ISS_MASK, iss) |
+				   ESR_ELx_IL;
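+
+	/* Let the regular exit path handle the now well-formed sysreg trap */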
+	return false;
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]		= NULL,
 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
@@ -538,6 +557,9 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_ERET]		= kvm_hyp_handle_eret,
 	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
+
+	/* Apple shenanigans */
+	[0x3F]				= kvm_hyp_handle_impdef,
 };
 
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6c5950b9ceac88..51fb47e0bf893a 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -17,8 +17,6 @@
 
 #define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)
 
-DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
 static LIST_HEAD(arm_pmus);
 static DEFINE_MUTEX(arm_pmus_lock);
 
@@ -26,6 +24,12 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
+bool kvm_supports_guest_pmuv3(void)
+{
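+	/* A guest PMUv3 can be offered once at least one host PMU has registered */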
+	guard(mutex)(&arm_pmus_lock);
+	return !list_empty(&arm_pmus);
+}
+
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
 	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
@@ -673,6 +677,20 @@ static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
 	return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
 }
 
+static int kvm_map_pmu_event(struct kvm *kvm, unsigned int eventsel)
+{
+	struct arm_pmu *pmu = kvm->arch.arm_pmu;
+
+	/*
+	 * The CPU PMU likely isn't PMUv3; let the driver provide a mapping
+	 * for the guest's PMUv3 event ID.
+	 */
+	if (unlikely(pmu->map_pmuv3_event))
+		return pmu->map_pmuv3_event(eventsel);
+
+	return eventsel;
+}
+
 /**
  * kvm_pmu_create_perf_event - create a perf event for a counter
  * @pmc: Counter context
@@ -683,7 +701,8 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct perf_event *event;
 	struct perf_event_attr attr;
-	u64 eventsel, evtreg;
+	int eventsel;
+	u64 evtreg;
 
 	evtreg = kvm_pmc_read_evtreg(pmc);
 
@@ -709,6 +728,14 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
 		return;
 
+	/*
+	 * Don't create an event if we're running on hardware that requires
+	 * PMUv3 event translation and we couldn't find a valid mapping.
+	 */
+	eventsel = kvm_map_pmu_event(vcpu->kvm, eventsel);
+	if (eventsel < 0)
+		return;
+
 	memset(&attr, 0, sizeof(struct perf_event_attr));
 	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
@@ -786,29 +813,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
 		return;
 
-	mutex_lock(&arm_pmus_lock);
+	guard(mutex)(&arm_pmus_lock);
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
-		goto out_unlock;
+		return;
 
 	entry->arm_pmu = pmu;
 	list_add_tail(&entry->entry, &arm_pmus);
-
-	if (list_is_singular(&arm_pmus))
-		static_branch_enable(&kvm_arm_pmu_available);
-
-out_unlock:
-	mutex_unlock(&arm_pmus_lock);
 }
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct arm_pmu *tmp, *pmu = NULL;
 	struct arm_pmu_entry *entry;
+	struct arm_pmu *pmu;
 	int cpu;
 
-	mutex_lock(&arm_pmus_lock);
+	guard(mutex)(&arm_pmus_lock);
 
 	/*
 	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
@@ -829,21 +850,53 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 	 */
 	cpu = raw_smp_processor_id();
 	list_for_each_entry(entry, &arm_pmus, entry) {
-		tmp = entry->arm_pmu;
+		pmu = entry->arm_pmu;
 
-		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
-			pmu = tmp;
-			break;
-		}
+		if (cpumask_test_cpu(cpu, &pmu->supported_cpus))
+			return pmu;
 	}
 
-	mutex_unlock(&arm_pmus_lock);
+	return NULL;
+}
+
+static u64 __compute_pmceid(struct arm_pmu *pmu, bool pmceid1)
+{
+	u32 hi[2], lo[2];
 
-	return pmu;
+	bitmap_to_arr32(lo, pmu->pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+	bitmap_to_arr32(hi, pmu->pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+	return ((u64)hi[pmceid1] << 32) | lo[pmceid1];
+}
+
+static u64 compute_pmceid0(struct arm_pmu *pmu)
+{
+	u64 val = __compute_pmceid(pmu, 0);
+
+	/* always support SW_INCR */
+	val |= BIT(ARMV8_PMUV3_PERFCTR_SW_INCR);
+	/* always support CHAIN */
+	val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
+	return val;
+}
+
+static u64 compute_pmceid1(struct arm_pmu *pmu)
+{
+	u64 val = __compute_pmceid(pmu, 1);
+
+	/*
+	 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
+	 * as RAZ
+	 */
+	val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
+		 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
+		 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
+	return val;
 }
 
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
+	struct arm_pmu *cpu_pmu = vcpu->kvm->arch.arm_pmu;
 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
 	u64 val, mask = 0;
 	int base, i, nr_events;
@@ -852,19 +905,10 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		return 0;
 
 	if (!pmceid1) {
-		val = read_sysreg(pmceid0_el0);
-		/* always support CHAIN */
-		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
+		val = compute_pmceid0(cpu_pmu);
 		base = 0;
 	} else {
-		val = read_sysreg(pmceid1_el0);
-		/*
-		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
-		 * as RAZ
-		 */
-		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
-			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
-			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
+		val = compute_pmceid1(cpu_pmu);
 		base = 32;
 	}
 
@@ -994,6 +1038,13 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
 {
 	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
 
+	/*
+	 * PMUv3 requires that all event counters are capable of counting any
+	 * event, though the same may not be true of non-PMUv3 hardware.
+	 */
+	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
+		return 1;
+
 	/*
 	 * The arm_pmu->cntr_mask considers the fixed counter(s) as well.
 	 * Ignore those and return only the general-purpose counters.
@@ -1205,13 +1256,26 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
 u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
-	u64 tmp;
+	unsigned int pmuver;
+
+	pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer,
+			       read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
+
+	/*
+	 * Spoof a barebones PMUv3 implementation if the system supports IMPDEF
+	 * traps of the PMUv3 sysregs
+	 */
+	if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
+		return ID_AA64DFR0_EL1_PMUVer_IMP;
+
+	/*
+	 * Otherwise, treat IMPLEMENTATION DEFINED functionality as
+	 * unimplemented
+	 */
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+		return 0;
 
-	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
-	tmp = cpuid_feature_cap_perfmon_field(tmp,
-					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
-					      ID_AA64DFR0_EL1_PMUVer_V3P5);
-	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
+	return min(pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5);
 }
 
 /**
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 0b3adf3e17b49e..6b48a3d16d0d5e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -41,7 +41,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
+	if (!system_supports_pmuv3() || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
@@ -57,7 +57,7 @@ void kvm_clr_pmu_events(u64 clr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3())
+	if (!system_supports_pmuv3())
 		return;
 
 	pmu->events_host &= ~clr;
@@ -133,7 +133,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;
 
 	preempt_disable();
@@ -154,7 +154,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;
 
 	pmu = kvm_get_pmu_events();
@@ -180,7 +180,7 @@ bool kvm_set_pmuserenr(u64 val)
 	struct kvm_cpu_context *hctxt;
 	struct kvm_vcpu *vcpu;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return false;
 
 	vcpu = kvm_get_running_vcpu();
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 1e65f2fb45bd17..0f7a3e594a4777 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -8,6 +8,8 @@ BTI
 # Unreliable: use system_supports_32bit_el0() instead.
 HAS_32BIT_EL0_DO_NOT_USE
 HAS_32BIT_EL1
+HAS_ACTLR_VIRT
+HAS_ACTLR_VIRT_APPLE
 HAS_ADDRESS_AUTH
 HAS_ADDRESS_AUTH_ARCH_QARMA3
 HAS_ADDRESS_AUTH_ARCH_QARMA5
@@ -45,6 +47,7 @@ HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
 HAS_PAN
+HAS_PMUV3
 HAS_S1PIE
 HAS_S1POE
 HAS_RAS_EXTN
@@ -54,6 +57,8 @@ HAS_STAGE2_FWB
 HAS_TCR2
 HAS_TIDCP1
 HAS_TLB_RANGE
+HAS_TSO_APPLE
+HAS_TSO_FIXED
 HAS_VA52
 HAS_VIRT_HOST_EXTN
 HAS_WFXT
@@ -104,6 +109,7 @@ WORKAROUND_CAVIUM_TX2_219_TVM
 WORKAROUND_CLEAN_CACHE
 WORKAROUND_DEVICE_LOAD_ACQUIRE
 WORKAROUND_NVIDIA_CARMEL_CNP
+WORKAROUND_PMUV3_IMPDEF_TRAPS
 WORKAROUND_QCOM_FALKOR_E1003
 WORKAROUND_QCOM_ORYON_CNTVOFF
 WORKAROUND_REPEAT_TLBI
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 472540aeabc235..366e07546344b1 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -106,7 +106,7 @@ static void parse_args(int argc, char **argv)
 	}
 }
 
-#define BUFSIZE 256
+#define BUFSIZE 4096
 
 int main(int argc, char **argv)
 {
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 174a6439a412b9..a40e8972bd64a4 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -190,6 +190,20 @@ static ssize_t profile_show(struct device *dev,
 	return sysfs_emit(buf, "%s\n", profile_names[profile]);
 }
 
+/**
+ * profile_notify_legacy - Notify the legacy sysfs interface
+ *
+ * This wrapper takes care of only notifying the legacy sysfs interface
+ * if it was registered during module initialization.
+ */
+static void profile_notify_legacy(void)
+{
+	if (!acpi_kobj)
+		return;
+
+	sysfs_notify(acpi_kobj, NULL, "platform_profile");
+}
+
 /**
  * profile_store - Set the profile for a class device
  * @dev: The device
@@ -215,7 +229,7 @@ static ssize_t profile_store(struct device *dev,
 			return ret;
 	}
 
-	sysfs_notify(acpi_kobj, NULL, "platform_profile");
+	profile_notify_legacy();
 
 	return count;
 }
@@ -437,7 +451,7 @@ static ssize_t platform_profile_store(struct kobject *kobj,
 			return ret;
 	}
 
-	sysfs_notify(acpi_kobj, NULL, "platform_profile");
+	profile_notify_legacy();
 
 	return count;
 }
@@ -474,6 +488,22 @@ static const struct attribute_group platform_profile_group = {
 	.is_visible = profile_class_is_visible,
 };
 
+/**
+ * profile_update_legacy - Update the legacy sysfs interface
+ *
+ * This wrapper takes care of only updating the legacy sysfs interface
+ * if it was registered during module initialization.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int profile_update_legacy(void)
+{
+	if (!acpi_kobj)
+		return 0;
+
+	return sysfs_update_group(acpi_kobj, &platform_profile_group);
+}
+
 /**
  * platform_profile_notify - Notify class device and legacy sysfs interface
  * @dev: The class device
@@ -483,7 +513,7 @@ void platform_profile_notify(struct device *dev)
 	scoped_cond_guard(mutex_intr, return, &profile_lock) {
 		_notify_class_profile(dev, NULL);
 	}
-	sysfs_notify(acpi_kobj, NULL, "platform_profile");
+	profile_notify_legacy();
 }
 EXPORT_SYMBOL_GPL(platform_profile_notify);
 
@@ -532,7 +562,7 @@ int platform_profile_cycle(void)
 			return err;
 	}
 
-	sysfs_notify(acpi_kobj, NULL, "platform_profile");
+	profile_notify_legacy();
 
 	return 0;
 }
@@ -606,9 +636,9 @@ struct device *platform_profile_register(struct device *dev, const char *name,
 		goto cleanup_ida;
 	}
 
-	sysfs_notify(acpi_kobj, NULL, "platform_profile");
+	profile_notify_legacy();
 
-	err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+	err = profile_update_legacy();
 	if (err)
 		goto cleanup_cur;
 
@@ -640,9 +670,8 @@ int platform_profile_remove(struct device *dev)
 	device_unregister(&pprof->dev);
 	ida_free(&platform_profile_ida, id);
 
-	sysfs_notify(acpi_kobj, NULL, "platform_profile");
-
-	sysfs_update_group(acpi_kobj, &platform_profile_group);
+	profile_notify_legacy();
+	profile_update_legacy();
 
 	return 0;
 }
@@ -696,16 +725,28 @@ static int __init platform_profile_init(void)
 	if (err)
 		return err;
 
-	err = sysfs_create_group(acpi_kobj, &platform_profile_group);
-	if (err)
-		class_unregister(&platform_profile_class);
+	/*
+	 * The ACPI kobject can be missing if ACPI was disabled during booting.
+	 * We thus skip the initialization of the legacy sysfs interface in such
+	 * cases to allow the platform profile class to work on ARM64 notebooks
+	 * without ACPI support.
+	 */
+	if (acpi_kobj) {
+		err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+		if (err < 0) {
+			class_unregister(&platform_profile_class);
+			return err;
+		}
+	}
 
-	return err;
+	return 0;
 }
 
 static void __exit platform_profile_exit(void)
 {
-	sysfs_remove_group(acpi_kobj, &platform_profile_group);
+	if (acpi_kobj)
+		sysfs_remove_group(acpi_kobj, &platform_profile_group);
+
 	class_unregister(&platform_profile_class);
 }
 module_init(platform_profile_init);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 93019bb6998ebf..97841d03f778ad 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2331,6 +2331,32 @@ static void fw_devlink_link_device(struct device *dev)
 	__fw_devlink_link_to_suppliers(dev, fwnode);
 }
 
+/**
+ * fw_devlink_count_absent_consumers - Count absent consumers of a supplier
+ * @fwnode: fwnode of the supplier
+ *
+ * Return: the number of consumers that either have not been created yet or do
+ * not yet have a driver attached.
+ */
+int fw_devlink_count_absent_consumers(struct fwnode_handle *fwnode)
+{
+	struct fwnode_link *link, *tmp;
+	struct device_link *dlink, *dtmp;
+	struct device *sup_dev = get_dev_from_fwnode(fwnode);
+	int count = 0;
+
+	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+		count++;
+
+	if (!sup_dev)
+		return count;
+
+	list_for_each_entry_safe(dlink, dtmp, &sup_dev->links.consumers, s_node)
+		if (dlink->consumer->links.status != DL_DEV_DRIVER_BOUND)
+			count++;
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(fw_devlink_count_absent_consumers);
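A minimal sketch of how a supplier driver could consult this helper, e.g. to refuse runtime suspend while consumers are still expected to come up. Only the fw_devlink_count_absent_consumers() and dev_fwnode() calls are existing API; the suspend hook itself is hypothetical:

	/* Hypothetical supplier runtime-PM hook, assuming <linux/property.h> */
	static int example_supplier_runtime_suspend(struct device *dev)
	{
		/* Stay powered while any consumer is missing or not yet bound. */
		if (fw_devlink_count_absent_consumers(dev_fwnode(dev)) > 0)
			return -EBUSY;

		return 0;
	}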
+
 /* Device links support end. */
 
 static struct kobject *dev_kobj;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index cb0912ea3e627e..093e1f70760fa3 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -471,6 +471,8 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
 static char fw_path_para[256];
 static const char * const fw_path[] = {
 	fw_path_para,
+	"/lib/firmware/vendor/" UTS_RELEASE,
+	"/lib/firmware/vendor",
 	"/lib/firmware/updates/" UTS_RELEASE,
 	"/lib/firmware/updates",
 	"/lib/firmware/" UTS_RELEASE,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b378d2aa49f069..34bc46d1f3f353 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -972,9 +972,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
 	if (!file)
 		return -EBADF;
 
-	if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
-		return -EINVAL;
-
 	error = loop_check_backing_file(file);
 	if (error)
 		return error;
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index ddf3629d88940f..d07e76ae2c13f4 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -27,7 +27,7 @@ use kernel::{
 module! {
     type: NullBlkModule,
     name: "rnull_mod",
-    author: "Andreas Hindborg",
+    authors: ["Andreas Hindborg"],
     description: "Rust implementation of the C null block driver",
     license: "GPL v2",
 }
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index a1ee475d180dac..c6870f08457632 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -130,3 +130,11 @@ config ARM_QCOM_SPM_CPUIDLE
 	  The Subsystem Power Manager (SPM) controls low power modes for the
 	  CPU and L2 cores. It interface with various system drivers to put
 	  the cores in low power modes.
+
+config ARM_APPLE_CPUIDLE
+	bool "Apple SoC CPU idle driver"
+	depends on ARM64
+	default ARCH_APPLE
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	help
+	  Select this to enable cpuidle on Apple SoCs.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 1de9e92c5b0fc9..f9e7a71d52c13f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_ARM_PSCI_CPUIDLE)		+= cpuidle-psci.o
 obj-$(CONFIG_ARM_PSCI_CPUIDLE_DOMAIN)	+= cpuidle-psci-domain.o
 obj-$(CONFIG_ARM_TEGRA_CPUIDLE)		+= cpuidle-tegra.o
 obj-$(CONFIG_ARM_QCOM_SPM_CPUIDLE)	+= cpuidle-qcom-spm.o
+obj-$(CONFIG_ARM_APPLE_CPUIDLE)		+= cpuidle-apple.o
 
 ###############################################################################
 # MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-apple.c b/drivers/cpuidle/cpuidle-apple.c
new file mode 100644
index 00000000000000..27b9144b979d3a
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-apple.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright The Asahi Linux Contributors
+ *
+ * CPU idle support for Apple SoCs
+ */
+
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <asm/cpuidle.h>
+
+#define DEEP_WFI_STATE_RETENTION BIT(2) // retains base CPU registers in deep WFI
+
+enum idle_state {
+	STATE_WFI,
+	STATE_PWRDOWN,
+	STATE_COUNT
+};
+
+asm(
+	".pushsection .cpuidle.text, \"ax\"\n"
+	".type apple_cpu_deep_wfi, @function\n"
+	"apple_cpu_deep_wfi:\n"
+		"str x30, [sp, #-16]!\n"
+		"stp x28, x29, [sp, #-16]!\n"
+		"stp x26, x27, [sp, #-16]!\n"
+		"stp x24, x25, [sp, #-16]!\n"
+		"stp x22, x23, [sp, #-16]!\n"
+		"stp x20, x21, [sp, #-16]!\n"
+		"stp x18, x19, [sp, #-16]!\n"
+
+		"mrs x0, s3_5_c15_c5_0\n"
+		"orr x0, x0, #(3L << 24)\n"
+		"msr s3_5_c15_c5_0, x0\n"
+
+	"1:\n"
+		"dsb sy\n"
+		"wfi\n"
+
+		"mrs x0, ISR_EL1\n"
+		"cbz x0, 1b\n"
+
+		"mrs x0, s3_5_c15_c5_0\n"
+		"bic x0, x0, #(1L << 24)\n"
+		"msr s3_5_c15_c5_0, x0\n"
+
+		"ldp x18, x19, [sp], #16\n"
+		"ldp x20, x21, [sp], #16\n"
+		"ldp x22, x23, [sp], #16\n"
+		"ldp x24, x25, [sp], #16\n"
+		"ldp x26, x27, [sp], #16\n"
+		"ldp x28, x29, [sp], #16\n"
+		"ldr x30, [sp], #16\n"
+
+		"ret\n"
+	".popsection\n"
+);
+
+void apple_cpu_deep_wfi(void);
+
+static __cpuidle int apple_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)
+{
+	cpu_do_idle();
+	return index;
+}
+
+static __cpuidle int apple_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)
+{
+	/*
+	 * Deep WFI will clobber FP state, among other things.
+	 * The CPU PM notifier will take care of saving that and anything else
+	 * that needs to be notified of the CPU powering down.
+	 */
+	if (cpu_pm_enter())
+		return -1;
+
+	ct_cpuidle_enter();
+
+	switch (index) {
+	case STATE_PWRDOWN:
+		apple_cpu_deep_wfi();
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	ct_cpuidle_exit();
+
+	cpu_pm_exit();
+
+	return index;
+}
+
+static struct cpuidle_driver apple_idle_driver = {
+	.name = "apple_idle",
+	.owner = THIS_MODULE,
+	.states = {
+		[STATE_WFI] = {
+			.enter			= apple_enter_wfi,
+			.enter_s2idle		= apple_enter_wfi,
+			.exit_latency		= 1,
+			.target_residency	= 1,
+			.power_usage            = UINT_MAX,
+			.name			= "WFI",
+			.desc			= "CPU clock-gated",
+			.flags			= 0,
+		},
+		[STATE_PWRDOWN] = {
+			.enter			= apple_enter_idle,
+			.enter_s2idle		= apple_enter_idle,
+			.exit_latency		= 10,
+			.target_residency	= 10000,
+			.power_usage            = 0,
+			.name			= "CPU PD",
+			.desc			= "CPU/cluster powered down",
+			.flags			= CPUIDLE_FLAG_RCU_IDLE,
+		},
+	},
+	.safe_state_index = STATE_WFI,
+	.state_count = STATE_COUNT,
+};
+
+static int apple_cpuidle_probe(struct platform_device *pdev)
+{
+	return cpuidle_register(&apple_idle_driver, NULL);
+}
+
+static struct platform_driver apple_cpuidle_driver = {
+	.driver = {
+		.name = "cpuidle-apple",
+	},
+	.probe = apple_cpuidle_probe,
+};
+
+static int __init apple_cpuidle_init(void)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	ret = platform_driver_register(&apple_cpuidle_driver);
+	if (ret)
+		return ret;
+
+	if (!of_machine_is_compatible("apple,arm-platform"))
+		return 0;
+
+	if (!FIELD_GET(DEEP_WFI_STATE_RETENTION, read_sysreg(aidr_el1))) {
+		pr_info("cpuidle-apple: CPU does not retain state in deep WFI\n");
+		return 0;
+	}
+
+	pdev = platform_device_register_simple("cpuidle-apple", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		platform_driver_unregister(&apple_cpuidle_driver);
+		return PTR_ERR(pdev);
+	}
+
+	return 0;
+}
+device_initcall(apple_cpuidle_init);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8afea2e2336027..a63cd8cf038b37 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,10 +89,22 @@ config APPLE_ADMAC
 	tristate "Apple ADMAC support"
 	depends on ARCH_APPLE || COMPILE_TEST
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	default ARCH_APPLE
 	help
 	  Enable support for Audio DMA Controller found on Apple Silicon SoCs.
 
+config APPLE_SIO
+	tristate "Apple SIO support"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on APPLE_RTKIT
+	depends on OF_ADDRESS
+	select DMA_ENGINE
+	default m if ARCH_APPLE
+	help
+	  Enable support for the SIO coprocessor found on Apple Silicon SoCs
+	  where it provides DMA services.
+
 config AT_HDMAC
 	tristate "Atmel AHB DMA support"
 	depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 19ba465011a6d5..734c1aa75f3d11 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
+obj-$(CONFIG_APPLE_SIO) += apple-sio.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index bd49f037429121..21c194e2581a44 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -254,7 +254,7 @@ static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
 		size_t period_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
-	struct admac_chan *adchan = container_of(chan, struct admac_chan, chan);
+	struct admac_chan *adchan = to_admac_chan(chan);
 	struct admac_tx *adtx;
 
 	if (direction != admac_chan_direction(adchan->no))
@@ -936,6 +936,7 @@ static void admac_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id admac_of_match[] = {
+	{ .compatible = "apple,admac2", },
 	{ .compatible = "apple,admac", },
 	{ }
 };
diff --git a/drivers/dma/apple-sio.c b/drivers/dma/apple-sio.c
new file mode 100644
index 00000000000000..511f91999ed3de
--- /dev/null
+++ b/drivers/dma/apple-sio.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Driver for SIO coprocessor on t8103 (M1) and other Apple SoCs
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/apple/rtkit.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define NCHANNELS_MAX	0x80
+
+#define REG_CPU_CONTROL	0x44
+#define CPU_CONTROL_RUN BIT(4)
+
+#define SIOMSG_DATA	GENMASK(63, 32)
+#define SIOMSG_TYPE	GENMASK(23, 16)
+#define SIOMSG_PARAM	GENMASK(31, 24)
+#define SIOMSG_TAG	GENMASK(13, 8)
+#define SIOMSG_EP	GENMASK(7, 0)
+
+#define EP_SIO		0x20
+
+#define MSG_START	0x2
+#define MSG_SETUP	0x3
+#define MSG_CONFIGURE	0x5
+#define MSG_ISSUE	0x6
+#define MSG_TERMINATE	0x8
+#define MSG_ACK		0x65
+#define MSG_NACK	0x66
+#define MSG_STARTED	0x67
+#define MSG_REPORT	0x68
+
+#define SIO_CALL_TIMEOUT_MS	100
+#define SIO_SHMEM_SIZE		0x1000
+#define SIO_NO_DESC_SLOTS	64
+
+/*
+ * There are two kinds of 'transaction descriptors' in play here.
+ *
+ * There's the struct sio_tx, and the struct dma_async_tx_descriptor embedded
+ * inside, which jointly represent a transaction to the dmaengine subsystem.
+ * At this time we only support those transactions to be cyclic.
+ *
+ * Then there are the coprocessor descriptors, which is what the coprocessor
+ * knows and understands. These don't seem to have a cyclic regime, so we can't
+ * map the dmaengine transaction on an exact coprocessor counterpart. Instead
+ * we continually queue up many coprocessor descriptors to implement a cyclic
+ * transaction.
+ *
+ * The number below is the maximum of how far ahead (how many) coprocessor
+ * descriptors we should be queuing up, per channel, for a cyclic transaction.
+ * Basically it's a made-up number.
+ */
+#define SIO_MAX_NINFLIGHT	4
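For context, a minimal consumer-side sketch of the kind of cyclic transfer this scheme backs, using only the generic dmaengine API; the channel name, buffer handling and error paths are illustrative rather than taken from a real client:

	#include <linux/dmaengine.h>

	static int example_start_cyclic(struct device *dev, dma_addr_t buf,
					size_t buf_len, size_t period_len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;

		chan = dma_request_chan(dev, "tx");	/* name is illustrative */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* One cyclic descriptor from the client's point of view; the
		 * driver keeps re-issuing coprocessor descriptors underneath. */
		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}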
+
+struct sio_coproc_desc {
+	u32 pad1;
+	u32 flag;
+	u64 unk;
+	u64 iova;
+	u64 size;
+	u64 pad2;
+	u64 pad3;
+} __packed;
+static_assert(sizeof(struct sio_coproc_desc) == 48);
+
+struct sio_shmem_chan_config {
+	u32 datashape;
+	u32 timeout;
+	u32 fifo;
+	u32 threshold;
+	u32 limit;
+} __packed;
+
+struct sio_data;
+struct sio_tx;
+
+struct sio_chan {
+	unsigned int no;
+	struct sio_data *host;
+	struct virt_dma_chan vc;
+	struct work_struct terminate_wq;
+
+	bool configured;
+	struct sio_shmem_chan_config cfg;
+
+	struct sio_tx *current_tx;
+};
+
+#define SIO_NTAGS		16
+
+typedef void (*sio_ack_callback)(struct sio_chan *, void *, bool);
+
+struct sio_data {
+	void __iomem *base;
+	struct dma_device dma;
+	struct device *dev;
+	struct apple_rtkit *rtk;
+	void *shmem;
+	struct sio_coproc_desc *shmem_desc_base;
+	unsigned long *desc_allocated;
+
+	struct sio_tagdata {
+		DECLARE_BITMAP(allocated, SIO_NTAGS);
+		int last_tag;
+
+		struct completion completions[SIO_NTAGS];
+		bool atomic[SIO_NTAGS];
+		bool acked[SIO_NTAGS];
+
+		sio_ack_callback ack_callback[SIO_NTAGS];
+		void *cookie[SIO_NTAGS];
+	} tags;
+
+	int nchannels;
+	struct sio_chan channels[];
+};
+
+struct sio_tx {
+	struct virt_dma_desc vd;
+	struct completion done;
+
+	bool terminated;
+	size_t period_len;
+	int nperiods;
+	int ninflight;
+	int next;
+
+	struct sio_coproc_desc *siodesc[];
+};
+
+static int sio_send_siomsg(struct sio_data *sio, u64 msg);
+static int sio_send_siomsg_atomic(struct sio_data *sio, u64 msg,
+				  sio_ack_callback ack_callback,
+				  void *cookie);
+static int sio_call(struct sio_data *sio, u64 msg);
+
+static struct sio_chan *to_sio_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sio_chan, vc.chan);
+}
+
+static struct sio_tx *to_sio_tx(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct sio_tx, vd.tx);
+}
+
+static int sio_alloc_tag(struct sio_data *sio)
+{
+	struct sio_tagdata *tags = &sio->tags;
+	int tag, i;
+
+	/*
+	 * Because tag number 0 is special, the usable tag range
+	 * is 1...(SIO_NTAGS - 1). So, to pick the next usable tag,
+	 * we do modulo (SIO_NTAGS - 1) *then* plus one.
+	 */
+
+#define SIO_USABLE_TAGS (SIO_NTAGS - 1)
+	tag = (READ_ONCE(tags->last_tag) % SIO_USABLE_TAGS) + 1;
+
+	for (i = 0; i < SIO_USABLE_TAGS; i++) {
+		if (!test_and_set_bit(tag, tags->allocated))
+			break;
+
+		tag = (tag % SIO_USABLE_TAGS) + 1;
+	}
+
+	WRITE_ONCE(tags->last_tag, tag);
+
+	if (i < SIO_USABLE_TAGS)
+		return tag;
+	else
+		return -EBUSY;
+#undef SIO_USABLE_TAGS
+}
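Worked example of the wrap-around above: with SIO_NTAGS = 16 the usable tags are 1..15. Starting from last_tag = 15, the first candidate is (15 % 15) + 1 = 1, then 2, 3, and so on; after probing all 15 usable tags without finding a free one the function gives up with -EBUSY.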
+
+static void sio_free_tag(struct sio_data *sio, int tag)
+{
+	struct sio_tagdata *tags = &sio->tags;
+
+	if (WARN_ON(tag >= SIO_NTAGS))
+		return;
+
+	tags->atomic[tag] = false;
+	tags->ack_callback[tag] = NULL;
+
+	WARN_ON(!test_and_clear_bit(tag, tags->allocated));
+}
+
+static void sio_set_tag_atomic(struct sio_data *sio, int tag,
+			       sio_ack_callback ack_callback,
+			       void *cookie)
+{
+	struct sio_tagdata *tags = &sio->tags;
+
+	tags->atomic[tag] = true;
+	tags->ack_callback[tag] = ack_callback;
+	tags->cookie[tag] = cookie;
+}
+
+static struct sio_coproc_desc *sio_alloc_desc(struct sio_data *sio)
+{
+	int i;
+
+	for (i = 0; i < SIO_NO_DESC_SLOTS; i++)
+		if (!test_and_set_bit(i, sio->desc_allocated))
+			return sio->shmem_desc_base + i;
+
+	return NULL;
+}
+
+static void sio_free_desc(struct sio_data *sio, struct sio_coproc_desc *desc)
+{
+	clear_bit(desc - sio->shmem_desc_base, sio->desc_allocated);
+}
+
+static int sio_coproc_desc_slot(struct sio_data *sio, struct sio_coproc_desc *desc)
+{
+	return (desc - sio->shmem_desc_base) * 4;
+}
+
+static enum dma_transfer_direction sio_chan_direction(int channo)
+{
+	/* Channel directions are fixed based on channel number */
+	return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+}
+
+static void sio_tx_free(struct virt_dma_desc *vd)
+{
+	struct sio_data *sio = to_sio_chan(vd->tx.chan)->host;
+	struct sio_tx *siotx = to_sio_tx(&vd->tx);
+	int i;
+
+	for (i = 0; i < siotx->nperiods; i++)
+		if (siotx->siodesc[i])
+			sio_free_desc(sio, siotx->siodesc[i]);
+	kfree(siotx);
+}
+
+static struct dma_async_tx_descriptor *sio_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags)
+{
+	struct sio_chan *siochan = to_sio_chan(chan);
+	struct sio_tx *siotx = NULL;
+	int i, nperiods = buf_len / period_len;
+
+	if (direction != sio_chan_direction(siochan->no))
+		return NULL;
+
+	siotx = kzalloc(struct_size(siotx, siodesc, nperiods), GFP_NOWAIT);
+	if (!siotx)
+		return NULL;
+
+	init_completion(&siotx->done);
+	siotx->period_len = period_len;
+	siotx->nperiods = nperiods;
+
+	for (i = 0; i < nperiods; i++) {
+		struct sio_coproc_desc *d;
+
+		siotx->siodesc[i] = d = sio_alloc_desc(siochan->host);
+		if (!d) {
+			siotx->vd.tx.chan = &siochan->vc.chan;
+			sio_tx_free(&siotx->vd);
+			return NULL;
+		}
+
+		d->flag = 1; /* not sure what's up with this */
+		d->iova = buf_addr + period_len * i;
+		d->size = period_len;
+	}
+	dma_wmb();
+
+	return vchan_tx_prep(&siochan->vc, &siotx->vd, flags);
+}
+
+static enum dma_status sio_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+				     struct dma_tx_state *txstate)
+{
+	struct sio_chan *siochan = to_sio_chan(chan);
+	struct virt_dma_desc *vd;
+	struct sio_tx *siotx;
+	enum dma_status ret;
+	unsigned long flags;
+	int periods_residue;
+	size_t residue;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	siotx = siochan->current_tx;
+
+	if (siotx && siotx->vd.tx.cookie == cookie) {
+		ret = DMA_IN_PROGRESS;
+		periods_residue = siotx->next - siotx->ninflight;
+		while (periods_residue < 0)
+			periods_residue += siotx->nperiods;
+		residue = (siotx->nperiods - periods_residue) * siotx->period_len;
+	} else {
+		ret = DMA_IN_PROGRESS;
+		residue = 0;
+		vd = vchan_find_desc(&siochan->vc, cookie);
+		if (vd) {
+			siotx = to_sio_tx(&vd->tx);
+			residue = siotx->period_len * siotx->nperiods;
+		}
+	}
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+	dma_set_residue(txstate, residue);
+
+	return ret;
+}
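Worked example of the residue computation: with nperiods = 4, period_len = 0x1000, next = 1 and ninflight = 3, periods_residue = 1 - 3 = -2, which wraps to 2, so the reported residue is (4 - 2) * 0x1000 = 0x2000, i.e. the remaining bytes counted from the oldest coprocessor descriptor still in flight.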
+
+static bool sio_fill_in_locked(struct sio_chan *siochan);
+
+static void sio_handle_issue_ack(struct sio_chan *siochan, void *cookie, bool ok)
+{
+	dma_cookie_t tx_cookie = (unsigned long) cookie;
+	unsigned long flags;
+	struct sio_tx *tx;
+
+	if (!ok) {
+		dev_err(siochan->host->dev, "nacked issue on chan %d\n", siochan->no);
+		return;
+	}
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	if (!siochan->current_tx || tx_cookie != siochan->current_tx->vd.tx.cookie ||
+			siochan->current_tx->terminated)
+		goto out;
+
+	tx = siochan->current_tx;
+	tx->next = (tx->next + 1) % tx->nperiods;
+	tx->ninflight++;
+	sio_fill_in_locked(siochan);
+
+out:
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+}
+
+static bool sio_fill_in_locked(struct sio_chan *siochan)
+{
+	struct sio_data *sio = siochan->host;
+	struct sio_tx *tx = siochan->current_tx;
+	struct sio_coproc_desc *d = tx->siodesc[tx->next];
+	int ret;
+
+	if (tx->ninflight >= SIO_MAX_NINFLIGHT || tx->terminated)
+		return false;
+
+	static_assert(sizeof(dma_cookie_t) <= sizeof(void *));
+	ret = sio_send_siomsg_atomic(sio, FIELD_PREP(SIOMSG_EP, siochan->no) |
+				     FIELD_PREP(SIOMSG_TYPE, MSG_ISSUE) |
+				     FIELD_PREP(SIOMSG_DATA, sio_coproc_desc_slot(sio, d)),
+				     sio_handle_issue_ack, (void *) (uintptr_t) tx->vd.tx.cookie);
+	if (ret < 0)
+		dev_err_ratelimited(sio->dev, "can't issue on chan %d ninflight %d: %d\n",
+				    siochan->no, tx->ninflight, ret);
+	return true;
+}
+
+static void sio_update_current_tx_locked(struct sio_chan *siochan)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&siochan->vc);
+
+	if (vd && !siochan->current_tx) {
+		list_del(&vd->node);
+		siochan->current_tx = to_sio_tx(&vd->tx);
+		sio_fill_in_locked(siochan);
+	}
+}
+
+static void sio_issue_pending(struct dma_chan *chan)
+{
+	struct sio_chan *siochan = to_sio_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	vchan_issue_pending(&siochan->vc);
+	sio_update_current_tx_locked(siochan);
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+}
+
+static int sio_terminate_all(struct dma_chan *chan)
+{
+	struct sio_chan *siochan = to_sio_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(to_free);
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	if (siochan->current_tx && !siochan->current_tx->terminated) {
+		dma_cookie_complete(&siochan->current_tx->vd.tx);
+		siochan->current_tx->terminated = true;
+		schedule_work(&siochan->terminate_wq);
+	}
+	vchan_get_all_descriptors(&siochan->vc, &to_free);
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+
+	vchan_dma_desc_free_list(&siochan->vc, &to_free);
+
+	return 0;
+}
+
+static void sio_terminate_work(struct work_struct *wq)
+{
+	struct sio_chan *siochan = container_of(wq, struct sio_chan, terminate_wq);
+	struct sio_tx *tx;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	tx = siochan->current_tx;
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+
+	if (WARN_ON(!tx))
+		return;
+
+	ret = sio_call(siochan->host, FIELD_PREP(SIOMSG_EP, siochan->no) |
+				      FIELD_PREP(SIOMSG_TYPE, MSG_TERMINATE));
+	if (ret < 0)
+		dev_err(siochan->host->dev, "terminate call on chan %d failed: %d\n",
+			siochan->no, ret);
+
+	ret = wait_for_completion_timeout(&tx->done, msecs_to_jiffies(500));
+	if (!ret)
+		dev_err(siochan->host->dev, "terminate descriptor wait timed out\n");
+
+	tasklet_kill(&siochan->vc.task);
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	WARN_ON(siochan->current_tx != tx);
+	siochan->current_tx = NULL;
+	sio_update_current_tx_locked(siochan);
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+
+	sio_tx_free(&tx->vd);
+}
+
+static void sio_synchronize(struct dma_chan *chan)
+{
+	struct sio_chan *siochan = to_sio_chan(chan);
+
+	flush_work(&siochan->terminate_wq);
+}
+
+static void sio_free_chan_resources(struct dma_chan *chan)
+{
+	sio_terminate_all(chan);
+	sio_synchronize(chan);
+	vchan_free_chan_resources(&to_sio_chan(chan)->vc);
+}
+
+static struct dma_chan *sio_dma_of_xlate(struct of_phandle_args *dma_spec,
+					 struct of_dma *ofdma)
+{
+	struct sio_data *sio = (struct sio_data *) ofdma->of_dma_data;
+	unsigned int index = dma_spec->args[0];
+
+	if (dma_spec->args_count != 1 || index >= sio->nchannels)
+		return ERR_PTR(-EINVAL);
+
+	return dma_get_slave_channel(&sio->channels[index].vc.chan);
+}
+
+static void sio_rtk_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
+{
+	struct sio_data *sio = cookie;
+
+	dev_err(sio->dev, "SIO down (crashed)");
+}
+
+static void sio_process_report(struct sio_chan *siochan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&siochan->vc.lock, flags);
+	if (siochan->current_tx) {
+		struct sio_tx *tx = siochan->current_tx;
+
+		if (tx->ninflight)
+			tx->ninflight--;
+		vchan_cyclic_callback(&tx->vd);
+		if (!sio_fill_in_locked(siochan) && !tx->ninflight)
+			complete(&tx->done);
+	}
+	spin_unlock_irqrestore(&siochan->vc.lock, flags);
+}
+
+static void sio_recv_msg(void *cookie, u8 ep, u64 msg)
+{
+	struct sio_data *sio = cookie;
+	struct sio_tagdata *tags = &sio->tags;
+	u32 data;
+	u8 type, tag, sioep;
+
+	if (ep != EP_SIO)
+		goto unknown;
+
+	data  = FIELD_GET(SIOMSG_DATA, msg);
+	// param = FIELD_GET(SIOMSG_PARAM, msg);
+	type  = FIELD_GET(SIOMSG_TYPE, msg);
+	tag   = FIELD_GET(SIOMSG_TAG, msg);
+	sioep = FIELD_GET(SIOMSG_EP, msg);
+
+	switch (type) {
+	case MSG_STARTED:
+		dev_info(sio->dev, "SIO protocol v%u\n", data);
+		type = MSG_ACK; /* Pretend this is an ACK */
+		fallthrough;
+	case MSG_ACK:
+	case MSG_NACK:
+		if (WARN_ON(tag >= SIO_NTAGS))
+			break;
+
+		if (tags->atomic[tag]) {
+			sio_ack_callback callback = tags->ack_callback[tag];
+
+			if (callback && !WARN_ON(sioep >= sio->nchannels))
+				callback(&sio->channels[sioep],
+					 tags->cookie[tag], type == MSG_ACK);
+			if (type == MSG_NACK)
+				dev_err(sio->dev, "got a NACK on channel %d\n", sioep);
+			sio_free_tag(sio, tag);
+		} else {
+			tags->acked[tag] = (type == MSG_ACK);
+			complete(&tags->completions[tag]);
+		}
+		break;
+
+	case MSG_REPORT:
+		if (WARN_ON(sioep >= sio->nchannels))
+			break;
+
+		sio_process_report(&sio->channels[sioep]);
+		break;
+
+	default:
+		goto unknown;
+	}
+	return;
+
+unknown:
+	dev_warn(sio->dev, "received unknown message: ep %x data %016llx\n",
+		 ep, msg);
+}
+
+static int _sio_send_siomsg(struct sio_data *sio, u64 msg, bool atomic,
+			    sio_ack_callback ack_callback, void *cookie)
+{
+	int tag, ret;
+
+	tag = sio_alloc_tag(sio);
+	if (tag < 0)
+		return tag;
+
+	if (atomic)
+		sio_set_tag_atomic(sio, tag, ack_callback, cookie);
+	else
+		reinit_completion(&sio->tags.completions[tag]);
+
+	msg &= ~SIOMSG_TAG;
+	msg |= FIELD_PREP(SIOMSG_TAG, tag);
+	ret = apple_rtkit_send_message(sio->rtk, EP_SIO, msg, NULL,
+				       atomic);
+	if (ret < 0) {
+		sio_free_tag(sio, tag);
+		return ret;
+	}
+
+	return tag;
+}
+
+static int sio_send_siomsg(struct sio_data *sio, u64 msg)
+{
+	return _sio_send_siomsg(sio, msg, false, NULL, NULL);
+}
+
+static int sio_send_siomsg_atomic(struct sio_data *sio, u64 msg,
+				  sio_ack_callback ack_callback,
+				  void *cookie)
+{
+	return _sio_send_siomsg(sio, msg, true, ack_callback, cookie);
+}
+
+static int sio_call(struct sio_data *sio, u64 msg)
+{
+	int tag, ret;
+
+	tag = sio_send_siomsg(sio, msg);
+	if (tag < 0)
+		return tag;
+
+	ret = wait_for_completion_timeout(&sio->tags.completions[tag],
+					  msecs_to_jiffies(SIO_CALL_TIMEOUT_MS));
+	if (!ret) {
+		dev_warn(sio->dev, "call %8llx timed out\n", msg);
+		sio_free_tag(sio, tag);
+		return -ETIME;
+	}
+
+	ret = sio->tags.acked[tag];
+	sio_free_tag(sio, tag);
+
+	return ret;
+}
+
+static const struct apple_rtkit_ops sio_rtkit_ops = {
+	.crashed = sio_rtk_crashed,
+	.recv_message = sio_recv_msg,
+};
+
+static int sio_device_config(struct dma_chan *chan,
+			     struct dma_slave_config *config)
+{
+	struct sio_chan *siochan = to_sio_chan(chan);
+	struct sio_data *sio = siochan->host;
+	bool is_tx = sio_chan_direction(siochan->no) == DMA_MEM_TO_DEV;
+	struct sio_shmem_chan_config *cfg_shmem = sio->shmem;
+	struct sio_shmem_chan_config cfg;
+	int ret;
+
+	switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		cfg.datashape = 0;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		cfg.datashape = 1;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		cfg.datashape = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cfg.timeout = 0;
+	cfg.fifo = 0x800;
+	cfg.limit = 0x800;
+	cfg.threshold = 0x800;
+
+	/*
+	 * Dmaengine prescribes we ought to apply the new configuration only
+	 * to newly-queued descriptors.
+	 *
+	 * To comply with dmaengine's interface we take the lazy path here:
+	 * we apply the configuration right away, we only allow the channel
+	 * to be configured once, which means subsequent calls to `device_config`
+	 * either return -EBUSY if the configuration differs, or they are
+	 * a no-op if the configuration is the same as the starting one.
+	 *
+	 * This is the reasonable thing to do given that these sio channels
+	 * are tied to fixed peripherals, and what's more given that the
+	 * only planned consumer of this dmaengine driver in the kernel is
+	 * displayport audio support, where the DMA configuration is fixed,
+	 * and no more than a single descriptor (a cyclic one) gets ever issued
+	 * at the same time.
+	 *
+	 * The code complexity cost of tracking to which descriptor
+	 * the configuration relates would be significant here, especially
+	 * since we need to do a non-atomic operation to apply it (a call to
+	 * the coprocessor) and dmaengine has its bunch of atomicity
+	 * restrictions. And this complexity would be for naught since it
+	 * doesn't even get exercised by the only planned consumer.
+	 */
+	if (siochan->configured && memcmp(&siochan->cfg, &cfg, sizeof(cfg)))
+		return -EBUSY;
+
+	*cfg_shmem = cfg;
+	dma_wmb();
+
+	ret = sio_call(sio, FIELD_PREP(SIOMSG_TYPE, MSG_CONFIGURE) |
+			    FIELD_PREP(SIOMSG_EP, siochan->no));
+
+	if (ret == 1)
+		ret = 0;
+	else if (ret == 0)
+		ret = -EINVAL;
+
+	if (ret == 0) {
+		siochan->configured = true;
+		siochan->cfg = cfg;
+	}
+
+	return ret;
+}
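A minimal sketch of the consumer-side call that ends up here, assuming a memory-to-device channel with a 32-bit peripheral register; the values are illustrative and `chan` is assumed to come from dma_request_chan():

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	ret = dmaengine_slave_config(chan, &cfg);
	/* A later call only succeeds if it requests the same datashape. */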
+
+static int sio_alloc_shmem(struct sio_data *sio)
+{
+	dma_addr_t iova;
+	int err;
+
+	sio->shmem = dma_alloc_coherent(sio->dev, SIO_SHMEM_SIZE,
+					&iova, GFP_KERNEL | __GFP_ZERO);
+	if (!sio->shmem)
+		return -ENOMEM;
+
+	sio->shmem_desc_base = (struct sio_coproc_desc *) (sio->shmem + 56);
+	sio->desc_allocated = devm_bitmap_zalloc(sio->dev, SIO_NO_DESC_SLOTS,
+						 GFP_KERNEL);
+	if (!sio->desc_allocated)
+		return -ENOMEM;
+
+	err = sio_call(sio, FIELD_PREP(SIOMSG_TYPE, MSG_SETUP) |
+			    FIELD_PREP(SIOMSG_PARAM, 1) |
+			    FIELD_PREP(SIOMSG_DATA, iova >> 12));
+	if (err != 1) {
+		if (err == 0)
+			err = -EINVAL;
+		return err;
+	}
+
+	err = sio_call(sio, FIELD_PREP(SIOMSG_TYPE, MSG_SETUP) |
+			    FIELD_PREP(SIOMSG_PARAM, 2) |
+			    FIELD_PREP(SIOMSG_DATA, SIO_SHMEM_SIZE));
+	if (err != 1) {
+		if (err == 0)
+			err = -EINVAL;
+		return err;
+	}
+
+	return 0;
+}
+
+static int sio_send_dt_params(struct sio_data *sio)
+{
+	struct device_node *np = sio->dev->of_node;
+	const char *propname = "apple,sio-firmware-params";
+	int nparams, err, i;
+
+	nparams = of_property_count_u32_elems(np, propname);
+	if (nparams < 0) {
+		err = nparams;
+		goto badprop;
+	}
+
+	for (i = 0; i < nparams / 2; i++) {
+		u32 key, val;
+
+		err = of_property_read_u32_index(np, propname, 2 * i, &key);
+		if (err)
+			goto badprop;
+		err = of_property_read_u32_index(np, propname, 2 * i + 1, &val);
+		if (err)
+			goto badprop;
+
+		err = sio_call(sio, FIELD_PREP(SIOMSG_TYPE, MSG_SETUP) |
+				    FIELD_PREP(SIOMSG_PARAM, key & 0xff) |
+				    FIELD_PREP(SIOMSG_EP, key >> 8) |
+				    FIELD_PREP(SIOMSG_DATA, val));
+		if (err < 1) {
+			if (err == 0)
+				err = -ENXIO;
+			return dev_err_probe(sio->dev, err, "sending SIO parameter %#x value %#x\n",
+					     key, val);
+		}
+	}
+
+	return 0;
+
+badprop:
+	return dev_err_probe(sio->dev, err, "failed to read '%s'\n", propname);
+}
+
+static int sio_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct sio_data *sio;
+	struct dma_device *dma;
+	u32 nchannels;
+	int err, i;
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "Failed to set DMA mask\n");
+
+	err = of_property_read_u32(np, "dma-channels", &nchannels);
+	if (err || nchannels > NCHANNELS_MAX)
+		return dev_err_probe(&pdev->dev, -EINVAL,
+				     "missing or invalid dma-channels property\n");
+
+	sio = devm_kzalloc(&pdev->dev, struct_size(sio, channels, nchannels), GFP_KERNEL);
+	if (!sio)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, sio);
+	sio->dev = &pdev->dev;
+	sio->nchannels = nchannels;
+
+	sio->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(sio->base))
+		return PTR_ERR(sio->base);
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	err = devm_pm_runtime_enable(&pdev->dev);
+	if (err < 0)
+		return dev_err_probe(&pdev->dev, err,
+				     "pm_runtime_enable failed: %d\n", err);
+
+	sio->rtk = devm_apple_rtkit_init(&pdev->dev, sio, NULL, 0, &sio_rtkit_ops);
+	if (IS_ERR(sio->rtk)) {
+		err = PTR_ERR(sio->rtk);
+		dev_err(&pdev->dev, "couldn't initialize rtkit\n");
+		goto rpm_put;
+	}
+	for (i = 1; i < SIO_NTAGS; i++)
+		init_completion(&sio->tags.completions[i]);
+
+	dma = &sio->dma;
+	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+
+	dma->dev = &pdev->dev;
+	dma->device_free_chan_resources = sio_free_chan_resources;
+	dma->device_tx_status = sio_tx_status;
+	dma->device_issue_pending = sio_issue_pending;
+	dma->device_terminate_all = sio_terminate_all;
+	dma->device_synchronize = sio_synchronize;
+	dma->device_prep_dma_cyclic = sio_prep_dma_cyclic;
+	dma->device_config = sio_device_config;
+
+	dma->directions = BIT(DMA_MEM_TO_DEV);
+	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+
+	INIT_LIST_HEAD(&dma->channels);
+	for (i = 0; i < nchannels; i++) {
+		struct sio_chan *siochan = &sio->channels[i];
+
+		siochan->host = sio;
+		siochan->no = i;
+		siochan->vc.desc_free = sio_tx_free;
+		INIT_WORK(&siochan->terminate_wq, sio_terminate_work);
+		vchan_init(&siochan->vc, dma);
+	}
+
+	writel(CPU_CONTROL_RUN, sio->base + REG_CPU_CONTROL);
+
+	err = apple_rtkit_boot(sio->rtk);
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "SIO did not boot\n");
+
+	err = apple_rtkit_start_ep(sio->rtk, EP_SIO);
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "starting SIO endpoint\n");
+
+	err = sio_call(sio, FIELD_PREP(SIOMSG_TYPE, MSG_START));
+	if (err < 1) {
+		if (err == 0)
+			err = -ENXIO;
+		return dev_err_probe(&pdev->dev, err, "starting SIO service\n");
+	}
+
+	err = sio_send_dt_params(sio);
+	if (err < 0)
+		return dev_err_probe(&pdev->dev, err, "failed to send boot-up parameters\n");
+
+	err = sio_alloc_shmem(sio);
+	if (err < 0)
+		return err;
+
+	err = dma_async_device_register(&sio->dma);
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+
+	err = of_dma_controller_register(pdev->dev.of_node, sio_dma_of_xlate, sio);
+	if (err) {
+		dma_async_device_unregister(&sio->dma);
+		return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+	}
+
+rpm_put:
+	pm_runtime_put(&pdev->dev);
+
+	return err;
+}
+
+static void sio_remove(struct platform_device *pdev)
+{
+	struct sio_data *sio = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&sio->dma);
+}
+
+static const struct of_device_id sio_of_match[] = {
+	{ .compatible = "apple,sio", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sio_of_match);
+
+static __maybe_unused int sio_suspend(struct device *dev)
+{
+	/*
+	 * TODO: SIO coproc sleep state
+	 */
+	return 0;
+}
+
+static __maybe_unused int sio_resume(struct device *dev)
+{
+	return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(sio_pm_ops, sio_suspend, sio_resume, NULL);
+
+static struct platform_driver apple_sio_driver = {
+	.driver = {
+		.name = "apple-sio",
+		.of_match_table = sio_of_match,
+		.pm             = pm_ptr(&sio_pm_ops),
+	},
+	.probe = sio_probe,
+	.remove = sio_remove,
+};
+module_platform_driver(apple_sio_driver);
+
+MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Driver for SIO coprocessor on Apple SoCs");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 98b4d1633b258b..da151e12fdbd8a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1439,6 +1439,17 @@ config GPIO_LP87565
 	  This driver can also be built as a module. If so, the module will be
 	  called gpio-lp87565.
 
+config GPIO_MACSMC
+	tristate "Apple Mac SMC GPIO"
+	depends on APPLE_SMC
+	default ARCH_APPLE
+	help
+	  Support for GPIOs controlled by the SMC microcontroller on Apple Mac
+	  systems.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called gpio-macsmc.
+
 config GPIO_MADERA
 	tristate "Cirrus Logic Madera class codecs"
 	depends on PINCTRL_MADERA
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index af3ba4d81b5838..aeee6f504ea96a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_GPIO_LP873X)		+= gpio-lp873x.o
 obj-$(CONFIG_GPIO_LP87565)		+= gpio-lp87565.o
 obj-$(CONFIG_GPIO_LPC18XX)		+= gpio-lpc18xx.o
 obj-$(CONFIG_GPIO_LPC32XX)		+= gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_MACSMC)		+= gpio-macsmc.o
 obj-$(CONFIG_GPIO_MADERA)		+= gpio-madera.o
 obj-$(CONFIG_GPIO_MAX3191X)		+= gpio-max3191x.o
 obj-$(CONFIG_GPIO_MAX7300)		+= gpio-max7300.o
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
new file mode 100644
index 00000000000000..98fc74af69d4c1
--- /dev/null
+++ b/drivers/gpio/gpio-macsmc.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC GPIO driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver implements basic SMC PMU GPIO support that can read inputs
+ * and write outputs. Mode changes and IRQ config are not yet implemented.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define MAX_GPIO 64
+
+/*
+ * Commands 0-6 are, presumably, the intended API.
+ * Command 0xff lets you get/set the pin configuration in detail directly,
+ * but the bit meanings seem not to be stable between devices/PMU hardware
+ * versions.
+ *
+ * We're going to try to make do with the low commands for now.
+ * We don't implement pin mode changes at this time.
+ */
+
+#define CMD_ACTION	(0 << 24)
+#define CMD_OUTPUT	(1 << 24)
+#define CMD_INPUT	(2 << 24)
+#define CMD_PINMODE	(3 << 24)
+#define CMD_IRQ_ENABLE	(4 << 24)
+#define CMD_IRQ_ACK	(5 << 24)
+#define CMD_IRQ_MODE	(6 << 24)
+#define CMD_CONFIG	(0xff << 24)
+
+#define MODE_INPUT	0
+#define MODE_OUTPUT	1
+#define MODE_VALUE_0	0
+#define MODE_VALUE_1	2
+
+#define IRQ_MODE_HIGH		0
+#define IRQ_MODE_LOW		1
+#define IRQ_MODE_RISING		2
+#define IRQ_MODE_FALLING	3
+#define IRQ_MODE_BOTH		4
+
+#define CONFIG_MASK	GENMASK(23, 16)
+#define CONFIG_VAL	GENMASK(7, 0)
+
+#define CONFIG_OUTMODE	GENMASK(7, 6)
+#define CONFIG_IRQMODE	GENMASK(5, 3)
+#define CONFIG_PULLDOWN	BIT(2)
+#define CONFIG_PULLUP	BIT(1)
+#define CONFIG_OUTVAL	BIT(0)
+
+/*
+ * output modes seem to differ depending on the PMU in use... ?
+ * j274 / M1 (Sera PMU):
+ *   0 = input
+ *   1 = output
+ *   2 = open drain
+ *   3 = disable
+ * j314 / M1Pro (Maverick PMU):
+ *   0 = input
+ *   1 = open drain
+ *   2 = output
+ *   3 = ?
+ */
+
+#define SMC_EV_GPIO 0x7202
+
+struct macsmc_gpio {
+	struct device *dev;
+	struct apple_smc *smc;
+	struct gpio_chip gc;
+	struct irq_chip ic;
+	struct notifier_block nb;
+
+	struct mutex irq_mutex;
+	DECLARE_BITMAP(irq_supported, MAX_GPIO);
+	DECLARE_BITMAP(irq_enable_shadow, MAX_GPIO);
+	DECLARE_BITMAP(irq_enable, MAX_GPIO);
+	u32 irq_mode_shadow[MAX_GPIO];
+	u32 irq_mode[MAX_GPIO];
+
+	int first_index;
+};
+
+static int macsmc_gpio_nr(smc_key key)
+{
+	int low = hex_to_bin(key & 0xff);
+	int high = hex_to_bin((key >> 8) & 0xff);
+
+	if (low < 0 || high < 0)
+		return -1;
+
+	return low | (high << 4);
+}
+
+static int macsmc_gpio_key(unsigned int offset)
+{
+	return _SMC_KEY("gP\0\0") | (hex_asc_hi(offset) << 8) | hex_asc_lo(offset);
+}
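Worked example of the key scheme (the pin number is arbitrary): offset 0x1f maps to the SMC key "gP1f", and macsmc_gpio_nr() performs the reverse mapping when scanning the key table. Accessing that pin then boils down to, for instance:

	apple_smc_write_u32(smc, macsmc_gpio_key(0x1f), CMD_OUTPUT | 1);  /* drive high */
	apple_smc_rw_u32(smc, macsmc_gpio_key(0x1f), CMD_INPUT, &val);    /* read back */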
+
+static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+	smc_key key = macsmc_gpio_key(offset);
+	u32 val;
+	int ret;
+
+	/* First try reading the explicit pin mode register */
+	ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val);
+	if (!ret)
+		return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+
+	/*
+	 * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode.
+	 * Fall back to reading IRQ mode, which will only succeed for inputs.
+	 */
+	ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+	return (!ret) ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+	smc_key key = macsmc_gpio_key(offset);
+	u32 val;
+	int ret;
+
+	ret = macsmc_gpio_get_direction(gc, offset);
+	if (ret < 0)
+		return ret;
+
+	if (ret == GPIO_LINE_DIRECTION_OUT)
+		ret = apple_smc_rw_u32(smcgp->smc, key, CMD_OUTPUT, &val);
+	else
+		ret = apple_smc_rw_u32(smcgp->smc, key, CMD_INPUT, &val);
+
+	if (ret < 0)
+		return ret;
+
+	return val ? 1 : 0;
+}
+
+static void macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+	smc_key key = macsmc_gpio_key(offset);
+	int ret;
+
+	value |= CMD_OUTPUT;
+	ret = apple_smc_write_u32(smcgp->smc, key, CMD_OUTPUT | value);
+	if (ret < 0)
+		dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n", &key, value);
+}
+
+static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc,
+				       unsigned long *valid_mask, unsigned int ngpios)
+{
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+	int count = apple_smc_get_key_count(smcgp->smc) - smcgp->first_index;
+	int i;
+
+	if (count > MAX_GPIO)
+		count = MAX_GPIO;
+
+	bitmap_zero(valid_mask, ngpios);
+
+	for (i = 0; i < count; i++) {
+		smc_key key;
+		int gpio_nr;
+		u32 val;
+		int ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key);
+
+		if (ret < 0)
+			return ret;
+
+		if (key > SMC_KEY(gPff))
+			break;
+
+		gpio_nr = macsmc_gpio_nr(key);
+		if (gpio_nr < 0 || gpio_nr >= MAX_GPIO) {
+			dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key);
+			continue;
+		}
+
+		set_bit(gpio_nr, valid_mask);
+
+		/* Check for IRQ support */
+		ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+		if (!ret)
+			set_bit(gpio_nr, smcgp->irq_supported);
+	}
+
+	return 0;
+}
+
+static int macsmc_gpio_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct macsmc_gpio *smcgp = container_of(nb, struct macsmc_gpio, nb);
+	u16 type = event >> 16;
+	u8 offset = (event >> 8) & 0xff;
+	smc_key key = macsmc_gpio_key(offset);
+	unsigned long flags;
+
+	if (type != SMC_EV_GPIO)
+		return NOTIFY_DONE;
+
+	if (offset >= MAX_GPIO) {
+		dev_err(smcgp->dev, "GPIO event index %d out of range\n", offset);
+		return NOTIFY_BAD;
+	}
+
+	local_irq_save(flags);
+	generic_handle_irq_desc(irq_resolve_mapping(smcgp->gc.irq.domain, offset));
+	local_irq_restore(flags);
+
+	if (apple_smc_write_u32(smcgp->smc, key, CMD_IRQ_ACK | 1) < 0)
+		dev_err(smcgp->dev, "GPIO IRQ ack failed for %p4ch\n", &key);
+
+	return NOTIFY_OK;
+}
+
+static void macsmc_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+
+	set_bit(irqd_to_hwirq(d), smcgp->irq_enable_shadow);
+}
+
+static void macsmc_gpio_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+
+	clear_bit(irqd_to_hwirq(d), smcgp->irq_enable_shadow);
+}
+
+static int macsmc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+	int offset = irqd_to_hwirq(d);
+	u32 mode;
+
+	if (!test_bit(offset, smcgp->irq_supported))
+		return -EINVAL;
+
+	switch (type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		mode = IRQ_MODE_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		mode = IRQ_MODE_LOW;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		mode = IRQ_MODE_RISING;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		mode = IRQ_MODE_FALLING;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		mode = IRQ_MODE_BOTH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	smcgp->irq_mode_shadow[offset] = mode;
+	return 0;
+}
+
+static void macsmc_gpio_irq_bus_lock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+
+	mutex_lock(&smcgp->irq_mutex);
+}
+
+static void macsmc_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+	smc_key key = macsmc_gpio_key(irqd_to_hwirq(d));
+	int offset = irqd_to_hwirq(d);
+	bool val;
+
+	if (smcgp->irq_mode_shadow[offset] != smcgp->irq_mode[offset]) {
+		u32 cmd = CMD_IRQ_MODE | smcgp->irq_mode_shadow[offset];
+		if (apple_smc_write_u32(smcgp->smc, key, cmd) < 0)
+			dev_err(smcgp->dev, "GPIO IRQ config failed for %p4ch = 0x%x\n", &key, cmd);
+		else
+			smcgp->irq_mode_shadow[offset] = smcgp->irq_mode[offset];
+	}
+
+	val = test_bit(offset, smcgp->irq_enable_shadow);
+	if (test_bit(offset, smcgp->irq_enable) != val) {
+		if (apple_smc_write_u32(smcgp->smc, key, CMD_IRQ_ENABLE | val) < 0)
+			dev_err(smcgp->dev, "GPIO IRQ en/disable failed for %p4ch\n", &key);
+		else
+			change_bit(offset, smcgp->irq_enable);
+	}
+
+	mutex_unlock(&smcgp->irq_mutex);
+}
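Because SMC writes can sleep, the hardware updates are deferred from irq_set_type()/irq_unmask() to the irq_bus_sync_unlock() stage above; the genirq core brackets those calls with the bus lock so the SMC traffic runs in sleepable context. From a consumer's point of view nothing special is needed, e.g. (the handler and names are hypothetical, the calls are standard API):

	int irq = gpiod_to_irq(gpiod);	/* gpiod obtained earlier via gpiod_get() */

	ret = request_threaded_irq(irq, NULL, example_smc_pin_handler,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "example-smc-pin", priv);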
+
+static int macsmc_gpio_probe(struct platform_device *pdev)
+{
+	struct macsmc_gpio *smcgp;
+	struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+	smc_key key;
+	int ret;
+
+	smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL);
+	if (!smcgp)
+		return -ENOMEM;
+
+	pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "gpio");
+
+	smcgp->dev = &pdev->dev;
+	smcgp->smc = smc;
+	smcgp->first_index = apple_smc_find_first_key_index(smc, SMC_KEY(gP00));
+
+	if (smcgp->first_index >= apple_smc_get_key_count(smc))
+		return -ENODEV;
+
+	ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key);
+	if (ret < 0)
+		return ret;
+
+	if (key > macsmc_gpio_key(MAX_GPIO - 1))
+		return -ENODEV;
+
+	dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key);
+
+	smcgp->gc.label = "macsmc-pmu-gpio";
+	smcgp->gc.owner = THIS_MODULE;
+	smcgp->gc.get = macsmc_gpio_get;
+	smcgp->gc.set = macsmc_gpio_set;
+	smcgp->gc.get_direction = macsmc_gpio_get_direction;
+	smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
+	smcgp->gc.can_sleep = true;
+	smcgp->gc.ngpio = MAX_GPIO;
+	smcgp->gc.base = -1;
+	smcgp->gc.parent = &pdev->dev;
+
+	smcgp->ic.name = "macsmc-pmu-gpio";
+	smcgp->ic.irq_mask = macsmc_gpio_irq_disable;
+	smcgp->ic.irq_unmask = macsmc_gpio_irq_enable;
+	smcgp->ic.irq_set_type = macsmc_gpio_irq_set_type;
+	smcgp->ic.irq_bus_lock = macsmc_gpio_irq_bus_lock;
+	smcgp->ic.irq_bus_sync_unlock = macsmc_gpio_irq_bus_sync_unlock;
+	smcgp->ic.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
+
+	smcgp->gc.irq.chip = &smcgp->ic;
+	smcgp->gc.irq.parent_handler = NULL;
+	smcgp->gc.irq.num_parents = 0;
+	smcgp->gc.irq.parents = NULL;
+	smcgp->gc.irq.default_type = IRQ_TYPE_NONE;
+	smcgp->gc.irq.handler = handle_simple_irq;
+
+	mutex_init(&smcgp->irq_mutex);
+
+	smcgp->nb.notifier_call = macsmc_gpio_event;
+	apple_smc_register_notifier(smc, &smcgp->nb);
+
+	return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp);
+}
+
+static struct platform_driver macsmc_gpio_driver = {
+	.driver = {
+		.name = "macsmc-gpio",
+	},
+	.probe = macsmc_gpio_probe,
+};
+module_platform_driver(macsmc_gpio_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC GPIO driver");
+MODULE_ALIAS("platform:macsmc-gpio");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bd228dc77e99b4..a0553d2c7c0d7d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -353,6 +353,8 @@ config DRM_VGEM
 
 source "drivers/gpu/drm/vkms/Kconfig"
 
+source "drivers/gpu/drm/asahi/Kconfig"
+
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/rockchip/Kconfig"
@@ -441,6 +443,8 @@ source "drivers/gpu/drm/mcde/Kconfig"
 
 source "drivers/gpu/drm/tidss/Kconfig"
 
+source "drivers/gpu/drm/adp/Kconfig"
+
 source "drivers/gpu/drm/xlnx/Kconfig"
 
 source "drivers/gpu/drm/gud/Kconfig"
@@ -449,6 +453,8 @@ source "drivers/gpu/drm/solomon/Kconfig"
 
 source "drivers/gpu/drm/sprd/Kconfig"
 
+source "drivers/gpu/drm/apple/Kconfig"
+
 source "drivers/gpu/drm/imagination/Kconfig"
 
 config DRM_HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 19fb370fbc5677..937a03efbff98f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -208,11 +208,13 @@ obj-y			+= mxsfb/
 obj-y			+= tiny/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_ADP) += adp/
 obj-$(CONFIG_DRM_XEN) += xen/
 obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
 obj-$(CONFIG_DRM_LIMA)  += lima/
 obj-$(CONFIG_DRM_PANFROST) += panfrost/
 obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_APPLE) += apple/
 obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
 obj-$(CONFIG_DRM_MCDE) += mcde/
 obj-$(CONFIG_DRM_TIDSS) += tidss/
@@ -223,3 +225,4 @@ obj-y			+= solomon/
 obj-$(CONFIG_DRM_SPRD) += sprd/
 obj-$(CONFIG_DRM_LOONGSON) += loongson/
 obj-$(CONFIG_DRM_POWERVR) += imagination/
+obj-$(CONFIG_DRM_ASAHI) += asahi/
diff --git a/drivers/gpu/drm/adp/Kconfig b/drivers/gpu/drm/adp/Kconfig
new file mode 100644
index 00000000000000..9fcc27eb200dbc
--- /dev/null
+++ b/drivers/gpu/drm/adp/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_ADP
+	tristate "DRM Support for pre-DCP Apple display controllers"
+	depends on DRM && OF && ARM64
+	depends on ARCH_APPLE || COMPILE_TEST
+	select DRM_KMS_HELPER
+	select DRM_BRIDGE_CONNECTOR
+	select DRM_DISPLAY_HELPER
+	select DRM_KMS_DMA_HELPER
+	select DRM_GEM_DMA_HELPER
+	select DRM_PANEL_BRIDGE
+	select VIDEOMODE_HELPERS
+	select DRM_MIPI_DSI
+	help
+	  Choose this option if you have an Apple Arm laptop with a touchbar.
+
+	  If M is selected, this module will be called adpdrm.
diff --git a/drivers/gpu/drm/adp/Makefile b/drivers/gpu/drm/adp/Makefile
new file mode 100644
index 00000000000000..8e7b618edd3559
--- /dev/null
+++ b/drivers/gpu/drm/adp/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+adpdrm-y := adp_drv.o
+adpdrm-mipi-y := adp-mipi.o
+obj-$(CONFIG_DRM_ADP) += adpdrm.o adpdrm-mipi.o
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
new file mode 100644
index 00000000000000..ad80542b60ed6d
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define DSI_GEN_HDR 0x6c
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_RD_CMD_BUSY BIT(6)
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct adp_mipi_drv_private {
+	struct mipi_dsi_host dsi;
+	struct drm_bridge bridge;
+	struct drm_bridge *next_bridge;
+	void __iomem *mipi;
+};
+
+#define mipi_to_adp(x) container_of(x, struct adp_mipi_drv_private, dsi)
+
+static int adp_dsi_gen_pkt_hdr_write(struct adp_mipi_drv_private *adp, u32 hdr_val)
+{
+	int ret;
+	u32 val, mask;
+
+	ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+				 val, !(val & GEN_CMD_FULL), 1000,
+				 CMD_PKT_STATUS_TIMEOUT_US);
+	if (ret) {
+		dev_err(adp->dsi.dev, "failed to get available command FIFO\n");
+		return ret;
+	}
+
+	writel(hdr_val, adp->mipi + DSI_GEN_HDR);
+
+	mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+	ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+				 val, (val & mask) == mask,
+				 1000, CMD_PKT_STATUS_TIMEOUT_US);
+	if (ret) {
+		dev_err(adp->dsi.dev, "failed to write command FIFO\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int adp_dsi_write(struct adp_mipi_drv_private *adp,
+			 const struct mipi_dsi_packet *packet)
+{
+	const u8 *tx_buf = packet->payload;
+	int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
+	__le32 word;
+	u32 val;
+
+	while (len) {
+		if (len < pld_data_bytes) {
+			word = 0;
+			memcpy(&word, tx_buf, len);
+			writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+			len = 0;
+		} else {
+			memcpy(&word, tx_buf, pld_data_bytes);
+			writel(le32_to_cpu(word), adp->mipi + DSI_GEN_PLD_DATA);
+			tx_buf += pld_data_bytes;
+			len -= pld_data_bytes;
+		}
+
+		ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+					 val, !(val & GEN_PLD_W_FULL), 1000,
+					 CMD_PKT_STATUS_TIMEOUT_US);
+		if (ret) {
+			dev_err(adp->dsi.dev,
+				"failed to get available write payload FIFO\n");
+			return ret;
+		}
+	}
+
+	word = 0;
+	memcpy(&word, packet->header, sizeof(packet->header));
+	return adp_dsi_gen_pkt_hdr_write(adp, le32_to_cpu(word));
+}
+
+static int adp_dsi_read(struct adp_mipi_drv_private *adp,
+			const struct mipi_dsi_msg *msg)
+{
+	int i, j, ret, len = msg->rx_len;
+	u8 *buf = msg->rx_buf;
+	u32 val;
+
+	/* Wait end of the read operation */
+	ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+				 val, !(val & GEN_RD_CMD_BUSY),
+				 1000, CMD_PKT_STATUS_TIMEOUT_US);
+	if (ret) {
+		dev_err(adp->dsi.dev, "Timeout during read operation\n");
+		return ret;
+	}
+
+	for (i = 0; i < len; i += 4) {
+		/* Read fifo must not be empty before all bytes are read */
+		ret = readl_poll_timeout(adp->mipi + DSI_CMD_PKT_STATUS,
+					 val, !(val & GEN_PLD_R_EMPTY),
+					 1000, CMD_PKT_STATUS_TIMEOUT_US);
+		if (ret) {
+			dev_err(adp->dsi.dev, "Read payload FIFO is empty\n");
+			return ret;
+		}
+
+		val = readl(adp->mipi + DSI_GEN_PLD_DATA);
+		for (j = 0; j < 4 && j + i < len; j++)
+			buf[i + j] = val >> (8 * j);
+	}
+
+	return ret;
+}
+
+static ssize_t adp_dsi_host_transfer(struct mipi_dsi_host *host,
+				     const struct mipi_dsi_msg *msg)
+{
+	struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+	struct mipi_dsi_packet packet;
+	int ret, nb_bytes;
+
+	ret = mipi_dsi_create_packet(&packet, msg);
+	if (ret) {
+		dev_err(adp->dsi.dev, "failed to create packet: %d\n", ret);
+		return ret;
+	}
+
+	ret = adp_dsi_write(adp, &packet);
+	if (ret)
+		return ret;
+
+	if (msg->rx_buf && msg->rx_len) {
+		ret = adp_dsi_read(adp, msg);
+		if (ret)
+			return ret;
+		nb_bytes = msg->rx_len;
+	} else {
+		nb_bytes = packet.size;
+	}
+
+	return nb_bytes;
+}
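+
+/*
+ * Illustrative sketch only, not part of this driver: a downstream DSI device
+ * such as the touchbar panel reaches adp_dsi_host_transfer() through the
+ * generic helpers in drm_mipi_dsi.h, for example
+ *
+ *	mipi_dsi_dcs_set_display_on(dsi);
+ *
+ * where dsi is that device's struct mipi_dsi_device. The helper builds a
+ * struct mipi_dsi_msg and invokes this host's .transfer op, which pushes the
+ * payload through DSI_GEN_PLD_DATA and the packet header through DSI_GEN_HDR.
+ */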
+
+static int adp_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	return 0;
+}
+
+static void adp_dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops adp_dsi_component_ops = {
+	.bind	= adp_dsi_bind,
+	.unbind	= adp_dsi_unbind,
+};
+
+static int adp_dsi_host_attach(struct mipi_dsi_host *host,
+			       struct mipi_dsi_device *dev)
+{
+	struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+	struct drm_bridge *next;
+	int ret;
+
+	next = devm_drm_of_get_bridge(adp->dsi.dev, adp->dsi.dev->of_node, 1, 0);
+	if (IS_ERR(next))
+		return PTR_ERR(next);
+
+	adp->next_bridge = next;
+
+	drm_bridge_add(&adp->bridge);
+
+	ret = component_add(host->dev, &adp_dsi_component_ops);
+	if (ret) {
+		pr_err("failed to add dsi_host component: %d\n", ret);
+		drm_bridge_remove(&adp->bridge);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int adp_dsi_host_detach(struct mipi_dsi_host *host,
+			       struct mipi_dsi_device *dev)
+{
+	struct adp_mipi_drv_private *adp = mipi_to_adp(host);
+
+	component_del(host->dev, &adp_dsi_component_ops);
+	drm_bridge_remove(&adp->bridge);
+	return 0;
+}
+
+static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
+	.transfer = adp_dsi_host_transfer,
+	.attach = adp_dsi_host_attach,
+	.detach = adp_dsi_host_detach,
+};
+
+static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
+{
+	struct adp_mipi_drv_private *adp =
+		container_of(bridge, struct adp_mipi_drv_private, bridge);
+
+	return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+}
+
+static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
+	.attach	= adp_dsi_bridge_attach,
+};
+
+static int adp_mipi_probe(struct platform_device *pdev)
+{
+	struct adp_mipi_drv_private *adp;
+
+	adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
+	if (!adp)
+		return -ENOMEM;
+
+	adp->mipi = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(adp->mipi)) {
+		dev_err(&pdev->dev, "failed to map mipi mmio");
+		return PTR_ERR(adp->mipi);
+	}
+
+	adp->dsi.dev = &pdev->dev;
+	adp->dsi.ops = &adp_dsi_host_ops;
+	adp->bridge.funcs = &adp_dsi_bridge_funcs;
+	adp->bridge.of_node = pdev->dev.of_node;
+	adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
+	dev_set_drvdata(&pdev->dev, adp);
+	return mipi_dsi_host_register(&adp->dsi);
+}
+
+static void adp_mipi_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct adp_mipi_drv_private *adp = dev_get_drvdata(dev);
+
+	mipi_dsi_host_unregister(&adp->dsi);
+}
+
+static const struct of_device_id adp_mipi_of_match[] = {
+	{ .compatible = "apple,h7-display-pipe-mipi", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, adp_mipi_of_match);
+
+static struct platform_driver adp_mipi_platform_driver = {
+	.driver = {
+		.name = "adp-mipi",
+		.of_match_table = adp_mipi_of_match,
+	},
+	.probe = adp_mipi_probe,
+	.remove = adp_mipi_remove,
+};
+
+module_platform_driver(adp_mipi_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe MIPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
new file mode 100644
index 00000000000000..6a131ab90c0ebd
--- /dev/null
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#define ADP_INT_STATUS 0x34
+#define ADP_INT_STATUS_INT_MASK 0x7
+#define ADP_INT_STATUS_VBLANK 0x1
+#define ADP_CTRL 0x100
+#define ADP_CTRL_VBLANK_ON 0x12
+#define ADP_CTRL_FIFO_ON 0x601
+#define ADP_SCREEN_SIZE 0x0c
+#define ADP_SCREEN_HSIZE GENMASK(15, 0)
+#define ADP_SCREEN_VSIZE GENMASK(31, 16)
+
+#define ADBE_FIFO 0x10c0
+#define ADBE_FIFO_SYNC 0xc0000000
+
+#define ADBE_BLEND_BYPASS 0x2020
+#define ADBE_BLEND_EN1 0x2028
+#define ADBE_BLEND_EN2 0x2074
+#define ADBE_BLEND_EN3 0x202c
+#define ADBE_BLEND_EN4 0x2034
+#define ADBE_MASK_BUF 0x2200
+
+#define ADBE_SRC_START 0x4040
+#define ADBE_SRC_SIZE 0x4048
+#define ADBE_DST_START 0x4050
+#define ADBE_DST_SIZE 0x4054
+#define ADBE_STRIDE 0x4038
+#define ADBE_FB_BASE 0x4030
+
+#define ADBE_LAYER_EN1 0x4020
+#define ADBE_LAYER_EN2 0x4068
+#define ADBE_LAYER_EN3 0x40b4
+#define ADBE_LAYER_EN4 0x40f4
+#define ADBE_SCALE_CTL 0x40ac
+#define ADBE_SCALE_CTL_BYPASS 0x100000
+
+#define ADBE_LAYER_CTL 0x1038
+#define ADBE_LAYER_CTL_ENABLE 0x10000
+
+#define ADBE_PIX_FMT 0x402c
+#define ADBE_PIX_FMT_XRGB32 0x53e4001
+
+static int adp_open(struct inode *inode, struct file *filp)
+{
+	/*
+	 * The modesetting driver does not check the non-desktop connector
+	 * property and keeps the device open and locked. If the touchbar daemon
+	 * opens the device first, modesetting breaks the whole X session.
+	 * Simply refuse to open the device for X11 server processes as a
+	 * workaround.
+	 */
+	if (current->comm[0] == 'X')
+		return -EBUSY;
+
+	return drm_open(inode, filp);
+}
+
+static const struct file_operations adp_fops = {
+	.owner          = THIS_MODULE,
+	.open           = adp_open,
+	.release        = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.compat_ioctl   = drm_compat_ioctl,
+	.poll           = drm_poll,
+	.read           = drm_read,
+	.llseek         = noop_llseek,
+	.mmap           = drm_gem_mmap,
+	.fop_flags      = FOP_UNSIGNED_OFFSET,
+	DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+};
+
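+/*
+ * Note (illustration only): dumb buffer heights are padded to a multiple of
+ * 64 lines before the DMA helper computes pitch and size, e.g. a request for
+ * a 60-line buffer is allocated as ALIGN(60, 64) == 64 lines, presumably to
+ * match the display hardware's alignment requirements.
+ */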
+static int adp_drm_gem_dumb_create(struct drm_file *file_priv,
+					struct drm_device *drm,
+					struct drm_mode_create_dumb *args)
+{
+	args->height = ALIGN(args->height, 64);
+	args->size = args->pitch * args->height;
+
+	return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver adp_driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+	.fops = &adp_fops,
+	DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(adp_drm_gem_dumb_create),
+	.name = "adp",
+	.desc = "Apple Display Pipe DRM Driver",
+	.major = 0,
+	.minor = 1,
+};
+
+struct adp_drv_private {
+	struct drm_device drm;
+	struct drm_crtc crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_bridge *next_bridge;
+	void __iomem *be;
+	void __iomem *fe;
+	u32 *mask_buf;
+	u64 mask_buf_size;
+	dma_addr_t mask_iova;
+	int be_irq;
+	int fe_irq;
+	struct drm_pending_vblank_event *event;
+};
+
+#define to_adp(x) container_of(x, struct adp_drv_private, drm)
+#define crtc_to_adp(x) container_of(x, struct adp_drv_private, crtc)
+
+static int adp_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_atomic_state *state)
+{
+	struct drm_plane_state *new_plane_state;
+	struct drm_crtc_state *crtc_state;
+
+	new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+	if (!new_plane_state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	return drm_atomic_helper_check_plane_state(new_plane_state,
+						   crtc_state,
+						   DRM_PLANE_NO_SCALING,
+						   DRM_PLANE_NO_SCALING,
+						   true, true);
+}
+
+static void adp_plane_atomic_update(struct drm_plane *plane,
+				    struct drm_atomic_state *state)
+{
+	struct adp_drv_private *adp;
+	struct drm_rect src_rect;
+	struct drm_gem_dma_object *obj;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+	u32 src_pos, src_size, dst_pos, dst_size;
+
+	if (!plane || !new_state)
+		return;
+
+	fb = new_state->fb;
+	if (!fb)
+		return;
+	adp = to_adp(plane->dev);
+
+	drm_rect_fp_to_int(&src_rect, &new_state->src);
+	src_pos = src_rect.x1 << 16 | src_rect.y1;
+	dst_pos = new_state->dst.x1 << 16 | new_state->dst.y1;
+	src_size = drm_rect_width(&src_rect) << 16 | drm_rect_height(&src_rect);
+	dst_size = drm_rect_width(&new_state->dst) << 16 |
+		drm_rect_height(&new_state->dst);
+	writel(src_pos, adp->be + ADBE_SRC_START);
+	writel(src_size, adp->be + ADBE_SRC_SIZE);
+	writel(dst_pos, adp->be + ADBE_DST_START);
+	writel(dst_size, adp->be + ADBE_DST_SIZE);
+	writel(fb->pitches[0], adp->be + ADBE_STRIDE);
+	obj = drm_fb_dma_get_gem_obj(fb, 0);
+	if (obj)
+		writel(obj->dma_addr + fb->offsets[0], adp->be + ADBE_FB_BASE);
+
+	writel(BIT(0), adp->be + ADBE_LAYER_EN1);
+	writel(BIT(0), adp->be + ADBE_LAYER_EN2);
+	writel(BIT(0), adp->be + ADBE_LAYER_EN3);
+	writel(BIT(0), adp->be + ADBE_LAYER_EN4);
+	writel(ADBE_SCALE_CTL_BYPASS, adp->be + ADBE_SCALE_CTL);
+	writel(ADBE_LAYER_CTL_ENABLE | BIT(0), adp->be + ADBE_LAYER_CTL);
+	writel(ADBE_PIX_FMT_XRGB32, adp->be + ADBE_PIX_FMT);
+}
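+
+/*
+ * Worked example (illustration only, the exact panel geometry is hardware
+ * dependent): a full 2008x64 plane at (0, 0) programs ADBE_SRC_START and
+ * ADBE_DST_START with 0 and ADBE_SRC_SIZE/ADBE_DST_SIZE with
+ * (2008 << 16) | 64.
+ */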
+
+static void adp_plane_atomic_disable(struct drm_plane *plane,
+				     struct drm_atomic_state *state)
+{
+	struct adp_drv_private *adp = to_adp(plane->dev);
+
+	writel(0x0, adp->be + ADBE_LAYER_EN1);
+	writel(0x0, adp->be + ADBE_LAYER_EN2);
+	writel(0x0, adp->be + ADBE_LAYER_EN3);
+	writel(0x0, adp->be + ADBE_LAYER_EN4);
+	writel(ADBE_LAYER_CTL_ENABLE, adp->be + ADBE_LAYER_CTL);
+}
+
+static const struct drm_plane_helper_funcs adp_plane_helper_funcs = {
+	.atomic_check = adp_plane_atomic_check,
+	.atomic_update = adp_plane_atomic_update,
+	.atomic_disable = adp_plane_atomic_disable,
+	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS
+};
+
+static const struct drm_plane_funcs adp_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	DRM_GEM_SHADOW_PLANE_FUNCS
+};
+
+static const u32 plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+};
+
+#define ALL_CRTCS 1
+
+static struct drm_plane *adp_plane_new(struct adp_drv_private *adp)
+{
+	struct drm_device *drm = &adp->drm;
+	struct drm_plane *plane;
+
+	plane = __drmm_universal_plane_alloc(drm, sizeof(struct drm_plane), 0,
+					     ALL_CRTCS, &adp_plane_funcs,
+					     plane_formats, ARRAY_SIZE(plane_formats),
+					     NULL, DRM_PLANE_TYPE_PRIMARY, "plane");
+	if (IS_ERR(plane)) {
+		drm_err(drm, "failed to allocate plane");
+		return plane;
+	}
+
+	drm_plane_helper_add(plane, &adp_plane_helper_funcs);
+	return plane;
+}
+
+static void adp_enable_vblank(struct adp_drv_private *adp)
+{
+	u32 cur_ctrl;
+
+	writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+
+	cur_ctrl = readl(adp->fe + ADP_CTRL);
+	writel(cur_ctrl | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+}
+
+static int adp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct adp_drv_private *adp = to_adp(dev);
+
+	adp_enable_vblank(adp);
+
+	return 0;
+}
+
+static void adp_disable_vblank(struct adp_drv_private *adp)
+{
+	u32 cur_ctrl;
+
+	cur_ctrl = readl(adp->fe + ADP_CTRL);
+	writel(cur_ctrl & ~ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL);
+	writel(ADP_INT_STATUS_INT_MASK, adp->fe + ADP_INT_STATUS);
+}
+
+static void adp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct adp_drv_private *adp = to_adp(dev);
+
+	adp_disable_vblank(adp);
+}
+
+static void adp_crtc_atomic_enable(struct drm_crtc *crtc,
+				   struct drm_atomic_state *state)
+{
+	struct adp_drv_private *adp = crtc_to_adp(crtc);
+
+	writel(BIT(0), adp->be + ADBE_BLEND_EN2);
+	writel(BIT(4), adp->be + ADBE_BLEND_EN1);
+	writel(BIT(0), adp->be + ADBE_BLEND_EN3);
+	writel(BIT(0), adp->be + ADBE_BLEND_BYPASS);
+	writel(BIT(0), adp->be + ADBE_BLEND_EN4);
+	drm_crtc_vblank_on(crtc);
+}
+
+static void adp_crtc_atomic_disable(struct drm_crtc *crtc,
+				    struct drm_atomic_state *state)
+{
+	struct adp_drv_private *adp = crtc_to_adp(crtc);
+	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+	drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
+	writel(0x0, adp->be + ADBE_BLEND_EN2);
+	writel(0x0, adp->be + ADBE_BLEND_EN1);
+	writel(0x0, adp->be + ADBE_BLEND_EN3);
+	writel(0x0, adp->be + ADBE_BLEND_BYPASS);
+	writel(0x0, adp->be + ADBE_BLEND_EN4);
+	drm_crtc_vblank_off(crtc);
+}
+
+static void adp_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_atomic_state *state)
+{
+	u32 frame_num = 1;
+	unsigned long flags;
+	struct adp_drv_private *adp = crtc_to_adp(crtc);
+	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);
+	u64 new_size = ALIGN(new_state->mode.hdisplay *
+			     new_state->mode.vdisplay * 4, PAGE_SIZE);
+
+	if (new_size != adp->mask_buf_size) {
+		if (adp->mask_buf)
+			dma_free_coherent(crtc->dev->dev, adp->mask_buf_size,
+					  adp->mask_buf, adp->mask_iova);
+		adp->mask_buf = NULL;
+		if (new_size != 0) {
+			adp->mask_buf = dma_alloc_coherent(crtc->dev->dev, new_size,
+							   &adp->mask_iova, GFP_KERNEL);
+			if (adp->mask_buf) {
+				memset(adp->mask_buf, 0xFF, new_size);
+				writel(adp->mask_iova, adp->be + ADBE_MASK_BUF);
+			}
+		}
+		adp->mask_buf_size = new_size;
+	}
+	writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO);
+	//FIXME: use adbe flush interrupt
+	if (crtc->state->event) {
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+		if (drm_crtc_vblank_get(crtc) != 0)
+			drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		else
+			adp->event = crtc->state->event;
+
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
+	crtc->state->event = NULL;
+}
+
+static const struct drm_crtc_funcs adp_crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank = adp_crtc_enable_vblank,
+	.disable_vblank = adp_crtc_disable_vblank,
+};
+
+static const struct drm_crtc_helper_funcs adp_crtc_helper_funcs = {
+	.atomic_enable = adp_crtc_atomic_enable,
+	.atomic_disable = adp_crtc_atomic_disable,
+	.atomic_flush = adp_crtc_atomic_flush,
+};
+
+static int adp_setup_crtc(struct adp_drv_private *adp)
+{
+	struct drm_device *drm = &adp->drm;
+	struct drm_plane *primary;
+	int ret;
+
+	primary = adp_plane_new(adp);
+	if (IS_ERR(primary))
+		return PTR_ERR(primary);
+
+	ret = drm_crtc_init_with_planes(drm, &adp->crtc, primary,
+					NULL, &adp_crtc_funcs, NULL);
+	if (ret)
+		return ret;
+
+	drm_crtc_helper_add(&adp->crtc, &adp_crtc_helper_funcs);
+	return 0;
+}
+
+static const struct drm_mode_config_funcs adp_mode_config_funcs = {
+	.fb_create = drm_gem_fb_create_with_dirty,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static int adp_setup_mode_config(struct adp_drv_private *adp)
+{
+	struct drm_device *drm = &adp->drm;
+	int ret;
+	u32 size;
+
+	ret = drmm_mode_config_init(drm);
+	if (ret)
+		return ret;
+
+	/*
+	 * Query the screen size and restrict the frame buffer size to the
+	 * screen size aligned to the next multiple of 64. This is not strictly
+	 * necessary but can serve as a simple check for non-desktop devices:
+	 * Xorg's modesetting driver does not care about the connector's
+	 * "non-desktop" property, but the maximum frame buffer width or height
+	 * can be checked easily and a device can be rejected if, for example,
+	 * the maximum width/height is smaller than 120.
+	 * A touchbar daemon is not limited by this small framebuffer size.
+	 */
+	size = readl(adp->fe + ADP_SCREEN_SIZE);
+
+	drm->mode_config.min_width = 32;
+	drm->mode_config.min_height = 32;
+	drm->mode_config.max_width = ALIGN(FIELD_GET(ADP_SCREEN_HSIZE, size), 64);
+	drm->mode_config.max_height = ALIGN(FIELD_GET(ADP_SCREEN_VSIZE, size), 64);
+	drm->mode_config.preferred_depth = 24;
+	drm->mode_config.prefer_shadow = 0;
+	drm->mode_config.funcs = &adp_mode_config_funcs;
+
+	ret = adp_setup_crtc(adp);
+	if (ret) {
+		drm_err(drm, "failed to create crtc");
+		return ret;
+	}
+
+	adp->encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DSI, NULL);
+	if (IS_ERR(adp->encoder)) {
+		drm_err(drm, "failed to init encoder");
+		return PTR_ERR(adp->encoder);
+	}
+	adp->encoder->possible_crtcs = ALL_CRTCS;
+
+	ret = drm_bridge_attach(adp->encoder, adp->next_bridge, NULL,
+				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+	if (ret) {
+		drm_err(drm, "failed to init bridge chain");
+		return ret;
+	}
+
+	adp->connector = drm_bridge_connector_init(drm, adp->encoder);
+	if (IS_ERR(adp->connector))
+		return PTR_ERR(adp->connector);
+
+	drm_connector_attach_encoder(adp->connector, adp->encoder);
+
+	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (ret < 0) {
+		drm_err(drm, "failed to initialize vblank");
+		return ret;
+	}
+
+	drm_mode_config_reset(drm);
+
+	return 0;
+}
+
+static int adp_parse_of(struct platform_device *pdev, struct adp_drv_private *adp)
+{
+	struct device *dev = &pdev->dev;
+
+	adp->be = devm_platform_ioremap_resource_byname(pdev, "be");
+	if (IS_ERR(adp->be)) {
+		dev_err(dev, "failed to map display backend mmio");
+		return PTR_ERR(adp->be);
+	}
+
+	adp->fe = devm_platform_ioremap_resource_byname(pdev, "fe");
+	if (IS_ERR(adp->fe)) {
+		dev_err(dev, "failed to map display pipe mmio");
+		return PTR_ERR(adp->fe);
+	}
+
+	adp->be_irq = platform_get_irq_byname(pdev, "be");
+	if (adp->be_irq < 0) {
+		dev_err(dev, "failed to find be irq");
+		return adp->be_irq;
+	}
+
+	adp->fe_irq = platform_get_irq_byname(pdev, "fe");
+	if (adp->fe_irq < 0) {
+		dev_err(dev, "failed to find fe irq");
+		return adp->fe_irq;
+	}
+
+	return 0;
+}
+
+static irqreturn_t adp_fe_irq(int irq, void *arg)
+{
+	struct adp_drv_private *adp = (struct adp_drv_private *)arg;
+	u32 int_status;
+	u32 int_ctl;
+
+	int_status = readl(adp->fe + ADP_INT_STATUS);
+	if (int_status & ADP_INT_STATUS_VBLANK) {
+		drm_crtc_handle_vblank(&adp->crtc);
+		spin_lock(&adp->crtc.dev->event_lock);
+		if (adp->event) {
+			int_ctl = readl(adp->fe + ADP_CTRL);
+			if ((int_ctl & 0xF00) == 0x600) {
+				drm_crtc_send_vblank_event(&adp->crtc, adp->event);
+				adp->event = NULL;
+				drm_crtc_vblank_put(&adp->crtc);
+			}
+		}
+		spin_unlock(&adp->crtc.dev->event_lock);
+	}
+
+	writel(int_status, adp->fe + ADP_INT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static int adp_drm_bind(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct adp_drv_private *adp = to_adp(drm);
+	int err;
+
+	writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL);
+
+	adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0);
+	if (IS_ERR(adp->next_bridge)) {
+		dev_err(dev, "failed to find next bridge");
+		return PTR_ERR(adp->next_bridge);
+	}
+
+	err = adp_setup_mode_config(adp);
+	if (err < 0)
+		return err;
+
+	err = request_irq(adp->fe_irq, adp_fe_irq, 0, "adp-fe", adp);
+	if (err)
+		return err;
+
+	err = drm_dev_register(&adp->drm, 0);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void adp_drm_unbind(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+	struct adp_drv_private *adp = to_adp(drm);
+
+	drm_dev_unregister(drm);
+	drm_atomic_helper_shutdown(drm);
+	free_irq(adp->fe_irq, adp);
+}
+
+static const struct component_master_ops adp_master_ops = {
+	.bind	= adp_drm_bind,
+	.unbind = adp_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int adp_probe(struct platform_device *pdev)
+{
+	struct device_node *port;
+	struct component_match *match = NULL;
+	struct adp_drv_private *adp;
+	int err;
+
+	adp = devm_drm_dev_alloc(&pdev->dev, &adp_driver, struct adp_drv_private, drm);
+	if (IS_ERR(adp))
+		return PTR_ERR(adp);
+
+	dev_set_drvdata(&pdev->dev, &adp->drm);
+
+	err = adp_parse_of(pdev, adp);
+	if (err < 0)
+		return err;
+
+	port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+	if (!port)
+		return -ENODEV;
+
+	drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
+	of_node_put(port);
+
+	return component_master_add_with_match(&pdev->dev, &adp_master_ops, match);
+}
+
+static void adp_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &adp_master_ops);
+	dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id adp_of_match[] = {
+	{ .compatible = "apple,h7-display-pipe", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, adp_of_match);
+
+static struct platform_driver adp_platform_driver = {
+	.driver = {
+		.name = "adp",
+		.of_match_table = adp_of_match,
+	},
+	.probe = adp_probe,
+	.remove = adp_remove,
+};
+
+module_platform_driver(adp_platform_driver);
+
+MODULE_DESCRIPTION("Apple Display Pipe DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/apple/.gitignore b/drivers/gpu/drm/apple/.gitignore
new file mode 100644
index 00000000000000..d9a77f3b59b21a
--- /dev/null
+++ b/drivers/gpu/drm/apple/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/apple/Kconfig b/drivers/gpu/drm/apple/Kconfig
new file mode 100644
index 00000000000000..9828a5fa193284
--- /dev/null
+++ b/drivers/gpu/drm/apple/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_APPLE
+	tristate "DRM Support for Apple display controllers"
+	depends on DRM && OF && ARM64
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on APPLE_RTKIT
+	depends on OF_ADDRESS
+	select DRM_CLIENT_SELECTION
+	select DRM_KMS_HELPER
+	select DRM_KMS_DMA_HELPER
+	select DRM_GEM_DMA_HELPER
+	select VIDEOMODE_HELPERS
+	select MULTIPLEXER
+	help
+	  Say Y if you have an Apple Silicon chipset.
+
+config DRM_APPLE_AUDIO
+	bool "DisplayPort/HDMI Audio support"
+	default y
+	depends on DRM_APPLE
+	depends on SND
+	select SND_PCM
+	select SND_DMAENGINE_PCM
+
+config DRM_APPLE_DEBUG
+	bool "Enable additional driver debugging"
+	depends on DRM_APPLE
+	depends on EXPERT # only for developers
diff --git a/drivers/gpu/drm/apple/Makefile b/drivers/gpu/drm/apple/Makefile
new file mode 100644
index 00000000000000..045183c63bc129
--- /dev/null
+++ b/drivers/gpu/drm/apple/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+CFLAGS_trace.o = -I$(src)
+
+appledrm-y := apple_drv.o
+
+apple_dcp-y := afk.o dcp.o dcp_backlight.o dptxep.o iomfb.o parser.o systemep.o
+apple_dcp-$(CONFIG_DRM_APPLE_AUDIO) += audio.o
+apple_dcp-$(CONFIG_DRM_APPLE_AUDIO) += av.o
+apple_dcp-y += connector.o
+apple_dcp-y += ibootep.o
+apple_dcp-y += iomfb_v12_3.o
+apple_dcp-y += iomfb_v13_3.o
+apple_dcp-y += epic/dpavservep.o
+
+apple_dcp-$(CONFIG_TRACING) += trace.o
+
+obj-$(CONFIG_DRM_APPLE) += appledrm.o
+obj-$(CONFIG_DRM_APPLE) += apple_dcp.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+	hdmi-codec-chmap.h
+
+always-y += \
+	$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+		$(shell cd $(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+      cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
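+# For afk.h, for example, the command roughly expands to (illustration only):
+#   $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include afk.h; touch afk.hdrtest
+# i.e. every header found above must compile stand-alone.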
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+	$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/apple/afk.c b/drivers/gpu/drm/apple/afk.c
new file mode 100644
index 00000000000000..d0de72072877b8
--- /dev/null
+++ b/drivers/gpu/drm/apple/afk.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/soc/apple/rtkit.h>
+
+#include "afk.h"
+#include "trace.h"
+
+struct afk_receive_message_work {
+	struct apple_dcp_afkep *ep;
+	u64 message;
+	struct work_struct work;
+};
+
+#define RBEP_TYPE GENMASK(63, 48)
+
+enum rbep_msg_type {
+	RBEP_INIT = 0x80,
+	RBEP_INIT_ACK = 0xa0,
+	RBEP_GETBUF = 0x89,
+	RBEP_GETBUF_ACK = 0xa1,
+	RBEP_INIT_TX = 0x8a,
+	RBEP_INIT_RX = 0x8b,
+	RBEP_START = 0xa3,
+	RBEP_START_ACK = 0x86,
+	RBEP_SEND = 0xa2,
+	RBEP_RECV = 0x85,
+	RBEP_SHUTDOWN = 0xc0,
+	RBEP_SHUTDOWN_ACK = 0xc1,
+};
+
+#define BLOCK_SHIFT 6
+
+#define GETBUF_SIZE GENMASK(31, 16)
+#define GETBUF_TAG GENMASK(15, 0)
+#define GETBUF_ACK_DVA GENMASK(47, 0)
+
+#define INITRB_OFFSET GENMASK(47, 32)
+#define INITRB_SIZE GENMASK(31, 16)
+#define INITRB_TAG GENMASK(15, 0)
+
+#define SEND_WPTR GENMASK(31, 0)
+
+static void afk_send(struct apple_dcp_afkep *ep, u64 message)
+{
+	dcp_send_message(ep->dcp, ep->endpoint, message);
+}
+
+struct apple_dcp_afkep *afk_init(struct apple_dcp *dcp, u32 endpoint,
+				 const struct apple_epic_service_ops *ops)
+{
+	struct apple_dcp_afkep *afkep;
+	int ret;
+
+	afkep = devm_kzalloc(dcp->dev, sizeof(*afkep), GFP_KERNEL);
+	if (!afkep)
+		return ERR_PTR(-ENOMEM);
+
+	afkep->ops = ops;
+	afkep->dcp = dcp;
+	afkep->endpoint = endpoint;
+	afkep->wq = alloc_ordered_workqueue("apple-dcp-afkep%02x",
+					    WQ_MEM_RECLAIM, endpoint);
+	if (!afkep->wq) {
+		ret = -ENOMEM;
+		goto out_free_afkep;
+	}
+
+	// TODO: devm_ for wq
+
+	init_completion(&afkep->started);
+	init_completion(&afkep->stopped);
+	spin_lock_init(&afkep->lock);
+
+	return afkep;
+
+out_free_afkep:
+	devm_kfree(dcp->dev, afkep);
+	return ERR_PTR(ret);
+}
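+
+/*
+ * Illustrative sketch only: the DCP glue code is expected to create and start
+ * an endpoint roughly like this, where the endpoint number and ops table are
+ * hypothetical:
+ *
+ *	ep = afk_init(dcp, 0x24, example_service_ops);
+ *	if (IS_ERR(ep))
+ *		return PTR_ERR(ep);
+ *	return afk_start(ep);
+ */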
+
+void afk_shutdown(struct apple_dcp_afkep *afkep)
+{
+	int ret;
+
+	afk_send(afkep, FIELD_PREP(RBEP_TYPE, RBEP_SHUTDOWN));
+
+	ret = wait_for_completion_timeout(&afkep->stopped, msecs_to_jiffies(1000));
+	if (ret <= 0)
+		dev_err(afkep->dcp->dev, "Timed out shutting down AFK endpoint %02x", afkep->endpoint);
+
+	destroy_workqueue(afkep->wq);
+}
+
+int afk_start(struct apple_dcp_afkep *ep)
+{
+	int ret;
+
+	reinit_completion(&ep->started);
+	apple_rtkit_start_ep(ep->dcp->rtk, ep->endpoint);
+	afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_INIT));
+
+	ret = wait_for_completion_timeout(&ep->started, msecs_to_jiffies(1000));
+	if (ret <= 0)
+		return -ETIMEDOUT;
+	else
+		return 0;
+}
+
+static void afk_getbuf(struct apple_dcp_afkep *ep, u64 message)
+{
+	u16 size = FIELD_GET(GETBUF_SIZE, message) << BLOCK_SHIFT;
+	u16 tag = FIELD_GET(GETBUF_TAG, message);
+	u64 reply;
+
+	trace_afk_getbuf(ep, size, tag);
+
+	if (ep->bfr) {
+		dev_err(ep->dcp->dev,
+			"Got GETBUF message but buffer already exists\n");
+		return;
+	}
+
+	ep->bfr = dmam_alloc_coherent(ep->dcp->dev, size, &ep->bfr_dma,
+				      GFP_KERNEL);
+	if (!ep->bfr) {
+		dev_err(ep->dcp->dev, "Failed to allocate %d bytes buffer\n",
+			size);
+		return;
+	}
+
+	ep->bfr_size = size;
+	ep->bfr_tag = tag;
+
+	reply = FIELD_PREP(RBEP_TYPE, RBEP_GETBUF_ACK);
+	reply |= FIELD_PREP(GETBUF_ACK_DVA, ep->bfr_dma);
+	afk_send(ep, reply);
+}
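+
+/*
+ * Worked example: a GETBUF message with GETBUF_SIZE == 0x100 asks for
+ * 0x100 << BLOCK_SHIFT == 16 KiB of shared memory; the GETBUF_ACK reply sent
+ * above carries the DMA address of that allocation in GETBUF_ACK_DVA.
+ */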
+
+static void afk_init_rxtx(struct apple_dcp_afkep *ep, u64 message,
+			  struct afk_ringbuffer *bfr)
+{
+	u16 base = FIELD_GET(INITRB_OFFSET, message) << BLOCK_SHIFT;
+	u16 size = FIELD_GET(INITRB_SIZE, message) << BLOCK_SHIFT;
+	u16 tag = FIELD_GET(INITRB_TAG, message);
+	u32 bufsz, end;
+
+	if (tag != ep->bfr_tag) {
+		dev_err(ep->dcp->dev, "AFK[ep:%02x]: expected tag 0x%x but got 0x%x\n",
+			ep->endpoint, ep->bfr_tag, tag);
+		return;
+	}
+
+	if (bfr->ready) {
+		dev_err(ep->dcp->dev, "AFK[ep:%02x]: buffer is already initialized\n",
+			ep->endpoint);
+		return;
+	}
+
+	if (base >= ep->bfr_size) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: requested base 0x%x >= max size 0x%lx\n",
+			ep->endpoint, base, ep->bfr_size);
+		return;
+	}
+
+	end = base + size;
+	if (end > ep->bfr_size) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: requested end 0x%x > max size 0x%lx\n",
+			ep->endpoint, end, ep->bfr_size);
+		return;
+	}
+
+	bfr->hdr = ep->bfr + base;
+	bufsz = le32_to_cpu(bfr->hdr->bufsz);
+	if (bufsz + sizeof(*bfr->hdr) != size) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: ring buffer size 0x%x != expected 0x%lx\n",
+			ep->endpoint, bufsz, sizeof(*bfr->hdr));
+		return;
+	}
+
+	bfr->buf = bfr->hdr + 1;
+	bfr->bufsz = bufsz;
+	bfr->ready = true;
+
+	if (ep->rxbfr.ready && ep->txbfr.ready)
+		afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_START));
+}
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_DEBUG)
+static void afk_populate_service_debugfs(struct apple_epic_service *srv);
+static void afk_remove_service_debugfs(struct apple_epic_service *srv);
+#else
+static void afk_populate_service_debugfs(struct apple_epic_service *srv)
+{
+}
+static void afk_remove_service_debugfs(struct apple_epic_service *srv)
+{
+}
+#endif
+
+static const struct apple_epic_service_ops *
+afk_match_service(struct apple_dcp_afkep *ep, const char *name)
+{
+	const struct apple_epic_service_ops *ops;
+
+	if (!name[0])
+		return NULL;
+	if (!ep->ops)
+		return NULL;
+
+	for (ops = ep->ops; ops->name[0]; ops++) {
+		if (strcmp(ops->name, name))
+			continue;
+
+		return ops;
+	}
+
+	return NULL;
+}
+
+static struct apple_epic_service *afk_epic_find_service(struct apple_dcp_afkep *ep,
+						 u32 channel)
+{
+	for (u32 i = 0; i < ep->num_channels; i++)
+		if (ep->services[i].enabled && ep->services[i].channel == channel)
+			return &ep->services[i];
+
+	return NULL;
+}
+
+static void afk_recv_handle_init(struct apple_dcp_afkep *ep, u32 channel,
+				 u8 *payload, size_t payload_size)
+{
+	char name[32];
+	s64 epic_unit = -1;
+	u32 ch_idx;
+	const char *service_name = name;
+	const char *epic_name = NULL, *epic_class = NULL;
+	const struct apple_epic_service_ops *ops;
+	struct dcp_parse_ctx ctx;
+	u8 *props = payload + sizeof(name);
+	size_t props_size = payload_size - sizeof(name);
+
+	WARN_ON(afk_epic_find_service(ep, channel));
+
+	if (payload_size < sizeof(name)) {
+		dev_err(ep->dcp->dev, "AFK[ep:%02x]: payload too small: %lx\n",
+			ep->endpoint, payload_size);
+		return;
+	}
+
+	if (ep->num_channels >= AFK_MAX_CHANNEL) {
+		dev_err(ep->dcp->dev, "AFK[ep:%02x]: too many enabled services!\n",
+			ep->endpoint);
+		return;
+	}
+
+	strscpy(name, payload, sizeof(name));
+
+	/*
+	 * In DCP firmware 13.2 the DCP reports the interface-name as name,
+	 * which starts with "dispext%d" and uses -1 as the ID for "dcp". In
+	 * the 12.3 firmware EPICProviderClass was used instead. If the init
+	 * call carries props, parse them and use EPICProviderClass to match
+	 * the service.
+	 */
+	if (props_size > 36) {
+		int ret = parse(props, props_size, &ctx);
+		if (ret) {
+			dev_err(ep->dcp->dev,
+				"AFK[ep:%02x]: Failed to parse service init props for %s\n",
+				ep->endpoint, name);
+			return;
+		}
+		ret = parse_epic_service_init(&ctx, &epic_name, &epic_class, &epic_unit);
+		if (ret) {
+			dev_err(ep->dcp->dev,
+				"AFK[ep:%02x]: failed to extract init props: %d\n",
+				ep->endpoint, ret);
+			return;
+		}
+		service_name = epic_class;
+	} else {
+		service_name = name;
+	}
+
+	if (ep->match_epic_name)
+		ops = afk_match_service(ep, epic_name);
+	else
+		ops = afk_match_service(ep, service_name);
+
+	if (!ops) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: unable to match service %s on channel %d\n",
+			ep->endpoint, service_name, channel);
+		goto free;
+	}
+
+	ch_idx = ep->num_channels++;
+	spin_lock_init(&ep->services[ch_idx].lock);
+	ep->services[ch_idx].enabled = true;
+	ep->services[ch_idx].torndown = false;
+	ep->services[ch_idx].ops = ops;
+	ep->services[ch_idx].ep = ep;
+	ep->services[ch_idx].channel = channel;
+	ep->services[ch_idx].cmd_tag = 0;
+	ops->init(&ep->services[ch_idx], epic_name, epic_class, epic_unit);
+	dev_info(ep->dcp->dev, "AFK[ep:%02x]: new service %s on channel %d\n",
+		 ep->endpoint, service_name, channel);
+
+	afk_populate_service_debugfs(&ep->services[ch_idx]);
+
+free:
+	kfree(epic_name);
+	kfree(epic_class);
+}
+
+static void afk_recv_handle_teardown(struct apple_dcp_afkep *ep, u32 channel)
+{
+	struct apple_epic_service *service;
+	const struct apple_epic_service_ops *ops;
+	unsigned long flags;
+
+	service = afk_epic_find_service(ep, channel);
+	if (!service) {
+		dev_warn(ep->dcp->dev, "AFK[ep:%02x]: teardown for disabled channel %u\n",
+			 ep->endpoint, channel);
+		return;
+	}
+
+	afk_remove_service_debugfs(service);
+
+	// TODO: think through what locking is necessary
+	spin_lock_irqsave(&service->lock, flags);
+	/*
+	 * Teardown must not disable the service since it may be sent as a
+	 * side effect of a COMMAND for which a reply is still expected.
+	 * Seen with DCP's "av" endpoint during the close afk_service_call.
+	 */
+	service->torndown = true;
+	ops = service->ops;
+	spin_unlock_irqrestore(&service->lock, flags);
+
+	if (ops->teardown)
+		ops->teardown(service);
+}
+
+static void afk_recv_handle_reply(struct apple_dcp_afkep *ep, u32 channel,
+				  u16 tag, void *payload, size_t payload_size)
+{
+	struct epic_cmd *cmd = payload;
+	struct apple_epic_service *service;
+	unsigned long flags;
+	u8 idx = tag & 0xff;
+	void *rxbuf, *txbuf;
+	dma_addr_t rxbuf_dma, txbuf_dma;
+	size_t rxlen, txlen;
+
+	service = afk_epic_find_service(ep, channel);
+	if (!service) {
+		dev_warn(ep->dcp->dev, "AFK[ep:%02x]: command reply on disabled channel %u\n",
+			 ep->endpoint, channel);
+		return;
+	}
+
+	if (payload_size < sizeof(*cmd)) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: command reply on channel %d too small: %ld\n",
+			ep->endpoint, channel, payload_size);
+		return;
+	}
+
+	if (idx >= MAX_PENDING_CMDS) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: command reply on channel %d out of range: %d\n",
+			ep->endpoint, channel, idx);
+		return;
+	}
+
+	spin_lock_irqsave(&service->lock, flags);
+	if (service->cmds[idx].done) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: command reply on channel %d already handled\n",
+			ep->endpoint, channel);
+		spin_unlock_irqrestore(&service->lock, flags);
+		return;
+	}
+
+	if (tag != service->cmds[idx].tag) {
+		dev_err(ep->dcp->dev,
+			"AFK[ep:%02x]: command reply on channel %d has invalid tag: expected 0x%04x != 0x%04x\n",
+			ep->endpoint, channel, tag, service->cmds[idx].tag);
+		spin_unlock_irqrestore(&service->lock, flags);
+		return;
+	}
+
+	service->cmds[idx].done = true;
+	service->cmds[idx].retcode = le32_to_cpu(cmd->retcode);
+	if (service->cmds[idx].free_on_ack) {
+		/* defer freeing until we're no longer in atomic context */
+		rxbuf = service->cmds[idx].rxbuf;
+		txbuf = service->cmds[idx].txbuf;
+		rxlen = service->cmds[idx].rxlen;
+		txlen = service->cmds[idx].txlen;
+		rxbuf_dma = service->cmds[idx].rxbuf_dma;
+		txbuf_dma = service->cmds[idx].txbuf_dma;
+		bitmap_release_region(service->cmd_map, idx, 0);
+	} else {
+		rxbuf = txbuf = NULL;
+		rxlen = txlen = 0;
+	}
+	if (service->cmds[idx].completion)
+		complete(service->cmds[idx].completion);
+
+	spin_unlock_irqrestore(&service->lock, flags);
+
+	if (rxbuf && rxlen)
+		dma_free_coherent(ep->dcp->dev, rxlen, rxbuf, rxbuf_dma);
+	if (txbuf && txlen)
+		dma_free_coherent(ep->dcp->dev, txlen, txbuf, txbuf_dma);
+}
+
+struct epic_std_service_ap_call {
+	__le32 unk0;
+	__le32 unk1;
+	__le32 type;
+	__le32 len;
+	__le32 magic;
+	u8 _unk[48];
+} __attribute__((packed));
+
+static void afk_recv_handle_std_service(struct apple_dcp_afkep *ep, u32 channel,
+					u32 type, struct epic_hdr *ehdr,
+					struct epic_sub_hdr *eshdr,
+					void *payload, size_t payload_size)
+{
+	struct apple_epic_service *service = afk_epic_find_service(ep, channel);
+
+	if (!service) {
+		dev_warn(ep->dcp->dev,
+			 "AFK[ep:%02x]: std service notify on disabled channel %u\n",
+			 ep->endpoint, channel);
+		return;
+	}
+	if (service->torndown) {
+		dev_warn(ep->dcp->dev,
+			 "AFK[ep:%02x]: std service notify on torn down service "
+			 "(chan:%u)\n", ep->endpoint, channel);
+		return;
+	}
+
+	if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_NOTIFY) {
+		struct epic_std_service_ap_call *call = payload;
+		size_t call_size;
+		void *reply;
+		int ret;
+
+		if (payload_size < sizeof(*call))
+			return;
+
+		call_size = le32_to_cpu(call->len);
+		if (payload_size < sizeof(*call) + call_size)
+			return;
+
+		if (!service->ops->call)
+			return;
+		reply = kzalloc(payload_size, GFP_KERNEL);
+		if (!reply)
+			return;
+
+		ret = service->ops->call(service, le32_to_cpu(call->type),
+					 payload + sizeof(*call), call_size,
+					 reply + sizeof(*call), call_size);
+		if (ret) {
+			kfree(reply);
+			return;
+		}
+
+		memcpy(reply, call, sizeof(*call));
+		afk_send_epic(ep, channel, le16_to_cpu(eshdr->tag),
+			      EPIC_TYPE_NOTIFY_ACK, EPIC_CAT_REPLY,
+			      EPIC_SUBTYPE_STD_SERVICE, reply, payload_size);
+		kfree(reply);
+
+		return;
+	}
+
+	if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_REPORT) {
+		if (service->ops->report)
+			service->ops->report(service, le16_to_cpu(eshdr->type),
+					     payload, payload_size);
+		return;
+	}
+
+	dev_err(ep->dcp->dev,
+		"AFK[ep:%02x]: channel %d received unhandled standard service message: %x / %x\n",
+		ep->endpoint, channel, type, eshdr->category);
+	print_hex_dump(KERN_INFO, "AFK: ", DUMP_PREFIX_NONE, 16, 1, payload,
+				   payload_size, true);
+}
+
+static void afk_recv_handle(struct apple_dcp_afkep *ep, u32 channel, u32 type,
+			    u8 *data, size_t data_size)
+{
+	struct apple_epic_service *service;
+	struct epic_hdr *ehdr = (struct epic_hdr *)data;
+	struct epic_sub_hdr *eshdr =
+		(struct epic_sub_hdr *)(data + sizeof(*ehdr));
+	u16 subtype = le16_to_cpu(eshdr->type);
+	u8 *payload = data + sizeof(*ehdr) + sizeof(*eshdr);
+	size_t payload_size;
+
+	if (data_size < sizeof(*ehdr) + sizeof(*eshdr)) {
+		dev_err(ep->dcp->dev, "AFK[ep:%02x]: payload too small: %lx\n",
+			ep->endpoint, data_size);
+		return;
+	}
+	payload_size = data_size - sizeof(*ehdr) - sizeof(*eshdr);
+
+	trace_afk_recv_handle(ep, channel, type, data_size, ehdr, eshdr);
+
+	service = afk_epic_find_service(ep, channel);
+
+	if (!service) {
+		if (type != EPIC_TYPE_NOTIFY && type != EPIC_TYPE_REPLY) {
+			dev_err(ep->dcp->dev,
+				"AFK[ep:%02x]: expected notify but got 0x%x on channel %d\n",
+				ep->endpoint, type, channel);
+			return;
+		}
+		if (eshdr->category != EPIC_CAT_REPORT) {
+			dev_err(ep->dcp->dev,
+				"AFK[ep:%02x]: expected report but got 0x%x on channel %d\n",
+				ep->endpoint, eshdr->category, channel);
+			return;
+		}
+		if (subtype == EPIC_SUBTYPE_TEARDOWN) {
+			dev_dbg(ep->dcp->dev,
+				"AFK[ep:%02x]: teardown without service on channel %d\n",
+				ep->endpoint, channel);
+			return;
+		}
+		if (subtype != EPIC_SUBTYPE_ANNOUNCE) {
+			dev_err(ep->dcp->dev,
+				"AFK[ep:%02x]: expected announce but got 0x%x on channel %d\n",
+				ep->endpoint, subtype, channel);
+			return;
+		}
+
+		return afk_recv_handle_init(ep, channel, payload, payload_size);
+	}
+
+	if (type == EPIC_TYPE_NOTIFY && eshdr->category == EPIC_CAT_REPORT &&
+	    subtype == EPIC_SUBTYPE_TEARDOWN)
+		return afk_recv_handle_teardown(ep, channel);
+
+	if (type == EPIC_TYPE_REPLY && eshdr->category == EPIC_CAT_REPLY)
+		return afk_recv_handle_reply(ep, channel,
+					     le16_to_cpu(eshdr->tag), payload,
+					     payload_size);
+
+	if (subtype == EPIC_SUBTYPE_STD_SERVICE)
+		return afk_recv_handle_std_service(
+			ep, channel, type, ehdr, eshdr, payload, payload_size);
+
+	dev_err(ep->dcp->dev, "AFK[ep:%02x]: channel %d received unhandled message "
+		"(type %x subtype %x)\n", ep->endpoint, channel, type, subtype);
+	print_hex_dump(KERN_INFO, "AFK: ", DUMP_PREFIX_NONE, 16, 1, payload,
+				   payload_size, true);
+}
+
+static bool afk_recv(struct apple_dcp_afkep *ep)
+{
+	struct afk_qe *hdr;
+	u32 rptr, wptr;
+	u32 magic, size, channel, type;
+
+	if (!ep->rxbfr.ready) {
+		dev_err(ep->dcp->dev, "AFK[ep:%02x]: got RECV but not ready\n",
+			ep->endpoint);
+		return false;
+	}
+
+	rptr = le32_to_cpu(ep->rxbfr.hdr->rptr);
+	wptr = le32_to_cpu(ep->rxbfr.hdr->wptr);
+	trace_afk_recv_rwptr_pre(ep, rptr, wptr);
+
+	if (rptr == wptr)
+		return false;
+
+	if (rptr > (ep->rxbfr.bufsz - sizeof(*hdr))) {
+		dev_warn(ep->dcp->dev,
+			 "AFK[ep:%02x]: rptr out of bounds: 0x%x > 0x%lx\n",
+			 ep->endpoint, rptr, ep->rxbfr.bufsz - sizeof(*hdr));
+		return false;
+	}
+
+	dma_rmb();
+
+	hdr = ep->rxbfr.buf + rptr;
+	magic = le32_to_cpu(hdr->magic);
+	size = le32_to_cpu(hdr->size);
+	trace_afk_recv_qe(ep, rptr, magic, size);
+
+	if (magic != QE_MAGIC) {
+		dev_warn(ep->dcp->dev, "AFK[ep:%02x]: invalid queue entry magic: 0x%x\n",
+			 ep->endpoint, magic);
+		return false;
+	}
+
+	/*
+	 * If there's not enough space at the end of the buffer for the payload,
+	 * the co-processor inserted this dummy queue entry and we have to wrap
+	 * around to the start of the buffer, which contains the real entry.
+	 */
+	if (rptr + size + sizeof(*hdr) > ep->rxbfr.bufsz) {
+		rptr = 0;
+		hdr = ep->rxbfr.buf + rptr;
+		magic = le32_to_cpu(hdr->magic);
+		size = le32_to_cpu(hdr->size);
+		trace_afk_recv_qe(ep, rptr, magic, size);
+
+		if (magic != QE_MAGIC) {
+			dev_warn(ep->dcp->dev,
+				 "AFK[ep:%02x]: invalid next queue entry magic: 0x%x\n",
+				 ep->endpoint, magic);
+			return false;
+		}
+
+		ep->rxbfr.hdr->rptr = cpu_to_le32(rptr);
+	}
+
+	if (rptr + size + sizeof(*hdr) > ep->rxbfr.bufsz) {
+		dev_warn(ep->dcp->dev,
+			 "AFK[ep:%02x]: queue entry out of bounds: 0x%lx > 0x%lx\n",
+			 ep->endpoint, rptr + size + sizeof(*hdr), ep->rxbfr.bufsz);
+		return false;
+	}
+
+	channel = le32_to_cpu(hdr->channel);
+	type = le32_to_cpu(hdr->type);
+
+	rptr = ALIGN(rptr + sizeof(*hdr) + size, 1 << BLOCK_SHIFT);
+	if (WARN_ON(rptr > ep->rxbfr.bufsz))
+		rptr = 0;
+	if (rptr == ep->rxbfr.bufsz)
+		rptr = 0;
+
+	dma_mb();
+
+	ep->rxbfr.hdr->rptr = cpu_to_le32(rptr);
+	trace_afk_recv_rwptr_post(ep, rptr, wptr);
+
+	/*
+	 * TODO: this is theoretically unsafe since DCP could overwrite data
+	 *       after the read pointer was updated above. Do it anyway since
+	 *       it avoids 2 problems in the DCP tracer:
+	 *       1. the tracer sees replies before the notifies from dcp
+	 *       2. the tracer tries to read buffers after they are unmapped.
+	 */
+	afk_recv_handle(ep, channel, type, hdr->data, size);
+
+	return true;
+}
+
+static void afk_receive_message_worker(struct work_struct *work_)
+{
+	struct afk_receive_message_work *work;
+	u16 type;
+
+	work = container_of(work_, struct afk_receive_message_work, work);
+
+	type = FIELD_GET(RBEP_TYPE, work->message);
+	switch (type) {
+	case RBEP_INIT_ACK:
+		break;
+
+	case RBEP_START_ACK:
+		complete_all(&work->ep->started);
+		break;
+
+	case RBEP_SHUTDOWN_ACK:
+		complete_all(&work->ep->stopped);
+		break;
+
+	case RBEP_GETBUF:
+		afk_getbuf(work->ep, work->message);
+		break;
+
+	case RBEP_INIT_TX:
+		afk_init_rxtx(work->ep, work->message, &work->ep->txbfr);
+		break;
+
+	case RBEP_INIT_RX:
+		afk_init_rxtx(work->ep, work->message, &work->ep->rxbfr);
+		break;
+
+	case RBEP_RECV:
+		while (afk_recv(work->ep))
+			;
+		break;
+
+	default:
+		dev_err(work->ep->dcp->dev,
+			"Received unknown AFK message type: 0x%x\n", type);
+	}
+
+	kfree(work);
+}
+
+int afk_receive_message(struct apple_dcp_afkep *ep, u64 message)
+{
+	struct afk_receive_message_work *work;
+
+	// TODO: comment why decoupling from rtkit thread is required here
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	work->ep = ep;
+	work->message = message;
+	INIT_WORK(&work->work, afk_receive_message_worker);
+	queue_work(ep->wq, &work->work);
+
+	return 0;
+}
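+
+/*
+ * Illustrative sketch only: the RTKit message callback is expected to forward
+ * ring buffer endpoint messages here, roughly
+ *
+ *	if (endpoint == ep->endpoint)
+ *		afk_receive_message(ep, message);
+ *
+ * Queueing onto ep->wq keeps the RTKit mailbox context from blocking on the
+ * allocations and service callbacks performed by the worker.
+ */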
+
+int afk_send_epic(struct apple_dcp_afkep *ep, u32 channel, u16 tag,
+		  enum epic_type etype, enum epic_category ecat, u8 stype,
+		  const void *payload, size_t payload_len)
+{
+	u32 rptr, wptr;
+	struct afk_qe *hdr, *hdr2;
+	struct epic_hdr *ehdr;
+	struct epic_sub_hdr *eshdr;
+	unsigned long flags;
+	size_t total_epic_size, total_size;
+	int ret;
+
+	spin_lock_irqsave(&ep->lock, flags);
+
+	dma_rmb();
+	rptr = le32_to_cpu(ep->txbfr.hdr->rptr);
+	wptr = le32_to_cpu(ep->txbfr.hdr->wptr);
+	trace_afk_send_rwptr_pre(ep, rptr, wptr);
+	total_epic_size = sizeof(*ehdr) + sizeof(*eshdr) + payload_len;
+	total_size = sizeof(*hdr) + total_epic_size;
+
+	hdr = hdr2 = NULL;
+
+	/*
+	 * We need to figure out how to place the headers and the entire
+	 * payload into the ring buffer:
+	 * - If the write pointer is in front of the read pointer we just need
+	 *   enough space in between to store everything.
+	 * - If the read pointer has already wrapped around the end of the
+	 *   buffer we can
+	 *    a) either store the entire payload at the write pointer if
+	 *       there's enough space until the end,
+	 *    b) or just store the queue entry at the write pointer to indicate
+	 *       that we need to wrap to the start and then store the headers
+	 *       and the payload at the beginning of the buffer. The queue
+	 *       header has to be stored twice in this case.
+	 * In either case we have to ensure that there's always enough space
+	 * so that we don't accidentally overwrite other buffers.
+	 */
+	if (wptr < rptr) {
+		/*
+		 * If wptr < rptr we can't wrap around and only have to make
+		 * sure that there's enough space for the entire payload.
+		 */
+		if (wptr + total_size > rptr) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		hdr = ep->txbfr.buf + wptr;
+		wptr += sizeof(*hdr);
+	} else {
+		/* We need enough space to place at least a queue entry */
+		if (wptr + sizeof(*hdr) > ep->txbfr.bufsz) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		/*
+		 * If we can place a single queue entry but not the full payload
+		 * we need to place one queue entry at the end of the ring
+		 * buffer and then another one together with the entire
+		 * payload at the beginning.
+		 */
+		if (wptr + total_size > ep->txbfr.bufsz) {
+			/*
+			 * Ensure there's space for the queue entry at the
+			 * beginning
+			 */
+			if (sizeof(*hdr) > rptr) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			/*
+			 * Place two queue entries to indicate we want to wrap
+			 * around to the firmware.
+			 */
+			hdr = ep->txbfr.buf + wptr;
+			hdr2 = ep->txbfr.buf;
+			wptr = sizeof(*hdr);
+
+			/* Ensure there's enough space for the entire payload */
+			if (wptr + total_epic_size > rptr) {
+				ret = -ENOMEM;
+				goto out;
+			}
+		} else {
+			/* We have enough space to place the entire payload */
+			hdr = ep->txbfr.buf + wptr;
+			wptr += sizeof(*hdr);
+		}
+	}
+	/*
+	 * At this point we're guaranteed that hdr (and possibly hdr2) point
+	 * to a buffer large enough to fit the queue entry and that we have
+	 * enough space at wptr to store the payload.
+	 */
+
+	hdr->magic = cpu_to_le32(QE_MAGIC);
+	hdr->size = cpu_to_le32(total_epic_size);
+	hdr->channel = cpu_to_le32(channel);
+	hdr->type = cpu_to_le32(etype);
+	if (hdr2)
+		memcpy(hdr2, hdr, sizeof(*hdr));
+
+	ehdr = ep->txbfr.buf + wptr;
+	memset(ehdr, 0, sizeof(*ehdr));
+	ehdr->version = 2;
+	ehdr->seq = cpu_to_le16(ep->qe_seq++);
+	ehdr->timestamp = cpu_to_le64(0);
+	wptr += sizeof(*ehdr);
+
+	eshdr = ep->txbfr.buf + wptr;
+	memset(eshdr, 0, sizeof(*eshdr));
+	eshdr->length = cpu_to_le32(payload_len);
+	eshdr->version = 4;
+	eshdr->category = ecat;
+	eshdr->type = cpu_to_le16(stype);
+	eshdr->timestamp = cpu_to_le64(0);
+	eshdr->tag = cpu_to_le16(tag);
+	if (ecat == EPIC_CAT_REPLY)
+		eshdr->inline_len = cpu_to_le16(payload_len - 4);
+	else
+		eshdr->inline_len = cpu_to_le16(0);
+	wptr += sizeof(*eshdr);
+
+	memcpy(ep->txbfr.buf + wptr, payload, payload_len);
+	wptr += payload_len;
+	wptr = ALIGN(wptr, 1 << BLOCK_SHIFT);
+	if (wptr == ep->txbfr.bufsz)
+		wptr = 0;
+	trace_afk_send_rwptr_post(ep, rptr, wptr);
+
+	ep->txbfr.hdr->wptr = cpu_to_le32(wptr);
+	afk_send(ep, FIELD_PREP(RBEP_TYPE, RBEP_SEND) |
+			     FIELD_PREP(SEND_WPTR, wptr));
+	ret = 0;
+
+out:
+	spin_unlock_irqrestore(&ep->lock, flags);
+	return ret;
+}
+
+int afk_send_command(struct apple_epic_service *service, u8 type,
+		     const void *payload, size_t payload_len, void *output,
+		     size_t output_len, u32 *retcode)
+{
+	struct epic_cmd cmd;
+	void *rxbuf, *txbuf;
+	dma_addr_t rxbuf_dma, txbuf_dma;
+	unsigned long flags;
+	int ret, idx;
+	u16 tag;
+	struct apple_dcp_afkep *ep = service->ep;
+	DECLARE_COMPLETION_ONSTACK(completion);
+
+	rxbuf = dma_alloc_coherent(ep->dcp->dev, output_len, &rxbuf_dma,
+				   GFP_KERNEL);
+	if (!rxbuf)
+		return -ENOMEM;
+	txbuf = dma_alloc_coherent(ep->dcp->dev, payload_len, &txbuf_dma,
+				   GFP_KERNEL);
+	if (!txbuf) {
+		ret = -ENOMEM;
+		goto err_free_rxbuf;
+	}
+
+	memcpy(txbuf, payload, payload_len);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.retcode = cpu_to_le32(0);
+	cmd.rxbuf = cpu_to_le64(rxbuf_dma);
+	cmd.rxlen = cpu_to_le32(output_len);
+	cmd.txbuf = cpu_to_le64(txbuf_dma);
+	cmd.txlen = cpu_to_le32(payload_len);
+
+	spin_lock_irqsave(&service->lock, flags);
+	idx = bitmap_find_free_region(service->cmd_map, MAX_PENDING_CMDS, 0);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto err_unlock;
+	}
+
+	tag = (service->cmd_tag & 0xff) << 8;
+	tag |= idx & 0xff;
+	service->cmd_tag++;
+
+	service->cmds[idx].tag = tag;
+	service->cmds[idx].rxbuf = rxbuf;
+	service->cmds[idx].txbuf = txbuf;
+	service->cmds[idx].rxbuf_dma = rxbuf_dma;
+	service->cmds[idx].txbuf_dma = txbuf_dma;
+	service->cmds[idx].rxlen = output_len;
+	service->cmds[idx].txlen = payload_len;
+	service->cmds[idx].free_on_ack = false;
+	service->cmds[idx].done = false;
+	service->cmds[idx].completion = &completion;
+	init_completion(&completion);
+
+	spin_unlock_irqrestore(&service->lock, flags);
+
+	ret = afk_send_epic(service->ep, service->channel, tag,
+			    EPIC_TYPE_COMMAND, EPIC_CAT_COMMAND, type, &cmd,
+			    sizeof(cmd));
+	if (ret)
+		goto err_free_cmd;
+
+	ret = wait_for_completion_timeout(&completion,
+					  msecs_to_jiffies(MSEC_PER_SEC));
+
+	if (ret <= 0) {
+		spin_lock_irqsave(&service->lock, flags);
+		/*
+		 * Check again while we're inside the lock to make sure
+		 * the command wasn't completed just after
+		 * wait_for_completion_timeout returned.
+		 */
+		if (!service->cmds[idx].done) {
+			service->cmds[idx].completion = NULL;
+			service->cmds[idx].free_on_ack = true;
+			spin_unlock_irqrestore(&service->lock, flags);
+			return -ETIMEDOUT;
+		}
+		spin_unlock_irqrestore(&service->lock, flags);
+	}
+
+	ret = 0;
+	if (retcode)
+		*retcode = service->cmds[idx].retcode;
+	if (output && output_len)
+		memcpy(output, rxbuf, output_len);
+
+err_free_cmd:
+	spin_lock_irqsave(&service->lock, flags);
+	bitmap_release_region(service->cmd_map, idx, 0);
+err_unlock:
+	spin_unlock_irqrestore(&service->lock, flags);
+	dma_free_coherent(ep->dcp->dev, payload_len, txbuf, txbuf_dma);
+err_free_rxbuf:
+	dma_free_coherent(ep->dcp->dev, output_len, rxbuf, rxbuf_dma);
+	return ret;
+}
+
+int afk_service_call(struct apple_epic_service *service, u16 group, u32 command,
+		     const void *data, size_t data_len, size_t data_pad,
+		     void *output, size_t output_len, size_t output_pad)
+{
+	struct epic_service_call *call;
+	void *bfr;
+	size_t bfr_len = max(data_len + data_pad, output_len + output_pad) +
+			 sizeof(*call);
+	int ret;
+	u32 retcode;
+	u32 retlen;
+
+	bfr = kzalloc(bfr_len, GFP_KERNEL);
+	if (!bfr)
+		return -ENOMEM;
+
+	call = bfr;
+
+	memset(call, 0, sizeof(*call));
+	call->group = cpu_to_le16(group);
+	call->command = cpu_to_le32(command);
+	call->data_len = cpu_to_le32(data_len + data_pad);
+	call->magic = cpu_to_le32(EPIC_SERVICE_CALL_MAGIC);
+
+	memcpy(bfr + sizeof(*call), data, data_len);
+
+	ret = afk_send_command(service, EPIC_SUBTYPE_STD_SERVICE, bfr, bfr_len,
+			       bfr, bfr_len, &retcode);
+	if (ret)
+		goto out;
+	if (retcode) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (le32_to_cpu(call->magic) != EPIC_SERVICE_CALL_MAGIC ||
+	    le16_to_cpu(call->group) != group ||
+	    le32_to_cpu(call->command) != command) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	retlen = le32_to_cpu(call->data_len);
+	if (output_len < retlen)
+		retlen = output_len;
+	if (output && output_len) {
+		memset(output, 0, output_len);
+		memcpy(output, bfr + sizeof(*call), retlen);
+	}
+
+out:
+	kfree(bfr);
+	return ret;
+}
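+
+/*
+ * Illustrative sketch only, with a hypothetical group, command and reply
+ * layout: a service implementation could issue a call and read the reply
+ * like this:
+ *
+ *	struct { __le32 enabled; } reply;
+ *	int err = afk_service_call(service, 8, 0x0a, NULL, 0, 0,
+ *				   &reply, sizeof(reply), 0);
+ *	if (!err)
+ *		state = le32_to_cpu(reply.enabled);
+ */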
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_DEBUG)
+
+#define AFK_DEBUGFS_MAX_REPLY 8192
+
+static ssize_t service_call_write_file(struct file *file, const char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct apple_epic_service *srv = file->private_data;
+	void *buf;
+	int ret;
+	struct {
+		u32 group;
+		u32 command;
+	} call_info;
+
+	if (count < sizeof(call_info))
+		return -EINVAL;
+	if (!srv->debugfs.scratch) {
+		srv->debugfs.scratch =
+			devm_kzalloc(srv->ep->dcp->dev, AFK_DEBUGFS_MAX_REPLY, GFP_KERNEL);
+		if (!srv->debugfs.scratch)
+			return -ENOMEM;
+	}
+
+	ret = copy_from_user(&call_info, user_buf, sizeof(call_info));
+	if (ret == sizeof(call_info))
+		return -EFAULT;
+	user_buf += sizeof(call_info);
+	count -= sizeof(call_info);
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret == count) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	memset(srv->debugfs.scratch, 0, AFK_DEBUGFS_MAX_REPLY);
+	dma_mb();
+
+	ret = afk_service_call(srv, call_info.group, call_info.command, buf, count, 0,
+			       srv->debugfs.scratch, AFK_DEBUGFS_MAX_REPLY, 0);
+	kfree(buf);
+
+	if (ret < 0)
+		return ret;
+
+	return count + sizeof(call_info);
+}
+
+static ssize_t service_call_read_file(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct apple_epic_service *srv = file->private_data;
+
+	if (!srv->debugfs.scratch)
+		return -EINVAL;
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       srv->debugfs.scratch, AFK_DEBUGFS_MAX_REPLY);
+}
+
+static const struct file_operations service_call_fops = {
+	.open = simple_open,
+	.write = service_call_write_file,
+	.read = service_call_read_file,
+};
+
+static ssize_t service_raw_call_write_file(struct file *file, const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct apple_epic_service *srv = file->private_data;
+	u32 retcode;
+	int ret;
+
+	if (!srv->debugfs.scratch) {
+		srv->debugfs.scratch =
+			devm_kzalloc(srv->ep->dcp->dev, AFK_DEBUGFS_MAX_REPLY, GFP_KERNEL);
+		if (!srv->debugfs.scratch)
+			return -ENOMEM;
+	}
+
+	memset(srv->debugfs.scratch, 0, AFK_DEBUGFS_MAX_REPLY);
+	ret = copy_from_user(srv->debugfs.scratch, user_buf, count);
+	if (ret == count)
+		return -EFAULT;
+
+	ret = afk_send_command(srv, EPIC_SUBTYPE_STD_SERVICE, srv->debugfs.scratch, count,
+			       srv->debugfs.scratch, AFK_DEBUGFS_MAX_REPLY, &retcode);
+	if (ret < 0)
+		return ret;
+	if (retcode)
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t service_raw_call_read_file(struct file *file, char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct apple_epic_service *srv = file->private_data;
+
+	if (!srv->debugfs.scratch)
+		return -EINVAL;
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       srv->debugfs.scratch, AFK_DEBUGFS_MAX_REPLY);
+}
+
+static const struct file_operations service_raw_call_fops = {
+	.open = simple_open,
+	.write = service_raw_call_write_file,
+	.read = service_raw_call_read_file,
+};
+
+static void afk_populate_service_debugfs(struct apple_epic_service *srv)
+{
+	if (!srv->ep->debugfs_entry || !srv->ops)
+		return;
+
+	if (strcmp(srv->ops->name, "DCPAVAudioInterface") == 0) {
+		srv->debugfs.entry = debugfs_create_dir(srv->ops->name,
+							srv->ep->debugfs_entry);
+		debugfs_create_file("call", 0600, srv->debugfs.entry, srv,
+				&service_call_fops);
+		debugfs_create_file("raw_call", 0600, srv->debugfs.entry, srv,
+				&service_raw_call_fops);
+	}
+}
+
+static void afk_remove_service_debugfs(struct apple_epic_service *srv)
+{
+	if (srv->debugfs.entry) {
+		debugfs_remove_recursive(srv->debugfs.entry);
+		srv->debugfs.entry = NULL;
+	}
+}
+
+#endif
diff --git a/drivers/gpu/drm/apple/afk.h b/drivers/gpu/drm/apple/afk.h
new file mode 100644
index 00000000000000..a339c00a2a0138
--- /dev/null
+++ b/drivers/gpu/drm/apple/afk.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * AFK (Apple Firmware Kit) EPIC (EndPoint Interface Client) support
+ */
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#ifndef _DRM_APPLE_DCP_AFK_H
+#define _DRM_APPLE_DCP_AFK_H
+
+#include <linux/completion.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+#include "dcp.h"
+
+#define AFK_MAX_CHANNEL 16
+#define MAX_PENDING_CMDS 16
+
+struct apple_epic_service_ops;
+struct apple_dcp_afkep;
+
+struct epic_cmd_info {
+	u16 tag;
+
+	void *rxbuf;
+	void *txbuf;
+	dma_addr_t rxbuf_dma;
+	dma_addr_t txbuf_dma;
+	size_t rxlen;
+	size_t txlen;
+
+	u32 retcode;
+	bool done;
+	bool free_on_ack;
+	struct completion *completion;
+};
+
+struct apple_epic_service {
+	const struct apple_epic_service_ops *ops;
+	struct apple_dcp_afkep *ep;
+
+	struct epic_cmd_info cmds[MAX_PENDING_CMDS];
+	DECLARE_BITMAP(cmd_map, MAX_PENDING_CMDS);
+	u8 cmd_tag;
+	spinlock_t lock;
+
+	u32 channel;
+	bool enabled;
+	bool torndown;
+
+	void *cookie;
+
+	struct {
+		struct dentry *entry;
+		u8 *scratch;
+	} debugfs;
+};
+
+enum epic_subtype;
+
+struct apple_epic_service_ops {
+	const char name[32];
+
+	void (*init)(struct apple_epic_service *service, const char *name,
+		     const char *class, s64 unit);
+	int (*call)(struct apple_epic_service *service, u32 idx,
+		    const void *data, size_t data_size, void *reply,
+		    size_t reply_size);
+	int (*report)(struct apple_epic_service *service, enum epic_subtype type,
+		      const void *data, size_t data_size);
+	void (*teardown)(struct apple_epic_service *service);
+};
+
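+/*
+ * Ring buffer header; bufsz, rptr and wptr each sit in their own 64-byte
+ * block thanks to the padding.
+ */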
+struct afk_ringbuffer_header {
+	__le32 bufsz;
+	u32 unk;
+	u32 _pad1[14];
+	__le32 rptr;
+	u32 _pad2[15];
+	__le32 wptr;
+	u32 _pad3[15];
+};
+
+struct afk_qe {
+#define QE_MAGIC 0x20504f49 // ' POI'
+	__le32 magic;
+	__le32 size;
+	__le32 channel;
+	__le32 type;
+	u8 data[];
+};
+
+struct epic_hdr {
+	u8 version;
+	__le16 seq;
+	u8 _pad;
+	__le32 unk;
+	__le64 timestamp;
+} __attribute__((packed));
+
+struct epic_sub_hdr {
+	__le32 length;
+	u8 version;
+	u8 category;
+	__le16 type;
+	__le64 timestamp;
+	__le16 tag;
+	__le16 unk;
+	__le32 inline_len;
+} __attribute__((packed));
+
+struct epic_cmd {
+	__le32 retcode;
+	__le64 rxbuf;
+	__le64 txbuf;
+	__le32 rxlen;
+	__le32 txlen;
+	u8 rxcookie;
+	u8 txcookie;
+} __attribute__((packed));
+
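+/*
+ * Fixed 64-byte header of an EPIC "standard service" call; the payload
+ * follows directly after it and its length is given by data_len.
+ */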
+struct epic_service_call {
+	u8 _pad0[2];
+	__le16 group;
+	__le32 command;
+	__le32 data_len;
+#define EPIC_SERVICE_CALL_MAGIC 0x69706378
+	__le32 magic;
+	u8 _pad1[48];
+} __attribute__((packed));
+static_assert(sizeof(struct epic_service_call) == 64);
+
+enum epic_type {
+	EPIC_TYPE_NOTIFY = 0,
+	EPIC_TYPE_COMMAND = 3,
+	EPIC_TYPE_REPLY = 4,
+	EPIC_TYPE_NOTIFY_ACK = 8,
+};
+
+enum epic_category {
+	EPIC_CAT_REPORT = 0x00,
+	EPIC_CAT_NOTIFY = 0x10,
+	EPIC_CAT_REPLY = 0x20,
+	EPIC_CAT_COMMAND = 0x30,
+};
+
+enum epic_subtype {
+	EPIC_SUBTYPE_ANNOUNCE = 0x30,
+	EPIC_SUBTYPE_TEARDOWN = 0x32,
+	EPIC_SUBTYPE_STD_SERVICE = 0xc0,
+};
+
+struct afk_ringbuffer {
+	bool ready;
+	struct afk_ringbuffer_header *hdr;
+	u32 rptr;
+	void *buf;
+	size_t bufsz;
+};
+
+struct apple_dcp_afkep {
+	struct apple_dcp *dcp;
+
+	u32 endpoint;
+	struct workqueue_struct *wq;
+
+	struct completion started;
+	struct completion stopped;
+
+	void *bfr;
+	u16 bfr_tag;
+	size_t bfr_size;
+	dma_addr_t bfr_dma;
+
+	struct afk_ringbuffer txbfr;
+	struct afk_ringbuffer rxbfr;
+
+	spinlock_t lock;
+	u16 qe_seq;
+
+	const struct apple_epic_service_ops *ops;
+	struct apple_epic_service services[AFK_MAX_CHANNEL];
+	u32 num_channels;
+
+	struct dentry *debugfs_entry;
+
+	bool match_epic_name;
+};
+
+struct apple_dcp_afkep *afk_init(struct apple_dcp *dcp, u32 endpoint,
+				 const struct apple_epic_service_ops *ops);
+int afk_start(struct apple_dcp_afkep *ep);
+void afk_shutdown(struct apple_dcp_afkep *ep);
+int afk_receive_message(struct apple_dcp_afkep *ep, u64 message);
+int afk_send_epic(struct apple_dcp_afkep *ep, u32 channel, u16 tag,
+		  enum epic_type etype, enum epic_category ecat, u8 stype,
+		  const void *payload, size_t payload_len);
+int afk_send_command(struct apple_epic_service *service, u8 type,
+		     const void *payload, size_t payload_len, void *output,
+		     size_t output_len, u32 *retcode);
+int afk_service_call(struct apple_epic_service *service, u16 group, u32 command,
+		     const void *data, size_t data_len, size_t data_pad,
+		     void *output, size_t output_len, size_t output_pad);
+#endif
diff --git a/drivers/gpu/drm/apple/apple_drv.c b/drivers/gpu/drm/apple/apple_drv.c
new file mode 100644
index 00000000000000..b4527d6f9ce110
--- /dev/null
+++ b/drivers/gpu/drm/apple/apple_drv.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+/* Based on meson driver which is
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ */
+
+#include <linux/aperture.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_module.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_fixed.h>
+
+#include "dcp.h"
+
+#define DRIVER_NAME     "apple"
+#define DRIVER_DESC     "Apple display controller DRM driver"
+
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+
+#define MAX_COPROCESSORS 3
+
+struct apple_drm_private {
+	struct drm_device drm;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(apple_fops);
+
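+/*
+ * Dumb buffers are padded to the 16k page size of the DART IOMMU in front of
+ * the display controller.
+ */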
+#define DART_PAGE_SIZE 16384
+
+static int apple_drm_gem_dumb_create(struct drm_file *file_priv,
+				     struct drm_device *drm,
+				     struct drm_mode_create_dumb *args)
+{
+	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
+	args->size = round_up(args->pitch * args->height, DART_PAGE_SIZE);
+
+	return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver apple_drm_driver = {
+	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(apple_drm_gem_dumb_create),
+	DRM_FBDEV_DMA_DRIVER_OPS,
+	.name			= DRIVER_NAME,
+	.desc			= DRIVER_DESC,
+	.major			= 1,
+	.minor			= 0,
+	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+	.fops			= &apple_fops,
+};
+
+static int apple_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_atomic_state *state)
+{
+	struct drm_plane_state *new_plane_state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_rect *dst;
+	int ret;
+
+	new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+	if (!new_plane_state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	/*
+	 * DCP limits downscaling to 2x and upscaling to 4x. Attempting to
+	 * scale outside these bounds errors out when swapping.
+	 *
+	 * This function also takes care of clipping the src/dest rectangles,
+	 * which is required for correct operation; without it, partially
+	 * off-screen surfaces may appear corrupted.
+	 *
+	 * DCP does not distinguish plane types in the hardware, so we set
+	 * can_position. If the primary plane does not fill the screen, the
+	 * hardware will fill in zeroes (black).
+	 */
+	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+						  FRAC_16_16(1, 2),
+						  FRAC_16_16(4, 1),
+						  true, true);
+	if (ret < 0)
+		return ret;
+
+	if (!new_plane_state->visible)
+		return 0;
+
+	/*
+	 * DCP does not allow a surface to clip off the screen, and will crash
+	 * if any blended surface is smaller than 32x32. Reject the atomic op
+	 * if the plane will crash DCP.
+	 *
+	 * This is most pertinent to cursors. Userspace should fall back to
+	 * software cursors if the plane check is rejected.
+	 */
+	dst = &new_plane_state->dst;
+	if (drm_rect_width(dst) < 32 || drm_rect_height(dst) < 32) {
+		dev_err_once(state->dev->dev,
+			"Plane operation would have crashed DCP! Rejected!\n\
+			DCP requires 32x32 of every plane to be within screen space.\n\
+			Your compositor asked to overlay [%dx%d, %dx%d] on %dx%d.\n\
+			This is not supported, and your compositor should have\n\
+			switched to software compositing when this operation failed.\n\
+			You should not have noticed this at all. If your screen\n\
+			froze/hitched, or your compositor crashed, please report\n\
+			this to your compositor's developers. We will not\n\
+			throw this error again until you next reboot.\n",
+			dst->x1, dst->y1, dst->x2, dst->y2,
+			crtc_state->mode.hdisplay, crtc_state->mode.vdisplay);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void apple_plane_atomic_update(struct drm_plane *plane,
+				      struct drm_atomic_state *state)
+{
+	/* Handled in atomic_flush */
+}
+
+static const struct drm_plane_helper_funcs apple_primary_plane_helper_funcs = {
+	.atomic_check	= apple_plane_atomic_check,
+	.atomic_update	= apple_plane_atomic_update,
+	.get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
+};
+
+static const struct drm_plane_helper_funcs apple_plane_helper_funcs = {
+	.atomic_check	= apple_plane_atomic_check,
+	.atomic_update	= apple_plane_atomic_update,
+};
+
+static void apple_plane_cleanup(struct drm_plane *plane)
+{
+	drm_plane_cleanup(plane);
+	kfree(plane);
+}
+
+static const struct drm_plane_funcs apple_plane_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= apple_plane_cleanup,
+	.reset			= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Table of supported formats, mapping from DRM fourccs to DCP fourccs.
+ *
+ * For future work, DCP supports more formats not listed, including YUV
+ * formats, an extra RGBA format, and a biplanar RGB10_A8 format (fourcc b3a8)
+ * used for HDR.
+ *
+ * Note: we don't have non-alpha formats in DCP, but userspace breaks without
+ * XRGB, so we advertise it anyway. It doesn't matter for the primary plane,
+ * but cursors/overlays must not advertise formats without alpha.
+ */
+static const u32 dcp_primary_formats[] = {
+	DRM_FORMAT_XRGB2101010,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
+};
+
+static const u32 dcp_overlay_formats[] = {
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+};
+
+static const u64 apple_format_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+static struct drm_plane *apple_plane_init(struct drm_device *dev,
+					  unsigned long possible_crtcs,
+					  enum drm_plane_type type)
+{
+	int ret;
+	struct drm_plane *plane;
+
+	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+	if (!plane)
+		return ERR_PTR(-ENOMEM);
+
+	switch (type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+				       &apple_plane_funcs,
+				       dcp_primary_formats, ARRAY_SIZE(dcp_primary_formats),
+				       apple_format_modifiers, type, NULL);
+		break;
+	case DRM_PLANE_TYPE_OVERLAY:
+	case DRM_PLANE_TYPE_CURSOR:
+		ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+				       &apple_plane_funcs,
+				       dcp_overlay_formats, ARRAY_SIZE(dcp_overlay_formats),
+				       apple_format_modifiers, type, NULL);
+		break;
+	default:
+		kfree(plane);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (ret) {
+		kfree(plane);
+		return ERR_PTR(ret);
+	}
+
+	if (type == DRM_PLANE_TYPE_PRIMARY)
+		drm_plane_helper_add(plane, &apple_primary_plane_helper_funcs);
+	else
+		drm_plane_helper_add(plane, &apple_plane_helper_funcs);
+
+	return plane;
+}
+
+static enum drm_connector_status
+apple_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct apple_connector *apple_connector = to_apple_connector(connector);
+
+	return apple_connector->connected ? connector_status_connected :
+						  connector_status_disconnected;
+}
+
+static void apple_connector_oob_hotplug(struct drm_connector *connector,
+					enum drm_connector_status status)
+{
+	struct apple_connector *apple_connector = to_apple_connector(connector);
+
+	dev_dbg(&apple_connector->dcp->dev, "oob_hotplug status:0x%x\n", (u32)status);
+
+	if (status == connector_status_connected)
+		dcp_dptx_connect_oob(apple_connector->dcp, 0);
+	else if (status == connector_status_disconnected)
+		dcp_dptx_disconnect_oob(apple_connector->dcp, 0);
+	else
+		dev_err(&apple_connector->dcp->dev, "unexpected connector status"
+			":0x%x in oob_hotplug event\n", (u32)status);
+}
+
+static void apple_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+	if (crtc_state->active_changed && crtc_state->active) {
+		struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+		dcp_poweron(apple_crtc->dcp);
+	}
+
+	if (crtc_state->active)
+		dcp_crtc_atomic_modeset(crtc, state);
+}
+
+static void apple_crtc_atomic_disable(struct drm_crtc *crtc,
+				      struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+	if (crtc_state->active_changed && !crtc_state->active) {
+		struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+		dcp_poweroff(apple_crtc->dcp);
+	}
+
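+	/*
+	 * With the CRTC off no further vblank will fire, so complete any
+	 * pending event here rather than waiting for the flush path.
+	 */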
+	if (crtc->state->event && !crtc->state->active) {
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		spin_unlock_irq(&crtc->dev->event_lock);
+
+		crtc->state->event = NULL;
+	}
+}
+
+static void apple_crtc_atomic_begin(struct drm_crtc *crtc,
+				    struct drm_atomic_state *state)
+{
+	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+	unsigned long flags;
+
+	if (crtc->state->event) {
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		apple_crtc->event = crtc->state->event;
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		crtc->state->event = NULL;
+	}
+}
+
+static void apple_crtc_cleanup(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(to_apple_crtc(crtc));
+}
+
+static int apple_crtc_parse_crc_source(const char *source, bool *enabled)
+{
+	int ret = 0;
+
+	if (!source) {
+		*enabled = false;
+	} else if (strcmp(source, "auto") == 0) {
+		*enabled = true;
+	} else {
+		*enabled = false;
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int apple_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+{
+	bool enabled = false;
+
+	int ret = apple_crtc_parse_crc_source(source, &enabled);
+
+	if (!ret)
+		dcp_set_crc(crtc, enabled);
+
+	return ret;
+}
+
+static int apple_crtc_verify_crc_source(struct drm_crtc *crtc,
+					const char *source,
+					size_t *values_cnt)
+{
+	bool enabled;
+
+	if (apple_crtc_parse_crc_source(source, &enabled) < 0) {
+		pr_warn("dcp: Invalid CRC source name %s\n", source);
+		return -EINVAL;
+	}
+
+	*values_cnt = 1;
+
+	return 0;
+}
+
+static const char * const apple_crtc_crc_sources[] = {"auto"};
+
+static const char *const * apple_crtc_get_crc_sources(struct drm_crtc *crtc,
+						      size_t *count)
+{
+	*count = ARRAY_SIZE(apple_crtc_crc_sources);
+	return apple_crtc_crc_sources;
+}
+
+static const struct drm_crtc_funcs apple_crtc_funcs = {
+	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.destroy		= apple_crtc_cleanup,
+	.page_flip		= drm_atomic_helper_page_flip,
+	.reset			= drm_atomic_helper_crtc_reset,
+	.set_config             = drm_atomic_helper_set_config,
+	.set_crc_source		= apple_crtc_set_crc_source,
+	.verify_crc_source	= apple_crtc_verify_crc_source,
+	.get_crc_sources	= apple_crtc_get_crc_sources,
+};
+
+static const struct drm_mode_config_funcs apple_mode_config_funcs = {
+	.atomic_check		= drm_atomic_helper_check,
+	.atomic_commit		= drm_atomic_helper_commit,
+	.fb_create		= drm_gem_fb_create,
+};
+
+static const struct drm_mode_config_helper_funcs apple_mode_config_helpers = {
+	.atomic_commit_tail	= drm_atomic_helper_commit_tail_rpm,
+};
+
+static void appledrm_connector_cleanup(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+	kfree(to_apple_connector(connector));
+}
+
+static const struct drm_connector_funcs apple_connector_funcs = {
+	.fill_modes		= drm_helper_probe_single_connector_modes,
+	.destroy		= appledrm_connector_cleanup,
+	.reset			= drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+	.detect			= apple_connector_detect,
+	.debugfs_init		= apple_connector_debugfs_init,
+	.oob_hotplug_event	= apple_connector_oob_hotplug,
+};
+
+static const struct drm_connector_helper_funcs apple_connector_helper_funcs = {
+	.get_modes		= dcp_get_modes,
+	.mode_valid		= dcp_mode_valid,
+};
+
+static const struct drm_crtc_helper_funcs apple_crtc_helper_funcs = {
+	.atomic_begin		= apple_crtc_atomic_begin,
+	.atomic_check		= dcp_crtc_atomic_check,
+	.atomic_flush		= dcp_flush,
+	.atomic_enable		= apple_crtc_atomic_enable,
+	.atomic_disable		= apple_crtc_atomic_disable,
+	.mode_fixup		= dcp_crtc_mode_fixup,
+};
+
+static int apple_probe_per_dcp(struct device *dev,
+			       struct drm_device *drm,
+			       struct platform_device *dcp,
+			       int num, bool dcp_ext)
+{
+	struct apple_crtc *crtc;
+	struct apple_connector *connector;
+	struct apple_encoder *enc;
+	struct drm_plane *planes[DCP_MAX_PLANES];
+	int ret, i;
+	int immutable_zpos = 0;
+
+	planes[0] = apple_plane_init(drm, 1U << num, DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(planes[0]))
+		return PTR_ERR(planes[0]);
+	ret = drm_plane_create_zpos_immutable_property(planes[0], immutable_zpos);
+	if (ret)
+		return ret;
+
+	/* Set up our other planes */
+	for (i = 1; i < DCP_MAX_PLANES; i++) {
+		planes[i] = apple_plane_init(drm, 1U << num, DRM_PLANE_TYPE_OVERLAY);
+		if (IS_ERR(planes[i]))
+			return PTR_ERR(planes[i]);
+		immutable_zpos++;
+		ret = drm_plane_create_zpos_immutable_property(planes[i], immutable_zpos);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Even though we have an overlay plane, we cannot expose it to legacy
+	 * userspace for cursors, since we cannot make the same guarantees that
+	 * such userspace would expect from a hardware cursor plane. Modern
+	 * userspace knows what to do with overlays.
+	 */
+	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+	if (!crtc)
+		return -ENOMEM;
+
+	ret = drm_crtc_init_with_planes(drm, &crtc->base, planes[0], NULL,
+					&apple_crtc_funcs, NULL);
+	if (ret)
+		return ret;
+
+	drm_crtc_helper_add(&crtc->base, &apple_crtc_helper_funcs);
+	drm_crtc_enable_color_mgmt(&crtc->base, 0, true, 0);
+
+	enc = drmm_simple_encoder_alloc(drm, struct apple_encoder, base,
+					DRM_MODE_ENCODER_TMDS);
+	if (IS_ERR(enc))
+		return PTR_ERR(enc);
+	enc->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return -ENOMEM;
+
+	mutex_init(&connector->chunk_lock);
+	drm_connector_helper_add(&connector->base,
+				 &apple_connector_helper_funcs);
+
+	/* HACK: expose the DT node as fwnode so OOB hotplug events reach this connector */
+	if (dcp_ext)
+		connector->base.fwnode = fwnode_handle_get(dcp->dev.fwnode);
+
+	ret = drm_connector_init(drm, &connector->base, &apple_connector_funcs,
+				 dcp_get_connector_type(dcp));
+	if (ret)
+		return ret;
+
+	connector->base.polled = DRM_CONNECTOR_POLL_HPD;
+	connector->connected = false;
+	connector->dcp = dcp;
+
+	INIT_WORK(&connector->hotplug_wq, dcp_hotplug);
+
+	crtc->dcp = dcp;
+	dcp_link(dcp, crtc, connector);
+
+	return drm_connector_attach_encoder(&connector->base, &enc->base);
+}
+
+static int apple_get_fb_resource(struct device *dev, const char *name,
+				 struct resource *fb_r)
+{
+	int idx, ret = -ENODEV;
+	struct device_node *node;
+
+	idx = of_property_match_string(dev->of_node, "memory-region-names", name);
+
+	node = of_parse_phandle(dev->of_node, "memory-region", idx);
+	if (!node) {
+		dev_err(dev, "reserved-memory node '%s' not found\n", name);
+		return -ENODEV;
+	}
+
+	if (!of_device_is_available(node)) {
+		dev_err(dev, "reserved-memory node '%s' is unavailable\n", name);
+		goto err;
+	}
+
+	if (!of_device_is_compatible(node, "framebuffer")) {
+		dev_err(dev, "reserved-memory node '%s' is incompatible\n",
+			node->full_name);
+		goto err;
+	}
+
+	ret = of_address_to_resource(node, 0, fb_r);
+
+err:
+	of_node_put(node);
+	return ret;
+}
+
+static const struct of_device_id apple_dcp_id_tbl[] = {
+	{ .compatible = "apple,dcp" },
+	{ .compatible = "apple,dcpext" },
+	{},
+};
+
+static int apple_drm_init_dcp(struct device *dev)
+{
+	struct apple_drm_private *apple = dev_get_drvdata(dev);
+	struct platform_device *dcp[MAX_COPROCESSORS];
+	struct device_node *np;
+	u64 timeout;
+	int i, ret, num_dcp = 0;
+
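+	/* Set up one CRTC/connector pair per available dcp/dcpext node. */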
+	for_each_matching_node(np, apple_dcp_id_tbl) {
+		bool dcp_ext;
+		if (!of_device_is_available(np)) {
+			of_node_put(np);
+			continue;
+		}
+		dcp_ext = of_device_is_compatible(np, "apple,dcpext") ||
+		          of_property_present(np, "phys");
+
+		dcp[num_dcp] = of_find_device_by_node(np);
+		of_node_put(np);
+		if (!dcp[num_dcp])
+			continue;
+
+		ret = apple_probe_per_dcp(dev, &apple->drm, dcp[num_dcp],
+					  num_dcp, dcp_ext);
+		if (ret)
+			continue;
+
+		ret = dcp_start(dcp[num_dcp]);
+		if (ret)
+			continue;
+
+		num_dcp++;
+	}
+
+	if (num_dcp < 1)
+		return -ENODEV;
+
+	/*
+	 * Starting DPTX might take some time.
+	 */
+	timeout = get_jiffies_64() + msecs_to_jiffies(3000);
+
+	for (i = 0; i < num_dcp; ++i) {
+		u64 jiffies = get_jiffies_64();
+		u64 wait = time_after_eq64(jiffies, timeout) ?
+				   0 :
+				   timeout - jiffies;
+		ret = dcp_wait_ready(dcp[i], wait);
+		/*
+		 * There is nothing we can do if a dcp/dcpext does not boot
+		 * (successfully). Ignoring it should not do any harm for now.
+		 * This needs to be reevaluated when adding dcpext support.
+		 */
+		if (ret)
+			dev_warn(dev, "DCP[%d] not ready: %d\n", i, ret);
+	}
+	/* HACK: Wait for dcp* to settle before a modeset */
+	msleep(100);
+
+	return 0;
+}
+
+static int apple_drm_init(struct device *dev)
+{
+	struct apple_drm_private *apple;
+	struct resource fb_r;
+	resource_size_t fb_size;
+	int ret;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(42));
+	if (ret)
+		return ret;
+
+	ret = apple_get_fb_resource(dev, "framebuffer", &fb_r);
+	if (ret)
+		return ret;
+
+	fb_size = resource_size(&fb_r);
+	ret = aperture_remove_conflicting_devices(fb_r.start, fb_size,
+						  apple_drm_driver.name);
+	if (ret) {
+		dev_err(dev, "Failed to remove conflicting framebuffer: %d\n", ret);
+		return ret;
+	}
+
+	apple = devm_drm_dev_alloc(dev, &apple_drm_driver,
+				   struct apple_drm_private, drm);
+	if (IS_ERR(apple))
+		return PTR_ERR(apple);
+
+	dev_set_drvdata(dev, apple);
+
+	ret = component_bind_all(dev, apple);
+	if (ret)
+		return ret;
+
+	ret = drmm_mode_config_init(&apple->drm);
+	if (ret)
+		goto err_unbind;
+
+	/*
+	 * IOMFB::UPPipeDCP_H13P::verify_surfaces produces the error "plane
+	 * requires a minimum of 32x32 for the source buffer" if smaller
+	 */
+	apple->drm.mode_config.min_width = 32;
+	apple->drm.mode_config.min_height = 32;
+
+	/*
+	 * TODO: this is the max framebuffer size, not the maximal supported
+	 * output resolution. DCP reports the maximal framebuffer size, so take
+	 * it from there instead.
+	 * Hardcode it for now to the M1 Max DCP reported 'MaxSrcBufferWidth'
+	 * and 'MaxSrcBufferHeight' of 16384.
+	 */
+	apple->drm.mode_config.max_width = 16384;
+	apple->drm.mode_config.max_height = 16384;
+
+	apple->drm.mode_config.funcs = &apple_mode_config_funcs;
+	apple->drm.mode_config.helper_private = &apple_mode_config_helpers;
+
+	ret = apple_drm_init_dcp(dev);
+	if (ret)
+		goto err_unbind;
+
+	drm_mode_config_reset(&apple->drm);
+
+	ret = drm_dev_register(&apple->drm, 0);
+	if (ret)
+		goto err_unbind;
+
+	drm_client_setup_with_fourcc(&apple->drm, DRM_FORMAT_XRGB8888);
+
+	return 0;
+
+err_unbind:
+	component_unbind_all(dev, NULL);
+	return ret;
+}
+
+static void apple_drm_uninit(struct device *dev)
+{
+	struct apple_drm_private *apple = dev_get_drvdata(dev);
+
+	drm_dev_unregister(&apple->drm);
+	drm_atomic_helper_shutdown(&apple->drm);
+
+	component_unbind_all(dev, NULL);
+
+	dev_set_drvdata(dev, NULL);
+}
+
+static int apple_drm_bind(struct device *dev)
+{
+	return apple_drm_init(dev);
+}
+
+static void apple_drm_unbind(struct device *dev)
+{
+	apple_drm_uninit(dev);
+}
+
+static const struct component_master_ops apple_drm_ops = {
+	.bind	= apple_drm_bind,
+	.unbind	= apple_drm_unbind,
+};
+
+static int add_dcp_components(struct device *dev,
+			      struct component_match **matchptr)
+{
+	struct device_node *np, *endpoint, *port;
+	int num = 0;
+
+	for_each_matching_node(np, apple_dcp_id_tbl) {
+		if (of_device_is_available(np)) {
+			drm_of_component_match_add(dev, matchptr,
+						   component_compare_of, np);
+			num++;
+			for_each_endpoint_of_node(np, endpoint) {
+				port = of_graph_get_remote_port_parent(endpoint);
+				if (!port)
+					continue;
+
+#if !IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+				if (of_device_is_compatible(port, "apple,dpaudio")) {
+					of_node_put(port);
+					continue;
+				}
+#endif
+				if (of_device_is_available(port))
+					drm_of_component_match_add(dev, matchptr,
+							   component_compare_of,
+							   port);
+				of_node_put(port);
+			}
+		}
+		of_node_put(np);
+	}
+
+	return num;
+}
+
+static int apple_platform_probe(struct platform_device *pdev)
+{
+	struct device *mdev = &pdev->dev;
+	struct component_match *match = NULL;
+	int num_dcp;
+
+	/* add DCP components, handle less than 1 as probe error */
+	num_dcp = add_dcp_components(mdev, &match);
+	if (num_dcp < 1)
+		return -ENODEV;
+
+	return component_master_add_with_match(mdev, &apple_drm_ops, match);
+}
+
+static void apple_platform_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &apple_drm_ops);
+}
+
+static const struct of_device_id of_match[] = {
+	{ .compatible = "apple,display-subsystem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int apple_platform_suspend(struct device *dev)
+{
+	struct apple_drm_private *apple = dev_get_drvdata(dev);
+
+	if (apple)
+		return drm_mode_config_helper_suspend(&apple->drm);
+
+	return 0;
+}
+
+static int apple_platform_resume(struct device *dev)
+{
+	struct apple_drm_private *apple = dev_get_drvdata(dev);
+
+	if (apple)
+		return drm_mode_config_helper_resume(&apple->drm);
+
+	return 0;
+}
+
+static const struct dev_pm_ops apple_platform_pm_ops = {
+	.suspend	= apple_platform_suspend,
+	.resume		= apple_platform_resume,
+};
+#endif
+
+static struct platform_driver apple_platform_driver = {
+	.driver	= {
+		.name = "apple-drm",
+		.of_match_table	= of_match,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &apple_platform_pm_ops,
+#endif
+	},
+	.probe		= apple_platform_probe,
+	.remove		= apple_platform_remove,
+};
+
+drm_module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/audio.c b/drivers/gpu/drm/apple/audio.c
new file mode 100644
index 00000000000000..38718e2f56117b
--- /dev/null
+++ b/drivers/gpu/drm/apple/audio.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * DCP Audio Bits
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * TODO:
+ *  - figure out some nice identification of the sound card (in case
+ *    there are many DCP instances)
+ */
+
+#define DEBUG
+
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/of_dma.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/jack.h>
+
+#include "av.h"
+#include "dcp.h"
+#include "audio.h"
+#include "parser.h"
+
+#define DCPAUD_ELEMENTS_MAXSIZE		16384
+#define DCPAUD_PRODUCTATTRS_MAXSIZE	1024
+
+struct dcp_audio {
+	struct device *dev;
+	struct device *dcp_dev;
+	struct device *dma_dev;
+	struct device_link *dma_link;
+	struct dma_chan *chan;
+	struct snd_card *card;
+	struct snd_jack *jack;
+	struct snd_pcm_substream *substream;
+	unsigned int open_cookie;
+
+	struct mutex data_lock;
+	bool dcp_connected; /* DCP status, kept for delayed initialization */
+	bool connected;
+	unsigned int connection_cookie;
+
+	struct snd_pcm_chmap_elem selected_chmap;
+	struct dcp_sound_cookie selected_cookie;
+	void *elements;
+	void *productattrs;
+
+	struct snd_pcm_chmap *chmap_info;
+};
+
+static const struct snd_pcm_hardware dcp_pcm_hw = {
+	.info	 = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+		   SNDRV_PCM_INFO_INTERLEAVED,
+	.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_LE |
+		   SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
+	.rates			= SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min		= 0,
+	.rate_max		= UINT_MAX,
+	.channels_min		= 1,
+	.channels_max		= 16,
+	.buffer_bytes_max	= SIZE_MAX,
+	.period_bytes_min	= 4096, /* TODO */
+	.period_bytes_max	= SIZE_MAX,
+	.periods_min		= 2,
+	.periods_max		= UINT_MAX,
+};
+
+static int dcpaud_read_remote_info(struct dcp_audio *dcpaud)
+{
+	int ret;
+
+	ret = dcp_audiosrv_get_elements(dcpaud->dcp_dev, dcpaud->elements,
+					DCPAUD_ELEMENTS_MAXSIZE);
+	if (ret < 0)
+		return ret;
+
+	ret = dcp_audiosrv_get_product_attrs(dcpaud->dcp_dev, dcpaud->productattrs,
+					     DCPAUD_PRODUCTATTRS_MAXSIZE);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int dcpaud_interval_bitmask(struct snd_interval *i,
+				   unsigned int mask)
+{
+	struct snd_interval range;
+	if (!mask)
+		return -EINVAL;
+
+	snd_interval_any(&range);
+	range.min = __ffs(mask);
+	range.max = __fls(mask);
+	return snd_interval_refine(i, &range);
+}
+
+extern const struct snd_pcm_hw_constraint_list snd_pcm_known_rates;
+
+static void dcpaud_fill_fmt_sieve(struct snd_pcm_hw_params *params,
+				  struct dcp_sound_format_mask *sieve)
+{
+	struct snd_interval *c = hw_param_interval(params,
+				SNDRV_PCM_HW_PARAM_CHANNELS);
+	struct snd_interval *r = hw_param_interval(params,
+				SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_mask *f = hw_param_mask(params,
+				SNDRV_PCM_HW_PARAM_FORMAT);
+	int i;
+
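+	/*
+	 * Translate the ALSA hw_params ranges into the bitmask representation
+	 * that the DCP sound element parser consumes.
+	 */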
+	sieve->nchans = GENMASK(c->max, c->min);
+	sieve->formats = f->bits[0] | ((u64) f->bits[1]) << 32; /* TODO: don't open-code */
+
+	for (i = 0; i < snd_pcm_known_rates.count; i++) {
+		unsigned int rate = snd_pcm_known_rates.list[i];
+
+		if (snd_interval_test(r, rate))
+			sieve->rates |= 1u << i;
+	}
+}
+
+static void dcpaud_consult_elements(struct dcp_audio *dcpaud,
+				    struct snd_pcm_hw_params *params,
+				    struct dcp_sound_format_mask *hits)
+{
+	struct dcp_sound_format_mask sieve;
+	struct dcp_parse_ctx elements = {
+		.dcp = dev_get_drvdata(dcpaud->dcp_dev),
+		.blob = dcpaud->elements + 4,
+		.len = DCPAUD_ELEMENTS_MAXSIZE - 4,
+		.pos = 0,
+	};
+
+	dcpaud_fill_fmt_sieve(params, &sieve);
+	dev_dbg(dcpaud->dev, "elements in: %llx %x %x\n", sieve.formats, sieve.nchans, sieve.rates);
+	parse_sound_constraints(&elements, &sieve, hits);
+	dev_dbg(dcpaud->dev, "elements out: %llx %x %x\n", hits->formats, hits->nchans, hits->rates);
+}
+
+static int dcpaud_select_cookie(struct dcp_audio *dcpaud,
+				 struct snd_pcm_hw_params *params)
+{
+	struct dcp_sound_format_mask sieve;
+	struct dcp_parse_ctx elements = {
+		.dcp = dev_get_drvdata(dcpaud->dcp_dev),
+		.blob = dcpaud->elements + 4,
+		.len = DCPAUD_ELEMENTS_MAXSIZE - 4,
+		.pos = 0,
+	};
+
+	dcpaud_fill_fmt_sieve(params, &sieve);
+	return parse_sound_mode(&elements, &sieve, &dcpaud->selected_chmap,
+				&dcpaud->selected_cookie);
+}
+
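+/*
+ * The three hw_params rules below are interdependent: channels, format and
+ * rate are each refined against the DCP element list while the other two
+ * parameters act as constraints.
+ */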
+static int dcpaud_rule_channels(struct snd_pcm_hw_params *params,
+                                struct snd_pcm_hw_rule *rule)
+{
+	struct dcp_audio *dcpaud = rule->private;
+	struct snd_interval *c = hw_param_interval(params,
+				SNDRV_PCM_HW_PARAM_CHANNELS);
+	struct dcp_sound_format_mask hits = {0, 0, 0};
+
+	dcpaud_consult_elements(dcpaud, params, &hits);
+
+	return dcpaud_interval_bitmask(c, hits.nchans);
+}
+
+static int dcpaud_refine_fmt_mask(struct snd_mask *m, u64 mask)
+{
+	struct snd_mask mask_mask;
+
+	if (!mask)
+		return -EINVAL;
+	mask_mask.bits[0] = mask;
+	mask_mask.bits[1] = mask >> 32;
+
+	return snd_mask_refine(m, &mask_mask);
+}
+
+static int dcpaud_rule_format(struct snd_pcm_hw_params *params,
+                               struct snd_pcm_hw_rule *rule)
+{
+	struct dcp_audio *dcpaud = rule->private;
+	struct snd_mask *f = hw_param_mask(params,
+				SNDRV_PCM_HW_PARAM_FORMAT);
+	struct dcp_sound_format_mask hits = {0, 0, 0};
+
+	dcpaud_consult_elements(dcpaud, params, &hits);
+
+	return dcpaud_refine_fmt_mask(f, hits.formats);
+}
+
+static int dcpaud_rule_rate(struct snd_pcm_hw_params *params,
+                             struct snd_pcm_hw_rule *rule)
+{
+	struct dcp_audio *dcpaud = rule->private;
+	struct snd_interval *r = hw_param_interval(params,
+				SNDRV_PCM_HW_PARAM_RATE);
+	struct dcp_sound_format_mask hits = {0, 0, 0};
+
+	dcpaud_consult_elements(dcpaud, params, &hits);
+
+	return snd_interval_rate_bits(r, hits.rates);
+}
+
+static int dcpaud_init_dma(struct dcp_audio *dcpaud)
+{
+	struct dma_chan *chan;
+
+	if (dcpaud->chan)
+		return 0;
+
+	chan = of_dma_request_slave_channel(dcpaud->dev->of_node, "tx");
+	/* squelch dma channel request errors, the driver will try again later */
+	if (!chan) {
+		dev_warn(dcpaud->dev, "audio TX DMA channel request failed\n");
+		return -ENXIO;
+	} else if (chan == ERR_PTR(-EPROBE_DEFER)) {
+		dev_info(dcpaud->dev, "audio TX DMA channel is not ready yet\n");
+		return -ENXIO;
+	} else if (IS_ERR(chan)) {
+		dev_warn(dcpaud->dev, "audio TX DMA channel request failed: %ld\n", PTR_ERR(chan));
+		return PTR_ERR(chan);
+	}
+	dcpaud->chan = chan;
+
+	snd_pcm_set_managed_buffer(dcpaud->substream, SNDRV_DMA_TYPE_DEV_IRAM,
+				   dcpaud->chan->device->dev, 1024 * 1024,
+				   SIZE_MAX);
+
+	return 0;
+}
+
+static int dcp_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct dcp_audio *dcpaud = substream->pcm->private_data;
+	struct snd_dmaengine_dai_dma_data dma_data = {
+		.flags = SND_DMAENGINE_PCM_DAI_FLAG_PACK,
+	};
+	struct snd_pcm_hardware hw;
+	int ret;
+
+	mutex_lock(&dcpaud->data_lock);
+	ret = dcpaud_init_dma(dcpaud);
+	if (ret < 0) {
+		mutex_unlock(&dcpaud->data_lock);
+		return ret;
+	}
+
+	if (!dcpaud->connected) {
+		mutex_unlock(&dcpaud->data_lock);
+		return -ENXIO;
+	}
+	dcpaud->open_cookie = dcpaud->connection_cookie;
+	mutex_unlock(&dcpaud->data_lock);
+
+	ret = dcpaud_read_remote_info(dcpaud);
+	if (ret < 0)
+		return ret;
+
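+	/*
+	 * Constrain format, channel count and rate to whatever the element
+	 * list advertises for the currently connected sink.
+	 */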
+	snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+			    dcpaud_rule_format, dcpaud,
+			    SNDRV_PCM_HW_PARAM_CHANNELS, SNDRV_PCM_HW_PARAM_RATE, -1);
+	snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+			    dcpaud_rule_channels, dcpaud,
+			    SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_RATE, -1);
+	snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+			    dcpaud_rule_rate, dcpaud,
+			    SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+
+	hw = dcp_pcm_hw;
+	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+			  SNDRV_PCM_INFO_INTERLEAVED;
+	hw.periods_min = 2;
+	hw.periods_max = UINT_MAX;
+	hw.period_bytes_min = 256;
+	hw.period_bytes_max = SIZE_MAX; // TODO dma_get_max_seg_size(dma_dev);
+	hw.buffer_bytes_max = SIZE_MAX;
+	hw.fifo_size = 16;
+	ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
+							&hw, dcpaud->chan);
+	if (ret)
+		return ret;
+	substream->runtime->hw = hw;
+
+	return snd_dmaengine_pcm_open(substream, dcpaud->chan);
+}
+
+static int dcp_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct dcp_audio *dcpaud = substream->pcm->private_data;
+	dcpaud->selected_chmap.channels = 0;
+
+	return snd_dmaengine_pcm_close(substream);
+}
+
+static int dcpaud_connection_up(struct dcp_audio *dcpaud)
+{
+	bool ret;
+	mutex_lock(&dcpaud->data_lock);
+	ret = dcpaud->connected &&
+	      dcpaud->open_cookie == dcpaud->connection_cookie;
+	mutex_unlock(&dcpaud->data_lock);
+	return ret;
+}
+
+static int dcp_pcm_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct dcp_audio *dcpaud = substream->pcm->private_data;
+	struct dma_slave_config slave_config;
+	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+	int ret;
+
+	if (!dcpaud_connection_up(dcpaud))
+		return -ENXIO;
+
+	ret = dcpaud_select_cookie(dcpaud, params);
+	if (ret < 0)
+		return ret;
+	if (!ret)
+		return -EINVAL;
+
+	memset(&slave_config, 0, sizeof(slave_config));
+	ret = snd_hwparams_to_dma_slave_config(substream, params, &slave_config);
+	dev_info(dcpaud->dev, "snd_hwparams_to_dma_slave_config: %d\n", ret);
+	if (ret < 0)
+		return ret;
+
+	slave_config.direction = DMA_MEM_TO_DEV;
+	/*
+	 * The data entry from the DMA controller to the DPA peripheral
+	 * is 32-bit wide no matter the actual sample size.
+	 */
+	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	ret = dmaengine_slave_config(chan, &slave_config);
+	dev_info(dcpaud->dev, "dmaengine_slave_config: %d\n", ret);
+	return ret;
+}
+
+static int dcp_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	struct dcp_audio *dcpaud = substream->pcm->private_data;
+
+	if (!dcpaud_connection_up(dcpaud))
+		return 0;
+
+	return dcp_audiosrv_unprepare(dcpaud->dcp_dev);
+}
+
+static int dcp_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct dcp_audio *dcpaud = substream->pcm->private_data;
+
+	if (!dcpaud_connection_up(dcpaud))
+		return -ENXIO;
+
+	return dcp_audiosrv_prepare(dcpaud->dcp_dev,
+				    &dcpaud->selected_cookie);
+}
+
+static int dcp_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct dcp_audio *dcpaud = substream->pcm->private_data;
+	int ret;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		if (!dcpaud_connection_up(dcpaud))
+			return -ENXIO;
+
+		WARN_ON(pm_runtime_get_sync(dcpaud->dev) < 0);
+		ret = dcp_audiosrv_startlink(dcpaud->dcp_dev,
+					     &dcpaud->selected_cookie);
+		if (ret < 0)
+			return ret;
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_dmaengine_pcm_trigger(substream, cmd);
+	if (ret < 0)
+		return ret;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		ret = dcp_audiosrv_stoplink(dcpaud->dcp_dev);
+		pm_runtime_mark_last_busy(dcpaud->dev);
+		__pm_runtime_put_autosuspend(dcpaud->dev);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_pcm_ops dcp_playback_ops = {
+	.open = dcp_pcm_open,
+	.close = dcp_pcm_close,
+	.hw_params = dcp_pcm_hw_params,
+	.hw_free = dcp_pcm_hw_free,
+	.prepare = dcp_pcm_prepare,
+	.trigger = dcp_pcm_trigger,
+	.pointer = snd_dmaengine_pcm_pointer,
+};
+
+// Transitional workaround: for the chmap control TLV, advertise options
+// copied from hdmi-codec.c
+#include "hdmi-codec-chmap.h"
+
+static int dcpaud_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+			        struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+	struct dcp_audio *dcpaud = info->private_data;
+	unsigned int i;
+
+	for (i = 0; i < info->max_channels; i++)
+		ucontrol->value.integer.value[i] =
+			(i < dcpaud->selected_chmap.channels) ?
+			dcpaud->selected_chmap.map[i] : SNDRV_CHMAP_UNKNOWN;
+
+	return 0;
+}
+
+static int dcpaud_create_chmap_ctl(struct dcp_audio *dcpaud)
+{
+	struct snd_pcm *pcm = dcpaud->substream->pcm;
+	struct snd_pcm_chmap *chmap_info;
+	int ret;
+
+	ret = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, NULL,
+				     dcp_pcm_hw.channels_max, 0, &chmap_info);
+	if (ret < 0)
+		return ret;
+
+	chmap_info->kctl->get = dcpaud_chmap_ctl_get;
+	chmap_info->chmap = hdmi_codec_8ch_chmaps;
+	chmap_info->private_data = dcpaud;
+
+	return 0;
+}
+
+static int dcpaud_create_pcm(struct dcp_audio *dcpaud)
+{
+	struct snd_card *card = dcpaud->card;
+	struct snd_pcm *pcm;
+	int ret;
+
+#define NUM_PLAYBACK 1
+#define NUM_CAPTURE 0
+
+	ret = snd_pcm_new(card, card->shortname, 0, NUM_PLAYBACK, NUM_CAPTURE, &pcm);
+	if (ret)
+		return ret;
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &dcp_playback_ops);
+	dcpaud->substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	pcm->nonatomic = true;
+	pcm->private_data = dcpaud;
+	strscpy(pcm->name, card->shortname, sizeof(pcm->name));
+
+	return 0;
+}
+
+/* expects to be called with data_lock locked and unlocks it */
+static void dcpaud_report_hotplug(struct dcp_audio *dcpaud, bool connected)
+{
+	struct snd_pcm_substream *substream = dcpaud->substream;
+
+	if (!dcpaud->card || dcpaud->connected == connected) {
+		mutex_unlock(&dcpaud->data_lock);
+		return;
+	}
+
+	dcpaud->connected = connected;
+	if (connected)
+		dcpaud->connection_cookie++;
+	mutex_unlock(&dcpaud->data_lock);
+
+	snd_jack_report(dcpaud->jack, connected ? SND_JACK_AVOUT : 0);
+
+	if (!connected) {
+		snd_pcm_stream_lock(substream);
+		if (substream->runtime)
+			snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+		snd_pcm_stream_unlock(substream);
+	}
+}
+
+static int dcpaud_create_jack(struct dcp_audio *dcpaud)
+{
+	struct snd_card *card = dcpaud->card;
+
+	return snd_jack_new(card, "HDMI/DP", SND_JACK_AVOUT,
+			    &dcpaud->jack, true, false);
+}
+
+static void dcpaud_set_card_names(struct dcp_audio *dcpaud)
+{
+	struct snd_card *card = dcpaud->card;
+
+	strscpy(card->driver, "apple_dcp", sizeof(card->driver));
+	strscpy(card->longname, "Apple DisplayPort", sizeof(card->longname));
+	strscpy(card->shortname, "Apple DisplayPort", sizeof(card->shortname));
+}
+
+#ifdef CONFIG_SND_DEBUG
+static void dcpaud_expose_debugfs_blob(struct dcp_audio *dcpaud, const char *name, void *base, size_t size)
+{
+	struct debugfs_blob_wrapper *wrapper;
+	wrapper = devm_kzalloc(dcpaud->dev, sizeof(*wrapper), GFP_KERNEL);
+	if (!wrapper)
+		return;
+	wrapper->data = base;
+	wrapper->size = size;
+	debugfs_create_blob(name, 0600, dcpaud->card->debugfs_root, wrapper);
+}
+#else
+static void dcpaud_expose_debugfs_blob(struct dcp_audio *dcpaud, const char *name, void *base, size_t size) {}
+#endif
+
+extern bool hdmi_audio;
+
+static int dcpaud_init_snd_card(struct dcp_audio *dcpaud)
+{
+	int ret;
+
+	if (!hdmi_audio)
+		return -ENODEV;
+
+	ret = snd_card_new(dcpaud->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			   THIS_MODULE, 0, &dcpaud->card);
+	if (ret)
+		return ret;
+
+	dcpaud_set_card_names(dcpaud);
+
+	ret = dcpaud_create_pcm(dcpaud);
+	if (ret)
+		goto err_free_card;
+
+	ret = dcpaud_create_chmap_ctl(dcpaud);
+	if (ret)
+		goto err_free_card;
+
+	ret = dcpaud_create_jack(dcpaud);
+	if (ret)
+		goto err_free_card;
+
+	ret = snd_card_register(dcpaud->card);
+	if (ret)
+		goto err_free_card;
+
+	return 0;
+err_free_card:
+	dev_warn(dcpaud->dev, "Failed to initialize sound card: %d\n", ret);
+	snd_card_free(dcpaud->card);
+	dcpaud->card = NULL;
+	return ret;
+}
+
+void dcpaud_connect(struct platform_device *pdev, bool connected)
+{
+	struct dcp_audio *dcpaud = platform_get_drvdata(pdev);
+
+	mutex_lock(&dcpaud->data_lock);
+
+	dcpaud_report_hotplug(dcpaud, connected);
+}
+
+void dcpaud_disconnect(struct platform_device *pdev)
+{
+	struct dcp_audio *dcpaud = platform_get_drvdata(pdev);
+
+	mutex_lock(&dcpaud->data_lock);
+
+	dcpaud_report_hotplug(dcpaud, false);
+}
+
+static int dcpaud_comp_bind(struct device *dev, struct device *main, void *data)
+{
+	struct dcp_audio *dcpaud = dev_get_drvdata(dev);
+	struct device_node *endpoint, *dcp_node = NULL;
+	struct platform_device *dcp_pdev, *dma_pdev;
+	struct of_phandle_args dma_spec;
+	int index;
+	int ret;
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+
+	ret = devm_pm_runtime_enable(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
+	/* find linked DCP instance */
+	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+	if (endpoint) {
+		dcp_node = of_graph_get_remote_port_parent(endpoint);
+		of_node_put(endpoint);
+	}
+	if (!dcp_node || !of_device_is_available(dcp_node)) {
+		of_node_put(dcp_node);
+		dev_info(dev, "No audio support\n");
+		goto rpm_put;
+	}
+
+	index = of_property_match_string(dev->of_node, "dma-names", "tx");
+	if (index < 0) {
+		dev_err(dev, "No dma-names property\n");
+		goto rpm_put;
+	}
+
+	if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells", index,
+				       &dma_spec) || !dma_spec.np) {
+		dev_err(dev, "Failed to parse dmas property\n");
+		goto rpm_put;
+	}
+
+	dcp_pdev = of_find_device_by_node(dcp_node);
+	of_node_put(dcp_node);
+	if (!dcp_pdev) {
+		dev_info(dev, "No DP/HDMI audio device, dcp not ready\n");
+		goto rpm_put;
+	}
+	dcpaud->dcp_dev = &dcp_pdev->dev;
+
+	dma_pdev = of_find_device_by_node(dma_spec.np);
+	of_node_put(dma_spec.np);
+	if (!dma_pdev) {
+		dev_info(dev, "No DMA device\n");
+		goto rpm_put;
+	}
+	dcpaud->dma_dev = &dma_pdev->dev;
+
+	dcpaud->dma_link = device_link_add(dev, dcpaud->dma_dev,
+					   DL_FLAG_PM_RUNTIME |
+					   DL_FLAG_RPM_ACTIVE |
+					   DL_FLAG_STATELESS);
+
+	/* ignore errors to prevent audio issues affecting the display side */
+	ret = dcpaud_init_snd_card(dcpaud);
+
+	if (!ret) {
+		dcpaud_expose_debugfs_blob(dcpaud, "selected_cookie", &dcpaud->selected_cookie,
+					sizeof(dcpaud->selected_cookie));
+		dcpaud_expose_debugfs_blob(dcpaud, "elements", dcpaud->elements,
+					DCPAUD_ELEMENTS_MAXSIZE);
+		dcpaud_expose_debugfs_blob(dcpaud, "product_attrs", dcpaud->productattrs,
+					DCPAUD_PRODUCTATTRS_MAXSIZE);
+	}
+
+rpm_put:
+	pm_runtime_put(dev);
+
+	return 0;
+}
+
+static void dcpaud_comp_unbind(struct device *dev, struct device *main,
+			       void *data)
+{
+	struct dcp_audio *dcpaud = dev_get_drvdata(dev);
+
+	/* snd_card_free_when_closed() checks for NULL */
+	snd_card_free_when_closed(dcpaud->card);
+
+	if (dcpaud->dma_link)
+		device_link_del(dcpaud->dma_link);
+}
+
+static const struct component_ops dcpaud_comp_ops = {
+	.bind	= dcpaud_comp_bind,
+	.unbind	= dcpaud_comp_unbind,
+};
+
+static int dcpaud_probe(struct platform_device *pdev)
+{
+	struct dcp_audio *dcpaud;
+
+	dcpaud = devm_kzalloc(&pdev->dev, sizeof(*dcpaud), GFP_KERNEL);
+	if (!dcpaud)
+		return -ENOMEM;
+
+	dcpaud->elements = devm_kzalloc(&pdev->dev, DCPAUD_ELEMENTS_MAXSIZE,
+					GFP_KERNEL);
+	if (!dcpaud->elements)
+		return -ENOMEM;
+
+	dcpaud->productattrs = devm_kzalloc(&pdev->dev, DCPAUD_PRODUCTATTRS_MAXSIZE,
+					    GFP_KERNEL);
+	if (!dcpaud->productattrs)
+		return -ENOMEM;
+
+	dcpaud->dev = &pdev->dev;
+	mutex_init(&dcpaud->data_lock);
+	platform_set_drvdata(pdev, dcpaud);
+
+	return component_add(&pdev->dev, &dcpaud_comp_ops);
+}
+
+static void dcpaud_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dcpaud_comp_ops);
+}
+
+static void dcpaud_shutdown(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dcpaud_comp_ops);
+}
+
+static __maybe_unused int dcpaud_suspend(struct device *dev)
+{
+	/*
+	 * Using snd_power_change_state() does not work since the sound card
+	 * is what resumes runtime PM.
+	 */
+
+	return 0;
+}
+
+static __maybe_unused int dcpaud_resume(struct device *dev)
+{
+	return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(dcpaud_pm_ops, dcpaud_suspend, dcpaud_resume, NULL);
+
+static const struct of_device_id dcpaud_of_match[] = {
+	{ .compatible = "apple,dpaudio" },
+	{}
+};
+
+static struct platform_driver dcpaud_driver = {
+	.driver = {
+		.name = "dcp-dp-audio",
+		.of_match_table	= dcpaud_of_match,
+		.pm		= pm_ptr(&dcpaud_pm_ops),
+	},
+	.probe		= dcpaud_probe,
+	.remove		= dcpaud_remove,
+	.shutdown	= dcpaud_shutdown,
+};
+
+void __init dcp_audio_register(void)
+{
+	platform_driver_register(&dcpaud_driver);
+}
+
+void __exit dcp_audio_unregister(void)
+{
+	platform_driver_unregister(&dcpaud_driver);
+}
diff --git a/drivers/gpu/drm/apple/audio.h b/drivers/gpu/drm/apple/audio.h
new file mode 100644
index 00000000000000..83b990dc6c343f
--- /dev/null
+++ b/drivers/gpu/drm/apple/audio.h
@@ -0,0 +1,20 @@
+#ifndef __AUDIO_H__
+#define __AUDIO_H__
+
+#include <linux/types.h>
+
+struct device;
+struct platform_device;
+struct dcp_sound_cookie;
+
+int dcp_audiosrv_prepare(struct device *dev, struct dcp_sound_cookie *cookie);
+int dcp_audiosrv_startlink(struct device *dev, struct dcp_sound_cookie *cookie);
+int dcp_audiosrv_stoplink(struct device *dev);
+int dcp_audiosrv_unprepare(struct device *dev);
+int dcp_audiosrv_get_elements(struct device *dev, void *elements, size_t maxsize);
+int dcp_audiosrv_get_product_attrs(struct device *dev, void *attrs, size_t maxsize);
+
+void dcpaud_connect(struct platform_device *pdev, bool connected);
+void dcpaud_disconnect(struct platform_device *pdev);
+
+#endif /* __AUDIO_H__ */
diff --git a/drivers/gpu/drm/apple/av.c b/drivers/gpu/drm/apple/av.c
new file mode 100644
index 00000000000000..0d3c752f62d5f5
--- /dev/null
+++ b/drivers/gpu/drm/apple/av.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2023 Martin Povišer <povik+lin@cutebit.org> */
+
+// #define DEBUG
+
+#include <linux/debugfs.h>
+#include <linux/kconfig.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "audio.h"
+#include "afk.h"
+#include "av.h"
+#include "dcp.h"
+#include "dcp-internal.h"
+
+struct dcp_av_audio_cmds {
+	/* commands in group 0 */
+	u32 open;
+	u32 close;
+	u32 prepare;
+	u32 start_link;
+	u32 stop_link;
+	u32 unprepare;
+	/* commands in group 1 */
+	u32 get_elements;
+	u32 get_product_attrs;
+};
+
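+/* The audio endpoint command numbers differ between DCP firmware versions. */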
+static const struct dcp_av_audio_cmds dcp_av_audio_cmds_v12_3 = {
+	.open = 6,
+	.close = 7,
+	.prepare = 8,
+	.start_link = 9,
+	.stop_link = 12,
+	.unprepare = 13,
+	.get_elements = 18,
+	.get_product_attrs = 20,
+};
+
+static const struct dcp_av_audio_cmds dcp_av_audio_cmds_v13_5 = {
+	.open = 4,
+	.close = 5,
+	.prepare = 6,
+	.start_link = 7,
+	.stop_link = 10,
+	.unprepare = 11,
+	.get_elements = 16,
+	.get_product_attrs = 18,
+};
+
+struct audiosrv_data {
+	struct platform_device *audio_dev;
+	bool plugged;
+	struct mutex plug_lock;
+
+	struct apple_epic_service *srv;
+	struct rw_semaphore srv_rwsem;
+	/* Workqueue for starting the audio service */
+	struct work_struct start_av_service_wq;
+
+	struct dcp_av_audio_cmds cmds;
+
+	bool warned_get_elements;
+	bool warned_get_product_attrs;
+	bool is_open;
+};
+
+static void av_interface_init(struct apple_epic_service *service, const char *name,
+			      const char *class, s64 unit)
+{
+}
+
+static void av_interface_teardown(struct apple_epic_service *service)
+{
+	struct apple_dcp *dcp = service->ep->dcp;
+	struct audiosrv_data *asrv = dcp->audiosrv;
+
+	service->enabled = false;
+
+	mutex_lock(&asrv->plug_lock);
+
+	asrv->plugged = false;
+	if (asrv->audio_dev)
+		dcpaud_disconnect(asrv->audio_dev);
+
+	mutex_unlock(&asrv->plug_lock);
+}
+
+static void av_audiosrv_init(struct apple_epic_service *service, const char *name,
+			     const char *class, s64 unit)
+{
+	struct apple_dcp *dcp = service->ep->dcp;
+	struct audiosrv_data *asrv = dcp->audiosrv;
+
+	mutex_lock(&asrv->plug_lock);
+
+	down_write(&asrv->srv_rwsem);
+	asrv->srv = service;
+	up_write(&asrv->srv_rwsem);
+
+	asrv->plugged = true;
+	mutex_unlock(&asrv->plug_lock);
+	schedule_work(&asrv->start_av_service_wq);
+}
+
+static void av_audiosrv_teardown(struct apple_epic_service *service)
+{
+	struct apple_dcp *dcp = service->ep->dcp;
+	struct audiosrv_data *asrv = dcp->audiosrv;
+
+	mutex_lock(&asrv->plug_lock);
+
+	down_write(&asrv->srv_rwsem);
+	asrv->srv = NULL;
+	up_write(&asrv->srv_rwsem);
+
+	asrv->plugged = false;
+	if (asrv->audio_dev)
+		dcpaud_disconnect(asrv->audio_dev);
+
+	mutex_unlock(&asrv->plug_lock);
+}
+
+int dcp_audiosrv_prepare(struct device *dev, struct dcp_sound_cookie *cookie)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	int ret;
+
+	down_write(&asrv->srv_rwsem);
+	ret = afk_service_call(asrv->srv, 0, asrv->cmds.prepare, cookie,
+			       sizeof(*cookie), 64 - sizeof(*cookie), NULL, 0,
+			       64);
+	up_write(&asrv->srv_rwsem);
+
+	return ret;
+}
+
+int dcp_audiosrv_startlink(struct device *dev, struct dcp_sound_cookie *cookie)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	int ret;
+
+	down_write(&asrv->srv_rwsem);
+	ret = afk_service_call(asrv->srv, 0, asrv->cmds.start_link, cookie,
+			       sizeof(*cookie), 64 - sizeof(*cookie), NULL, 0,
+			       64);
+	up_write(&asrv->srv_rwsem);
+
+	return ret;
+}
+
+int dcp_audiosrv_stoplink(struct device *dev)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	int ret;
+
+	down_write(&asrv->srv_rwsem);
+	ret = afk_service_call(asrv->srv, 0, asrv->cmds.stop_link, NULL, 0, 64,
+			       NULL, 0, 64);
+	up_write(&asrv->srv_rwsem);
+
+	return ret;
+}
+
+int dcp_audiosrv_unprepare(struct device *dev)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	int ret;
+
+	down_write(&asrv->srv_rwsem);
+	ret = afk_service_call(asrv->srv, 0, asrv->cmds.unprepare, NULL, 0, 64,
+			       NULL, 0, 64);
+	up_write(&asrv->srv_rwsem);
+
+	return ret;
+}
+
+static int
+dcp_audiosrv_osobject_call(struct apple_epic_service *service, u16 group,
+			   u32 command, void *output, size_t output_maxsize,
+			   size_t *output_size)
+{
+	struct {
+		__le64 max_size;
+		u8 _pad1[24];
+		__le64 used_size;
+		u8 _pad2[8];
+	} __attribute__((packed)) *hdr;
+	static_assert(sizeof(*hdr) == 48);
+	size_t bfr_len = output_maxsize + sizeof(*hdr);
+	void *bfr;
+	int ret;
+
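+	/*
+	 * OSObject-style reply: a 48-byte header announcing the buffer sizes,
+	 * followed by the serialized payload itself.
+	 */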
+	bfr = kzalloc(bfr_len, GFP_KERNEL);
+	if (!bfr)
+		return -ENOMEM;
+
+	hdr = bfr;
+	hdr->max_size = cpu_to_le64(output_maxsize);
+	ret = afk_service_call(service, group, command, hdr, sizeof(*hdr), output_maxsize,
+			       bfr, sizeof(*hdr) + output_maxsize, 0);
+	if (ret)
+		return ret;
+
+	if (output)
+		memcpy(output, bfr + sizeof(*hdr), output_maxsize);
+
+	if (output_size)
+		*output_size = le64_to_cpu(hdr->used_size);
+
+	return 0;
+}
+
+int dcp_audiosrv_get_elements(struct device *dev, void *elements, size_t maxsize)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	size_t size;
+	int ret;
+
+	down_write(&asrv->srv_rwsem);
+	ret = dcp_audiosrv_osobject_call(asrv->srv, 1, asrv->cmds.get_elements,
+					 elements, maxsize, &size);
+	up_write(&asrv->srv_rwsem);
+
+	if (ret && !asrv->warned_get_elements) {
+		dev_err(dev, "audiosrv: error getting elements: %d\n", ret);
+		asrv->warned_get_elements = true;
+	} else if (!ret) {
+		dev_dbg(dev, "audiosrv: got %zu bytes worth of elements\n", size);
+	}
+
+	return ret;
+}
+
+int dcp_audiosrv_get_product_attrs(struct device *dev, void *attrs, size_t maxsize)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	size_t size;
+	int ret;
+
+	down_write(&asrv->srv_rwsem);
+	ret = dcp_audiosrv_osobject_call(asrv->srv, 1,
+					 asrv->cmds.get_product_attrs, attrs,
+					 maxsize, &size);
+	up_write(&asrv->srv_rwsem);
+
+	if (ret && !asrv->warned_get_product_attrs) {
+		dev_err(dev, "audiosrv: error getting product attributes: %d\n", ret);
+		asrv->warned_get_product_attrs = true;
+	} else if (!ret) {
+		dev_dbg(dev, "audiosrv: got %zu bytes worth of product attributes\n", size);
+	}
+
+	return ret;
+}
+
+static int av_audiosrv_report(struct apple_epic_service *service, u32 idx,
+			      const void *data, size_t data_size)
+{
+	dev_dbg(service->ep->dcp->dev, "got audio report %u size %zx\n", idx, data_size);
+#ifdef DEBUG
+	print_hex_dump(KERN_DEBUG, "audio report: ", DUMP_PREFIX_NONE, 16, 1, data, data_size, true);
+#endif
+
+	return 0;
+}
+
+static const struct apple_epic_service_ops avep_ops[] = {
+	{
+		.name = "DCPAVSimpleVideoInterface",
+		.init = av_interface_init,
+		.teardown = av_interface_teardown,
+	},
+	{
+		.name = "DCPAVAudioInterface",
+		.init = av_audiosrv_init,
+		.report = av_audiosrv_report,
+		.teardown = av_audiosrv_teardown,
+	},
+	{}
+};
+
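+/*
+ * Open the DCPAVAudioInterface service and, if an audio device is attached,
+ * report the currently cached plug state to it.
+ */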
+void av_service_connect(struct apple_dcp *dcp)
+{
+	struct apple_epic_service *service;
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	int ret;
+
+	scoped_guard(rwsem_write, &asrv->srv_rwsem) {
+		if (!asrv->srv)
+			return;
+		service = asrv->srv;
+	}
+
+	/* open AV audio service */
+	dev_info(dcp->dev, "%s: starting audio service, plugged:%d\n", __func__,  asrv->plugged);
+	if (asrv->is_open)
+		return;
+
+	ret = afk_service_call(service, 0, asrv->cmds.open, NULL, 0, 32,
+			       NULL, 0, 32);
+	if (ret) {
+		dev_err(dcp->dev, "error opening audio service: %d\n", ret);
+		return;
+	}
+	mutex_lock(&asrv->plug_lock);
+	asrv->is_open = true;
+
+	if (asrv->audio_dev)
+		dcpaud_connect(asrv->audio_dev, asrv->plugged);
+	mutex_unlock(&asrv->plug_lock);
+}
+
+void av_service_disconnect(struct apple_dcp *dcp)
+{
+	struct apple_epic_service *service;
+	struct audiosrv_data *asrv = dcp->audiosrv;
+	int ret;
+
+	scoped_guard(rwsem_write, &asrv->srv_rwsem) {
+		if (!asrv->srv)
+			return;
+		service = asrv->srv;
+	}
+
+	/* close AV audio service */
+	dev_info(dcp->dev, "%s: stopping audio service\n", __func__);
+	if (!asrv->is_open)
+		return;
+
+	mutex_lock(&asrv->plug_lock);
+
+	if (asrv->audio_dev)
+		dcpaud_disconnect(asrv->audio_dev);
+
+	mutex_unlock(&asrv->plug_lock);
+
+	ret = afk_service_call(service, 0, asrv->cmds.close, NULL, 0, 16,
+			       NULL, 0, 16);
+	if (ret) {
+		dev_err(dcp->dev, "error closing audio service: %d\n", ret);
+	}
+	if (service->torndown)
+		service->enabled = false;
+	asrv->is_open = false;
+}
+
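+/* Deferred worker: open the audio service outside of the EPIC init callback */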
+static void av_work_service_start(struct work_struct *work)
+{
+	struct audiosrv_data *audiosrv_data;
+	struct apple_dcp *dcp;
+
+	audiosrv_data = container_of(work, struct audiosrv_data, start_av_service_wq);
+
+	scoped_guard(rwsem_read, &audiosrv_data->srv_rwsem) {
+		if (!audiosrv_data->srv ||
+		    !audiosrv_data->srv->ep ||
+		    !audiosrv_data->srv->ep->dcp) {
+			pr_err("%s: dcp: av: NULL ptr during startup\n", __func__);
+			return;
+		}
+		dcp = audiosrv_data->srv->ep->dcp;
+	}
+
+	av_service_connect(dcp);
+}
+
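+/*
+ * Set up the AV endpoint: pick the command table for the firmware version,
+ * look up the optional DP/HDMI audio device via the OF graph and start the
+ * AFK endpoint carrying the DCPAV* EPIC services.
+ */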
+int avep_init(struct apple_dcp *dcp)
+{
+	struct audiosrv_data *audiosrv_data;
+	struct platform_device *audio_pdev;
+	struct device *dev = dcp->dev;
+	struct device_node *endpoint, *audio_node = NULL;
+
+	audiosrv_data = devm_kzalloc(dcp->dev, sizeof(*audiosrv_data), GFP_KERNEL);
+	if (!audiosrv_data)
+		return -ENOMEM;
+	init_rwsem(&audiosrv_data->srv_rwsem);
+	mutex_init(&audiosrv_data->plug_lock);
+
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		audiosrv_data->cmds = dcp_av_audio_cmds_v12_3;
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		audiosrv_data->cmds = dcp_av_audio_cmds_v13_5;
+		break;
+	default:
+		dev_err(dcp->dev, "Audio not supported for firmware\n");
+		return -ENODEV;
+	}
+
+	dcp->audiosrv = audiosrv_data;
+	INIT_WORK(&audiosrv_data->start_av_service_wq, av_work_service_start);
+
+	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+	if (endpoint) {
+		audio_node = of_graph_get_remote_port_parent(endpoint);
+		of_node_put(endpoint);
+	}
+	if (!audio_node || !of_device_is_available(audio_node)) {
+		of_node_put(audio_node);
+		dev_info(dev, "No audio support\n");
+		return 0;
+	}
+
+	audio_pdev = of_find_device_by_node(audio_node);
+	of_node_put(audio_node);
+	if (!audio_pdev) {
+		dev_info(dev, "No DP/HDMI audio device not ready\n");
+		return 0;
+	}
+	dcp->audiosrv->audio_dev = audio_pdev;
+
+	device_link_add(&audio_pdev->dev, dev,
+			DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+
+	dcp->avep = afk_init(dcp, AV_ENDPOINT, avep_ops);
+	if (IS_ERR(dcp->avep))
+		return PTR_ERR(dcp->avep);
+	dcp->avep->debugfs_entry = dcp->ep_debugfs[AV_ENDPOINT - 0x20];
+	return afk_start(dcp->avep);
+}
diff --git a/drivers/gpu/drm/apple/av.h b/drivers/gpu/drm/apple/av.h
new file mode 100644
index 00000000000000..c00cbef549fd2e
--- /dev/null
+++ b/drivers/gpu/drm/apple/av.h
@@ -0,0 +1,17 @@
+#ifndef __AV_H__
+#define __AV_H__
+
+#include "parser.h"
+
+//int avep_audiosrv_startlink(struct apple_dcp *dcp, struct dcp_sound_cookie *cookie);
+//int avep_audiosrv_stoplink(struct apple_dcp *dcp);
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+void av_service_connect(struct apple_dcp *dcp);
+void av_service_disconnect(struct apple_dcp *dcp);
+#else
+static inline void av_service_connect(struct apple_dcp *dcp) { }
+static inline void av_service_disconnect(struct apple_dcp *dcp) { }
+#endif
+
+#endif /* __AV_H__ */
diff --git a/drivers/gpu/drm/apple/connector.c b/drivers/gpu/drm/apple/connector.c
new file mode 100644
index 00000000000000..9e786670893387
--- /dev/null
+++ b/drivers/gpu/drm/apple/connector.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include "connector.h"
+
+#include "linux/err.h"
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string_helpers.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_managed.h>
+
+#include "dcp-internal.h"
+
+enum dcp_chunk_type {
+	DCP_CHUNK_COLOR_ELEMENTS,
+	DCP_CHUNK_TIMING_ELEMENTS,
+	DCP_CHUNK_DISPLAY_ATTRIBUTES,
+	DCP_CHUNK_TRANSPORT,
+	DCP_CHUNK_NUM_TYPES,
+};
+
+static int chunk_show(struct seq_file *m,
+		      enum dcp_chunk_type chunk_type)
+{
+	struct apple_connector *apple_con = m->private;
+	struct dcp_chunks *chunk = NULL;
+
+	mutex_lock(&apple_con->chunk_lock);
+
+	switch (chunk_type) {
+	case DCP_CHUNK_COLOR_ELEMENTS:
+		chunk = &apple_con->color_elements;
+		break;
+	case DCP_CHUNK_TIMING_ELEMENTS:
+		chunk = &apple_con->timing_elements;
+		break;
+	case DCP_CHUNK_DISPLAY_ATTRIBUTES:
+		chunk = &apple_con->display_attributes;
+		break;
+	case DCP_CHUNK_TRANSPORT:
+		chunk = &apple_con->transport;
+		break;
+	default:
+		break;
+	}
+
+	if (chunk)
+		seq_write(m, chunk->data, chunk->length);
+
+	mutex_unlock(&apple_con->chunk_lock);
+
+	return 0;
+}
+
+#define CONNECTOR_DEBUGFS_ENTRY(name, type) \
+static int chunk_ ## name ## _show(struct seq_file *m, void *data) \
+{ \
+	return chunk_show(m, type); \
+} \
+static int chunk_ ## name ## _open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, chunk_ ## name ## _show, inode->i_private); \
+} \
+static const struct file_operations chunk_ ## name ## _fops = { \
+	.owner = THIS_MODULE, \
+	.open = chunk_ ## name ## _open, \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.release = single_release, \
+}
+
+CONNECTOR_DEBUGFS_ENTRY(color, DCP_CHUNK_COLOR_ELEMENTS);
+CONNECTOR_DEBUGFS_ENTRY(timing, DCP_CHUNK_TIMING_ELEMENTS);
+CONNECTOR_DEBUGFS_ENTRY(display_attribs, DCP_CHUNK_DISPLAY_ATTRIBUTES);
+CONNECTOR_DEBUGFS_ENTRY(transport, DCP_CHUNK_TRANSPORT);
+
+static void dcp_afk_debugfs_root(struct platform_device *pdev, int ep, struct dentry *root)
+{
+#if IS_ENABLED(CONFIG_DRM_APPLE_DEBUG)
+	struct dentry *entry = NULL;
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	switch (ep) {
+	case AV_ENDPOINT:
+		entry = debugfs_create_dir("avep", root);
+		break;
+	default:
+		break;
+	}
+
+	if (!IS_ERR_OR_NULL(entry))
+		dcp->ep_debugfs[ep - 0x20] = entry;
+#endif
+}
+
+void apple_connector_debugfs_init(struct drm_connector *connector, struct dentry *root)
+{
+	struct apple_connector *apple_con = to_apple_connector(connector);
+
+        debugfs_create_file("ColorElements", 0444, root, apple_con,
+                            &chunk_color_fops);
+        debugfs_create_file("TimingElements", 0444, root, apple_con,
+                            &chunk_timing_fops);
+        debugfs_create_file("DisplayAttributes", 0444, root, apple_con,
+                            &chunk_display_attribs_fops);
+        debugfs_create_file("Transport", 0444, root, apple_con,
+                            &chunk_transport_fops);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_HDMIA:
+		dcp_afk_debugfs_root(apple_con->dcp, AV_ENDPOINT, root);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(apple_connector_debugfs_init);
+
+static void dcp_connector_set_dict(struct apple_connector *connector,
+				   struct dcp_chunks *dict,
+				   struct dcp_chunks *chunks)
+{
+	if (dict->data)
+		devm_kfree(&connector->dcp->dev, dict->data);
+
+	*dict = *chunks;
+}
+
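+/*
+ * Store a property chunk received from the DCP under its key. The connector
+ * takes ownership of the buffer and the caller's descriptor is cleared so the
+ * data cannot be freed twice.
+ */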
+void dcp_connector_update_dict(struct apple_connector *connector, const char *key,
+			       struct dcp_chunks *chunks)
+{
+	mutex_lock(&connector->chunk_lock);
+	if (!strcmp(key, "ColorElements"))
+		dcp_connector_set_dict(connector, &connector->color_elements, chunks);
+	else if (!strcmp(key, "TimingElements"))
+		dcp_connector_set_dict(connector, &connector->timing_elements, chunks);
+	else if (!strcmp(key, "DisplayAttributes"))
+		dcp_connector_set_dict(connector, &connector->display_attributes, chunks);
+	else if (!strcmp(key, "Transport"))
+		dcp_connector_set_dict(connector, &connector->transport, chunks);
+
+	chunks->data = NULL;
+	chunks->length = 0;
+
+	mutex_unlock(&connector->chunk_lock);
+}
diff --git a/drivers/gpu/drm/apple/connector.h b/drivers/gpu/drm/apple/connector.h
new file mode 100644
index 00000000000000..ff643552c77d4c
--- /dev/null
+++ b/drivers/gpu/drm/apple/connector.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_CONNECTOR_H__
+#define __APPLE_CONNECTOR_H__
+
+#include <linux/workqueue.h>
+
+#include <drm/drm_atomic.h>
+#include "drm/drm_connector.h"
+#include "drm/drm_edid.h"
+
+struct apple_connector;
+
+#include "dcp-internal.h"
+
+void dcp_hotplug(struct work_struct *work);
+
+struct apple_connector {
+	struct drm_connector base;
+	bool connected;
+
+	struct platform_device *dcp;
+
+	const struct drm_edid *drm_edid;
+
+	/* Workqueue for sending hotplug events to the associated device */
+	struct work_struct hotplug_wq;
+
+	struct mutex chunk_lock;
+
+	struct dcp_chunks color_elements;
+	struct dcp_chunks timing_elements;
+	struct dcp_chunks display_attributes;
+	struct dcp_chunks transport;
+};
+
+#define to_apple_connector(x) container_of(x, struct apple_connector, base)
+
+void apple_connector_debugfs_init(struct drm_connector *connector, struct dentry *root);
+
+void dcp_connector_update_dict(struct apple_connector *connector, const char *key,
+			       struct dcp_chunks *chunks);
+#endif /* __APPLE_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/apple/dcp-internal.h b/drivers/gpu/drm/apple/dcp-internal.h
new file mode 100644
index 00000000000000..2c31d2a8cef09d
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp-internal.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_INTERNAL_H__
+#define __APPLE_DCP_INTERNAL_H__
+
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/mux/consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "dptxep.h"
+#include "iomfb.h"
+#include "iomfb_v12_3.h"
+#include "iomfb_v13_3.h"
+#include "epic/dpavservep.h"
+
+#define DCP_MAX_PLANES 2
+
+struct apple_dcp;
+struct apple_dcp_afkep;
+
+struct dcpav_service_epic;
+
+enum dcp_firmware_version {
+	DCP_FIRMWARE_UNKNOWN,
+	DCP_FIRMWARE_V_12_3,
+	DCP_FIRMWARE_V_13_5,
+};
+
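+/* RTKit endpoints exposed by the DCP coprocessor firmware */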
+enum {
+	SYSTEM_ENDPOINT = 0x20,
+	TEST_ENDPOINT = 0x21,
+	DCP_EXPERT_ENDPOINT = 0x22,
+	DISP0_ENDPOINT = 0x23,
+	DPAVSERV_ENDPOINT = 0x28,
+	AV_ENDPOINT = 0x29,
+	DPTX_ENDPOINT = 0x2a,
+	HDCP_ENDPOINT = 0x2b,
+	REMOTE_ALLOC_ENDPOINT = 0x2d,
+	IOMFB_ENDPOINT = 0x37,
+};
+
+/* Temporary backing for a chunked transfer via setDCPAVPropStart/Chunk/End */
+struct dcp_chunks {
+	size_t length;
+	void *data;
+};
+
+#define DCP_MAX_MAPPINGS (128) /* should be enough */
+#define MAX_DISP_REGISTERS (7)
+
+struct dcp_mem_descriptor {
+	size_t size;
+	void *buf;
+	dma_addr_t dva;
+	struct sg_table map;
+	u64 reg;
+};
+
+/* Limit on call stack depth (arbitrary). Some nesting is required */
+#define DCP_MAX_CALL_DEPTH 8
+
+typedef void (*dcp_callback_t)(struct apple_dcp *, void *, void *);
+
+struct dcp_channel {
+	dcp_callback_t callbacks[DCP_MAX_CALL_DEPTH];
+	void *cookies[DCP_MAX_CALL_DEPTH];
+	void *output[DCP_MAX_CALL_DEPTH];
+	u16 end[DCP_MAX_CALL_DEPTH];
+
+	/* Current depth of the call stack. Less than DCP_MAX_CALL_DEPTH */
+	u8 depth;
+	/* Already warned about busy channel */
+	bool warned_busy;
+};
+
+struct dcp_fb_reference {
+	struct list_head head;
+	struct drm_framebuffer *fb;
+	u32 swap_id;
+};
+
+#define MAX_NOTCH_HEIGHT 160
+
+struct dcp_brightness {
+	struct backlight_device *bl_dev;
+	u32 maximum;
+	u32 dac;
+	int nits;
+	int scale;
+	bool update;
+};
+
+struct audiosrv_data;
+
+/* laptop/AiO integrated panel parameters from DT */
+struct dcp_panel {
+	/* panel width in millimeters */
+	int width_mm;
+	/* panel height in millimeters */
+	int height_mm;
+	/* panel has a mini-LED backlight */
+	bool has_mini_led;
+};
+
+struct apple_dcp_hw_data {
+	u32 num_dptx_ports;
+};
+
+/* TODO: move IOMFB members to its own struct */
+struct apple_dcp {
+	struct device *dev;
+	struct platform_device *piodma;
+	struct iommu_domain *iommu_dom;
+	struct apple_rtkit *rtk;
+	struct apple_crtc *crtc;
+	struct apple_connector *connector;
+
+	struct apple_dcp_hw_data hw;
+
+	/* firmware version and compatible firmware version */
+	enum dcp_firmware_version fw_compat;
+
+	/* Coprocessor control register */
+	void __iomem *coproc_reg;
+
+	/* DCP has crashed */
+	bool crashed;
+
+	/************* IOMFB **************************************************
+	 * everything below is mostly used inside IOMFB but it could make     *
+	 * sense to keep some of the members in apple_dcp.                    *
+	 **********************************************************************/
+
+	/* clock rate requested by the DCP */
+	struct clk *clk;
+
+	/* DCP shared memory */
+	void *shmem;
+
+	/* Display registers mappable to the DCP */
+	struct resource *disp_registers[MAX_DISP_REGISTERS];
+	unsigned int nr_disp_registers;
+
+	struct resource disp_bw_scratch_res;
+	struct resource disp_bw_doorbell_res;
+	u32 disp_bw_scratch_index;
+	u32 disp_bw_scratch_offset;
+	u32 disp_bw_doorbell_index;
+	u32 disp_bw_doorbell_offset;
+
+	u32 index;
+
+	/* Bitmap of memory descriptors used for mappings made by the DCP */
+	DECLARE_BITMAP(memdesc_map, DCP_MAX_MAPPINGS);
+
+	/* Indexed table of memory descriptors */
+	struct dcp_mem_descriptor memdesc[DCP_MAX_MAPPINGS];
+
+	struct dcp_channel ch_cmd, ch_oobcmd;
+	struct dcp_channel ch_cb, ch_oobcb, ch_async, ch_oobasync;
+
+	/* iomfb EP callback handlers */
+	const iomfb_cb_handler *cb_handlers;
+
+	/* Active chunked transfer. There can only be one at a time. */
+	struct dcp_chunks chunks;
+
+	/* Queued swap. Owned by the DCP to avoid per-swap memory allocation */
+	union {
+		struct dcp_swap_submit_req_v12_3 v12_3;
+		struct dcp_swap_submit_req_v13_3 v13_3;
+	} swap;
+
+	/* swap id of the last completed swap */
+	u32 last_swap_id;
+	ktime_t swap_start;
+
+	/* Current display mode */
+	bool during_modeset;
+	bool valid_mode;
+	struct dcp_set_digital_out_mode_req mode;
+
+	/* completion for active turning true */
+	struct completion start_done;
+
+	/* Is the DCP booted? */
+	bool active;
+
+	/* eDP display without DP-HDMI conversion */
+	bool main_display;
+
+	/* clear all surfaces on init */
+	bool surfaces_cleared;
+
+	/* enable CRC calculation */
+	bool crc_enabled;
+
+	/* Modes valid for the connected display */
+	struct dcp_display_mode *modes;
+	unsigned int nr_modes;
+
+	/* Attributes of the connector */
+	int connector_type;
+
+	/* Attributes of the connected display */
+	int width_mm, height_mm;
+
+	unsigned notch_height;
+
+	/* Workqueue for sending vblank events when a dcp swap is not possible */
+	struct work_struct vblank_wq;
+
+	/* List of referenced drm_framebuffers which can be unreferenced
+	 * on the next successfully completed swap.
+	 */
+	struct list_head swapped_out_fbs;
+
+	struct dcp_brightness brightness;
+	/* Workqueue for updating the initial brightness */
+	struct work_struct bl_register_wq;
+	struct mutex bl_register_mutex;
+	/* Workqueue for updating the brightness */
+	struct work_struct bl_update_wq;
+
+	/* integrated panel if present */
+	struct dcp_panel panel;
+
+	struct apple_dcp_afkep *systemep;
+	struct completion systemep_done;
+
+	struct apple_dcp_afkep *ibootep;
+	struct apple_dcp_afkep *dcpavservep;
+	struct dcpavserv dcpavserv;
+
+	struct apple_dcp_afkep *avep;
+	struct audiosrv_data *audiosrv;
+
+	struct apple_dcp_afkep *dptxep;
+
+	struct dptx_port dptxport[2];
+
+	/* per-endpoint debugfs directories, indexed by (endpoint - 0x20) */
+	struct dentry *ep_debugfs[0x20];
+
+	/* these fields are output port specific */
+	struct phy *phy;
+	struct mux_control *xbar;
+
+	struct gpio_desc *hdmi_hpd;
+	struct gpio_desc *hdmi_pwren;
+	struct gpio_desc *dp2hdmi_pwren;
+
+	struct mutex hpd_mutex;
+
+	u32 dptx_phy;
+	u32 dptx_die;
+	int hdmi_hpd_irq;
+};
+
+void dcp_drm_crtc_page_flip(struct apple_dcp *dcp, ktime_t now);
+
+int dcp_backlight_register(struct apple_dcp *dcp);
+int dcp_backlight_update(struct apple_dcp *dcp);
+bool dcp_has_panel(struct apple_dcp *dcp);
+
+#define DCP_AUDIO_MAX_CHANS 15
+
+#endif /* __APPLE_DCP_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/apple/dcp.c b/drivers/gpu/drm/apple/dcp.c
new file mode 100644
index 00000000000000..72b6d0fd7460d7
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/align.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_module.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "afk.h"
+#include "av.h"
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "parser.h"
+#include "trace.h"
+
+#define APPLE_DCP_COPROC_CPU_CONTROL	 0x44
+#define APPLE_DCP_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define DCP_BOOT_TIMEOUT msecs_to_jiffies(1000)
+
+static bool show_notch;
+module_param(show_notch, bool, 0644);
+MODULE_PARM_DESC(show_notch, "Use the full display height and shows the notch");
+
+bool hdmi_audio;
+module_param(hdmi_audio, bool, 0644);
+MODULE_PARM_DESC(hdmi_audio, "Enable unstable HDMI audio support");
+
+static bool unstable_edid = true;
+module_param(unstable_edid, bool, 0644);
+MODULE_PARM_DESC(unstable_edid, "Enable unstable EDID retrival support");
+
+/* copied and simplified from drm_vblank.c */
+static void send_vblank_event(struct drm_device *dev,
+		struct drm_pending_vblank_event *e,
+		u64 seq, ktime_t now)
+{
+	struct timespec64 tv;
+
+	if (e->event.base.type != DRM_EVENT_FLIP_COMPLETE)
+		return;
+
+	tv = ktime_to_timespec64(now);
+	e->event.vbl.sequence = seq;
+	/*
+	 * e->event is a user space structure, with hardcoded unsigned
+	 * 32-bit seconds/microseconds. This is safe as we always use
+	 * monotonic timestamps since linux-4.15.
+	 */
+	e->event.vbl.tv_sec = tv.tv_sec;
+	e->event.vbl.tv_usec = tv.tv_nsec / 1000;
+
+	/*
+	 * Use the same timestamp for any associated fence signal to avoid
+	 * mismatch in timestamps for vsync & fence events triggered by the
+	 * same HW event. Frameworks like SurfaceFlinger in Android expects the
+	 * retire-fence timestamp to match exactly with HW vsync as it uses it
+	 * for its software vsync modeling.
+	 */
+	drm_send_event_timestamp_locked(dev, &e->base, now);
+}
+
+/*
+ * dcp_crtc_send_page_flip_event - helper to send the vblank event after a
+ * page flip.
+ *
+ * Compensate for the unknown slack between the page flip and the arrival of
+ * the swap_complete callback. The minimal observed duration on DCP with HDMI
+ * output was around 2.3 ms. If the fb swap was submitted closer to the
+ * expected swap_complete it gets a penalty of one frame duration, which is on
+ * the border of unreasonable considering that Apple advertises support for
+ * 240 Hz (a frame duration of 4.167 ms). It is clearly unreasonable given
+ * kwin's kms commit scheduling: kwin commits 1.5 ms plus the mode's vblank
+ * time before the expected next page flip completion, which results in
+ * presenting at half the display's rate for HDMI outputs.
+ * This might be a difference between dcp and dcpext.
+ */
+static void dcp_crtc_send_page_flip_event(struct apple_crtc *crtc,
+					  struct drm_pending_vblank_event *e,
+					  ktime_t now, ktime_t start)
+{
+	struct drm_device *dev = crtc->base.dev;
+	u64 seq;
+	unsigned int pipe = drm_crtc_index(&crtc->base);
+	ktime_t flip;
+
+	seq = 0;
+	if (start != KTIME_MIN) {
+		s64 delta = ktime_us_delta(now, start);
+		if (delta <= 500)
+			flip = now;
+		else if (delta >= 2500)
+			flip = ktime_sub_us(now, 1000);
+		else
+			flip = ktime_sub_us(now, (delta - 500) / 2);
+	} else {
+		flip = now;
+	}
+	e->pipe = pipe;
+	send_vblank_event(dev, e, seq, flip);
+}
+
+/* HACK: moved here to avoid circular dependency between apple_drv and dcp */
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->base.dev->event_lock, flags);
+	if (crtc->event) {
+		drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+		crtc->event = NULL;
+	}
+	spin_unlock_irqrestore(&crtc->base.dev->event_lock, flags);
+}
+
+void dcp_drm_crtc_page_flip(struct apple_dcp *dcp, ktime_t now)
+{
+	unsigned long flags;
+	struct apple_crtc *crtc = dcp->crtc;
+
+	spin_lock_irqsave(&crtc->base.dev->event_lock, flags);
+	if (crtc->event) {
+		if (crtc->event->event.base.type == DRM_EVENT_FLIP_COMPLETE)
+			dcp_crtc_send_page_flip_event(crtc, crtc->event, now, dcp->swap_start);
+		else
+			drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+		crtc->event = NULL;
+		dcp->swap_start = KTIME_MIN;
+	}
+	spin_unlock_irqrestore(&crtc->base.dev->event_lock, flags);
+}
+
+void dcp_set_dimensions(struct apple_dcp *dcp)
+{
+	int i;
+	int width_mm = dcp->width_mm;
+	int height_mm = dcp->height_mm;
+
+	if (width_mm == 0 || height_mm == 0) {
+		width_mm = dcp->panel.width_mm;
+		height_mm = dcp->panel.height_mm;
+	}
+
+	/* Set the connector info */
+	if (dcp->connector) {
+		struct drm_connector *connector = &dcp->connector->base;
+
+		mutex_lock(&connector->dev->mode_config.mutex);
+		connector->display_info.width_mm = width_mm;
+		connector->display_info.height_mm = height_mm;
+		mutex_unlock(&connector->dev->mode_config.mutex);
+	}
+
+	/*
+	 * Fix up any probed modes. Modes are created when parsing
+	 * TimingElements, dimensions are calculated when parsing
+	 * DisplayAttributes, and TimingElements may be sent first
+	 */
+	for (i = 0; i < dcp->nr_modes; ++i) {
+		dcp->modes[i].mode.width_mm = width_mm;
+		dcp->modes[i].mode.height_mm = height_mm;
+	}
+}
+
+bool dcp_has_panel(struct apple_dcp *dcp)
+{
+	return dcp->panel.width_mm > 0;
+}
+
+int dcp_set_crc(struct drm_crtc *crtc, bool enabled)
+{
+	struct apple_crtc *ac = to_apple_crtc(crtc);
+	struct apple_dcp *dcp = platform_get_drvdata(ac->dcp);
+
+	dcp->crc_enabled = enabled;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dcp_set_crc);
+
+/*
+ * Helper to send a DRM vblank event. We do not know how to call
+ * swap_submit_dcp without surfaces. To avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks, send a vblank event via a workqueue.
+ */
+static void dcp_delayed_vblank(struct work_struct *work)
+{
+	struct apple_dcp *dcp;
+
+	dcp = container_of(work, struct apple_dcp, vblank_wq);
+	mdelay(5);
+	dcp_drm_crtc_vblank(dcp->crtc);
+}
+
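+/* Dispatch incoming RTKit messages to the handler of the owning endpoint */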
+static void dcp_recv_msg(void *cookie, u8 endpoint, u64 message)
+{
+	struct apple_dcp *dcp = cookie;
+
+	trace_dcp_recv_msg(dcp, endpoint, message);
+
+	switch (endpoint) {
+	case IOMFB_ENDPOINT:
+		return iomfb_recv_msg(dcp, message);
+	case AV_ENDPOINT:
+		afk_receive_message(dcp->avep, message);
+		return;
+	case SYSTEM_ENDPOINT:
+		afk_receive_message(dcp->systemep, message);
+		return;
+	case DISP0_ENDPOINT:
+		afk_receive_message(dcp->ibootep, message);
+		return;
+	case DPAVSERV_ENDPOINT:
+		afk_receive_message(dcp->dcpavservep, message);
+		return;
+	case DPTX_ENDPOINT:
+		afk_receive_message(dcp->dptxep, message);
+		return;
+	default:
+		WARN(endpoint, "unknown DCP endpoint %hhu\n", endpoint);
+	}
+}
+
+static void dcp_rtk_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
+{
+	struct apple_dcp *dcp = cookie;
+
+	dcp->crashed = true;
+	dev_err(dcp->dev, "DCP has crashed\n");
+	if (dcp->connector) {
+		dcp->connector->connected = 0;
+		drm_edid_free(dcp->connector->drm_edid);
+		dcp->connector->drm_edid = NULL;
+		schedule_work(&dcp->connector->hotplug_wq);
+	}
+	complete(&dcp->start_done);
+}
+
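+/*
+ * Set up a shared memory buffer requested by the DCP firmware. Buffers that
+ * already come with an IOVA are remapped from the existing IOMMU translation,
+ * otherwise a coherent DMA buffer is allocated on the firmware's behalf.
+ */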
+static int dcp_rtk_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	struct apple_dcp *dcp = cookie;
+
+	if (bfr->iova) {
+		struct iommu_domain *domain =
+			iommu_get_domain_for_dev(dcp->dev);
+		phys_addr_t phy_addr;
+
+		if (!domain)
+			return -ENOMEM;
+
+		// TODO: get map from device-tree
+		phy_addr = iommu_iova_to_phys(domain, bfr->iova);
+		if (!phy_addr)
+			return -ENOMEM;
+
+		// TODO: verify phy_addr, cache attribute
+		bfr->buffer = memremap(phy_addr, bfr->size, MEMREMAP_WB);
+		if (!bfr->buffer)
+			return -ENOMEM;
+
+		bfr->is_mapped = true;
+		dev_info(dcp->dev,
+			 "shmem_setup: iova: %lx -> pa: %lx -> iomem: %lx\n",
+			 (uintptr_t)bfr->iova, (uintptr_t)phy_addr,
+			 (uintptr_t)bfr->buffer);
+	} else {
+		bfr->buffer = dma_alloc_coherent(dcp->dev, bfr->size,
+						 &bfr->iova, GFP_KERNEL);
+		if (!bfr->buffer)
+			return -ENOMEM;
+
+		dev_info(dcp->dev, "shmem_setup: iova: %lx, buffer: %lx\n",
+			 (uintptr_t)bfr->iova, (uintptr_t)bfr->buffer);
+	}
+
+	return 0;
+}
+
+static void dcp_rtk_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	struct apple_dcp *dcp = cookie;
+
+	if (bfr->is_mapped)
+		memunmap(bfr->buffer);
+	else
+		dma_free_coherent(dcp->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static struct apple_rtkit_ops rtkit_ops = {
+	.crashed = dcp_rtk_crashed,
+	.recv_message = dcp_recv_msg,
+	.shmem_setup = dcp_rtk_shmem_setup,
+	.shmem_destroy = dcp_rtk_shmem_destroy,
+};
+
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message)
+{
+	trace_dcp_send_msg(dcp, endpoint, message);
+	apple_rtkit_send_message(dcp->rtk, endpoint, message, NULL,
+				 true);
+}
+
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+	struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+	struct drm_plane_state *new_state;
+	struct drm_plane *plane;
+	struct drm_crtc_state *crtc_state;
+	int plane_idx, plane_count = 0;
+	bool needs_modeset;
+
+	if (dcp->crashed)
+		return -EINVAL;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+	needs_modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+	if (!needs_modeset && !dcp->connector->connected) {
+		dev_err(dcp->dev, "crtc_atomic_check: disconnected but no modeset\n");
+		return -EINVAL;
+	}
+
+	for_each_new_plane_in_state(state, plane, new_state, plane_idx) {
+		/* skip planes not for this crtc */
+		if (new_state->crtc != crtc)
+			continue;
+
+		plane_count += 1;
+	}
+
+	if (plane_count > DCP_MAX_PLANES) {
+		dev_err(dcp->dev, "crtc_atomic_check: Blend supports only 2 layers!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dcp_crtc_atomic_check);
+
+int dcp_get_connector_type(struct platform_device *pdev)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	return (dcp->connector_type);
+}
+EXPORT_SYMBOL_GPL(dcp_get_connector_type);
+
+#define DPTX_CONNECT_TIMEOUT msecs_to_jiffies(2000)
+
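+/*
+ * Bring up a DPTX port: hand the ATC PHY to the dptx-port service, request
+ * the display and wait for the link configuration to complete before
+ * asserting HPD (for DP connectors) and (re)connecting the audio service.
+ */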
+static int dcp_dptx_connect(struct apple_dcp *dcp, u32 port)
+{
+	int ret = 0;
+
+	if (!dcp->phy) {
+		dev_warn(dcp->dev, "dcp_dptx_connect: missing phy\n");
+		return -ENODEV;
+	}
+	dev_info(dcp->dev, "%s(port=%d)\n", __func__, port);
+
+	mutex_lock(&dcp->hpd_mutex);
+	if (!dcp->dptxport[port].enabled) {
+		dev_warn(dcp->dev, "dcp_dptx_connect: dptx service for port %d not enabled\n", port);
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	if (dcp->dptxport[port].connected)
+		goto out_unlock;
+
+	reinit_completion(&dcp->dptxport[port].linkcfg_completion);
+	dcp->dptxport[port].atcphy = dcp->phy;
+	dptxport_connect(dcp->dptxport[port].service, 0, dcp->dptx_phy, dcp->dptx_die);
+	dptxport_request_display(dcp->dptxport[port].service);
+	dcp->dptxport[port].connected = true;
+
+	mutex_unlock(&dcp->hpd_mutex);
+	ret = wait_for_completion_timeout(&dcp->dptxport[port].linkcfg_completion,
+					  DPTX_CONNECT_TIMEOUT);
+	if (!ret)
+		dev_warn(dcp->dev, "dcp_dptx_connect: port %d link completion timed out\n",
+			 port);
+	else
+		dev_dbg(dcp->dev, "dcp_dptx_connect: waited %d ms for link\n",
+			jiffies_to_msecs(DPTX_CONNECT_TIMEOUT - ret));
+
+	usleep_range(5, 10);
+
+	if (dcp->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+		dptxport_set_hpd(dcp->dptxport[port].service, true);
+
+	if (dcp->avep)
+		av_service_connect(dcp);
+
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dcp->hpd_mutex);
+	return ret;
+}
+
+static void disconnected_hpd_event(struct apple_connector *con)
+{
+	if (con) {
+		con->connected = 0;
+		drm_kms_helper_connector_hotplug_event(&con->base);
+	}
+}
+
+static int dcp_dptx_disconnect(struct apple_dcp *dcp, u32 port)
+{
+	dev_info(dcp->dev, "%s(port=%d)\n", __func__, port);
+
+	mutex_lock(&dcp->hpd_mutex);
+	if (dcp->dptxport[port].enabled && dcp->dptxport[port].connected) {
+		dptxport_release_display(dcp->dptxport[port].service);
+		dcp->dptxport[port].connected = false;
+	}
+	mutex_unlock(&dcp->hpd_mutex);
+
+	return 0;
+}
+
+int dcp_dptx_connect_oob(struct platform_device *pdev, u32 port)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+	return dcp_dptx_connect(dcp, port);
+}
+EXPORT_SYMBOL_GPL(dcp_dptx_connect_oob);
+
+int dcp_dptx_disconnect_oob(struct platform_device *pdev, u32 port)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	disconnected_hpd_event(dcp->connector);
+
+	if (dcp->avep)
+		av_service_disconnect(dcp);
+	dptxport_set_hpd(dcp->dptxport[port].service, false);
+	return dcp_dptx_disconnect(dcp, port);
+}
+EXPORT_SYMBOL_GPL(dcp_dptx_disconnect_oob);
+
+static irqreturn_t dcp_dp2hdmi_hpd(int irq, void *data)
+{
+	struct apple_dcp *dcp = data;
+	bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+
+	/*
+	 * Do nothing on disconnect and trust that dcp detects it itself.
+	 * Parallel disconnect HPDs result in drm disabling the CRTC even when
+	 * it should not.
+	 * The interrupt should be changed to rising-edge only, but for now the
+	 * disconnect IRQs might be helpful for debugging.
+	 */
+	dev_info(dcp->dev, "DP2HDMI HPD irq, connected:%d\n", connected);
+
+	if (connected) {
+		msleep(500);
+		connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+		dev_info(dcp->dev, "DP2HDMI HPD irq, 500ms debounce: connected:%d\n", connected);
+	}
+
+	if (connected)
+		dcp_dptx_connect(dcp, 0);
+
+	return IRQ_HANDLED;
+}
+
+void dcp_link(struct platform_device *pdev, struct apple_crtc *crtc,
+	      struct apple_connector *connector)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	dcp->crtc = crtc;
+	dcp->connector = connector;
+}
+EXPORT_SYMBOL_GPL(dcp_link);
+
+int dcp_start(struct platform_device *pdev)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+	int ret;
+
+	init_completion(&dcp->start_done);
+
+	/* start RTKit endpoints */
+	ret = systemep_init(dcp);
+	if (ret)
+		dev_warn(dcp->dev, "Failed to start system endpoint: %d\n", ret);
+
+	if (unstable_edid && !dcp_has_panel(dcp)) {
+		ret = dpavservep_init(dcp);
+		if (ret)
+			dev_warn(dcp->dev, "Failed to start DPAVSERV endpoint: %d",
+				 ret);
+	}
+
+	if (dcp->phy && dcp->fw_compat >= DCP_FIRMWARE_V_13_5) {
+		ret = ibootep_init(dcp);
+		if (ret)
+			dev_warn(dcp->dev, "Failed to start IBOOT endpoint: %d\n",
+				 ret);
+
+		ret = dptxep_init(dcp);
+		if (ret) {
+			dev_warn(dcp->dev, "Failed to start DPTX endpoint: %d\n",
+				 ret);
+#ifdef DCP_DPTX_DISCONNECT_ON_INIT
+		/*
+		 * This disconnect / connect cycle on init is only necessary
+		 * when using dcp0 on j473, j474s and presumably j475c.
+		 * Since dcp0 is not used at the moment, avoid it as it is
+		 * possibly the cause of startup issues.
+		 */
+		} else if (dcp->dptxport[0].enabled) {
+			bool connected;
+			/* force disconnect on start - necessary if the display
+			 * is already up from m1n1
+			 */
+			dptxport_set_hpd(dcp->dptxport[0].service, false);
+			dptxport_release_display(dcp->dptxport[0].service);
+			usleep_range(10 * USEC_PER_MSEC, 25 * USEC_PER_MSEC);
+
+			connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+			dev_info(dcp->dev, "%s: DP2HDMI HPD connected:%d\n", __func__, connected);
+
+			// necessary on j473/j474 but not on j314c
+			if (connected)
+				dcp_dptx_connect(dcp, 0);
+#endif
+		}
+	} else if (dcp->phy) {
+		dev_warn(dcp->dev, "OS firmware incompatible with dptxport EP\n");
+	}
+	ret = iomfb_start_rtkit(dcp);
+	if (ret)
+		dev_err(dcp->dev, "Failed to start IOMFB endpoint: %d\n", ret);
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+	if (hdmi_audio) {
+		ret = avep_init(dcp);
+		if (ret)
+			dev_warn(dcp->dev, "Failed to start AV endpoint: %d", ret);
+		ret = 0;
+	}
+#endif
+
+	return ret;
+}
+EXPORT_SYMBOL(dcp_start);
+
+static int dcp_enable_dp2hdmi_hpd(struct apple_dcp *dcp)
+{
+	// check HPD state before enabling the edge triggered IRQ
+	if (dcp->hdmi_hpd) {
+		bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+		dev_info(dcp->dev, "%s: DP2HDMI HPD connected:%d\n", __func__, connected);
+
+		if (connected)
+			dcp_dptx_connect(dcp, 0);
+	}
+
+	if (dcp->hdmi_hpd_irq)
+		enable_irq(dcp->hdmi_hpd_irq);
+
+	return 0;
+}
+
+int dcp_wait_ready(struct platform_device *pdev, u64 timeout)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+	int ret;
+
+	if (dcp->crashed)
+		return -ENODEV;
+	if (dcp->active)
+		return dcp_enable_dp2hdmi_hpd(dcp);
+	if (timeout <= 0)
+		return -ETIMEDOUT;
+
+	ret = wait_for_completion_timeout(&dcp->start_done, timeout);
+	if (ret < 0)
+		return ret;
+
+	if (dcp->crashed)
+		return -ENODEV;
+
+	if (dcp->active)
+		dcp_enable_dp2hdmi_hpd(dcp);
+
+	return dcp->active ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(dcp_wait_ready);
+
+static void __maybe_unused dcp_sleep(struct apple_dcp *dcp)
+{
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		iomfb_sleep_v12_3(dcp);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		iomfb_sleep_v13_3(dcp);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+		break;
+	}
+}
+
+void dcp_poweron(struct platform_device *pdev)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	if (dcp->hdmi_hpd) {
+		bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+		dev_info(dcp->dev, "%s: DP2HDMI HPD connected:%d\n", __func__, connected);
+
+		if (connected)
+			dcp_dptx_connect(dcp, 0);
+	}
+
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		iomfb_poweron_v12_3(dcp);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		iomfb_poweron_v13_3(dcp);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+		break;
+	}
+
+	if (dcp->avep)
+		av_service_connect(dcp);
+}
+EXPORT_SYMBOL(dcp_poweron);
+
+void dcp_poweroff(struct platform_device *pdev)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	if (dcp->avep)
+		av_service_disconnect(dcp);
+
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		iomfb_poweroff_v12_3(dcp);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		iomfb_poweroff_v13_3(dcp);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+		break;
+	}
+
+	if (dcp->hdmi_hpd) {
+		bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+		if (!connected) {
+			disconnected_hpd_event(dcp->connector);
+			dcp_dptx_disconnect(dcp, 0);
+		}
+	}
+}
+EXPORT_SYMBOL(dcp_poweroff);
+
+static void dcp_work_register_backlight(struct work_struct *work)
+{
+	int ret;
+	struct apple_dcp *dcp;
+
+	dcp = container_of(work, struct apple_dcp, bl_register_wq);
+
+	mutex_lock(&dcp->bl_register_mutex);
+	if (dcp->brightness.bl_dev)
+		goto out_unlock;
+
+	/* try to register backlight device, */
+	ret = dcp_backlight_register(dcp);
+	if (ret) {
+		dev_err(dcp->dev, "Unable to register backlight device\n");
+		dcp->brightness.maximum = 0;
+	}
+
+out_unlock:
+	mutex_unlock(&dcp->bl_register_mutex);
+}
+
+static void dcp_work_update_backlight(struct work_struct *work)
+{
+	struct apple_dcp *dcp;
+
+	dcp = container_of(work, struct apple_dcp, bl_update_wq);
+
+	dcp_backlight_update(dcp);
+}
+
+static int dcp_create_piodma_iommu_dev(struct apple_dcp *dcp)
+{
+	int ret;
+	struct device_node *node = of_get_child_by_name(dcp->dev->of_node, "piodma");
+
+	if (!node)
+		return dev_err_probe(dcp->dev, -ENODEV,
+				     "Failed to get piodma child DT node\n");
+
+	dcp->piodma = of_platform_device_create(node, NULL, dcp->dev);
+	if (!dcp->piodma) {
+		of_node_put(node);
+		return dev_err_probe(dcp->dev, -ENODEV, "Failed to create piodma pdev for %pOF\n", node);
+	}
+
+	ret = dma_set_mask_and_coherent(&dcp->piodma->dev, DMA_BIT_MASK(42));
+	if (ret)
+		goto err_destroy_pdev;
+
+	ret = of_dma_configure(&dcp->piodma->dev, node, true);
+	if (ret) {
+		ret = dev_err_probe(dcp->dev, ret,
+			"Failed to configure IOMMU child DMA\n");
+		goto err_destroy_pdev;
+	}
+	of_node_put(node);
+
+	dcp->iommu_dom = iommu_get_domain_for_dev(&dcp->piodma->dev);
+	if (IS_ERR(dcp->iommu_dom)) {
+		ret = dev_err_probe(dcp->dev, PTR_ERR(dcp->iommu_dom),
+				    "Failed to get default iommu domain for "
+				    "piodma device\n");
+		dcp->iommu_dom = NULL;
+		goto err_destroy_pdev;
+	}
+
+	return 0;
+err_destroy_pdev:
+	of_node_put(node);
+	of_platform_device_destroy(&dcp->piodma->dev, NULL);
+	return ret;
+}
+
+static int dcp_get_bw_scratch_reg(struct apple_dcp *dcp, u32 expected)
+{
+	struct of_phandle_args ph_args;
+	u32 addr_idx, disp_idx, offset;
+	int ret;
+
+	ret = of_parse_phandle_with_args(dcp->dev->of_node, "apple,bw-scratch",
+				   "#apple,bw-scratch-cells", 0, &ph_args);
+	if (ret < 0) {
+		dev_err(dcp->dev, "Failed to read 'apple,bw-scratch': %d\n", ret);
+		return ret;
+	}
+
+	if (ph_args.args_count != 3) {
+		dev_err(dcp->dev, "Unexpected 'apple,bw-scratch' arg count %d\n",
+			ph_args.args_count);
+		ret = -EINVAL;
+		goto err_of_node_put;
+	}
+
+	addr_idx = ph_args.args[0];
+	disp_idx = ph_args.args[1];
+	offset = ph_args.args[2];
+
+	if (disp_idx != expected || disp_idx >= MAX_DISP_REGISTERS) {
+		dev_err(dcp->dev, "Unexpected disp_reg value in 'apple,bw-scratch': %d\n",
+			disp_idx);
+		ret = -EINVAL;
+		goto err_of_node_put;
+	}
+
+	ret = of_address_to_resource(ph_args.np, addr_idx, &dcp->disp_bw_scratch_res);
+	if (ret < 0) {
+		dev_err(dcp->dev, "Failed to get 'apple,bw-scratch' resource %d from %pOF\n",
+			addr_idx, ph_args.np);
+		goto err_of_node_put;
+	}
+	if (offset > resource_size(&dcp->disp_bw_scratch_res) - 4) {
+		ret = -EINVAL;
+		goto err_of_node_put;
+	}
+
+	dcp->disp_registers[disp_idx] = &dcp->disp_bw_scratch_res;
+	dcp->disp_bw_scratch_index = disp_idx;
+	dcp->disp_bw_scratch_offset = offset;
+	ret = 0;
+
+err_of_node_put:
+	of_node_put(ph_args.np);
+	return ret;
+}
+
+static int dcp_get_bw_doorbell_reg(struct apple_dcp *dcp, u32 expected)
+{
+	struct of_phandle_args ph_args;
+	u32 addr_idx, disp_idx;
+	int ret;
+
+	ret = of_parse_phandle_with_args(dcp->dev->of_node, "apple,bw-doorbell",
+				   "#apple,bw-doorbell-cells", 0, &ph_args);
+	if (ret < 0) {
+		dev_err(dcp->dev, "Failed to read 'apple,bw-doorbell': %d\n", ret);
+		return ret;
+	}
+
+	if (ph_args.args_count != 2) {
+		dev_err(dcp->dev, "Unexpected 'apple,bw-doorbell' arg count %d\n",
+			ph_args.args_count);
+		ret = -EINVAL;
+		goto err_of_node_put;
+	}
+
+	addr_idx = ph_args.args[0];
+	disp_idx = ph_args.args[1];
+
+	if (disp_idx != expected || disp_idx >= MAX_DISP_REGISTERS) {
+		dev_err(dcp->dev, "Unexpected disp_reg value in 'apple,bw-doorbell': %d\n",
+			disp_idx);
+		ret = -EINVAL;
+		goto err_of_node_put;
+	}
+
+	ret = of_address_to_resource(ph_args.np, addr_idx, &dcp->disp_bw_doorbell_res);
+	if (ret < 0) {
+		dev_err(dcp->dev, "Failed to get 'apple,bw-doorbell' resource %d from %pOF\n",
+			addr_idx, ph_args.np);
+		goto err_of_node_put;
+	}
+	dcp->disp_bw_doorbell_index = disp_idx;
+	dcp->disp_registers[disp_idx] = &dcp->disp_bw_doorbell_res;
+	ret = 0;
+
+err_of_node_put:
+	of_node_put(ph_args.np);
+	return ret;
+}
+
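+/*
+ * Collect the display MMIO ranges the DCP may ask to map: the remaining
+ * platform resources plus the pmgr bandwidth scratch register and, where
+ * present, the bandwidth doorbell register.
+ */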
+static int dcp_get_disp_regs(struct apple_dcp *dcp)
+{
+	struct platform_device *pdev = to_platform_device(dcp->dev);
+	int count = pdev->num_resources - 1;
+	int i, ret;
+
+	if (count <= 0 || count > MAX_DISP_REGISTERS)
+		return -EINVAL;
+
+	for (i = 0; i < count; ++i) {
+		dcp->disp_registers[i] =
+			platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+	}
+
+	/* load pmgr bandwidth scratch resource and offset */
+	ret = dcp_get_bw_scratch_reg(dcp, count);
+	if (ret < 0)
+		return ret;
+	count += 1;
+
+	/* load pmgr bandwidth doorbell resource if present (only on t8103) */
+	if (of_property_present(dcp->dev->of_node, "apple,bw-doorbell")) {
+		ret = dcp_get_bw_doorbell_reg(dcp, count);
+		if (ret < 0)
+			return ret;
+		count += 1;
+	}
+
+	dcp->nr_disp_registers = count;
+	return 0;
+}
+
+#define DCP_FW_VERSION_MIN_LEN	3
+#define DCP_FW_VERSION_MAX_LEN	5
+#define DCP_FW_VERSION_STR_LEN	(DCP_FW_VERSION_MAX_LEN * 4)
+
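+/*
+ * Render a firmware version DT property (3 to 5 u32 cells) as a dotted
+ * version string. Returns the number of cells read or a negative error.
+ */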
+static int dcp_read_fw_version(struct device *dev, const char *name,
+			       char *version_str)
+{
+	u32 ver[DCP_FW_VERSION_MAX_LEN];
+	int len_str;
+	int len;
+
+	len = of_property_read_variable_u32_array(dev->of_node, name, ver,
+						  DCP_FW_VERSION_MIN_LEN,
+						  DCP_FW_VERSION_MAX_LEN);
+
+	switch (len) {
+	case 3:
+		len_str = scnprintf(version_str, DCP_FW_VERSION_STR_LEN,
+				    "%d.%d.%d", ver[0], ver[1], ver[2]);
+		break;
+	case 4:
+		len_str = scnprintf(version_str, DCP_FW_VERSION_STR_LEN,
+				    "%d.%d.%d.%d", ver[0], ver[1], ver[2],
+				    ver[3]);
+		break;
+	case 5:
+		len_str = scnprintf(version_str, DCP_FW_VERSION_STR_LEN,
+				    "%d.%d.%d.%d.%d", ver[0], ver[1], ver[2],
+				    ver[3], ver[4]);
+		break;
+	default:
+		len_str = strscpy(version_str, "UNKNOWN",
+				  DCP_FW_VERSION_STR_LEN);
+		if (len >= 0)
+			len = -EOVERFLOW;
+		break;
+	}
+
+	if (len_str >= DCP_FW_VERSION_STR_LEN)
+		dev_warn(dev, "'%s' truncated: '%s'\n", name, version_str);
+
+	return len;
+}
+
+static enum dcp_firmware_version dcp_check_firmware_version(struct device *dev)
+{
+	char compat_str[DCP_FW_VERSION_STR_LEN];
+	char fw_str[DCP_FW_VERSION_STR_LEN];
+	int ret;
+
+	/* firmware version is just informative */
+	dcp_read_fw_version(dev, "apple,firmware-version", fw_str);
+
+	ret = dcp_read_fw_version(dev, "apple,firmware-compat", compat_str);
+	if (ret < 0) {
+		dev_err(dev, "Could not read 'apple,firmware-compat': %d\n", ret);
+		return DCP_FIRMWARE_UNKNOWN;
+	}
+
+	if (strncmp(compat_str, "12.3.0", sizeof(compat_str)) == 0)
+		return DCP_FIRMWARE_V_12_3;
+	/*
+	 * m1n1 reports firmware version 13.5 as compatible with 13.3. This is
+	 * only true for the iomfb endpoint. The interface for the dptx-port
+	 * endpoint changed between 13.3 and 13.5. The driver will only support
+	 * firmware 13.5. Check the actual firmware version for compat version
+	 * 13.3 until m1n1 reports 13.5 as "firmware-compat".
+	 */
+	else if ((strncmp(compat_str, "13.3.0", sizeof(compat_str)) == 0) &&
+		 (strncmp(fw_str, "13.5.0", sizeof(compat_str)) == 0))
+		return DCP_FIRMWARE_V_13_5;
+	else if (strncmp(compat_str, "13.5.0", sizeof(compat_str)) == 0)
+		return DCP_FIRMWARE_V_13_5;
+
+	dev_err(dev, "DCP firmware-compat %s (FW: %s) is not supported\n",
+		compat_str, fw_str);
+
+	return DCP_FIRMWARE_UNKNOWN;
+}
+
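+/*
+ * Component bind: parse the DT (panel, connector type, notch height), create
+ * the PIODMA IOMMU child device, map the display registers and boot the DCP
+ * coprocessor through RTKit.
+ */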
+static int dcp_comp_bind(struct device *dev, struct device *main, void *data)
+{
+	struct device_node *panel_np;
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+	u32 cpu_ctrl;
+	int ret;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(42));
+	if (ret)
+		return ret;
+
+	dcp->coproc_reg = devm_platform_ioremap_resource_byname(to_platform_device(dev), "coproc");
+	if (IS_ERR(dcp->coproc_reg))
+		return PTR_ERR(dcp->coproc_reg);
+
+	of_property_read_u32(dev->of_node, "apple,dcp-index",
+					   &dcp->index);
+	of_property_read_u32(dev->of_node, "apple,dptx-phy",
+					   &dcp->dptx_phy);
+	of_property_read_u32(dev->of_node, "apple,dptx-die",
+					   &dcp->dptx_die);
+	if (dcp->index || dcp->dptx_phy || dcp->dptx_die)
+		dev_info(dev, "DCP index:%u dptx target phy: %u dptx die: %u\n",
+			 dcp->index, dcp->dptx_phy, dcp->dptx_die);
+	mutex_init(&dcp->hpd_mutex);
+
+	if (!show_notch)
+		ret = of_property_read_u32(dev->of_node, "apple,notch-height",
+					   &dcp->notch_height);
+
+	if (dcp->notch_height > MAX_NOTCH_HEIGHT)
+		dcp->notch_height = MAX_NOTCH_HEIGHT;
+	if (dcp->notch_height > 0)
+		dev_info(dev, "Detected display with notch of %u pixel\n", dcp->notch_height);
+
+	/* initialize brightness scale to a sensible default to avoid divide by 0 */
+	dcp->brightness.scale = 65536;
+	panel_np = of_get_compatible_child(dev->of_node, "apple,panel-mini-led");
+	if (panel_np)
+		dcp->panel.has_mini_led = true;
+	else
+		panel_np = of_get_compatible_child(dev->of_node, "apple,panel");
+
+	if (panel_np) {
+		const char height_prop[2][16] = { "adj-height-mm", "height-mm" };
+
+		if (of_device_is_available(panel_np)) {
+			ret = of_property_read_u32(panel_np, "apple,max-brightness",
+						   &dcp->brightness.maximum);
+			if (ret)
+				dev_err(dev, "Missing property 'apple,max-brightness'\n");
+		}
+
+		of_property_read_u32(panel_np, "width-mm", &dcp->panel.width_mm);
+		/* use adjusted height as long as the notch is hidden */
+		of_property_read_u32(panel_np, height_prop[!dcp->notch_height],
+				     &dcp->panel.height_mm);
+
+		of_node_put(panel_np);
+		dcp->connector_type = DRM_MODE_CONNECTOR_eDP;
+		INIT_WORK(&dcp->bl_register_wq, dcp_work_register_backlight);
+		mutex_init(&dcp->bl_register_mutex);
+		INIT_WORK(&dcp->bl_update_wq, dcp_work_update_backlight);
+	} else if (of_property_match_string(dev->of_node, "apple,connector-type", "HDMI-A") >= 0)
+		dcp->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+	else if (of_property_match_string(dev->of_node, "apple,connector-type", "DP") >= 0)
+		dcp->connector_type = DRM_MODE_CONNECTOR_DisplayPort;
+	else if (of_property_match_string(dev->of_node, "apple,connector-type", "USB-C") >= 0)
+		dcp->connector_type = DRM_MODE_CONNECTOR_USB;
+	else
+		dcp->connector_type = DRM_MODE_CONNECTOR_Unknown;
+
+	ret = dcp_create_piodma_iommu_dev(dcp);
+	if (ret)
+		return dev_err_probe(dev, ret,
+				"Failed to created PIODMA iommu child device");
+
+	ret = dcp_get_disp_regs(dcp);
+	if (ret) {
+		dev_err(dev, "failed to find display registers\n");
+		return ret;
+	}
+
+	dcp->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(dcp->clk))
+		return dev_err_probe(dev, PTR_ERR(dcp->clk),
+				     "Unable to find clock\n");
+
+	bitmap_zero(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+	// TODO: mem_desc IDs start at 1, for simplicity just skip the '0' entry
+	set_bit(0, dcp->memdesc_map);
+
+	INIT_WORK(&dcp->vblank_wq, dcp_delayed_vblank);
+
+	dcp->swapped_out_fbs =
+		(struct list_head)LIST_HEAD_INIT(dcp->swapped_out_fbs);
+
+	cpu_ctrl =
+		readl_relaxed(dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+	writel_relaxed(cpu_ctrl | APPLE_DCP_COPROC_CPU_CONTROL_RUN,
+		       dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+
+	dcp->rtk = devm_apple_rtkit_init(dev, dcp, "mbox", 0, &rtkit_ops);
+	if (IS_ERR(dcp->rtk))
+		return dev_err_probe(dev, PTR_ERR(dcp->rtk),
+				     "Failed to initialize RTKit\n");
+
+	ret = apple_rtkit_wake(dcp->rtk);
+	if (ret)
+		return dev_err_probe(dev, ret,
+				     "Failed to boot RTKit: %d\n", ret);
+	return ret;
+}
+
+/*
+ * We need to shutdown DCP before tearing down the display subsystem. Otherwise
+ * the DCP will crash and briefly flash a green screen of death.
+ */
+static void dcp_comp_unbind(struct device *dev, struct device *main, void *data)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+
+	if (!dcp)
+		return;
+
+	if (dcp->hdmi_hpd_irq)
+		disable_irq(dcp->hdmi_hpd_irq);
+
+	if (dcp->avep) {
+		av_service_disconnect(dcp);
+		afk_shutdown(dcp->avep);
+		dcp->avep = NULL;
+	}
+
+	if (dcp->dptxep) {
+		afk_shutdown(dcp->dptxep);
+		dcp->dptxep = NULL;
+	}
+
+	if (dcp->ibootep) {
+		afk_shutdown(dcp->ibootep);
+		dcp->ibootep = NULL;
+	}
+
+	if (dcp->systemep) {
+		afk_shutdown(dcp->systemep);
+		dcp->systemep = NULL;
+	}
+
+	if (dcp->dcpavservep) {
+		afk_shutdown(dcp->dcpavservep);
+		dcp->dcpavservep = NULL;
+	}
+
+	if (dcp->shmem)
+		iomfb_shutdown(dcp);
+
+	if (dcp->piodma) {
+		dcp->iommu_dom = NULL;
+		of_platform_device_destroy(&dcp->piodma->dev, NULL);
+		dcp->piodma = NULL;
+	}
+
+	if (dcp->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		cancel_work_sync(&dcp->bl_register_wq);
+		cancel_work_sync(&dcp->bl_update_wq);
+	}
+	cancel_work_sync(&dcp->vblank_wq);
+
+	devm_clk_put(dev, dcp->clk);
+	dcp->clk = NULL;
+}
+
+static const struct component_ops dcp_comp_ops = {
+	.bind	= dcp_comp_bind,
+	.unbind	= dcp_comp_unbind,
+};
+
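+/*
+ * Early probe: check firmware compatibility and the devicetree, grab the
+ * optional DP PHY, DP2HDMI GPIOs/IRQ and crossbar mux, then register the
+ * component.
+ */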
+static int dcp_platform_probe(struct platform_device *pdev)
+{
+	enum dcp_firmware_version fw_compat;
+	struct device *dev = &pdev->dev;
+	struct apple_dcp *dcp;
+	u32 mux_index;
+
+	fw_compat = dcp_check_firmware_version(dev);
+	if (fw_compat == DCP_FIRMWARE_UNKNOWN)
+		return -ENODEV;
+
+	/* Check for "apple,bw-scratch" to avoid probing appledrm with outdated
+	 * device trees. This prevents replacing simpledrm and ending up without
+	 * display.
+	 */
+	if (!of_property_present(dev->of_node, "apple,bw-scratch"))
+		return dev_err_probe(dev, -ENODEV, "Incompatible devicetree! "
+			"Use devicetree matching this kernel.\n");
+
+	dcp = devm_kzalloc(dev, sizeof(*dcp), GFP_KERNEL);
+	if (!dcp)
+		return -ENOMEM;
+
+	dcp->fw_compat = fw_compat;
+	dcp->dev = dev;
+	dcp->hw = *(struct apple_dcp_hw_data *)of_device_get_match_data(dev);
+
+	platform_set_drvdata(pdev, dcp);
+
+	dcp->phy = devm_phy_optional_get(dev, "dp-phy");
+	if (IS_ERR(dcp->phy)) {
+		dev_err(dev, "Failed to get dp-phy: %ld\n", PTR_ERR(dcp->phy));
+		return PTR_ERR(dcp->phy);
+	}
+	if (dcp->phy) {
+		int ret;
+		/*
+		 * Request DP2HDMI related GPIOs as optional for DP-altmode
+		 * compatibility. J180D misses a dp2hdmi-pwren GPIO in the
+		 * template ADT. TODO: check device ADT
+		 */
+		dcp->hdmi_hpd = devm_gpiod_get_optional(dev, "hdmi-hpd", GPIOD_IN);
+		if (IS_ERR(dcp->hdmi_hpd))
+			return PTR_ERR(dcp->hdmi_hpd);
+		if (dcp->hdmi_hpd) {
+			int irq = gpiod_to_irq(dcp->hdmi_hpd);
+			if (irq < 0) {
+				dev_err(dev, "failed to translate HDMI hpd GPIO to IRQ\n");
+				return irq;
+			}
+			dcp->hdmi_hpd_irq = irq;
+
+			ret = devm_request_threaded_irq(dev, dcp->hdmi_hpd_irq,
+						NULL, dcp_dp2hdmi_hpd,
+						IRQF_ONESHOT | IRQF_NO_AUTOEN |
+						IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+						"dp2hdmi-hpd-irq", dcp);
+			if (ret < 0) {
+				dev_err(dev, "failed to request HDMI hpd irq %d: %d\n",
+					irq, ret);
+				return ret;
+			}
+		}
+
+		/*
+		 * Power DP2HDMI on as it is required for the HPD irq.
+		 * TODO: check whether one of the two GPIOs is sufficient for
+		 *       HPD, to save power on battery-powered MacBooks.
+		 */
+		dcp->hdmi_pwren = devm_gpiod_get_optional(dev, "hdmi-pwren", GPIOD_OUT_HIGH);
+		if (IS_ERR(dcp->hdmi_pwren))
+			return PTR_ERR(dcp->hdmi_pwren);
+
+		dcp->dp2hdmi_pwren = devm_gpiod_get_optional(dev, "dp2hdmi-pwren", GPIOD_OUT_HIGH);
+		if (IS_ERR(dcp->dp2hdmi_pwren))
+			return PTR_ERR(dcp->dp2hdmi_pwren);
+
+		ret = of_property_read_u32(dev->of_node, "mux-index", &mux_index);
+		if (!ret) {
+			dcp->xbar = devm_mux_control_get(dev, "dp-xbar");
+			if (IS_ERR(dcp->xbar)) {
+				dev_err(dev, "Failed to get dp-xbar: %ld\n", PTR_ERR(dcp->xbar));
+				return PTR_ERR(dcp->xbar);
+			}
+			ret = mux_control_select(dcp->xbar, mux_index);
+			if (ret)
+				dev_warn(dev, "mux_control_select failed: %d\n", ret);
+		}
+	}
+
+	return component_add(&pdev->dev, &dcp_comp_ops);
+}
+
+static void dcp_platform_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dcp_comp_ops);
+}
+
+static void dcp_platform_shutdown(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dcp_comp_ops);
+}
+
+static int dcp_platform_suspend(struct device *dev)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+
+	if (dcp->avep)
+		av_service_disconnect(dcp);
+
+	if (dcp->hdmi_hpd_irq) {
+		disable_irq(dcp->hdmi_hpd_irq);
+		disconnected_hpd_event(dcp->connector);
+		dcp_dptx_disconnect(dcp, 0);
+	}
+	/*
+	 * Set the device as a wakeup device, which forces its power
+	 * domains to stay on. We need this as we do not support full
+	 * shutdown properly yet.
+	 */
+	device_set_wakeup_path(dev);
+
+	return 0;
+}
+
+static int dcp_platform_resume(struct device *dev)
+{
+	struct apple_dcp *dcp = dev_get_drvdata(dev);
+
+	if (dcp->hdmi_hpd_irq)
+		enable_irq(dcp->hdmi_hpd_irq);
+
+	if (dcp->hdmi_hpd) {
+		bool connected = gpiod_get_value_cansleep(dcp->hdmi_hpd);
+		dev_info(dcp->dev, "resume: HPD connected:%d\n", connected);
+		if (connected)
+			dcp_dptx_connect(dcp, 0);
+	}
+
+	if (dcp->avep)
+		av_service_connect(dcp);
+
+	return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(dcp_platform_pm_ops,
+				dcp_platform_suspend, dcp_platform_resume);
+
+
+static const struct apple_dcp_hw_data apple_dcp_hw_t6020 = {
+	.num_dptx_ports = 1,
+};
+
+static const struct apple_dcp_hw_data apple_dcp_hw_t8112 = {
+	.num_dptx_ports = 2,
+};
+
+static const struct apple_dcp_hw_data apple_dcp_hw_dcp = {
+	.num_dptx_ports = 0,
+};
+
+static const struct apple_dcp_hw_data apple_dcp_hw_dcpext = {
+	.num_dptx_ports = 2,
+};
+
+static const struct of_device_id of_match[] = {
+	{ .compatible = "apple,t6020-dcp", .data = &apple_dcp_hw_t6020,  },
+	{ .compatible = "apple,t8112-dcp", .data = &apple_dcp_hw_t8112,  },
+	{ .compatible = "apple,dcp",       .data = &apple_dcp_hw_dcp,    },
+	{ .compatible = "apple,dcpext",    .data = &apple_dcp_hw_dcpext, },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver apple_platform_driver = {
+	.probe		= dcp_platform_probe,
+	.remove		= dcp_platform_remove,
+	.shutdown	= dcp_platform_shutdown,
+	.driver	= {
+		.name = "apple-dcp",
+		.of_match_table	= of_match,
+		.pm = pm_sleep_ptr(&dcp_platform_pm_ops),
+	},
+};
+
+static int __init apple_dcp_register(void)
+{
+	if (drm_firmware_drivers_only())
+		return -ENODEV;
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+	dcp_audio_register();
+#endif
+	return platform_driver_register(&apple_platform_driver);
+}
+
+static void __exit apple_dcp_unregister(void)
+{
+	platform_driver_unregister(&apple_platform_driver);
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+	dcp_audio_unregister();
+#endif
+}
+
+module_init(apple_dcp_register);
+module_exit(apple_dcp_unregister);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION("Apple Display Controller DRM driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/dcp.h b/drivers/gpu/drm/apple/dcp.h
new file mode 100644
index 00000000000000..e34bc495fd3973
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_H__
+#define __APPLE_DCP_H__
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fourcc.h>
+
+#include "connector.h"
+#include "dcp-internal.h"
+#include "parser.h"
+
+struct apple_crtc {
+	struct drm_crtc base;
+	struct drm_pending_vblank_event *event;
+	bool vsync_disabled;
+
+	/* Reference to the DCP device owning this CRTC */
+	struct platform_device *dcp;
+};
+
+#define to_apple_crtc(x) container_of(x, struct apple_crtc, base)
+
+struct apple_encoder {
+	struct drm_encoder base;
+};
+
+#define to_apple_encoder(x) container_of(x, struct apple_encoder, base)
+
+void dcp_poweroff(struct platform_device *pdev);
+void dcp_poweron(struct platform_device *pdev);
+int dcp_set_crc(struct drm_crtc *crtc, bool enabled);
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+int dcp_get_connector_type(struct platform_device *pdev);
+void dcp_link(struct platform_device *pdev, struct apple_crtc *apple,
+	      struct apple_connector *connector);
+int dcp_start(struct platform_device *pdev);
+int dcp_wait_ready(struct platform_device *pdev, u64 timeout);
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
+bool dcp_is_initialized(struct platform_device *pdev);
+void apple_crtc_vblank(struct apple_crtc *apple);
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc);
+int dcp_get_modes(struct drm_connector *connector);
+int dcp_mode_valid(struct drm_connector *connector,
+		   struct drm_display_mode *mode);
+int dcp_crtc_atomic_modeset(struct drm_crtc *crtc,
+			    struct drm_atomic_state *state);
+bool dcp_crtc_mode_fixup(struct drm_crtc *crtc,
+			 const struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode);
+void dcp_set_dimensions(struct apple_dcp *dcp);
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message);
+
+int dcp_dptx_connect_oob(struct platform_device *pdev, u32 port);
+int dcp_dptx_disconnect_oob(struct platform_device *pdev, u32 port);
+
+int iomfb_start_rtkit(struct apple_dcp *dcp);
+void iomfb_shutdown(struct apple_dcp *dcp);
+/* rtkit message handler for IOMFB messages */
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message);
+
+int systemep_init(struct apple_dcp *dcp);
+int dptxep_init(struct apple_dcp *dcp);
+int ibootep_init(struct apple_dcp *dcp);
+int dpavservep_init(struct apple_dcp *dcp);
+int avep_init(struct apple_dcp *dcp);
+
+
+void __init dcp_audio_register(void);
+void __exit dcp_audio_unregister(void);
+
+#endif
diff --git a/drivers/gpu/drm/apple/dcp_backlight.c b/drivers/gpu/drm/apple/dcp_backlight.c
new file mode 100644
index 00000000000000..1397000c27935c
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp_backlight.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (C) The Asahi Linux Contributors */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modeset_lock.h>
+
+#include <linux/backlight.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include "linux/jiffies.h"
+
+#include "dcp.h"
+#include "dcp-internal.h"
+
+#define MIN_BRIGHTNESS_PART1	2U
+#define MAX_BRIGHTNESS_PART1	99U
+#define MIN_BRIGHTNESS_PART2	103U
+#define MAX_BRIGHTNESS_PART2	510U
+
+/*
+ * lookup table for display brightness 2 to 99 nits
+ */
+static u32 brightness_part1[] = {
+	0x0000000, 0x0810038, 0x0f000bd, 0x143011c,
+	0x1850165, 0x1bc01a1, 0x1eb01d4, 0x2140200,
+	0x2380227, 0x2590249, 0x2770269, 0x2930285,
+	0x2ac02a0, 0x2c402b8, 0x2d902cf, 0x2ee02e4,
+	0x30102f8, 0x314030b, 0x325031c, 0x335032d,
+	0x345033d, 0x354034d, 0x362035b, 0x3700369,
+	0x37d0377, 0x38a0384, 0x3960390, 0x3a2039c,
+	0x3ad03a7, 0x3b803b3, 0x3c303bd, 0x3cd03c8,
+	0x3d703d2, 0x3e103dc, 0x3ea03e5, 0x3f303ef,
+	0x3fc03f8, 0x4050400, 0x40d0409, 0x4150411,
+	0x41d0419, 0x4250421, 0x42d0429, 0x4340431,
+	0x43c0438, 0x443043f, 0x44a0446, 0x451044d,
+	0x4570454, 0x45e045b, 0x4640461, 0x46b0468,
+	0x471046e, 0x4770474, 0x47d047a, 0x4830480,
+	0x4890486, 0x48e048b, 0x4940491, 0x4990497,
+	0x49f049c, 0x4a404a1, 0x4a904a7, 0x4ae04ac,
+	0x4b304b1, 0x4b804b6, 0x4bd04bb, 0x4c204c0,
+	0x4c704c5, 0x4cc04c9, 0x4d004ce, 0x4d504d3,
+	0x4d904d7, 0x4de04dc, 0x4e204e0, 0x4e704e4,
+	0x4eb04e9, 0x4ef04ed, 0x4f304f1, 0x4f704f5,
+	0x4fb04f9, 0x4ff04fd, 0x5030501, 0x5070505,
+	0x50b0509, 0x50f050d, 0x5130511, 0x5160515,
+	0x51a0518, 0x51e051c, 0x5210520, 0x5250523,
+	0x5290527, 0x52c052a, 0x52f052e, 0x5330531,
+	0x5360535, 0x53a0538, 0x53d053b, 0x540053f,
+	0x5440542, 0x5470545, 0x54a0548, 0x54d054c,
+	0x550054f, 0x5530552, 0x5560555, 0x5590558,
+	0x55c055b, 0x55f055e, 0x5620561, 0x5650564,
+	0x5680567, 0x56b056a, 0x56e056d, 0x571056f,
+	0x5740572, 0x5760575, 0x5790578, 0x57c057b,
+	0x57f057d, 0x5810580, 0x5840583, 0x5870585,
+	0x5890588, 0x58c058b, 0x58f058d
+};
+
+static u32 brightness_part12[] = { 0x58f058d, 0x59d058f };
+
+/*
+ * lookup table for display brightness 103.3 to 510 nits
+ */
+static u32 brightness_part2[] = {
+	0x59d058f, 0x5b805ab, 0x5d105c5, 0x5e805dd,
+	0x5fe05f3, 0x6120608, 0x625061c, 0x637062e,
+	0x6480640, 0x6580650, 0x6680660, 0x677066f,
+	0x685067e, 0x693068c, 0x6a00699, 0x6ac06a6,
+	0x6b806b2, 0x6c406be, 0x6cf06ca, 0x6da06d5,
+	0x6e506df, 0x6ef06ea, 0x6f906f4, 0x70206fe,
+	0x70c0707, 0x7150710, 0x71e0719, 0x7260722,
+	0x72f072a, 0x7370733, 0x73f073b, 0x7470743,
+	0x74e074a, 0x7560752, 0x75d0759, 0x7640760,
+	0x76b0768, 0x772076e, 0x7780775, 0x77f077c,
+	0x7850782, 0x78c0789, 0x792078f, 0x7980795,
+	0x79e079b, 0x7a407a1, 0x7aa07a7, 0x7af07ac,
+	0x7b507b2, 0x7ba07b8, 0x7c007bd, 0x7c507c2,
+	0x7ca07c8, 0x7cf07cd, 0x7d407d2, 0x7d907d7,
+	0x7de07dc, 0x7e307e1, 0x7e807e5, 0x7ec07ea,
+	0x7f107ef, 0x7f607f3, 0x7fa07f8, 0x7fe07fc
+};
+
+
+static int dcp_get_brightness(struct backlight_device *bd)
+{
+	struct apple_dcp *dcp = bl_get_data(bd);
+
+	return dcp->brightness.nits;
+}
+
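+/*
+ * Piecewise linear interpolation between adjacent lookup table entries,
+ * using SCALE_FACTOR as the fixed point fractional resolution.
+ */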
+#define SCALE_FACTOR (1 << 10)
+
+static u32 interpolate(int val, int min, int max, u32 *tbl, size_t tbl_size)
+{
+	u32 frac;
+	u64 low, high;
+	u32 interpolated = (tbl_size - 1) * ((val - min) * SCALE_FACTOR) / (max - min);
+
+	size_t index = interpolated / SCALE_FACTOR;
+
+	if (WARN(index + 1 >= tbl_size, "invalid index %zu for brightness %d\n", index, val))
+		return tbl[tbl_size / 2];
+
+	frac = interpolated & (SCALE_FACTOR - 1);
+	low = tbl[index];
+	high = tbl[index + 1];
+
+	return ((frac * high) + ((SCALE_FACTOR - frac) * low)) / SCALE_FACTOR;
+}
+
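+/*
+ * Map a brightness value in nits to a DAC value: values at or beyond the
+ * table limits are clamped to the table endpoints, everything in between is
+ * interpolated from brightness_part1, brightness_part12 or brightness_part2,
+ * and the result is scaled by 16.
+ */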
+static u32 calculate_dac(struct apple_dcp *dcp, int val)
+{
+	u32 dac;
+
+	if (val <= MIN_BRIGHTNESS_PART1)
+		return 16 * brightness_part1[0];
+	else if (val == MAX_BRIGHTNESS_PART1)
+		return 16 * brightness_part1[ARRAY_SIZE(brightness_part1) - 1];
+	else if (val == MIN_BRIGHTNESS_PART2)
+		return 16 * brightness_part2[0];
+	else if (val >= MAX_BRIGHTNESS_PART2)
+		return 16 * brightness_part2[ARRAY_SIZE(brightness_part2) - 1];
+
+	if (val < MAX_BRIGHTNESS_PART1) {
+		dac = interpolate(val, MIN_BRIGHTNESS_PART1, MAX_BRIGHTNESS_PART1,
+				  brightness_part1, ARRAY_SIZE(brightness_part1));
+	} else if (val > MIN_BRIGHTNESS_PART2) {
+		dac = interpolate(val, MIN_BRIGHTNESS_PART2, MAX_BRIGHTNESS_PART2,
+				  brightness_part2, ARRAY_SIZE(brightness_part2));
+	} else {
+		dac = interpolate(val, MAX_BRIGHTNESS_PART1, MIN_BRIGHTNESS_PART2,
+				  brightness_part12, ARRAY_SIZE(brightness_part12));
+	}
+
+	return 16 * dac;
+}
+
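+/*
+ * Trigger an atomic commit on the CRTC with color_mgmt_changed set so that
+ * the pending brightness update is applied with the next flush.
+ */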
+static int drm_crtc_set_brightness(struct apple_dcp *dcp)
+{
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_crtc *crtc = &dcp->crtc->base;
+	int ret = 0;
+
+	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+
+	if (!dcp->brightness.update)
+		goto done;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	state->acquire_ctx = &ctx;
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	crtc_state->color_mgmt_changed = true;
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_atomic_state_put(state);
+done:
+	DRM_MODESET_LOCK_ALL_END(crtc->dev, ctx, ret);
+
+	return ret;
+}
+
+int dcp_backlight_update(struct apple_dcp *dcp)
+{
+	/*
+	 * Do not actively try to change brightness if no mode is set.
+	 * TODO: should this be reflected in the backlight's power property?
+	 *       Defer this, hopefully until it becomes irrelevant due to
+	 *       proper drm-integrated backlight handling.
+	 */
+	if (!dcp->valid_mode)
+		return 0;
+
+	/*
+	 * Wait one vblank cycle in the hope that an atomic swap has already
+	 * updated the brightness.
+	 */
+	msleep((1001 + 23) / 24); /* ~42 ms, one frame at 23.976 fps */
+
+	return drm_crtc_set_brightness(dcp);
+}
+
+static int dcp_set_brightness(struct backlight_device *bd)
+{
+	int ret = 0;
+	struct apple_dcp *dcp = bl_get_data(bd);
+	struct drm_modeset_acquire_ctx ctx;
+	int brightness = backlight_get_brightness(bd);
+
+	DRM_MODESET_LOCK_ALL_BEGIN(dcp->crtc->base.dev, ctx, 0, ret);
+
+	dcp->brightness.dac = calculate_dac(dcp, brightness);
+	dcp->brightness.update = true;
+
+	DRM_MODESET_LOCK_ALL_END(dcp->crtc->base.dev, ctx, ret);
+
+	return dcp_backlight_update(dcp);
+}
+
+static const struct backlight_ops dcp_backlight_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = dcp_get_brightness,
+	.update_status = dcp_set_brightness,
+};
+
+int dcp_backlight_register(struct apple_dcp *dcp)
+{
+	struct device *dev = dcp->dev;
+	struct backlight_device *bl_dev;
+	struct backlight_properties props = {
+		.type = BACKLIGHT_PLATFORM,
+		.brightness = dcp->brightness.nits,
+		.scale = BACKLIGHT_SCALE_LINEAR,
+	};
+	props.max_brightness = min(dcp->brightness.maximum, MAX_BRIGHTNESS_PART2 - 1);
+
+	bl_dev = devm_backlight_device_register(dev, "apple-panel-bl", dev, dcp,
+						&dcp_backlight_ops, &props);
+	if (IS_ERR(bl_dev))
+		return PTR_ERR(bl_dev);
+
+	dcp->brightness.bl_dev = bl_dev;
+	dcp->brightness.dac = calculate_dac(dcp, dcp->brightness.nits);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/apple/dcp_trace.c b/drivers/gpu/drm/apple/dcp_trace.c
new file mode 100644
index 00000000000000..d18e71af73a74d
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp_trace.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include "dcp_trace.h"
\ No newline at end of file
diff --git a/drivers/gpu/drm/apple/dptxep.c b/drivers/gpu/drm/apple/dptxep.c
new file mode 100644
index 00000000000000..e6e863dea76887
--- /dev/null
+++ b/drivers/gpu/drm/apple/dptxep.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+
+#include "afk.h"
+#include "dcp.h"
+#include "dptxep.h"
+#include "parser.h"
+#include "trace.h"
+
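+/*
+ * Wire formats of the DPTX remote port EPIC calls. Fields named unk* or
+ * _unk* are not yet understood.
+ */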
+struct dcpdptx_connection_cmd {
+	__le32 unk;
+	__le32 target;
+} __packed;
+
+struct dcpdptx_hotplug_cmd {
+	u8 _pad0[16];
+	__le32 unk;
+} __packed;
+
+struct dptxport_apcall_link_rate {
+	__le32 retcode;
+	u8 _unk0[12];
+	__le32 link_rate;
+	u8 _unk1[12];
+} __packed;
+
+struct dptxport_apcall_lane_count {
+	__le32 retcode;
+	u8 _unk0[12];
+	__le64 lane_count;
+	u8 _unk1[8];
+} __packed;
+
+struct dptxport_apcall_set_active_lane_count {
+	__le32 retcode;
+	u8 _unk0[12];
+	__le64 lane_count;
+	u8 _unk1[8];
+} __packed;
+
+struct dptxport_apcall_get_support {
+	__le32 retcode;
+	u8 _unk0[12];
+	__le32 supported;
+	u8 _unk1[12];
+} __packed;
+
+struct dptxport_apcall_max_drive_settings {
+	__le32 retcode;
+	u8 _unk0[12];
+	__le32 max_drive_settings[2];
+	u8 _unk1[8];
+};
+
+struct dptxport_apcall_drive_settings {
+	__le32 retcode;
+	u8 _unk0[12];
+	__le32 unk1;
+	__le32 unk2;
+	__le32 unk3;
+	__le32 unk4;
+	__le32 unk5;
+	__le32 unk6;
+	__le32 unk7;
+};
+
+int dptxport_validate_connection(struct apple_epic_service *service, u8 core,
+				 u8 atc, u8 die)
+{
+	struct dptx_port *dptx = service->cookie;
+	struct dcpdptx_connection_cmd cmd, resp;
+	int ret;
+	u32 target = FIELD_PREP(DCPDPTX_REMOTE_PORT_CORE, core) |
+		     FIELD_PREP(DCPDPTX_REMOTE_PORT_ATC, atc) |
+		     FIELD_PREP(DCPDPTX_REMOTE_PORT_DIE, die) |
+		     DCPDPTX_REMOTE_PORT_CONNECTED;
+
+	trace_dptxport_validate_connection(dptx, core, atc, die);
+
+	cmd.target = cpu_to_le32(target);
+	cmd.unk = cpu_to_le32(0x100);
+	ret = afk_service_call(service, 0, 12, &cmd, sizeof(cmd), 40, &resp,
+			       sizeof(resp), 40);
+	if (ret)
+		return ret;
+
+	if (le32_to_cpu(resp.target) != target)
+		return -EINVAL;
+	if (le32_to_cpu(resp.unk) != 0x100)
+		return -EINVAL;
+
+	return 0;
+}
+
+int dptxport_connect(struct apple_epic_service *service, u8 core, u8 atc,
+		     u8 die)
+{
+	struct dptx_port *dptx = service->cookie;
+	struct dcpdptx_connection_cmd cmd, resp;
+	u32 unk_field = 0x0; // seen as 0x100 under some conditions
+	int ret;
+	u32 target = FIELD_PREP(DCPDPTX_REMOTE_PORT_CORE, core) |
+		     FIELD_PREP(DCPDPTX_REMOTE_PORT_ATC, atc) |
+		     FIELD_PREP(DCPDPTX_REMOTE_PORT_DIE, die) |
+		     DCPDPTX_REMOTE_PORT_CONNECTED;
+
+	trace_dptxport_connect(dptx, core, atc, die);
+
+	cmd.target = cpu_to_le32(target);
+	cmd.unk = cpu_to_le32(unk_field);
+	ret = afk_service_call(service, 0, 11, &cmd, sizeof(cmd), 24, &resp,
+			       sizeof(resp), 24);
+	if (ret)
+		return ret;
+
+	if (le32_to_cpu(resp.target) != target)
+		return -EINVAL;
+	if (le32_to_cpu(resp.unk) != unk_field)
+		dev_notice(service->ep->dcp->dev, "unexpected unk field in reply: 0x%x (0x%x)\n",
+			  le32_to_cpu(resp.unk), unk_field);
+
+	return 0;
+}
+
+int dptxport_request_display(struct apple_epic_service *service)
+{
+	return afk_service_call(service, 0, 6, NULL, 0, 16, NULL, 0, 16);
+}
+
+int dptxport_release_display(struct apple_epic_service *service)
+{
+	return afk_service_call(service, 0, 7, NULL, 0, 16, NULL, 0, 16);
+}
+
+int dptxport_set_hpd(struct apple_epic_service *service, bool hpd)
+{
+	struct dcpdptx_hotplug_cmd cmd, resp;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (hpd)
+		cmd.unk = cpu_to_le32(1);
+
+	ret = afk_service_call(service, 8, 8, &cmd, sizeof(cmd), 12, &resp,
+			       sizeof(resp), 12);
+	if (ret)
+		return ret;
+	if (le32_to_cpu(resp.unk) != 1)
+		return -EINVAL;
+	return 0;
+}
+
+static int
+dptxport_call_get_max_drive_settings(struct apple_epic_service *service,
+				     void *reply_, size_t reply_size)
+{
+	struct dptxport_apcall_max_drive_settings *reply = reply_;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+
+	reply->retcode = cpu_to_le32(0);
+	reply->max_drive_settings[0] = cpu_to_le32(0x3);
+	reply->max_drive_settings[1] = cpu_to_le32(0x3);
+
+	return 0;
+}
+
+static int
+dptxport_call_get_drive_settings(struct apple_epic_service *service,
+				     const void *request_, size_t request_size,
+				     void *reply_, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	const struct dptxport_apcall_drive_settings *request = request_;
+	struct dptxport_apcall_drive_settings *reply = reply_;
+
+	if (reply_size < sizeof(*reply) || request_size < sizeof(*request))
+		return -EINVAL;
+
+	*reply = *request;
+
+	/* Clear the rest of the buffer */
+	memset(reply_ + sizeof(*reply), 0, reply_size - sizeof(*reply));
+
+	/*
+	 * retcode appears to be the lane count; seeing 2 for USB-C DP alt mode
+	 * with lanes split between DP and USB3.
+	 */
+	if (reply->retcode != dptx->lane_count)
+		dev_err(service->ep->dcp->dev,
+			"get_drive_settings: unexpected retcode %d\n",
+			reply->retcode);
+
+	reply->retcode = dptx->lane_count;
+	reply->unk5 = dptx->drive_settings[0];
+	reply->unk6 = 0;
+	reply->unk7 = dptx->drive_settings[1];
+
+	return 0;
+}
+
+static int
+dptxport_call_set_drive_settings(struct apple_epic_service *service,
+				     const void *request_, size_t request_size,
+				     void *reply_, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	const struct dptxport_apcall_drive_settings *request = request_;
+	struct dptxport_apcall_drive_settings *reply = reply_;
+
+	if (reply_size < sizeof(*reply) || request_size < sizeof(*request))
+		return -EINVAL;
+
+	*reply = *request;
+	reply->retcode = cpu_to_le32(0);
+
+	dev_info(service->ep->dcp->dev, "set_drive_settings: %d:%d:%d:%d:%d:%d:%d\n",
+		 request->unk1, request->unk2, request->unk3, request->unk4,
+		 request->unk5, request->unk6, request->unk7);
+
+	dptx->drive_settings[0] = reply->unk5;
+	dptx->drive_settings[1] = reply->unk7;
+
+	return 0;
+}
+
+static int dptxport_call_get_max_link_rate(struct apple_epic_service *service,
+					   void *reply_, size_t reply_size)
+{
+	struct dptxport_apcall_link_rate *reply = reply_;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+
+	reply->retcode = cpu_to_le32(0);
+	reply->link_rate = cpu_to_le32(LINK_RATE_HBR3);
+
+	return 0;
+}
+
+static int dptxport_call_get_max_lane_count(struct apple_epic_service *service,
+					   void *reply_, size_t reply_size)
+{
+	struct dptxport_apcall_lane_count *reply = reply_;
+	struct dptx_port *dptx = service->cookie;
+	struct apple_dcp *dcp = service->ep->dcp;
+	union phy_configure_opts phy_ops;
+	int ret;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+
+	ret = phy_validate(dptx->atcphy, PHY_MODE_DP, 0, &phy_ops);
+	if (ret < 0) {
+		dev_err(dcp->dev, "phy_validate failed: %d\n", ret);
+		reply->retcode = cpu_to_le32(1);
+		reply->lane_count = cpu_to_le64(0);
+	} else {
+		if (phy_ops.dp.lanes < 2) {
+			// phy_validate might return 0 lanes if atc phy is not
+			// yet switched to DP mode
+			dev_dbg(dcp->dev, "get_max_lane_count: phy lanes: %d\n",
+				phy_ops.dp.lanes);
+			// default to 4 lanes
+			dptx->lane_count = 4;
+		} else {
+			dptx->lane_count = phy_ops.dp.lanes;
+		}
+		reply->retcode = cpu_to_le32(0);
+		reply->lane_count = cpu_to_le64(dptx->lane_count);
+	}
+
+	return 0;
+}
+
+static int dptxport_call_set_active_lane_count(struct apple_epic_service *service,
+					       const void *data, size_t data_size,
+					       void *reply_, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	struct apple_dcp *dcp = service->ep->dcp;
+	const struct dptxport_apcall_set_active_lane_count *request = data;
+	struct dptxport_apcall_set_active_lane_count *reply = reply_;
+	int ret = 0;
+	int retcode = 0;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+	if (data_size < sizeof(*request))
+		return -EINVAL;
+
+	u64 lane_count = le64_to_cpu(request->lane_count);
+
+	if (dptx->lane_count < lane_count)
+		dev_err(dcp->dev, "set_active_lane_count: unexpected lane "
+			"count:%llu phy: %d\n", lane_count, dptx->lane_count);
+
+	switch (lane_count) {
+	case 0 ... 2:
+	case 4:
+		dptx->phy_ops.dp.lanes = lane_count;
+		// Use dptx phy index > 3 as indication for dptx-phy or
+		// lpdptx-phy and configure the number of lanes for those
+		dptx->phy_ops.dp.set_lanes = (dcp->dptx_phy > 3);
+		break;
+	default:
+		dev_err(dcp->dev, "set_active_lane_count: invalid lane count:%llu\n", lane_count);
+		retcode = 1;
+		lane_count = 0;
+		break;
+	}
+
+	if (dptx->phy_ops.dp.set_lanes) {
+		if (dptx->atcphy) {
+			ret = phy_configure(dptx->atcphy, &dptx->phy_ops);
+			if (ret)
+				return ret;
+		}
+		dptx->phy_ops.dp.set_lanes = 0;
+		dptx->lane_count = lane_count;
+	}
+
+	reply->retcode = cpu_to_le32(retcode);
+	reply->lane_count = cpu_to_le64(lane_count);
+
+	if (lane_count > 0)
+		complete(&dptx->linkcfg_completion);
+
+	return ret;
+}
+
+static int dptxport_call_get_link_rate(struct apple_epic_service *service,
+				       void *reply_, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	struct dptxport_apcall_link_rate *reply = reply_;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+
+	reply->retcode = cpu_to_le32(0);
+	reply->link_rate = cpu_to_le32(dptx->link_rate);
+
+	return 0;
+}
+
+static int
+dptxport_call_will_change_link_config(struct apple_epic_service *service)
+{
+	struct dptx_port *dptx = service->cookie;
+
+	dptx->phy_ops.dp.set_lanes = 0;
+	dptx->phy_ops.dp.set_rate = 0;
+	dptx->phy_ops.dp.set_voltages = 0;
+
+	return 0;
+}
+
+static int
+dptxport_call_did_change_link_config(struct apple_epic_service *service)
+{
+	/* assume the link config did change and wait a little bit */
+	mdelay(10);
+
+	return 0;
+}
+
+static int dptxport_call_set_link_rate(struct apple_epic_service *service,
+				       const void *data, size_t data_size,
+				       void *reply_, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	const struct dptxport_apcall_link_rate *request = data;
+	struct dptxport_apcall_link_rate *reply = reply_;
+	u32 link_rate, phy_link_rate;
+	bool phy_set_rate = false;
+	int ret;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+	if (data_size < sizeof(*request))
+		return -EINVAL;
+
+	link_rate = le32_to_cpu(request->link_rate);
+	trace_dptxport_call_set_link_rate(dptx, link_rate);
+
+	switch (link_rate) {
+	case LINK_RATE_RBR:
+		phy_link_rate = 1620;
+		phy_set_rate = true;
+		break;
+	case LINK_RATE_HBR:
+		phy_link_rate = 2700;
+		phy_set_rate = true;
+		break;
+	case LINK_RATE_HBR2:
+		phy_link_rate = 5400;
+		phy_set_rate = true;
+		break;
+	case LINK_RATE_HBR3:
+		phy_link_rate = 8100;
+		phy_set_rate = true;
+		break;
+	case 0:
+		phy_link_rate = 0;
+		phy_set_rate = true;
+		break;
+	default:
+		dev_err(service->ep->dcp->dev,
+			"DPTXPort: Unsupported link rate 0x%x requested\n",
+			link_rate);
+		link_rate = 0;
+		phy_set_rate = false;
+		break;
+	}
+
+	if (phy_set_rate) {
+		dptx->phy_ops.dp.link_rate = phy_link_rate;
+		dptx->phy_ops.dp.set_rate = 1;
+
+		if (dptx->atcphy) {
+			ret = phy_configure(dptx->atcphy, &dptx->phy_ops);
+			if (ret)
+				return ret;
+		}
+
+		//if (dptx->phy_ops.dp.set_rate)
+		dptx->link_rate = dptx->pending_link_rate = link_rate;
+
+	}
+
+	//dptx->pending_link_rate = link_rate;
+	reply->retcode = cpu_to_le32(0);
+	reply->link_rate = cpu_to_le32(link_rate);
+
+	return 0;
+}
+
+static int dptxport_call_get_supports_hpd(struct apple_epic_service *service,
+					  void *reply_, size_t reply_size)
+{
+	struct dptxport_apcall_get_support *reply = reply_;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+
+	reply->retcode = cpu_to_le32(0);
+	reply->supported = cpu_to_le32(0);
+	return 0;
+}
+
+static int
+dptxport_call_get_supports_downspread(struct apple_epic_service *service,
+				      void *reply_, size_t reply_size)
+{
+	struct dptxport_apcall_get_support *reply = reply_;
+
+	if (reply_size < sizeof(*reply))
+		return -EINVAL;
+
+	reply->retcode = cpu_to_le32(0);
+	reply->supported = cpu_to_le32(0);
+	return 0;
+}
+
+static int
+dptxport_call_activate(struct apple_epic_service *service,
+		       const void *data, size_t data_size,
+		       void *reply, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	const struct apple_dcp *dcp = service->ep->dcp;
+
+	// TODO: hack, use phy_set_mode to select the correct DCP(EXT) input
+	phy_set_mode_ext(dptx->atcphy, PHY_MODE_DP, dcp->index);
+
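+	/* echo the request back and clear the 32-bit retcode at its start */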
+	memcpy(reply, data, min(reply_size, data_size));
+	if (reply_size >= 4)
+		memset(reply, 0, 4);
+
+	return 0;
+}
+
+static int
+dptxport_call_deactivate(struct apple_epic_service *service,
+		       const void *data, size_t data_size,
+		       void *reply, size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+
+	/* deactivate phy */
+	phy_set_mode_ext(dptx->atcphy, PHY_MODE_INVALID, 0);
+
+	memcpy(reply, data, min(reply_size, data_size));
+	if (reply_size >= 4)
+		memset(reply, 0, 4);
+
+	return 0;
+}
+
+static int dptxport_call(struct apple_epic_service *service, u32 idx,
+			 const void *data, size_t data_size, void *reply,
+			 size_t reply_size)
+{
+	struct dptx_port *dptx = service->cookie;
+	trace_dptxport_apcall(dptx, idx, data_size);
+
+	switch (idx) {
+	case DPTX_APCALL_WILL_CHANGE_LINK_CONFIG:
+		return dptxport_call_will_change_link_config(service);
+	case DPTX_APCALL_DID_CHANGE_LINK_CONFIG:
+		return dptxport_call_did_change_link_config(service);
+	case DPTX_APCALL_GET_MAX_LINK_RATE:
+		return dptxport_call_get_max_link_rate(service, reply,
+						       reply_size);
+	case DPTX_APCALL_GET_LINK_RATE:
+		return dptxport_call_get_link_rate(service, reply, reply_size);
+	case DPTX_APCALL_SET_LINK_RATE:
+		return dptxport_call_set_link_rate(service, data, data_size,
+						   reply, reply_size);
+	case DPTX_APCALL_GET_MAX_LANE_COUNT:
+		return dptxport_call_get_max_lane_count(service, reply, reply_size);
+	case DPTX_APCALL_SET_ACTIVE_LANE_COUNT:
+		return dptxport_call_set_active_lane_count(service, data, data_size,
+							   reply, reply_size);
+	case DPTX_APCALL_GET_SUPPORTS_HPD:
+		return dptxport_call_get_supports_hpd(service, reply,
+						      reply_size);
+	case DPTX_APCALL_GET_SUPPORTS_DOWN_SPREAD:
+		return dptxport_call_get_supports_downspread(service, reply,
+							     reply_size);
+	case DPTX_APCALL_GET_MAX_DRIVE_SETTINGS:
+		return dptxport_call_get_max_drive_settings(service, reply,
+							    reply_size);
+	case DPTX_APCALL_GET_DRIVE_SETTINGS:
+		return dptxport_call_get_drive_settings(service, data, data_size,
+							reply, reply_size);
+	case DPTX_APCALL_SET_DRIVE_SETTINGS:
+		return dptxport_call_set_drive_settings(service, data, data_size,
+							reply, reply_size);
+	case DPTX_APCALL_ACTIVATE:
+		return dptxport_call_activate(service, data, data_size,
+					      reply, reply_size);
+	case DPTX_APCALL_DEACTIVATE:
+		return dptxport_call_deactivate(service, data, data_size,
+						reply, reply_size);
+	default:
+		/* just try to ACK and hope for the best... */
+		dev_info(service->ep->dcp->dev, "DPTXPort: acking unhandled call %u\n",
+			idx);
+		fallthrough;
+	case DPTX_APCALL_GET_DOWN_SPREAD:
+	case DPTX_APCALL_SET_DOWN_SPREAD:
+		memcpy(reply, data, min(reply_size, data_size));
+		if (reply_size >= 4)
+			memset(reply, 0, 4);
+		return 0;
+	}
+}
+
+static void dptxport_init(struct apple_epic_service *service, const char *name,
+			  const char *class, s64 unit)
+{
+
+	if (strcmp(name, "dcpdptx-port-epic"))
+		return;
+	if (strcmp(class, "AppleDCPDPTXRemotePort"))
+		return;
+
+	trace_dptxport_init(service->ep->dcp, unit);
+
+	switch (unit) {
+	case 0:
+	case 1:
+		if (service->ep->dcp->dptxport[unit].enabled) {
+			dev_err(service->ep->dcp->dev,
+				"DPTXPort: unit %lld already exists\n", unit);
+			return;
+		}
+		service->ep->dcp->dptxport[unit].unit = unit;
+		service->ep->dcp->dptxport[unit].service = service;
+		service->ep->dcp->dptxport[unit].enabled = true;
+		service->cookie = (void *)&service->ep->dcp->dptxport[unit];
+		complete(&service->ep->dcp->dptxport[unit].enable_completion);
+		break;
+	default:
+		dev_err(service->ep->dcp->dev, "DPTXPort: invalid unit %lld\n",
+			unit);
+	}
+}
+
+static const struct apple_epic_service_ops dptxep_ops[] = {
+	{
+		.name = "AppleDCPDPTXRemotePort",
+		.init = dptxport_init,
+		.call = dptxport_call,
+	},
+	{}
+};
+
+int dptxep_init(struct apple_dcp *dcp)
+{
+	int ret;
+	u32 port;
+	unsigned long timeout = msecs_to_jiffies(1000);
+
+	init_completion(&dcp->dptxport[0].enable_completion);
+	init_completion(&dcp->dptxport[1].enable_completion);
+	init_completion(&dcp->dptxport[0].linkcfg_completion);
+	init_completion(&dcp->dptxport[1].linkcfg_completion);
+
+	dcp->dptxep = afk_init(dcp, DPTX_ENDPOINT, dptxep_ops);
+	if (IS_ERR(dcp->dptxep))
+		return PTR_ERR(dcp->dptxep);
+
+	ret = afk_start(dcp->dptxep);
+	if (ret)
+		return ret;
+
+	for (port = 0; port < dcp->hw.num_dptx_ports; port++) {
+		ret = wait_for_completion_timeout(&dcp->dptxport[port].enable_completion,
+						timeout);
+		if (!ret)
+			return -ETIMEDOUT;
+		else if (ret < 0)
+			return ret;
+		timeout = ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/apple/dptxep.h b/drivers/gpu/drm/apple/dptxep.h
new file mode 100644
index 00000000000000..0bf2534054fd7b
--- /dev/null
+++ b/drivers/gpu/drm/apple/dptxep.h
@@ -0,0 +1,70 @@
+#ifndef __APPLE_DCP_DPTXEP_H__
+#define __APPLE_DCP_DPTXEP_H__
+
+#include <linux/phy/phy.h>
+#include <linux/mux/consumer.h>
+
+enum dptx_apcall {
+	DPTX_APCALL_ACTIVATE = 0,
+	DPTX_APCALL_DEACTIVATE = 1,
+	DPTX_APCALL_GET_MAX_DRIVE_SETTINGS = 2,
+	DPTX_APCALL_SET_DRIVE_SETTINGS = 3,
+	DPTX_APCALL_GET_DRIVE_SETTINGS = 4,
+	DPTX_APCALL_WILL_CHANGE_LINK_CONFIG = 5,
+	DPTX_APCALL_DID_CHANGE_LINK_CONFIG = 6,
+	DPTX_APCALL_GET_MAX_LINK_RATE = 7,
+	DPTX_APCALL_GET_LINK_RATE = 8,
+	DPTX_APCALL_SET_LINK_RATE = 9,
+	DPTX_APCALL_GET_MAX_LANE_COUNT = 10,
+	DPTX_APCALL_GET_ACTIVE_LANE_COUNT = 11,
+	DPTX_APCALL_SET_ACTIVE_LANE_COUNT = 12,
+	DPTX_APCALL_GET_SUPPORTS_DOWN_SPREAD = 13,
+	DPTX_APCALL_GET_DOWN_SPREAD = 14,
+	DPTX_APCALL_SET_DOWN_SPREAD = 15,
+	DPTX_APCALL_GET_SUPPORTS_LANE_MAPPING = 16,
+	DPTX_APCALL_SET_LANE_MAP = 17,
+	DPTX_APCALL_GET_SUPPORTS_HPD = 18,
+	DPTX_APCALL_FORCE_HOTPLUG_DETECT = 19,
+	DPTX_APCALL_INACTIVE_SINK_DETECTED = 20,
+	DPTX_APCALL_SET_TILED_DISPLAY_HINTS = 21,
+	DPTX_APCALL_DEVICE_NOT_RESPONDING = 22,
+	DPTX_APCALL_DEVICE_BUSY_TIMEOUT = 23,
+	DPTX_APCALL_DEVICE_NOT_STARTED = 24,
+};
+
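+/* Layout of the remote port identifier used for connect/validate calls */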
+#define DCPDPTX_REMOTE_PORT_CORE GENMASK(3, 0)
+#define DCPDPTX_REMOTE_PORT_ATC GENMASK(7, 4)
+#define DCPDPTX_REMOTE_PORT_DIE GENMASK(11, 8)
+#define DCPDPTX_REMOTE_PORT_CONNECTED BIT(15)
+
+enum dptx_link_rate {
+	LINK_RATE_RBR = 0x06,
+	LINK_RATE_HBR = 0x0a,
+	LINK_RATE_HBR2 = 0x14,
+	LINK_RATE_HBR3 = 0x1e,
+};
+
+struct apple_epic_service;
+
+struct dptx_port {
+	bool enabled, connected;
+	struct completion enable_completion;
+	struct completion linkcfg_completion;
+	u32 unit;
+	struct apple_epic_service *service;
+	union phy_configure_opts phy_ops;
+	struct phy *atcphy;
+	struct mux_control *mux;
+	u32 lane_count;
+	u32 link_rate, pending_link_rate;
+	u32 drive_settings[2];
+};
+
+int dptxport_validate_connection(struct apple_epic_service *service, u8 core,
+				 u8 atc, u8 die);
+int dptxport_connect(struct apple_epic_service *service, u8 core, u8 atc,
+		     u8 die);
+int dptxport_request_display(struct apple_epic_service *service);
+int dptxport_release_display(struct apple_epic_service *service);
+int dptxport_set_hpd(struct apple_epic_service *service, bool hpd);
+#endif
diff --git a/drivers/gpu/drm/apple/epic/dpavservep.c b/drivers/gpu/drm/apple/epic/dpavservep.c
new file mode 100644
index 00000000000000..2de9d2fe4c24a3
--- /dev/null
+++ b/drivers/gpu/drm/apple/epic/dpavservep.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#include "dpavservep.h"
+
+#include <drm/drm_edid.h>
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include "../afk.h"
+#include "../dcp.h"
+#include "../dcp-internal.h"
+#include "../trace.h"
+
+static void dcpavserv_init(struct apple_epic_service *service, const char *name,
+			  const char *class, s64 unit)
+{
+	struct apple_dcp *dcp = service->ep->dcp;
+	trace_dcpavserv_init(dcp, unit);
+
+	if (unit == 0 && name && !strcmp(name, "dcpav-service-epic")) {
+		if (dcp->dcpavserv.enabled) {
+			dev_err(dcp->dev,
+				"DCPAVSERV: unit %lld already exists\n", unit);
+			return;
+		}
+		dcp->dcpavserv.service = service;
+		dcp->dcpavserv.enabled = true;
+		service->cookie = &dcp->dcpavserv;
+		complete(&dcp->dcpavserv.enable_completion);
+	}
+}
+
+static void dcpavserv_teardown(struct apple_epic_service *service)
+{
+	struct apple_dcp *dcp = service->ep->dcp;
+	service->enabled = false;
+
+	if (dcp->dcpavserv.enabled) {
+		dcp->dcpavserv.enabled = false;
+		dcp->dcpavserv.service = NULL;
+		service->cookie = NULL;
+		reinit_completion(&dcp->dcpavserv.enable_completion);
+	}
+}
+
+static void dcpdpserv_init(struct apple_epic_service *service, const char *name,
+			  const char *class, s64 unit)
+{
+}
+
+static void dcpdpserv_teardown(struct apple_epic_service *service)
+{
+	service->enabled = false;
+}
+
+struct dcpavserv_status_report {
+	u32 unk00[4];
+	u8 flag0;
+	u8 flag1;
+	u8 flag2;
+	u8 flag3;
+	u32 unk14[3];
+	u32 status;
+	u32 unk24[3];
+} __packed;
+
+struct dpavserv_copy_edid_cmd {
+	__le64 max_size;
+	u8 _pad1[24];
+	__le64 used_size;
+	u8 _pad2[8];
+} __packed;
+
+#define EDID_LEADING_DATA_SIZE		8
+#define EDID_BLOCK_SIZE			128
+#define EDID_EXT_BLOCK_COUNT_OFFSET	0x7E
+#define EDID_MAX_SIZE			SZ_32K
+#define EDID_BUF_SIZE			(EDID_LEADING_DATA_SIZE + EDID_MAX_SIZE)
+
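+/*
+ * The copy_edid reply mirrors the request layout and appends
+ * EDID_LEADING_DATA_SIZE bytes of leading data followed by the raw EDID.
+ */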
+struct dpavserv_copy_edid_resp {
+	__le64 max_size;
+	u8 _pad1[24];
+	__le64 used_size;
+	u8 _pad2[8];
+	u8 data[];
+} __packed;
+
+static int parse_report(struct apple_epic_service *service, enum epic_subtype type,
+			 const void *data, size_t data_size)
+{
+#if defined(DEBUG)
+	struct apple_dcp *dcp = service->ep->dcp;
+	const struct epic_service_call *call;
+	const void *payload;
+	size_t payload_size;
+
+	dev_dbg(dcp->dev, "dcpavserv[ch:%u]: report type:%02x len:%zu\n",
+		service->channel, type, data_size);
+
+	if (type != EPIC_SUBTYPE_STD_SERVICE)
+		return 0;
+
+	if (data_size < sizeof(*call))
+		return 0;
+
+	call = data;
+
+	if (le32_to_cpu(call->magic) != EPIC_SERVICE_CALL_MAGIC) {
+		dev_warn(dcp->dev, "dcpavserv[ch:%u]: report magic 0x%08x != 0x%08x\n",
+			service->channel, le32_to_cpu(call->magic), EPIC_SERVICE_CALL_MAGIC);
+		return 0;
+	}
+
+	payload_size = data_size - sizeof(*call);
+	if (payload_size < le32_to_cpu(call->data_len)) {
+		dev_warn(dcp->dev, "dcpavserv[ch:%u]: report payload size %zu call len %u\n",
+			service->channel, payload_size, le32_to_cpu(call->data_len));
+		return 0;
+	}
+	payload_size = le32_to_cpu(call->data_len);
+	payload = data + sizeof(*call);
+
+	if (le16_to_cpu(call->group) == 2 && le16_to_cpu(call->command) == 0) {
+		if (payload_size == sizeof(struct dcpavserv_status_report)) {
+			const struct dcpavserv_status_report *stat = payload;
+			dev_info(dcp->dev, "dcpavserv[ch:%u]: flags: 0x%02x,0x%02x,0x%02x,0x%02x status:%u\n",
+				service->channel, stat->flag0, stat->flag1,
+				stat->flag2, stat->flag3, stat->status);
+		} else {
+			dev_dbg(dcp->dev, "dcpavserv[ch:%u]: report payload size %zu\n", service->channel, payload_size);
+		}
+	} else {
+		print_hex_dump(KERN_DEBUG, "dcpavserv report: ", DUMP_PREFIX_NONE,
+			       16, 1, payload, payload_size, true);
+	}
+#endif
+
+	return 0;
+}
+
+static int dcpavserv_report(struct apple_epic_service *service,
+			    enum epic_subtype type, const void *data,
+			    size_t data_size)
+{
+	return parse_report(service, type, data, data_size);
+}
+
+static int dcpdpserv_report(struct apple_epic_service *service,
+			    enum epic_subtype type, const void *data,
+			    size_t data_size)
+{
+	return parse_report(service, type, data, data_size);
+}
+
+const struct drm_edid *dcpavserv_copy_edid(struct apple_epic_service *service)
+{
+	struct dpavserv_copy_edid_cmd cmd;
+	struct dpavserv_copy_edid_resp *resp __free(kfree) = NULL;
+	int num_blocks;
+	u64 data_size;
+	int ret;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.max_size = cpu_to_le64(EDID_BUF_SIZE);
+	resp = kzalloc(sizeof(*resp) + EDID_BUF_SIZE, GFP_KERNEL);
+	if (!resp)
+		return ERR_PTR(-ENOMEM);
+
+	ret = afk_service_call(service, 1, 7, &cmd, sizeof(cmd), EDID_BUF_SIZE, resp,
+			       sizeof(*resp) + EDID_BUF_SIZE, 0);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	if (le64_to_cpu(resp->max_size) != EDID_BUF_SIZE)
+		return ERR_PTR(-EIO);
+
+	// print_hex_dump(KERN_DEBUG, "dpavserv EDID cmd: ", DUMP_PREFIX_NONE,
+	// 	       16, 1, resp, 192, true);
+
+	data_size = le64_to_cpu(resp->used_size);
+	if (data_size < EDID_LEADING_DATA_SIZE + EDID_BLOCK_SIZE)
+		return ERR_PTR(-EIO);
+
+	num_blocks = resp->data[EDID_LEADING_DATA_SIZE + EDID_EXT_BLOCK_COUNT_OFFSET];
+	if ((1 + num_blocks) * EDID_BLOCK_SIZE != data_size - EDID_LEADING_DATA_SIZE)
+		return ERR_PTR(-EIO);
+
+	return drm_edid_alloc(resp->data + EDID_LEADING_DATA_SIZE,
+			      data_size - EDID_LEADING_DATA_SIZE);
+}
+
+static const struct apple_epic_service_ops dpavservep_ops[] = {
+	{
+		.name = "dcpav-service-epic",
+		.init = dcpavserv_init,
+		.teardown = dcpavserv_teardown,
+		.report = dcpavserv_report,
+	},
+	{
+		.name = "dcpdp-service-epic",
+		.init = dcpdpserv_init,
+		.teardown = dcpdpserv_teardown,
+		.report = dcpdpserv_report,
+	},
+	{},
+};
+
+int dpavservep_init(struct apple_dcp *dcp)
+{
+	int ret;
+
+	init_completion(&dcp->dcpavserv.enable_completion);
+
+	dcp->dcpavservep = afk_init(dcp, DPAVSERV_ENDPOINT, dpavservep_ops);
+	if (IS_ERR(dcp->dcpavservep))
+		return PTR_ERR(dcp->dcpavservep);
+
+	dcp->dcpavservep->match_epic_name = true;
+
+	ret = afk_start(dcp->dcpavservep);
+	if (ret)
+		return ret;
+
+	ret = wait_for_completion_timeout(&dcp->dcpavserv.enable_completion,
+					  msecs_to_jiffies(1000));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/apple/epic/dpavservep.h b/drivers/gpu/drm/apple/epic/dpavservep.h
new file mode 100644
index 00000000000000..858ff14b0bd7be
--- /dev/null
+++ b/drivers/gpu/drm/apple/epic/dpavservep.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef _DRM_APPLE_EPIC_DPAVSERV_H
+#define _DRM_APPLE_EPIC_DPAVSERV_H
+
+#include <linux/completion.h>
+#include <linux/types.h>
+
+struct drm_edid;
+struct apple_epic_service;
+
+struct dcpavserv {
+	bool enabled;
+	struct completion enable_completion;
+	u32 unit;
+	struct apple_epic_service *service;
+};
+
+const struct drm_edid *dcpavserv_copy_edid(struct apple_epic_service *service);
+
+#endif /* _DRM_APPLE_EPIC_DPAVSERV_H */
diff --git a/drivers/gpu/drm/apple/hdmi-codec-chmap.h b/drivers/gpu/drm/apple/hdmi-codec-chmap.h
new file mode 100644
index 00000000000000..f98e1e86b89602
--- /dev/null
+++ b/drivers/gpu/drm/apple/hdmi-codec-chmap.h
@@ -0,0 +1,123 @@
+// copied from sound/soc/codecs/hdmi-codec.c
+
+#include <sound/pcm.h>
+
+/* Channel maps for multi-channel playbacks, up to 8 n_ch */
+static const struct snd_pcm_chmap_elem hdmi_codec_8ch_chmaps[] = {
+	{ .channels = 2, /* CA_ID 0x00 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
+	{ .channels = 4, /* CA_ID 0x01 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA } },
+	{ .channels = 4, /* CA_ID 0x02 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC } },
+	{ .channels = 4, /* CA_ID 0x03 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC } },
+	{ .channels = 6, /* CA_ID 0x04 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 6, /* CA_ID 0x05 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 6, /* CA_ID 0x06 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 6, /* CA_ID 0x07 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 6, /* CA_ID 0x08 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
+	{ .channels = 6, /* CA_ID 0x09 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
+	{ .channels = 6, /* CA_ID 0x0A */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
+	{ .channels = 6, /* CA_ID 0x0B */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
+	{ .channels = 8, /* CA_ID 0x0C */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 8, /* CA_ID 0x0D */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 8, /* CA_ID 0x0E */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 8, /* CA_ID 0x0F */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RC, SNDRV_CHMAP_NA } },
+	{ .channels = 8, /* CA_ID 0x10 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RLC, SNDRV_CHMAP_RRC } },
+	{ .channels = 8, /* CA_ID 0x11 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RLC, SNDRV_CHMAP_RRC } },
+	{ .channels = 8, /* CA_ID 0x12 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RLC, SNDRV_CHMAP_RRC } },
+	{ .channels = 8, /* CA_ID 0x13 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
+		   SNDRV_CHMAP_RLC, SNDRV_CHMAP_RRC } },
+	{ .channels = 8, /* CA_ID 0x14 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x15 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x16 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x17 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x18 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x19 */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x1A */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x1B */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x1C */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x1D */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_NA, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x1E */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ .channels = 8, /* CA_ID 0x1F */
+	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE,
+		   SNDRV_CHMAP_FC, SNDRV_CHMAP_NA, SNDRV_CHMAP_NA,
+		   SNDRV_CHMAP_FLC, SNDRV_CHMAP_FRC } },
+	{ }
+};
diff --git a/drivers/gpu/drm/apple/ibootep.c b/drivers/gpu/drm/apple/ibootep.c
new file mode 100644
index 00000000000000..ae4bc8a69f2a8d
--- /dev/null
+++ b/drivers/gpu/drm/apple/ibootep.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2023 */
+
+#include <linux/completion.h>
+
+#include "afk.h"
+#include "dcp.h"
+
+static void disp_service_init(struct apple_epic_service *service, const char *name,
+			const char *class, s64 unit)
+{
+}
+
+
+static const struct apple_epic_service_ops ibootep_ops[] = {
+	{
+		.name = "disp0-service",
+		.init = disp_service_init,
+	},
+	{}
+};
+
+int ibootep_init(struct apple_dcp *dcp)
+{
+	dcp->ibootep = afk_init(dcp, DISP0_ENDPOINT, ibootep_ops);
+	if (IS_ERR(dcp->ibootep))
+		return PTR_ERR(dcp->ibootep);
+
+	return afk_start(dcp->ibootep);
+}
diff --git a/drivers/gpu/drm/apple/iomfb.c b/drivers/gpu/drm/apple/iomfb.c
new file mode 100644
index 00000000000000..398118933e801e
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "iomfb_internal.h"
+#include "parser.h"
+#include "trace.h"
+
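+/*
+ * Offsets into the IOMFB shared memory region: commands we transmit are
+ * written at the TX offset of their context, messages from the DCP are
+ * read from the channel offset.
+ */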
+static int dcp_tx_offset(enum dcp_context_id id)
+{
+	switch (id) {
+	case DCP_CONTEXT_CB:
+	case DCP_CONTEXT_CMD:
+		return 0x00000;
+	case DCP_CONTEXT_OOBCB:
+	case DCP_CONTEXT_OOBCMD:
+		return 0x08000;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int dcp_channel_offset(enum dcp_context_id id)
+{
+	switch (id) {
+	case DCP_CONTEXT_ASYNC:
+		return 0x40000;
+	case DCP_CONTEXT_OOBASYNC:
+		return 0x48000;
+	case DCP_CONTEXT_CB:
+		return 0x60000;
+	case DCP_CONTEXT_OOBCB:
+		return 0x68000;
+	default:
+		return dcp_tx_offset(id);
+	}
+}
+
+static inline u64 dcpep_set_shmem(u64 dart_va)
+{
+	return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_SET_SHMEM) |
+	       FIELD_PREP(IOMFB_SHMEM_FLAG, IOMFB_SHMEM_FLAG_VALUE) |
+	       FIELD_PREP(IOMFB_SHMEM_DVA, dart_va);
+}
+
+static inline u64 dcpep_msg(enum dcp_context_id id, u32 length, u16 offset)
+{
+	return FIELD_PREP(IOMFB_MESSAGE_TYPE, IOMFB_MESSAGE_TYPE_MSG) |
+		FIELD_PREP(IOMFB_MSG_CONTEXT, id) |
+		FIELD_PREP(IOMFB_MSG_OFFSET, offset) |
+		FIELD_PREP(IOMFB_MSG_LENGTH, length);
+}
+
+static inline u64 dcpep_ack(enum dcp_context_id id)
+{
+	return dcpep_msg(id, 0, 0) | IOMFB_MSG_ACK;
+}
+
+/*
+ * A channel is busy if we have sent a message that has yet to be
+ * acked. The driver must not send a message to a busy channel.
+ */
+static bool dcp_channel_busy(struct dcp_channel *ch)
+{
+	return (ch->depth != 0);
+}
+
+/*
+ * Get the context ID passed to the DCP for a command we push. The rule is
+ * simple: callback contexts are used when replying to the DCP, command
+ * contexts are used otherwise. That corresponds to a nonzero or zero call
+ * stack depth, respectively. This rule frees the caller from tracking the
+ * call context manually.
+ */
+static enum dcp_context_id dcp_call_context(struct apple_dcp *dcp, bool oob)
+{
+	u8 depth = oob ? dcp->ch_oobcmd.depth : dcp->ch_cmd.depth;
+
+	if (depth)
+		return oob ? DCP_CONTEXT_OOBCB : DCP_CONTEXT_CB;
+	else
+		return oob ? DCP_CONTEXT_OOBCMD : DCP_CONTEXT_CMD;
+}
+
+/* Get a channel for a context */
+static struct dcp_channel *dcp_get_channel(struct apple_dcp *dcp,
+					   enum dcp_context_id context)
+{
+	switch (context) {
+	case DCP_CONTEXT_CB:
+		return &dcp->ch_cb;
+	case DCP_CONTEXT_CMD:
+		return &dcp->ch_cmd;
+	case DCP_CONTEXT_OOBCB:
+		return &dcp->ch_oobcb;
+	case DCP_CONTEXT_OOBCMD:
+		return &dcp->ch_oobcmd;
+	case DCP_CONTEXT_ASYNC:
+		return &dcp->ch_async;
+	case DCP_CONTEXT_OOBASYNC:
+		return &dcp->ch_oobasync;
+	default:
+		return NULL;
+	}
+}
+
+/* Get the start of a packet: after the end of the previous packet */
+static u16 dcp_packet_start(struct dcp_channel *ch, u8 depth)
+{
+	if (depth > 0)
+		return ch->end[depth - 1];
+	else
+		return 0;
+}
+
+/* Pushes and pops the depth of the call stack with safety checks */
+static u8 dcp_push_depth(u8 *depth)
+{
+	u8 ret = (*depth)++;
+
+	WARN_ON(ret >= DCP_MAX_CALL_DEPTH);
+	return ret;
+}
+
+static u8 dcp_pop_depth(u8 *depth)
+{
+	WARN_ON((*depth) == 0);
+
+	return --(*depth);
+}
+
+/* Call a DCP function given by a tag */
+void dcp_push(struct apple_dcp *dcp, bool oob, const struct dcp_method_entry *call,
+		     u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
+		     void *cookie)
+{
+	enum dcp_context_id context = dcp_call_context(dcp, oob);
+	struct dcp_channel *ch = dcp_get_channel(dcp, context);
+
+	struct dcp_packet_header header = {
+		.in_len = in_len,
+		.out_len = out_len,
+
+		/* Tag is reversed due to endianness of the fourcc */
+		.tag[0] = call->tag[3],
+		.tag[1] = call->tag[2],
+		.tag[2] = call->tag[1],
+		.tag[3] = call->tag[0],
+	};
+
+	u8 depth = dcp_push_depth(&ch->depth);
+	u16 offset = dcp_packet_start(ch, depth);
+
+	void *out = dcp->shmem + dcp_tx_offset(context) + offset;
+	void *out_data = out + sizeof(header);
+	size_t data_len = sizeof(header) + in_len + out_len;
+
+	memcpy(out, &header, sizeof(header));
+
+	if (in_len > 0)
+		memcpy(out_data, data, in_len);
+
+	trace_iomfb_push(dcp, call, context, offset, depth);
+
+	ch->callbacks[depth] = cb;
+	ch->cookies[depth] = cookie;
+	ch->output[depth] = out + sizeof(header) + in_len;
+	ch->end[depth] = offset + ALIGN(data_len, DCP_PACKET_ALIGNMENT);
+
+	dcp_send_message(dcp, IOMFB_ENDPOINT,
+			 dcpep_msg(context, data_len, offset));
+}
+
+/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
+int dcp_parse_tag(char tag[4])
+{
+	u32 d[3];
+	int i;
+
+	if (tag[3] != 'D')
+		return -EINVAL;
+
+	for (i = 0; i < 3; ++i) {
+		d[i] = (u32)(tag[i] - '0');
+
+		if (d[i] > 9)
+			return -EINVAL;
+	}
+
+	return d[0] + (d[1] * 10) + (d[2] * 100);
+}
+
+/* Ack a callback from the DCP */
+void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context)
+{
+	struct dcp_channel *ch = dcp_get_channel(dcp, context);
+
+	dcp_pop_depth(&ch->depth);
+	dcp_send_message(dcp, IOMFB_ENDPOINT,
+			 dcpep_ack(context));
+}
+
+/*
+ * Helper to send a DRM hotplug event. The DCP is accessed from a single
+ * (RTKit) thread. To handle hotplug callbacks, we need to call
+ * drm_kms_helper_hotplug_event, which does an atomic commit (via DCP) and
+ * waits for vblank (a DCP callback). That means we deadlock if we call from
+ * the RTKit thread! Instead, move the call to another thread via a workqueue.
+ */
+void dcp_hotplug(struct work_struct *work)
+{
+	struct apple_connector *connector;
+	struct apple_dcp *dcp;
+
+	connector = container_of(work, struct apple_connector, hotplug_wq);
+
+	dcp = platform_get_drvdata(connector->dcp);
+	dev_info(dcp->dev, "%s() connected:%d valid_mode:%d nr_modes:%u\n", __func__,
+		 connector->connected, dcp->valid_mode, dcp->nr_modes);
+
+	if (!connector->connected) {
+		drm_edid_free(connector->drm_edid);
+		connector->drm_edid = NULL;
+	}
+
+	/*
+	 * DCP defers link training until we set a display mode. But we set
+	 * display modes from atomic_flush, so userspace needs to trigger a
+	 * flush, or the CRTC gets no signal.
+	 */
+	if (connector->base.state && !dcp->valid_mode && connector->connected)
+		drm_connector_set_link_status_property(&connector->base,
+						       DRM_MODE_LINK_STATUS_BAD);
+
+	drm_kms_helper_connector_hotplug_event(&connector->base);
+}
+EXPORT_SYMBOL_GPL(dcp_hotplug);
+
+static void dcpep_handle_cb(struct apple_dcp *dcp, enum dcp_context_id context,
+			    void *data, u32 length, u16 offset)
+{
+	struct device *dev = dcp->dev;
+	struct dcp_packet_header *hdr = data;
+	void *in, *out;
+	int tag = dcp_parse_tag(hdr->tag);
+	struct dcp_channel *ch = dcp_get_channel(dcp, context);
+	u8 depth;
+
+	if (tag < 0 || tag >= IOMFB_MAX_CB || !dcp->cb_handlers || !dcp->cb_handlers[tag]) {
+		dev_warn(dev, "received unknown callback %c%c%c%c\n",
+			 hdr->tag[3], hdr->tag[2], hdr->tag[1], hdr->tag[0]);
+		return;
+	}
+
+	in = data + sizeof(*hdr);
+	out = in + hdr->in_len;
+
+	// TODO: verify that in_len and out_len match our prototypes
+	// for now just clear the out data to have at least consistent results
+	if (hdr->out_len)
+		memset(out, 0, hdr->out_len);
+
+	depth = dcp_push_depth(&ch->depth);
+	ch->output[depth] = out;
+	ch->end[depth] = offset + ALIGN(length, DCP_PACKET_ALIGNMENT);
+
+	if (dcp->cb_handlers[tag](dcp, tag, out, in))
+		dcp_ack(dcp, context);
+}
+
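+/*
+ * Handle an ack from the DCP: pop the call stack and invoke the completion
+ * callback registered by dcp_push() with the out-data of the packet.
+ */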
+static void dcpep_handle_ack(struct apple_dcp *dcp, enum dcp_context_id context,
+			     void *data, u32 length)
+{
+	struct dcp_packet_header *header = data;
+	struct dcp_channel *ch = dcp_get_channel(dcp, context);
+	void *cookie;
+	dcp_callback_t cb;
+
+	if (!ch) {
+		dev_warn(dcp->dev, "ignoring ack on context %X\n", context);
+		return;
+	}
+
+	dcp_pop_depth(&ch->depth);
+
+	cb = ch->callbacks[ch->depth];
+	cookie = ch->cookies[ch->depth];
+
+	ch->callbacks[ch->depth] = NULL;
+	ch->cookies[ch->depth] = NULL;
+
+	if (cb)
+		cb(dcp, data + sizeof(*header) + header->in_len, cookie);
+}
+
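+/* Decode a message from the IOMFB endpoint and dispatch it to its channel */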
+static void dcpep_got_msg(struct apple_dcp *dcp, u64 message)
+{
+	enum dcp_context_id ctx_id;
+	u16 offset;
+	u32 length;
+	int channel_offset;
+	void *data;
+
+	ctx_id = FIELD_GET(IOMFB_MSG_CONTEXT, message);
+	offset = FIELD_GET(IOMFB_MSG_OFFSET, message);
+	length = FIELD_GET(IOMFB_MSG_LENGTH, message);
+
+	channel_offset = dcp_channel_offset(ctx_id);
+
+	if (channel_offset < 0) {
+		dev_warn(dcp->dev, "invalid context received %u\n", ctx_id);
+		return;
+	}
+
+	data = dcp->shmem + channel_offset + offset;
+
+	if (FIELD_GET(IOMFB_MSG_ACK, message))
+		dcpep_handle_ack(dcp, ctx_id, data, length);
+	else
+		dcpep_handle_cb(dcp, ctx_id, data, length, offset);
+}
+
+/*
+ * DRM specifies rectangles as start and end coordinates.  DCP specifies
+ * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
+ * to a DCP rectangle.
+ */
+struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect)
+{
+	return (struct dcp_rect){ .x = rect->x1,
+				  .y = rect->y1,
+				  .w = drm_rect_width(rect),
+				  .h = drm_rect_height(rect) };
+}
+
+u32 drm_format_to_dcp(u32 drm)
+{
+	switch (drm) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		return fourcc_code('A', 'R', 'G', 'B');
+
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		return fourcc_code('A', 'B', 'G', 'R');
+
+	case DRM_FORMAT_XRGB2101010:
+		return fourcc_code('r', '0', '3', 'w');
+	}
+
+	pr_warn("DRM format %X not supported in DCP\n", drm);
+	return 0;
+}
+
+int dcp_get_modes(struct drm_connector *connector)
+{
+	struct apple_connector *apple_connector = to_apple_connector(connector);
+	struct platform_device *pdev = apple_connector->dcp;
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode;
+	int i;
+
+	for (i = 0; i < dcp->nr_modes; ++i) {
+		mode = drm_mode_duplicate(dev, &dcp->modes[i].mode);
+
+		if (!mode) {
+			dev_err(dev->dev, "Failed to duplicate display mode\n");
+			return 0;
+		}
+
+		drm_mode_probed_add(connector, mode);
+	}
+
+	if (dcp->nr_modes && dcp->dcpavserv.enabled &&
+	    !apple_connector->drm_edid) {
+		const struct drm_edid *edid;
+		edid = dcpavserv_copy_edid(dcp->dcpavserv.service);
+		if (IS_ERR_OR_NULL(edid)) {
+			dev_info(dcp->dev, "copy_edid failed: %pe\n", edid);
+		} else {
+			drm_edid_free(apple_connector->drm_edid);
+			apple_connector->drm_edid = edid;
+		}
+	}
+	if (dcp->nr_modes && apple_connector->drm_edid)
+		drm_edid_connector_update(connector, apple_connector->drm_edid);
+
+	return dcp->nr_modes;
+}
+EXPORT_SYMBOL_GPL(dcp_get_modes);
+
+/* The user may own drm_display_mode, so we need to search for our copy */
+struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
+					    const struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < dcp->nr_modes; ++i) {
+		if (drm_mode_match(mode, &dcp->modes[i].mode,
+				   DRM_MODE_MATCH_TIMINGS |
+					   DRM_MODE_MATCH_CLOCK))
+			return &dcp->modes[i];
+	}
+
+	return NULL;
+}
+
+int dcp_mode_valid(struct drm_connector *connector,
+		   struct drm_display_mode *mode)
+{
+	struct apple_connector *apple_connector = to_apple_connector(connector);
+	struct platform_device *pdev = apple_connector->dcp;
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	return lookup_mode(dcp, mode) ? MODE_OK : MODE_BAD;
+}
+EXPORT_SYMBOL_GPL(dcp_mode_valid);
+
+int dcp_crtc_atomic_modeset(struct drm_crtc *crtc,
+			    struct drm_atomic_state *state)
+{
+	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+	struct apple_dcp *dcp = platform_get_drvdata(apple_crtc->dcp);
+	struct drm_crtc_state *crtc_state;
+	int ret = -EIO;
+	bool modeset;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	if (!crtc_state)
+		return 0;
+
+	modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+
+	if (!modeset)
+		return 0;
+
+	/* ignore no mode, poweroff is handled elsewhere */
+	if (crtc_state->mode.hdisplay == 0 && crtc_state->mode.vdisplay == 0)
+		return 0;
+
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		ret = iomfb_modeset_v12_3(dcp, crtc_state);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		ret = iomfb_modeset_v13_3(dcp, crtc_state);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n",
+			  dcp->fw_compat);
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dcp_crtc_atomic_modeset);
+
+bool dcp_crtc_mode_fixup(struct drm_crtc *crtc,
+			 const struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode)
+{
+	struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+	struct platform_device *pdev = apple_crtc->dcp;
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	/* TODO: support synthesized modes through scaling */
+	return lookup_mode(dcp, mode) != NULL;
+}
+EXPORT_SYMBOL(dcp_crtc_mode_fixup);
+
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+	struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	if (dcp_channel_busy(&dcp->ch_cmd)) {
+		if (!dcp->ch_cmd.warned_busy) {
+			dev_err(dcp->dev, "unexpected busy command channel\n");
+			dcp->ch_cmd.warned_busy = true;
+		}
+		/* HACK: issue a delayed vblank event to avoid timeouts in
+		 * drm_atomic_helper_wait_for_vblanks().
+		 */
+		schedule_work(&dcp->vblank_wq);
+		return;
+	} else if (dcp->ch_cmd.warned_busy) {
+		dcp->ch_cmd.warned_busy = false;
+	}
+
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		iomfb_flush_v12_3(dcp, crtc, state);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		iomfb_flush_v13_3(dcp, crtc, state);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(dcp_flush);
+
+static void iomfb_start(struct apple_dcp *dcp)
+{
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		iomfb_start_v12_3(dcp);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		iomfb_start_v13_3(dcp);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+		break;
+	}
+}
+
+bool dcp_is_initialized(struct platform_device *pdev)
+{
+	struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+	return dcp->active;
+}
+EXPORT_SYMBOL_GPL(dcp_is_initialized);
+
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message)
+{
+	enum dcpep_type type = FIELD_GET(IOMFB_MESSAGE_TYPE, message);
+
+	if (type == IOMFB_MESSAGE_TYPE_INITIALIZED)
+		iomfb_start(dcp);
+	else if (type == IOMFB_MESSAGE_TYPE_MSG)
+		dcpep_got_msg(dcp, message);
+	else
+		dev_warn(dcp->dev, "Ignoring unknown message %llx\n", message);
+}
+
+int iomfb_start_rtkit(struct apple_dcp *dcp)
+{
+	dma_addr_t shmem_iova;
+
+	apple_rtkit_start_ep(dcp->rtk, IOMFB_ENDPOINT);
+
+	dcp->shmem = dma_alloc_coherent(dcp->dev, DCP_SHMEM_SIZE, &shmem_iova,
+					GFP_KERNEL);
+	if (!dcp->shmem)
+		return -ENOMEM;
+
+	dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_set_shmem(shmem_iova));
+
+	return 0;
+}
+
+void iomfb_shutdown(struct apple_dcp *dcp)
+{
+	/* We're going down */
+	dcp->active = false;
+	dcp->valid_mode = false;
+
+	switch (dcp->fw_compat) {
+	case DCP_FIRMWARE_V_12_3:
+		iomfb_shutdown_v12_3(dcp);
+		break;
+	case DCP_FIRMWARE_V_13_5:
+		iomfb_shutdown_v13_3(dcp);
+		break;
+	default:
+		WARN_ONCE(true, "Unexpected firmware version: %u\n", dcp->fw_compat);
+		break;
+	}
+}
diff --git a/drivers/gpu/drm/apple/iomfb.h b/drivers/gpu/drm/apple/iomfb.h
new file mode 100644
index 00000000000000..c92d4c087168c1
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCPEP_H__
+#define __APPLE_DCPEP_H__
+
+#include <linux/types.h>
+
+#include "version_utils.h"
+
+/* Fixed size of shared memory between DCP and AP */
+#define DCP_SHMEM_SIZE 0x100000
+
+/* DCP message contexts */
+enum dcp_context_id {
+	/* Callback */
+	DCP_CONTEXT_CB = 0,
+
+	/* Command */
+	DCP_CONTEXT_CMD = 2,
+
+	/* Asynchronous */
+	DCP_CONTEXT_ASYNC = 3,
+
+	/* Out-of-band callback */
+	DCP_CONTEXT_OOBCB = 4,
+
+	/* Out-of-band command */
+	DCP_CONTEXT_OOBCMD = 6,
+
+	/* Out-of-band Asynchronous */
+	DCP_CONTEXT_OOBASYNC = 7,
+
+	DCP_NUM_CONTEXTS
+};
+
+/* RTKit endpoint message types */
+enum dcpep_type {
+	/* Set shared memory */
+	IOMFB_MESSAGE_TYPE_SET_SHMEM = 0,
+
+	/* DCP is initialized */
+	IOMFB_MESSAGE_TYPE_INITIALIZED = 1,
+
+	/* Remote procedure call */
+	IOMFB_MESSAGE_TYPE_MSG = 2,
+};
+
+#define IOMFB_MESSAGE_TYPE	GENMASK_ULL( 3,  0)
+
+/* Message */
+#define IOMFB_MSG_LENGTH	GENMASK_ULL(63, 32)
+#define IOMFB_MSG_OFFSET	GENMASK_ULL(31, 16)
+#define IOMFB_MSG_CONTEXT	GENMASK_ULL(11,  8)
+#define IOMFB_MSG_ACK		BIT_ULL(6)
+
+/* Set shmem */
+#define IOMFB_SHMEM_DVA		GENMASK_ULL(63, 16)
+#define IOMFB_SHMEM_FLAG	GENMASK_ULL( 7,  4)
+#define IOMFB_SHMEM_FLAG_VALUE	4
+
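+/*
+ * Every RPC packet in shared memory starts with this header; in_len bytes of
+ * input payload follow it, and out_len bytes of output payload follow the
+ * input.
+ */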
+struct dcp_packet_header {
+	char tag[4];
+	u32 in_len;
+	u32 out_len;
+} __packed;
+
+#define DCP_IS_NULL(ptr) ((ptr) ? 1 : 0)
+#define DCP_PACKET_ALIGNMENT (0x40)
+
+enum iomfb_property_id {
+	IOMFB_PROPERTY_NITS = 15, // divide by Brightness_Scale
+};
+
+#define IOMFB_BRIGHTNESS_MIN 0x10000000
+
+/* Structures used in v12.0 firmware */
+
+#define SWAP_SURFACES 4
+/* We have 4 surfaces, but we can only ever blend two */
+#define MAX_BLEND_SURFACES 2
+#define MAX_PLANES 3
+
+enum dcp_colorspace {
+	DCP_COLORSPACE_BG_SRGB = 0,
+	DCP_COLORSPACE_BG_BT2020 = 9,
+	DCP_COLORSPACE_NATIVE = 12,
+};
+
+enum dcp_xfer_func {
+	DCP_XFER_FUNC_SDR = 13,
+	DCP_XFER_FUNC_HDR = 16,
+};
+
+struct dcp_iouserclient {
+	/* Handle for the IOUserClient. macOS sets this to a kernel VA. */
+	u64 handle;
+	u32 unk;
+	u8 flag1;
+	u8 flag2;
+	u8 padding[2];
+} __packed;
+
+struct dcp_rect {
+	u32 x;
+	u32 y;
+	u32 w;
+	u32 h;
+} __packed;
+
+/*
+ * Update background color to struct dcp_swap.bg_color
+ */
+#define IOMFB_SET_BACKGROUND	BIT(31)
+
+/* Information describing a plane of a planar compressed surface */
+struct dcp_plane_info {
+	u32 width;
+	u32 height;
+	u32 base;
+	u32 offset;
+	u32 stride;
+	u32 size;
+	u16 tile_size;
+	u8 tile_w;
+	u8 tile_h;
+	u32 unk[13];
+} __packed;
+
+struct dcp_component_types {
+	u8 count;
+	u8 types[7];
+} __packed;
+
+struct dcp_allocate_bandwidth_req {
+	u64 unk1;
+	u64 unk2;
+	u64 unk3;
+	u8 unk1_null;
+	u8 unk2_null;
+	u8 padding[8];
+} __packed;
+
+struct dcp_allocate_bandwidth_resp {
+	u64 unk1;
+	u64 unk2;
+	u32 ret;
+} __packed;
+
+struct dcp_rt_bandwidth {
+	u64 unk1;
+	u64 reg_scratch;
+	u64 reg_doorbell;
+	u32 unk2;
+	u32 doorbell_bit;
+	u32 padding[7];
+} __packed;
+
+struct frame_sync_props {
+	u8 unk[28];
+};
+
+struct dcp_set_frame_sync_props_req {
+	struct frame_sync_props props;
+	u8 frame_sync_props_null;
+	u8 padding[3];
+} __packed;
+
+struct dcp_set_frame_sync_props_resp {
+	struct frame_sync_props props;
+} __packed;
+
+/* Method calls */
+
+enum dcpep_method {
+	dcpep_late_init_signal,
+	dcpep_setup_video_limits,
+	dcpep_set_create_dfb,
+	dcpep_start_signal,
+	dcpep_swap_start,
+	dcpep_swap_submit,
+	dcpep_set_display_device,
+	dcpep_set_digital_out_mode,
+	dcpep_create_default_fb,
+	dcpep_set_display_refresh_properties,
+	dcpep_flush_supports_power,
+	dcpep_set_power_state,
+	dcpep_first_client_open,
+	dcpep_set_parameter_dcp,
+	dcpep_enable_disable_video_power_savings,
+	dcpep_is_main_display,
+	iomfbep_a131_pmu_service_matched,
+	iomfbep_a132_backlight_service_matched,
+	iomfbep_a358_vi_set_temperature_hint,
+	iomfbep_get_color_remap_mode,
+	iomfbep_last_client_close,
+	iomfbep_abort_swaps_dcp,
+	iomfbep_set_matrix,
+	dcpep_num_methods
+};
+
+#define IOMFB_METHOD(tag, name) [name] = { #name, { tag[0], tag[1], tag[2], tag[3] } }
+
+struct dcp_method_entry {
+	const char *name;
+	char tag[4];
+};
+
+#define IOMFB_MAX_CB (1000)
+struct apple_dcp;
+
+typedef bool (*iomfb_cb_handler)(struct apple_dcp *, int, void *, void *);
+
+/* Prototypes */
+
+struct dcp_set_digital_out_mode_req {
+	u32 color_mode_id;
+	u32 timing_mode_id;
+} __packed;
+
+struct dcp_map_buf_req {
+	u64 buffer;
+	u8 unk;
+	u8 buf_null;
+	u8 vaddr_null;
+	u8 dva_null;
+} __packed;
+
+struct dcp_map_buf_resp {
+	u64 vaddr;
+	u64 dva;
+	u32 ret;
+} __packed;
+
+struct dcp_unmap_buf_resp {
+	u64 buffer;
+	u64 vaddr;
+	u64 dva;
+	u8 unk;
+	u8 buf_null;
+} __packed;
+
+struct dcp_allocate_buffer_req {
+	u32 unk0;
+	u64 size;
+	u32 unk2;
+	u8 paddr_null;
+	u8 dva_null;
+	u8 dva_size_null;
+	u8 padding;
+} __packed;
+
+struct dcp_allocate_buffer_resp {
+	u64 paddr;
+	u64 dva;
+	u64 dva_size;
+	u32 mem_desc_id;
+} __packed;
+
+struct dcp_map_physical_req {
+	u64 paddr;
+	u64 size;
+	u32 flags;
+	u8 dva_null;
+	u8 dva_size_null;
+	u8 padding[2];
+} __packed;
+
+struct dcp_map_physical_resp {
+	u64 dva;
+	u64 dva_size;
+	u32 mem_desc_id;
+} __packed;
+
+struct dcp_swap_start_req {
+	u32 swap_id;
+	struct dcp_iouserclient client;
+	u8 swap_id_null;
+	u8 client_null;
+	u8 padding[2];
+} __packed;
+
+struct dcp_swap_start_resp {
+	u32 swap_id;
+	struct dcp_iouserclient client;
+	u32 ret;
+} __packed;
+
+struct dcp_get_uint_prop_req {
+	char obj[4];
+	char key[0x40];
+	u64 value;
+	u8 value_null;
+	u8 padding[3];
+} __packed;
+
+struct dcp_get_uint_prop_resp {
+	u64 value;
+	u8 ret;
+	u8 padding[3];
+} __packed;
+
+struct iomfb_sr_set_property_int_req {
+	char obj[4];
+	char key[0x40];
+	u64 value;
+	u8 value_null;
+	u8 padding[3];
+} __packed;
+
+struct iomfb_set_fx_prop_req {
+	char obj[4];
+	char key[0x40];
+	u32 value;
+} __packed;
+
+struct dcp_set_power_state_req {
+	u64 unklong;
+	u8 unkbool;
+	u8 unkint_null;
+	u8 padding[2];
+} __packed;
+
+struct dcp_set_power_state_resp {
+	u32 unkint;
+	u32 ret;
+} __packed;
+
+struct dcp_set_dcpav_prop_chunk_req {
+	char data[0x1000];
+	u32 offset;
+	u32 length;
+} __packed;
+
+struct dcp_set_dcpav_prop_end_req {
+	char key[0x40];
+} __packed;
+
+struct dcp_set_parameter_dcp {
+	u32 param;
+	u32 value[8];
+	u32 count;
+} __packed;
+
+struct dcp_swap_complete_intent_gated {
+	u32 swap_id;
+	u8 unkBool;
+	u32 unkInt;
+	u32 width;
+	u32 height;
+} __packed;
+
+struct dcp_read_edt_data_req {
+	char key[0x40];
+	u32 count;
+	u32 value[8];
+} __packed;
+
+struct dcp_read_edt_data_resp {
+	u32 value[8];
+	u8 ret;
+} __packed;
+
+struct iomfb_property {
+	u32 id;
+	u32 value;
+} __packed;
+
+struct iomfb_get_color_remap_mode_req {
+	u32 mode;
+	u8 mode_null;
+	u8 padding[3];
+} __packed;
+
+struct iomfb_get_color_remap_mode_resp {
+	u32 mode;
+	u32 ret;
+} __packed;
+
+struct iomfb_last_client_close_req {
+	u8 unkint_null;
+	u8 padding[3];
+} __packed;
+
+struct iomfb_last_client_close_resp {
+	u32 unkint;
+} __packed;
+
+struct io_user_client {
+	u64 addr;
+	u32 unk;
+	u8 flag1;
+	u8 flag2;
+	u8 pad[2];
+} __packed;
+
+struct iomfb_abort_swaps_dcp_req {
+	struct io_user_client client;
+	u8 client_null;
+	u8 pad[3];
+} __packed;
+
+struct iomfb_abort_swaps_dcp_resp {
+	struct io_user_client client;
+	u32 ret;
+} __packed;
+
+struct iomfb_set_matrix_req {
+	u32 unk_u32; // maybe length?
+	u64 r[3];
+	u64 g[3];
+	u64 b[3];
+	u8 matrix_null;
+	u8 padding[3];
+} __packed;
+
+struct iomfb_set_matrix_resp {
+	u32 ret;
+} __packed;
+
+struct dcpep_get_tiling_state_req {
+	u32 event;
+	u32 param;
+	u32 value;
+	u8 value_null;
+	u8 padding[3];
+} __packed;
+
+struct dcpep_get_tiling_state_resp {
+	u32 value;
+	u32 ret;
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/apple/iomfb_internal.h b/drivers/gpu/drm/apple/iomfb_internal.h
new file mode 100644
index 00000000000000..09f8857d30c341
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_internal.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright The Asahi Linux Contributors */
+
+#include <drm/drm_modes.h>
+#include <drm/drm_rect.h>
+
+#include "dcp-internal.h"
+
+struct apple_dcp;
+
+typedef void (*dcp_callback_t)(struct apple_dcp *, void *, void *);
+
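+/*
+ * Generate typed wrappers around dcp_push() for each RPC shape: no payload,
+ * output only, input only, or both input and output.
+ */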
+#define DCP_THUNK_VOID(func, handle)                                         \
+	static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+			 void *cookie)                                       \
+	{                                                                    \
+		dcp_push(dcp, oob, &dcp_methods[handle], 0, 0, NULL, cb, cookie);          \
+	}
+
+#define DCP_THUNK_OUT(func, handle, T)                                       \
+	static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+			 void *cookie)                                       \
+	{                                                                    \
+		dcp_push(dcp, oob, &dcp_methods[handle], 0, sizeof(T), NULL, cb, cookie);  \
+	}
+
+#define DCP_THUNK_IN(func, handle, T)                                       \
+	static void func(struct apple_dcp *dcp, bool oob, T *data,          \
+			 dcp_callback_t cb, void *cookie)                   \
+	{                                                                   \
+		dcp_push(dcp, oob, &dcp_methods[handle], sizeof(T), 0, data, cb, cookie); \
+	}
+
+#define DCP_THUNK_INOUT(func, handle, T_in, T_out)                            \
+	static void func(struct apple_dcp *dcp, bool oob, T_in *data,         \
+			 dcp_callback_t cb, void *cookie)                     \
+	{                                                                     \
+		dcp_push(dcp, oob, &dcp_methods[handle], sizeof(T_in), sizeof(T_out), data, \
+			 cb, cookie);                                         \
+	}
+
+#define IOMFB_THUNK_INOUT(name)                                     \
+	static void iomfb_ ## name(struct apple_dcp *dcp, bool oob, \
+			struct iomfb_ ## name ## _req *data,        \
+			dcp_callback_t cb, void *cookie)            \
+	{                                                           \
+		dcp_push(dcp, oob, &dcp_methods[iomfbep_ ## name],                \
+			 sizeof(struct iomfb_ ## name ## _req),     \
+			 sizeof(struct iomfb_ ## name ## _resp),    \
+			 data,  cb, cookie);                        \
+	}
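+
+/*
+ * As an illustration, IOMFB_THUNK_INOUT(set_matrix) expands to roughly:
+ *
+ *	static void iomfb_set_matrix(struct apple_dcp *dcp, bool oob,
+ *			struct iomfb_set_matrix_req *data,
+ *			dcp_callback_t cb, void *cookie)
+ *	{
+ *		dcp_push(dcp, oob, &dcp_methods[iomfbep_set_matrix],
+ *			 sizeof(struct iomfb_set_matrix_req),
+ *			 sizeof(struct iomfb_set_matrix_resp),
+ *			 data, cb, cookie);
+ *	}
+ */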
+
+/*
+ * Define type-safe trampolines. Define typedefs to enforce type-safety on the
+ * input data (so if the types don't match, gcc errors out).
+ */
+
+#define TRAMPOLINE_VOID(func, handler)                                        \
+	static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+	{                                                                     \
+		trace_iomfb_callback(dcp, tag, #handler);                     \
+		handler(dcp);                                                 \
+		return true;                                                  \
+	}
+
+#define TRAMPOLINE_IN(func, handler, T_in)                                    \
+	typedef void (*callback_##handler)(struct apple_dcp *, T_in *);       \
+                                                                              \
+	static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+	{                                                                     \
+		callback_##handler cb = handler;                              \
+                                                                              \
+		trace_iomfb_callback(dcp, tag, #handler);                     \
+		cb(dcp, in);                                                  \
+		return true;                                                  \
+	}
+
+#define TRAMPOLINE_INOUT(func, handler, T_in, T_out)                          \
+	typedef T_out (*callback_##handler)(struct apple_dcp *, T_in *);      \
+                                                                              \
+	static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+	{                                                                     \
+		T_out *typed_out = out;                                       \
+		callback_##handler cb = handler;                              \
+                                                                              \
+		trace_iomfb_callback(dcp, tag, #handler);                     \
+		*typed_out = cb(dcp, in);                                     \
+		return true;                                                  \
+	}
+
+#define TRAMPOLINE_OUT(func, handler, T_out)                                  \
+	static bool __maybe_unused func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+	{                                                                     \
+		T_out *typed_out = out;                                       \
+                                                                              \
+		trace_iomfb_callback(dcp, tag, #handler);                     \
+		*typed_out = handler(dcp);                                    \
+		return true;                                                  \
+	}
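+
+/*
+ * For instance, TRAMPOLINE_OUT(trampoline_get_time, dcpep_cb_get_time, u64)
+ * produces a handler that writes the u64 returned by dcpep_cb_get_time()
+ * into the callback's output buffer and returns true so that the callback
+ * is acked immediately.
+ */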
+
+/* Call a DCP function given by a tag */
+void dcp_push(struct apple_dcp *dcp, bool oob, const struct dcp_method_entry *call,
+		     u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
+		     void *cookie);
+
+/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
+int dcp_parse_tag(char tag[4]);
+
+void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context);
+
+/*
+ * DRM specifies rectangles as start and end coordinates.  DCP specifies
+ * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
+ * to a DCP rectangle.
+ */
+struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect);
+
+u32 drm_format_to_dcp(u32 drm);
+
+/* The user may own drm_display_mode, so we need to search for our copy */
+struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
+					    const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/apple/iomfb_template.c b/drivers/gpu/drm/apple/iomfb_template.c
new file mode 100644
index 00000000000000..0a1b9495128562
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_template.c
@@ -0,0 +1,1495 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/align.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "iomfb_internal.h"
+#include "parser.h"
+#include "trace.h"
+#include "version_utils.h"
+
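+/*
+ * This file is compiled once per supported firmware version: DCP_FW_NAME()
+ * and DCP_FW_VER come from version_utils.h, so one source file yields the
+ * per-version entry points such as iomfb_modeset_v12_3() and
+ * iomfb_modeset_v13_3().
+ */
+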
+/* Register defines used in bandwidth setup structure */
+#define REG_DOORBELL_BIT(idx) (2 + (idx))
+
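+/*
+ * Refcounted completion shared between a synchronous waiter and the
+ * asynchronous DCP callback, so that whichever side drops its reference last
+ * frees the cookie.
+ */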
+struct dcp_wait_cookie {
+	struct kref refcount;
+	struct completion done;
+};
+
+static void release_wait_cookie(struct kref *ref)
+{
+	struct dcp_wait_cookie *cookie;
+	cookie = container_of(ref, struct dcp_wait_cookie, refcount);
+
+	kfree(cookie);
+}
+
+DCP_THUNK_OUT(iomfb_a131_pmu_service_matched, iomfbep_a131_pmu_service_matched, u32);
+DCP_THUNK_OUT(iomfb_a132_backlight_service_matched, iomfbep_a132_backlight_service_matched, u32);
+DCP_THUNK_OUT(iomfb_a358_vi_set_temperature_hint, iomfbep_a358_vi_set_temperature_hint, u32);
+
+IOMFB_THUNK_INOUT(set_matrix);
+IOMFB_THUNK_INOUT(get_color_remap_mode);
+IOMFB_THUNK_INOUT(last_client_close);
+IOMFB_THUNK_INOUT(abort_swaps_dcp);
+
+DCP_THUNK_INOUT(dcp_swap_submit, dcpep_swap_submit,
+		struct DCP_FW_NAME(dcp_swap_submit_req),
+		struct DCP_FW_NAME(dcp_swap_submit_resp));
+
+DCP_THUNK_INOUT(dcp_swap_start, dcpep_swap_start, struct dcp_swap_start_req,
+		struct dcp_swap_start_resp);
+
+DCP_THUNK_INOUT(dcp_set_power_state, dcpep_set_power_state,
+		struct dcp_set_power_state_req,
+		struct dcp_set_power_state_resp);
+
+DCP_THUNK_INOUT(dcp_set_digital_out_mode, dcpep_set_digital_out_mode,
+		struct dcp_set_digital_out_mode_req, u32);
+
+DCP_THUNK_INOUT(dcp_set_display_device, dcpep_set_display_device, u32, u32);
+
+DCP_THUNK_OUT(dcp_set_display_refresh_properties,
+	      dcpep_set_display_refresh_properties, u32);
+
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+DCP_THUNK_INOUT(dcp_late_init_signal, dcpep_late_init_signal, u32, u32);
+#else
+DCP_THUNK_OUT(dcp_late_init_signal, dcpep_late_init_signal, u32);
+#endif
+DCP_THUNK_IN(dcp_flush_supports_power, dcpep_flush_supports_power, u32);
+DCP_THUNK_OUT(dcp_create_default_fb, dcpep_create_default_fb, u32);
+DCP_THUNK_OUT(dcp_start_signal, dcpep_start_signal, u32);
+DCP_THUNK_VOID(dcp_setup_video_limits, dcpep_setup_video_limits);
+DCP_THUNK_VOID(dcp_set_create_dfb, dcpep_set_create_dfb);
+DCP_THUNK_VOID(dcp_first_client_open, dcpep_first_client_open);
+
+DCP_THUNK_INOUT(dcp_set_parameter_dcp, dcpep_set_parameter_dcp,
+		struct dcp_set_parameter_dcp, u32);
+
+DCP_THUNK_INOUT(dcp_enable_disable_video_power_savings,
+		dcpep_enable_disable_video_power_savings, u32, int);
+
+DCP_THUNK_OUT(dcp_is_main_display, dcpep_is_main_display, u32);
+
+/* DCP callback handlers */
+static void dcpep_cb_nop(struct apple_dcp *dcp)
+{
+	/* No operation */
+}
+
+static u8 dcpep_cb_true(struct apple_dcp *dcp)
+{
+	return true;
+}
+
+static u8 dcpep_cb_false(struct apple_dcp *dcp)
+{
+	return false;
+}
+
+static u32 dcpep_cb_zero(struct apple_dcp *dcp)
+{
+	return 0;
+}
+
+static void dcpep_cb_swap_complete(struct apple_dcp *dcp,
+				   struct DCP_FW_NAME(dc_swap_complete_resp) *resp)
+{
+	ktime_t now = ktime_get();
+	trace_iomfb_swap_complete(dcp, resp->swap_id);
+	dcp->last_swap_id = resp->swap_id;
+
+	dcp_drm_crtc_page_flip(dcp, now);
+	if (dcp->crc_enabled) {
+		u32 crc32 = 0;
+		drm_crtc_add_crc_entry(&dcp->crtc->base, true, resp->swap_id, &crc32);
+	}
+}
+
+/* special */
+static void complete_vi_set_temperature_hint(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	// ack D100 cb_match_pmu_service
+	dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static bool iomfbep_cb_match_pmu_service(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+	trace_iomfb_callback(dcp, tag, __func__);
+	iomfb_a358_vi_set_temperature_hint(dcp, false,
+					   complete_vi_set_temperature_hint,
+					   NULL);
+
+	// return false for deferred ACK
+	return false;
+}
+
+static void complete_pmu_service_matched(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_channel *ch = &dcp->ch_cb;
+	u8 *succ = ch->output[ch->depth - 1];
+
+	*succ = true;
+
+	// ack D206 cb_match_pmu_service_2
+	dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static bool iomfbep_cb_match_pmu_service_2(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+	trace_iomfb_callback(dcp, tag, __func__);
+
+	iomfb_a131_pmu_service_matched(dcp, false, complete_pmu_service_matched,
+				       out);
+
+	// return false for deferred ACK
+	return false;
+}
+
+static void complete_backlight_service_matched(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_channel *ch = &dcp->ch_cb;
+	u8 *succ = ch->output[ch->depth - 1];
+
+	*succ = true;
+
+	// ack D206 cb_match_backlight_service
+	dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static bool iomfbep_cb_match_backlight_service(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+	trace_iomfb_callback(dcp, tag, __func__);
+
+	if (!dcp_has_panel(dcp)) {
+		u8 *succ = out;
+		*succ = true;
+		return true;
+	}
+
+	iomfb_a132_backlight_service_matched(dcp, false, complete_backlight_service_matched, out);
+
+	// return false for deferred ACK
+	return false;
+}
+
+static void iomfb_cb_pr_publish(struct apple_dcp *dcp, struct iomfb_property *prop)
+{
+	switch (prop->id) {
+	case IOMFB_PROPERTY_NITS:
+	{
+		if (dcp_has_panel(dcp)) {
+			dcp->brightness.nits = prop->value / dcp->brightness.scale;
+			/* notify backlight device of the initial brightness */
+			if (!dcp->brightness.bl_dev && dcp->brightness.maximum > 0)
+				schedule_work(&dcp->bl_register_wq);
+			trace_iomfb_brightness(dcp, prop->value);
+		}
+		break;
+	}
+	default:
+		dev_dbg(dcp->dev, "pr_publish: id: %d = %u\n", prop->id, prop->value);
+	}
+}
+
+static struct dcp_get_uint_prop_resp
+dcpep_cb_get_uint_prop(struct apple_dcp *dcp, struct dcp_get_uint_prop_req *req)
+{
+	struct dcp_get_uint_prop_resp resp = (struct dcp_get_uint_prop_resp){
+	    .value = 0
+	};
+
+	if (dcp->panel.has_mini_led &&
+	    memcmp(req->obj, "SUMP", sizeof(req->obj)) == 0) { /* "PMUS" reversed */
+		if (strncmp(req->key, "Temperature", sizeof(req->key)) == 0) {
+			/*
+			 * TODO: value from j314c, find out if it is a
+			 *       temperature in degrees Celsius and which
+			 *       temperature sensor reports it
+			 */
+			resp.value = 3029;
+			resp.ret = true;
+		}
+	}
+
+	return resp;
+}
+
+static u8 iomfbep_cb_sr_set_property_int(struct apple_dcp *dcp,
+					 struct iomfb_sr_set_property_int_req *req)
+{
+	if (memcmp(req->obj, "FMOI", sizeof(req->obj)) == 0) { /* "IOMF" reversed */
+		if (strncmp(req->key, "Brightness_Scale", sizeof(req->key)) == 0) {
+			if (!req->value_null)
+				dcp->brightness.scale = req->value;
+		}
+	}
+
+	return 1;
+}
+
+static void iomfbep_cb_set_fx_prop(struct apple_dcp *dcp, struct iomfb_set_fx_prop_req *req)
+{
+	/* TODO: trace this, see if there are properties which need to be used later */
+}
+
+/*
+ * Callback to map a buffer allocated with allocate_buf for PIODMA usage.
+ * PIODMA is separate from the main DCP and uses its own IOVA space on a dedicated
+ * stream of the display DART, rather than the expected DCP DART.
+ */
+static struct dcp_map_buf_resp dcpep_cb_map_piodma(struct apple_dcp *dcp,
+						   struct dcp_map_buf_req *req)
+{
+	struct dcp_mem_descriptor *memdesc;
+	struct sg_table *map;
+	ssize_t ret;
+
+	if (req->buffer >= ARRAY_SIZE(dcp->memdesc))
+		goto reject;
+
+	memdesc = &dcp->memdesc[req->buffer];
+	map = &memdesc->map;
+
+	if (!map->sgl)
+		goto reject;
+
+	/* use the piodma iommu domain to map against the right IOMMU */
+	ret = iommu_map_sgtable(dcp->iommu_dom, memdesc->dva, map,
+				IOMMU_READ | IOMMU_WRITE);
+
+	/* HACK: expect size to be 16K aligned since the iommu API only maps
+	 *       full pages
+	 */
+	if (ret < 0 || ret != ALIGN(memdesc->size, SZ_16K)) {
+		dev_err(dcp->dev,
+			"iommu_map_sgtable() returned %zd instead of expected buffer size of %zu\n",
+			ret, memdesc->size);
+		goto reject;
+	}
+
+	return (struct dcp_map_buf_resp){ .dva = memdesc->dva };
+
+reject:
+	dev_err(dcp->dev, "denying map of invalid buffer %llx for piodma\n",
+		req->buffer);
+	return (struct dcp_map_buf_resp){ .ret = EINVAL };
+}
+
+static void dcpep_cb_unmap_piodma(struct apple_dcp *dcp,
+				  struct dcp_unmap_buf_resp *resp)
+{
+	struct dcp_mem_descriptor *memdesc;
+
+	if (resp->buffer >= ARRAY_SIZE(dcp->memdesc)) {
+		dev_warn(dcp->dev, "unmap request for out of range buffer %llu\n",
+			 resp->buffer);
+		return;
+	}
+
+	memdesc = &dcp->memdesc[resp->buffer];
+
+	if (!memdesc->buf) {
+		dev_warn(dcp->dev,
+			 "unmap for non-mapped buffer %llu iova:0x%08llx\n",
+			 resp->buffer, resp->dva);
+		return;
+	}
+
+	if (memdesc->dva != resp->dva) {
+		dev_warn(dcp->dev, "unmap buffer %llu address mismatch "
+			 "memdesc.dva:%llx dva:%llx\n", resp->buffer,
+			 memdesc->dva, resp->dva);
+		return;
+	}
+
+	/* use the piodma iommu domain to unmap from the right IOMMU */
+	/* HACK: expect size to be 16K aligned since the iommu API only maps
+	 *       full pages
+	 */
+	iommu_unmap(dcp->iommu_dom, memdesc->dva, ALIGN(memdesc->size, SZ_16K));
+}
+
+/*
+ * Allocate an IOVA contiguous buffer mapped to the DCP. The buffer need not be
+ * physically contiguous, but we save the sgtable in case the buffer later
+ * needs to be mapped for PIODMA.
+ */
+static struct dcp_allocate_buffer_resp
+dcpep_cb_allocate_buffer(struct apple_dcp *dcp,
+			 struct dcp_allocate_buffer_req *req)
+{
+	struct dcp_allocate_buffer_resp resp = { 0 };
+	struct dcp_mem_descriptor *memdesc;
+	size_t size;
+	u32 id;
+
+	resp.dva_size = ALIGN(req->size, 4096);
+	resp.mem_desc_id =
+		find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+
+	if (resp.mem_desc_id >= DCP_MAX_MAPPINGS) {
+		dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring\n");
+		resp.dva_size = 0;
+		resp.mem_desc_id = 0;
+		return resp;
+	}
+	id = resp.mem_desc_id;
+	set_bit(id, dcp->memdesc_map);
+
+	memdesc = &dcp->memdesc[id];
+
+	memdesc->size = resp.dva_size;
+	/* HACK: align size to 16K since the iommu API only maps full pages */
+	size = ALIGN(resp.dva_size, SZ_16K);
+	memdesc->buf = dma_alloc_coherent(dcp->dev, size,
+					  &memdesc->dva, GFP_KERNEL);
+	if (!memdesc->buf) {
+		dev_warn(dcp->dev, "failed to allocate %zu bytes for DCP\n", size);
+		clear_bit(id, dcp->memdesc_map);
+		memdesc->size = 0;
+		resp.dva_size = 0;
+		resp.mem_desc_id = 0;
+		return resp;
+	}
+
+	dma_get_sgtable(dcp->dev, &memdesc->map, memdesc->buf, memdesc->dva,
+			size);
+	resp.dva = memdesc->dva;
+
+	return resp;
+}
+
+static u8 dcpep_cb_release_mem_desc(struct apple_dcp *dcp, u32 *mem_desc_id)
+{
+	struct dcp_mem_descriptor *memdesc;
+	size_t size;
+	u32 id = *mem_desc_id;
+
+	if (id >= DCP_MAX_MAPPINGS) {
+		dev_warn(dcp->dev,
+			 "unmap request for out of range mem_desc_id %u", id);
+		return 0;
+	}
+
+	if (!test_and_clear_bit(id, dcp->memdesc_map)) {
+		dev_warn(dcp->dev, "unmap request for unused mem_desc_id %u\n",
+			 id);
+		return 0;
+	}
+
+	memdesc = &dcp->memdesc[id];
+	size = ALIGN(memdesc->size, SZ_16K);
+	if (memdesc->buf) {
+		dma_free_coherent(dcp->dev, size, memdesc->buf, memdesc->dva);
+		memdesc->buf = NULL;
+		memset(&memdesc->map, 0, sizeof(memdesc->map));
+	} else {
+		memdesc->reg = 0;
+	}
+
+	memdesc->size = 0;
+
+	return 1;
+}
+
+/* Validate that the specified region is a display register */
+static bool is_disp_register(struct apple_dcp *dcp, u64 start, u64 end)
+{
+	int i;
+
+	for (i = 0; i < dcp->nr_disp_registers; ++i) {
+		struct resource *r = dcp->disp_registers[i];
+
+		if ((start >= r->start) && (end <= r->end))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Map contiguous physical memory into the DCP's address space. The firmware
+ * uses this to map the display registers we advertise in
+ * sr_map_device_memory_with_index, so we bounds check against those regions
+ * to guard against a malicious coprocessor.
+ */
+static struct dcp_map_physical_resp
+dcpep_cb_map_physical(struct apple_dcp *dcp, struct dcp_map_physical_req *req)
+{
+	int size = ALIGN(req->size, 4096);
+	dma_addr_t dva;
+	u32 id;
+
+	if (!is_disp_register(dcp, req->paddr, req->paddr + size - 1)) {
+		dev_err(dcp->dev, "refusing to map phys address %llx size %llx\n",
+			req->paddr, req->size);
+		return (struct dcp_map_physical_resp){};
+	}
+
+	id = find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+	set_bit(id, dcp->memdesc_map);
+	dcp->memdesc[id].size = size;
+	dcp->memdesc[id].reg = req->paddr;
+
+	dva = dma_map_resource(dcp->dev, req->paddr, size, DMA_BIDIRECTIONAL, 0);
+	WARN_ON(dva == DMA_MAPPING_ERROR);
+
+	return (struct dcp_map_physical_resp){
+		.dva_size = size,
+		.mem_desc_id = id,
+		.dva = dva,
+	};
+}
+
+static u64 dcpep_cb_get_frequency(struct apple_dcp *dcp)
+{
+	return clk_get_rate(dcp->clk);
+}
+
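+/*
+ * Hand one of the advertised display register ranges to the firmware. On
+ * firmware 13.2 and later the range is additionally mapped into the DCP's
+ * IOVA space and the resulting DVA is returned alongside the physical
+ * address.
+ */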
+static struct DCP_FW_NAME(dcp_map_reg_resp) dcpep_cb_map_reg(struct apple_dcp *dcp,
+						struct DCP_FW_NAME(dcp_map_reg_req) *req)
+{
+	if (req->index >= dcp->nr_disp_registers) {
+		dev_warn(dcp->dev, "attempted to read invalid reg index %u\n",
+			 req->index);
+
+		return (struct DCP_FW_NAME(dcp_map_reg_resp)){ .ret = 1 };
+	} else {
+		struct resource *rsrc = dcp->disp_registers[req->index];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+		dma_addr_t dva = dma_map_resource(dcp->dev, rsrc->start, resource_size(rsrc),
+						  DMA_BIDIRECTIONAL, 0);
+		WARN_ON(dva == DMA_MAPPING_ERROR);
+#endif
+
+		return (struct DCP_FW_NAME(dcp_map_reg_resp)){
+			.addr = rsrc->start,
+			.length = resource_size(rsrc),
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+			.dva = dva,
+#endif
+		};
+	}
+}
+
+static struct dcp_read_edt_data_resp
+dcpep_cb_read_edt_data(struct apple_dcp *dcp, struct dcp_read_edt_data_req *req)
+{
+	return (struct dcp_read_edt_data_resp){
+		.value[0] = req->value[0],
+		.ret = 0,
+	};
+}
+
+static void iomfbep_cb_enable_backlight_message_ap_gated(struct apple_dcp *dcp,
+							 u8 *enabled)
+{
+	/*
+	 * Update the backlight brightness on the next swap; on non mini-LED
+	 * displays the DCP seems to set an invalid iDAC value after coming
+	 * out of DPMS.
+	 * syslog: "[BrightnessLCD.cpp:743][AFK]nitsToDBV: iDAC out of range"
+	 */
+	dcp->brightness.update = true;
+	schedule_work(&dcp->bl_update_wq);
+}
+
+/* Chunked data transfer for property dictionaries */
+static u8 dcpep_cb_prop_start(struct apple_dcp *dcp, u32 *length)
+{
+	if (dcp->chunks.data != NULL) {
+		dev_warn(dcp->dev, "ignoring spurious transfer start\n");
+		return false;
+	}
+
+	dcp->chunks.length = *length;
+	dcp->chunks.data = devm_kzalloc(dcp->dev, *length, GFP_KERNEL);
+
+	if (!dcp->chunks.data) {
+		dev_warn(dcp->dev, "failed to allocate chunks\n");
+		return false;
+	}
+
+	return true;
+}
+
+static u8 dcpep_cb_prop_chunk(struct apple_dcp *dcp,
+			      struct dcp_set_dcpav_prop_chunk_req *req)
+{
+	if (!dcp->chunks.data) {
+		dev_warn(dcp->dev, "ignoring spurious chunk\n");
+		return false;
+	}
+
+	if (req->offset + req->length > dcp->chunks.length) {
+		dev_warn(dcp->dev, "ignoring overflowing chunk\n");
+		return false;
+	}
+
+	memcpy(dcp->chunks.data + req->offset, req->data, req->length);
+	return true;
+}
+
+static bool dcpep_process_chunks(struct apple_dcp *dcp,
+				 struct dcp_set_dcpav_prop_end_req *req)
+{
+	struct dcp_parse_ctx ctx;
+	int ret;
+
+	if (!dcp->chunks.data) {
+		dev_warn(dcp->dev, "ignoring spurious end\n");
+		return false;
+	}
+
+	/* used just as opaque pointer for tracing */
+	ctx.dcp = dcp;
+
+	ret = parse(dcp->chunks.data, dcp->chunks.length, &ctx);
+
+	if (ret) {
+		dev_warn(dcp->dev, "bad header on dcpav props\n");
+		return false;
+	}
+
+	if (!strcmp(req->key, "TimingElements")) {
+		dcp->modes = enumerate_modes(&ctx, &dcp->nr_modes,
+					     dcp->width_mm, dcp->height_mm,
+					     dcp->notch_height);
+
+		if (IS_ERR(dcp->modes)) {
+			dev_warn(dcp->dev, "failed to parse modes\n");
+			dcp->modes = NULL;
+			dcp->nr_modes = 0;
+			return false;
+		}
+		if (dcp->nr_modes == 0)
+			dev_warn(dcp->dev, "TimingElements without valid modes!\n");
+	} else if (!strcmp(req->key, "DisplayAttributes")) {
+		ret = parse_display_attributes(&ctx, &dcp->width_mm,
+					&dcp->height_mm);
+
+		if (ret) {
+			dev_warn(dcp->dev, "failed to parse display attribs\n");
+			return false;
+		}
+
+		dcp_set_dimensions(dcp);
+	}
+
+	return true;
+}
+
+static u8 dcpep_cb_prop_end(struct apple_dcp *dcp,
+			    struct dcp_set_dcpav_prop_end_req *req)
+{
+	u8 resp = dcpep_process_chunks(dcp, req);
+
+	/* move chunked data to connector to provide it via debugfs */
+	dcp_connector_update_dict(dcp->connector, req->key, &dcp->chunks);
+	dcp->chunks.data = NULL;
+	dcp->chunks.length = 0;
+
+	return resp;
+}
+
+/* Boot sequence */
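+/*
+ * The callback chain below runs in reverse source order:
+ * set_create_dfb -> create_default_fb -> setup_video_limits ->
+ * flush_supports_power(true) -> late_init_signal ->
+ * set_display_refresh_properties. boot_done() then reports success and acks
+ * the deferred boot callback.
+ */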
+static void boot_done(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_channel *ch = &dcp->ch_cb;
+	u8 *succ = ch->output[ch->depth - 1];
+	dev_dbg(dcp->dev, "boot done\n");
+
+	*succ = true;
+	dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static void boot_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	dcp_set_display_refresh_properties(dcp, false, boot_done, NULL);
+}
+
+static void boot_4(struct apple_dcp *dcp, void *out, void *cookie)
+{
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u32 v_true = 1;
+	dcp_late_init_signal(dcp, false, &v_true, boot_5, NULL);
+#else
+	dcp_late_init_signal(dcp, false, boot_5, NULL);
+#endif
+}
+
+static void boot_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	u32 v_true = true;
+
+	dcp_flush_supports_power(dcp, false, &v_true, boot_4, NULL);
+}
+
+static void boot_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	dcp_setup_video_limits(dcp, false, boot_3, NULL);
+}
+
+static void boot_1_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	dcp_create_default_fb(dcp, false, boot_2, NULL);
+}
+
+/* Use special function signature to defer the ACK */
+static bool dcpep_cb_boot_1(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+	trace_iomfb_callback(dcp, tag, __func__);
+	dcp_set_create_dfb(dcp, false, boot_1_5, NULL);
+	return false;
+}
+
+static struct dcp_allocate_bandwidth_resp dcpep_cb_allocate_bandwidth(struct apple_dcp *dcp,
+						struct dcp_allocate_bandwidth_req *req)
+{
+	return (struct dcp_allocate_bandwidth_resp){
+		.unk1 = req->unk1,
+		.unk2 = req->unk2,
+		.ret = 1,
+	};
+}
+
+static struct dcp_rt_bandwidth dcpep_cb_rt_bandwidth(struct apple_dcp *dcp)
+{
+	struct dcp_rt_bandwidth rt_bw = (struct dcp_rt_bandwidth){
+			.reg_scratch = 0,
+			.reg_doorbell = 0,
+			.doorbell_bit = 0,
+	};
+
+	if (dcp->disp_bw_scratch_index) {
+		u32 offset = dcp->disp_bw_scratch_offset;
+		u32 index = dcp->disp_bw_scratch_index;
+		rt_bw.reg_scratch = dcp->disp_registers[index]->start + offset;
+	}
+
+	if (dcp->disp_bw_doorbell_index) {
+		u32 index = dcp->disp_bw_doorbell_index;
+		rt_bw.reg_doorbell = dcp->disp_registers[index]->start;
+		rt_bw.doorbell_bit = REG_DOORBELL_BIT(dcp->index);
+		/*
+		 * This is most certainly not padding. t8103-dcp crashes without
+		 * setting this immediately during modeset on 12.3 and 13.5
+		 * firmware.
+		 */
+		rt_bw.padding[3] = 0x4;
+	}
+
+	return rt_bw;
+}
+
+static struct dcp_set_frame_sync_props_resp
+dcpep_cb_set_frame_sync_props(struct apple_dcp *dcp,
+			      struct dcp_set_frame_sync_props_req *req)
+{
+	return (struct dcp_set_frame_sync_props_resp){};
+}
+
+/* Callback to get the current time as milliseconds since the UNIX epoch */
+static u64 dcpep_cb_get_time(struct apple_dcp *dcp)
+{
+	return ktime_to_ms(ktime_get_real());
+}
+
+struct dcp_swap_cookie {
+	struct kref refcount;
+	struct completion done;
+	u32 swap_id;
+};
+
+static void release_swap_cookie(struct kref *ref)
+{
+	struct dcp_swap_cookie *cookie;
+	cookie = container_of(ref, struct dcp_swap_cookie, refcount);
+
+	kfree(cookie);
+}
+
+static void dcp_swap_cleared(struct apple_dcp *dcp, void *data, void *cookie)
+{
+	struct DCP_FW_NAME(dcp_swap_submit_resp) *resp = data;
+
+	if (cookie) {
+		struct dcp_swap_cookie *info = cookie;
+		complete(&info->done);
+		kref_put(&info->refcount, release_swap_cookie);
+	}
+
+	if (resp->ret) {
+		dev_err(dcp->dev, "swap_clear failed! status %u\n", resp->ret);
+		dcp_drm_crtc_vblank(dcp->crtc);
+		return;
+	}
+
+	while (!list_empty(&dcp->swapped_out_fbs)) {
+		struct dcp_fb_reference *entry;
+		entry = list_first_entry(&dcp->swapped_out_fbs,
+					 struct dcp_fb_reference, head);
+		if (entry->swap_id == dcp->last_swap_id)
+			break;
+		if (entry->fb)
+			drm_framebuffer_put(entry->fb);
+		list_del(&entry->head);
+		kfree(entry);
+	}
+}
+
+static void dcp_swap_clear_started(struct apple_dcp *dcp, void *data,
+				   void *cookie)
+{
+	struct dcp_swap_start_resp *resp = data;
+	DCP_FW_UNION(dcp->swap).swap.swap_id = resp->swap_id;
+
+	if (cookie) {
+		struct dcp_swap_cookie *info = cookie;
+		info->swap_id = resp->swap_id;
+	}
+
+	dcp_swap_submit(dcp, false, &DCP_FW_UNION(dcp->swap), dcp_swap_cleared, cookie);
+}
+
+static void dcp_on_final(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_wait_cookie *wait = cookie;
+
+	if (wait) {
+		complete(&wait->done);
+		kref_put(&wait->refcount, release_wait_cookie);
+	}
+}
+
+static void dcp_on_set_power_state(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_set_power_state_req req = {
+		.unklong = 1,
+	};
+
+	dcp_set_power_state(dcp, false, &req, dcp_on_final, cookie);
+}
+
+static void dcp_on_set_parameter(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_set_parameter_dcp param = {
+		.param = 14,
+		.value = { 0 },
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+		.count = 3,
+#else
+		.count = 1,
+#endif
+	};
+
+	dcp_set_parameter_dcp(dcp, false, &param, dcp_on_set_power_state, cookie);
+}
+
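+/*
+ * Power the display pipeline back up: select the display device, program
+ * parameter 14 when this DCP does not drive the main display, request power
+ * state 1 and wait up to 500 ms for the firmware to acknowledge.
+ */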
+void DCP_FW_NAME(iomfb_poweron)(struct apple_dcp *dcp)
+{
+	struct dcp_wait_cookie *cookie;
+	int ret;
+	u32 handle;
+
+	dev_info(dcp->dev, "dcp_poweron() starting\n");
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return;
+
+	init_completion(&cookie->done);
+	kref_init(&cookie->refcount);
+	/* increase refcount to ensure the receiver has a reference */
+	kref_get(&cookie->refcount);
+
+	if (dcp->main_display) {
+		handle = 0;
+		dcp_set_display_device(dcp, false, &handle, dcp_on_set_power_state,
+				       cookie);
+	} else {
+		handle = 2;
+		dcp_set_display_device(dcp, false, &handle,
+				       dcp_on_set_parameter, cookie);
+	}
+	ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(500));
+
+	if (ret == 0)
+		dev_warn(dcp->dev, "wait for power timed out\n");
+
+	kref_put(&cookie->refcount, release_wait_cookie);
+
+	/* Force a brightness update after poweron, to restore the brightness */
+	dcp->brightness.update = true;
+}
+
+static void complete_set_powerstate(struct apple_dcp *dcp, void *out,
+				    void *cookie)
+{
+	struct dcp_wait_cookie *wait = cookie;
+
+	if (wait) {
+		complete(&wait->done);
+		kref_put(&wait->refcount, release_wait_cookie);
+	}
+}
+
+static void last_client_closed_poff(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_set_power_state_req power_req = {
+		.unklong = 0,
+	};
+	dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate,
+			    cookie);
+}
+
+static void aborted_swaps_dcp_poff(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct iomfb_last_client_close_req last_client_req = {};
+	iomfb_last_client_close(dcp, false, &last_client_req,
+				last_client_closed_poff, cookie);
+}
+
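+/*
+ * Power the display pipeline down: submit a clearing swap (all surfaces
+ * null, black background, backlight off on panels), then abort pending
+ * swaps, close the last client and finally request power state 0.
+ */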
+void DCP_FW_NAME(iomfb_poweroff)(struct apple_dcp *dcp)
+{
+	int ret, swap_id;
+	struct iomfb_abort_swaps_dcp_req abort_req = {
+		.client = {
+			.flag2 = 1,
+		},
+	};
+	struct dcp_swap_cookie *cookie;
+	struct dcp_wait_cookie *poff_cookie;
+	struct dcp_swap_start_req swap_req = { 0 };
+	struct DCP_FW_NAME(dcp_swap_submit_req) *swap = &DCP_FW_UNION(dcp->swap);
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return;
+	init_completion(&cookie->done);
+	kref_init(&cookie->refcount);
+	/* increase refcount to ensure the receiver has a reference */
+	kref_get(&cookie->refcount);
+
+	// clear surfaces
+	memset(swap, 0, sizeof(*swap));
+
+	swap->swap.swap_enabled =
+		swap->swap.swap_completed = IOMFB_SET_BACKGROUND | 0x7;
+	swap->swap.bg_color = 0xFF000000;
+
+	/*
+	 * Turn off the backlight. This matters because the DCP's idea of
+	 * backlight brightness gets desynced after a power change, and it
+	 * needs to be told it's going to turn off so it will consider the
+	 * subsequent update on poweron an actual change and restore the
+	 * brightness.
+	 */
+	if (dcp_has_panel(dcp)) {
+		swap->swap.bl_unk = 1;
+		swap->swap.bl_value = 0;
+		swap->swap.bl_power = 0;
+	}
+
+	/* Null all surfaces */
+	for (int l = 0; l < SWAP_SURFACES; l++)
+		swap->surf_null[l] = true;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	for (int l = 0; l < 5; l++)
+		swap->surf2_null[l] = true;
+	swap->unkU32Ptr_null = true;
+	swap->unkU32out_null = true;
+#endif
+
+	dcp_swap_start(dcp, false, &swap_req, dcp_swap_clear_started, cookie);
+
+	ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(50));
+	swap_id = cookie->swap_id;
+	kref_put(&cookie->refcount, release_swap_cookie);
+	if (ret <= 0) {
+		dcp->crashed = true;
+		return;
+	}
+
+	dev_dbg(dcp->dev, "%s: clear swap submitted: %u\n", __func__, swap_id);
+
+	poff_cookie = kzalloc(sizeof(*poff_cookie), GFP_KERNEL);
+	if (!poff_cookie)
+		return;
+	init_completion(&poff_cookie->done);
+	kref_init(&poff_cookie->refcount);
+	/* increase refcount to ensure the receiver has a reference */
+	kref_get(&poff_cookie->refcount);
+
+	iomfb_abort_swaps_dcp(dcp, false, &abort_req,
+				aborted_swaps_dcp_poff, poff_cookie);
+	ret = wait_for_completion_timeout(&poff_cookie->done,
+					  msecs_to_jiffies(1000));
+
+	if (ret == 0)
+		dev_warn(dcp->dev, "setPowerState(0) timeout %u ms\n", 1000);
+	else if (ret > 0)
+		dev_dbg(dcp->dev,
+			"setPowerState(0) finished with %d ms to spare\n",
+			jiffies_to_msecs(ret));
+
+	kref_put(&poff_cookie->refcount, release_wait_cookie);
+
+	dev_info(dcp->dev, "dcp_poweroff() done\n");
+}
+
+static void last_client_closed_sleep(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct dcp_set_power_state_req power_req = {
+		.unklong = 0,
+	};
+	dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate, cookie);
+}
+
+static void aborted_swaps_dcp_sleep(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct iomfb_last_client_close_req req = { 0 };
+	iomfb_last_client_close(dcp, false, &req, last_client_closed_sleep, cookie);
+}
+
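+/*
+ * Like poweroff, but without the clearing swap: abort pending swaps, close
+ * the last client and drop to power state 0 before system sleep.
+ */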
+void DCP_FW_NAME(iomfb_sleep)(struct apple_dcp *dcp)
+{
+	int ret;
+	struct iomfb_abort_swaps_dcp_req req = {
+		.client = {
+			.flag2 = 1,
+		},
+	};
+
+	struct dcp_wait_cookie *cookie;
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return;
+	init_completion(&cookie->done);
+	kref_init(&cookie->refcount);
+	/* increase refcount to ensure the receiver has a reference */
+	kref_get(&cookie->refcount);
+
+	iomfb_abort_swaps_dcp(dcp, false, &req, aborted_swaps_dcp_sleep,
+				cookie);
+	ret = wait_for_completion_timeout(&cookie->done,
+					  msecs_to_jiffies(1000));
+
+	if (ret == 0)
+		dev_warn(dcp->dev, "setDCPPower(0) timeout %u ms\n", 1000);
+
+	kref_put(&cookie->refcount, release_wait_cookie);
+	dev_info(dcp->dev, "dcp_sleep() done\n");
+}
+
+static void dcpep_cb_hotplug(struct apple_dcp *dcp, u64 *connected)
+{
+	struct apple_connector *connector = dcp->connector;
+
+	/* DCP issues hotplug_gated callbacks after SetPowerState() calls on
+	 * devices with display (macbooks, imacs). This must not result in
+	 * connector state changes on DRM side. Some applications won't enable
+	 * a CRTC with a connector in disconnected state. Weston after DPMS off
+	 * is one example. dcp_is_main_display() returns true on devices with
+	 * integrated display. Ignore the hotplug_gated() callbacks there.
+	 */
+	if (dcp->main_display)
+		return;
+
+	if (dcp->during_modeset) {
+		dev_info(dcp->dev,
+			 "cb_hotplug() ignored during modeset connected:%llu\n",
+			 *connected);
+		return;
+	}
+
+	dev_info(dcp->dev, "cb_hotplug() connected:%llu, valid_mode:%d\n",
+		 *connected, dcp->valid_mode);
+
+	/* Hotplug invalidates mode. DRM doesn't always handle this. */
+	if (!(*connected)) {
+		dcp->valid_mode = false;
+		/*
+		 * After unplug, a swap will not complete until the next
+		 * set_digital_out_mode call.
+		 */
+		schedule_work(&dcp->vblank_wq);
+	}
+
+	if (connector && connector->connected != !!(*connected)) {
+		connector->connected = !!(*connected);
+		dcp->valid_mode = false;
+		schedule_work(&connector->hotplug_wq);
+	}
+}
+
+static void
+dcpep_cb_swap_complete_intent_gated(struct apple_dcp *dcp,
+				    struct dcp_swap_complete_intent_gated *info)
+{
+	trace_iomfb_swap_complete_intent_gated(dcp, info->swap_id,
+		info->width, info->height);
+}
+
+static void
+dcpep_cb_abort_swap_ap_gated(struct apple_dcp *dcp, u32 *swap_id)
+{
+	trace_iomfb_abort_swap_ap_gated(dcp, *swap_id);
+}
+
+static struct dcpep_get_tiling_state_resp
+dcpep_cb_get_tiling_state(struct apple_dcp *dcp,
+			  struct dcpep_get_tiling_state_req *req)
+{
+	return (struct dcpep_get_tiling_state_resp){
+		.value = 0,
+		.ret = 1,
+	};
+}
+
+static u8 dcpep_cb_create_backlight_service(struct apple_dcp *dcp)
+{
+	return dcp_has_panel(dcp);
+}
+
+TRAMPOLINE_VOID(trampoline_nop, dcpep_cb_nop);
+TRAMPOLINE_OUT(trampoline_true, dcpep_cb_true, u8);
+TRAMPOLINE_OUT(trampoline_false, dcpep_cb_false, u8);
+TRAMPOLINE_OUT(trampoline_zero, dcpep_cb_zero, u32);
+TRAMPOLINE_IN(trampoline_swap_complete, dcpep_cb_swap_complete,
+	      struct DCP_FW_NAME(dc_swap_complete_resp));
+TRAMPOLINE_INOUT(trampoline_get_uint_prop, dcpep_cb_get_uint_prop,
+		 struct dcp_get_uint_prop_req, struct dcp_get_uint_prop_resp);
+TRAMPOLINE_IN(trampoline_set_fx_prop, iomfbep_cb_set_fx_prop,
+	      struct iomfb_set_fx_prop_req)
+TRAMPOLINE_INOUT(trampoline_map_piodma, dcpep_cb_map_piodma,
+		 struct dcp_map_buf_req, struct dcp_map_buf_resp);
+TRAMPOLINE_IN(trampoline_unmap_piodma, dcpep_cb_unmap_piodma,
+	      struct dcp_unmap_buf_resp);
+TRAMPOLINE_INOUT(trampoline_sr_set_property_int, iomfbep_cb_sr_set_property_int,
+		 struct iomfb_sr_set_property_int_req, u8);
+TRAMPOLINE_INOUT(trampoline_allocate_buffer, dcpep_cb_allocate_buffer,
+		 struct dcp_allocate_buffer_req,
+		 struct dcp_allocate_buffer_resp);
+TRAMPOLINE_INOUT(trampoline_map_physical, dcpep_cb_map_physical,
+		 struct dcp_map_physical_req, struct dcp_map_physical_resp);
+TRAMPOLINE_INOUT(trampoline_release_mem_desc, dcpep_cb_release_mem_desc, u32,
+		 u8);
+TRAMPOLINE_INOUT(trampoline_map_reg, dcpep_cb_map_reg,
+		 struct DCP_FW_NAME(dcp_map_reg_req),
+		 struct DCP_FW_NAME(dcp_map_reg_resp));
+TRAMPOLINE_INOUT(trampoline_read_edt_data, dcpep_cb_read_edt_data,
+		 struct dcp_read_edt_data_req, struct dcp_read_edt_data_resp);
+TRAMPOLINE_INOUT(trampoline_prop_start, dcpep_cb_prop_start, u32, u8);
+TRAMPOLINE_INOUT(trampoline_prop_chunk, dcpep_cb_prop_chunk,
+		 struct dcp_set_dcpav_prop_chunk_req, u8);
+TRAMPOLINE_INOUT(trampoline_prop_end, dcpep_cb_prop_end,
+		 struct dcp_set_dcpav_prop_end_req, u8);
+TRAMPOLINE_INOUT(trampoline_allocate_bandwidth, dcpep_cb_allocate_bandwidth,
+	       struct dcp_allocate_bandwidth_req, struct dcp_allocate_bandwidth_resp);
+TRAMPOLINE_OUT(trampoline_rt_bandwidth, dcpep_cb_rt_bandwidth,
+	       struct dcp_rt_bandwidth);
+TRAMPOLINE_INOUT(trampoline_set_frame_sync_props, dcpep_cb_set_frame_sync_props,
+	       struct dcp_set_frame_sync_props_req,
+	       struct dcp_set_frame_sync_props_resp);
+TRAMPOLINE_OUT(trampoline_get_frequency, dcpep_cb_get_frequency, u64);
+TRAMPOLINE_OUT(trampoline_get_time, dcpep_cb_get_time, u64);
+TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug, u64);
+TRAMPOLINE_IN(trampoline_swap_complete_intent_gated,
+	      dcpep_cb_swap_complete_intent_gated,
+	      struct dcp_swap_complete_intent_gated);
+TRAMPOLINE_IN(trampoline_abort_swap_ap_gated, dcpep_cb_abort_swap_ap_gated, u32);
+TRAMPOLINE_IN(trampoline_enable_backlight_message_ap_gated,
+	      iomfbep_cb_enable_backlight_message_ap_gated, u8);
+TRAMPOLINE_IN(trampoline_pr_publish, iomfb_cb_pr_publish,
+	      struct iomfb_property);
+TRAMPOLINE_INOUT(trampoline_get_tiling_state, dcpep_cb_get_tiling_state,
+		 struct dcpep_get_tiling_state_req, struct dcpep_get_tiling_state_resp);
+TRAMPOLINE_OUT(trampoline_create_backlight_service, dcpep_cb_create_backlight_service, u8);
+
+/*
+ * Callback for swap requests. If a swap failed, we'll never get a swap
+ * complete event so we need to fake a vblank event early to avoid a hang.
+ */
+
+static void dcp_swapped(struct apple_dcp *dcp, void *data, void *cookie)
+{
+	struct DCP_FW_NAME(dcp_swap_submit_resp) *resp = data;
+
+	if (resp->ret) {
+		dev_err(dcp->dev, "swap failed! status %u\n", resp->ret);
+		dcp_drm_crtc_vblank(dcp->crtc);
+		return;
+	}
+	dcp->swap_start = ktime_get();
+
+	while (!list_empty(&dcp->swapped_out_fbs)) {
+		struct dcp_fb_reference *entry;
+		entry = list_first_entry(&dcp->swapped_out_fbs,
+					 struct dcp_fb_reference, head);
+		if (entry->swap_id == dcp->last_swap_id)
+			break;
+		if (entry->fb)
+			drm_framebuffer_put(entry->fb);
+		list_del(&entry->head);
+		kfree(entry);
+	}
+}
+
+static void dcp_swap_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+	struct dcp_swap_start_resp *resp = data;
+
+	DCP_FW_UNION(dcp->swap).swap.swap_id = resp->swap_id;
+
+	trace_iomfb_swap_submit(dcp, resp->swap_id);
+	dcp_swap_submit(dcp, false, &DCP_FW_UNION(dcp->swap), dcp_swapped, NULL);
+}
+
+/* Helpers to modeset and swap, used to flush */
+static void do_swap(struct apple_dcp *dcp, void *data, void *cookie)
+{
+	struct dcp_swap_start_req start_req = { 0 };
+
+	if (dcp->connector && dcp->connector->connected)
+		dcp_swap_start(dcp, false, &start_req, dcp_swap_started, NULL);
+	else
+		dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void complete_set_digital_out_mode(struct apple_dcp *dcp, void *data,
+					  void *cookie)
+{
+	struct dcp_wait_cookie *wait = cookie;
+
+	if (wait) {
+		complete(&wait->done);
+		kref_put(&wait->refcount, release_wait_cookie);
+	}
+}
+
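+/*
+ * Program a new timing/color mode. set_digital_out_mode() completes
+ * asynchronously, so wait for it (with a timeout slightly above the
+ * firmware's internal ~8 second limit) before marking the mode valid.
+ */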
+int DCP_FW_NAME(iomfb_modeset)(struct apple_dcp *dcp,
+			       struct drm_crtc_state *crtc_state)
+{
+	struct dcp_display_mode *mode;
+	struct dcp_wait_cookie *cookie;
+	struct dcp_color_mode *cmode = NULL;
+	int ret;
+
+	mode = lookup_mode(dcp, &crtc_state->mode);
+	if (!mode) {
+		dev_err(dcp->dev, "no match for " DRM_MODE_FMT "\n",
+			DRM_MODE_ARG(&crtc_state->mode));
+		return -EIO;
+	}
+
+	dev_info(dcp->dev,
+		 "set_digital_out_mode(color:%d timing:%d) " DRM_MODE_FMT "\n",
+		 mode->color_mode_id, mode->timing_mode_id,
+		 DRM_MODE_ARG(&crtc_state->mode));
+	if (mode->color_mode_id == mode->sdr_rgb.id)
+		cmode = &mode->sdr_rgb;
+	else if (mode->color_mode_id == mode->sdr_444.id)
+		cmode = &mode->sdr_444;
+	else if (mode->color_mode_id == mode->sdr.id)
+		cmode = &mode->sdr;
+	else if (mode->color_mode_id == mode->best.id)
+		cmode = &mode->best;
+	if (cmode)
+		dev_info(dcp->dev,
+			"set_digital_out_mode() color mode depth:%hhu format:%u "
+			"colorimetry:%u eotf:%u range:%u\n", cmode->depth,
+			cmode->format, cmode->colorimetry, cmode->eotf,
+			cmode->range);
+
+	dcp->mode = (struct dcp_set_digital_out_mode_req){
+		.color_mode_id = mode->color_mode_id,
+		.timing_mode_id = mode->timing_mode_id
+	};
+
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return -ENOMEM;
+
+	init_completion(&cookie->done);
+	kref_init(&cookie->refcount);
+	/* increase refcount to ensure the receiver has a reference */
+	kref_get(&cookie->refcount);
+
+	dcp->during_modeset = true;
+
+	dcp_set_digital_out_mode(dcp, false, &dcp->mode,
+				 complete_set_digital_out_mode, cookie);
+
+	/*
+	 * The DCP firmware has an internal timeout of ~8 seconds for
+	 * modesets. Add an extra 500 ms on top of that to be sure the modeset
+	 * call has returned.
+	 */
+	ret = wait_for_completion_timeout(&cookie->done,
+					  msecs_to_jiffies(8500));
+
+	kref_put(&cookie->refcount, release_wait_cookie);
+	dcp->during_modeset = false;
+	dev_info(dcp->dev, "set_digital_out_mode finished:%d\n", ret);
+
+	if (ret == 0) {
+		dev_info(dcp->dev, "set_digital_out_mode timed out\n");
+		return -EIO;
+	} else if (ret < 0) {
+		dev_info(dcp->dev,
+			 "waiting on set_digital_out_mode failed:%d\n", ret);
+		return -EIO;
+
+	} else if (ret > 0) {
+		dev_dbg(dcp->dev,
+			"set_digital_out_mode finished with %d ms to spare\n",
+			jiffies_to_msecs(ret));
+	}
+	dcp->valid_mode = true;
+
+	return 0;
+}
+
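+/*
+ * Build a swap_submit request from the atomic state and queue it: do_swap()
+ * issues swap_start, and its completion handler submits the prepared
+ * dcp->swap request to the firmware.
+ */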
+void DCP_FW_NAME(iomfb_flush)(struct apple_dcp *dcp, struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_plane_state *new_state, *old_state;
+	struct drm_crtc_state *crtc_state;
+	struct DCP_FW_NAME(dcp_swap_submit_req) *req = &DCP_FW_UNION(dcp->swap);
+	int plane_idx, l;
+	int has_surface = 0;
+
+	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+	/* Reset all surfaces to defaults */
+	memset(req, 0, sizeof(*req));
+	for (l = 0; l < SWAP_SURFACES; l++)
+		req->surf_null[l] = true;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	for (l = 0; l < 5; l++)
+		req->surf2_null[l] = true;
+	req->unkU32Ptr_null = true;
+	req->unkU32out_null = true;
+#endif
+
+	/*
+	 * Clear all surfaces on startup. The boot framebuffer in surface 0
+	 * sticks around.
+	 */
+	if (!dcp->surfaces_cleared) {
+		req->swap.swap_enabled = IOMFB_SET_BACKGROUND | 0x7;
+		req->swap.bg_color = 0xFF000000;
+		dcp->surfaces_cleared = true;
+	}
+
+	for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
+		struct drm_framebuffer *fb = new_state->fb;
+		struct drm_gem_dma_object *obj;
+		struct drm_rect src_rect;
+		bool is_premultiplied = false;
+
+		/* skip planes not for this crtc */
+		if (old_state->crtc != crtc && new_state->crtc != crtc)
+			continue;
+
+		/*
+		 * Plane order is nondeterministic for this iterator. DCP will
+		 * almost always crash at some point if the z order of planes
+		 * flip-flops around. Make sure we are always blending them
+		 * in the correct order.
+		 *
+		 * Despite having 4 surfaces, we can only blend two. Surface 0 is
+		 * also unusable on some machines, so ignore it.
+		 */
+
+		l = MAX_BLEND_SURFACES - new_state->normalized_zpos;
+
+		WARN_ON(l > MAX_BLEND_SURFACES);
+
+		req->swap.swap_enabled |= BIT(l);
+
+		if (old_state->fb && fb != old_state->fb) {
+			/*
+			 * Race condition between a framebuffer unbind getting
+			 * swapped out and GEM unreferencing a framebuffer. If
+			 * we lose the race, the display gets IOVA faults and
+			 * the DCP crashes. We need to extend the lifetime of
+			 * the drm_framebuffer (and hence the GEM object) until
+			 * after we get a swap complete for the swap unbinding
+			 * it.
+			 */
+			struct dcp_fb_reference *entry =
+				kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (entry) {
+				entry->fb = old_state->fb;
+				entry->swap_id = dcp->last_swap_id;
+				list_add_tail(&entry->head,
+					      &dcp->swapped_out_fbs);
+			}
+			drm_framebuffer_get(old_state->fb);
+		}
+
+		if (!new_state->fb || !new_state->visible)
+			continue;
+
+		req->surf_null[l] = false;
+		has_surface = 1;
+
+		/*
+		 * DCP doesn't support XBGR8 / XRGB8 natively. Blending as
+		 * pre-multiplied alpha with a black background can be used as
+		 * a workaround for the bottommost plane.
+		 */
+		if (fb->format->format == DRM_FORMAT_XRGB8888 ||
+		    fb->format->format == DRM_FORMAT_XBGR8888)
+			is_premultiplied = true;
+
+		drm_rect_fp_to_int(&src_rect, &new_state->src);
+
+		req->swap.src_rect[l] = drm_to_dcp_rect(&src_rect);
+		req->swap.dst_rect[l] = drm_to_dcp_rect(&new_state->dst);
+
+		if (dcp->notch_height > 0)
+			req->swap.dst_rect[l].y += dcp->notch_height;
+
+		/*
+		 * The obvious helper call drm_fb_dma_get_gem_addr() adjusts
+		 * the address for source x/y offsets. Since IOMFB supports a
+		 * source position directly, prefer that.
+		 */
+		obj = drm_fb_dma_get_gem_obj(fb, 0);
+		if (obj)
+			req->surf_iova[l] = obj->dma_addr + fb->offsets[0];
+
+		req->surf[l] = (struct DCP_FW_NAME(dcp_surface)){
+			.is_premultiplied = is_premultiplied,
+			.format = drm_format_to_dcp(fb->format->format),
+			.xfer_func = DCP_XFER_FUNC_SDR,
+			.colorspace = DCP_COLORSPACE_NATIVE,
+			.stride = fb->pitches[0],
+			.width = fb->width,
+			.height = fb->height,
+			.buf_size = fb->height * fb->pitches[0],
+			.surface_id = req->swap.surf_ids[l],
+
+			/* Only used for compressed or multiplanar surfaces */
+			.pix_size = 1,
+			.pel_w = 1,
+			.pel_h = 1,
+			.has_comp = 1,
+			.has_planes = 1,
+		};
+
+	}
+
+	if (!has_surface && !crtc_state->color_mgmt_changed) {
+		if (crtc_state->enable && crtc_state->active &&
+		    !crtc_state->planes_changed) {
+			schedule_work(&dcp->vblank_wq);
+			return;
+		}
+
+		/* Set black background */
+		req->swap.swap_enabled |= IOMFB_SET_BACKGROUND;
+		req->swap.bg_color = 0xFF000000;
+		req->clear = 1;
+	}
+
+	/* These fields should be set together */
+	req->swap.swap_completed = req->swap.swap_enabled;
+
+	/* update brightness if changed */
+	if (dcp_has_panel(dcp) && dcp->brightness.update) {
+		req->swap.bl_unk = 1;
+		req->swap.bl_value = dcp->brightness.dac;
+		req->swap.bl_power = 0x40;
+		dcp->brightness.update = false;
+	}
+
+	if (crtc_state->color_mgmt_changed && crtc_state->ctm) {
+		struct iomfb_set_matrix_req mat;
+		struct drm_color_ctm *ctm = (struct drm_color_ctm *)crtc_state->ctm->data;
+
+		mat.unk_u32 = 9;
+		mat.r[0] = ctm->matrix[0];
+		mat.r[1] = ctm->matrix[1];
+		mat.r[2] = ctm->matrix[2];
+		mat.g[0] = ctm->matrix[3];
+		mat.g[1] = ctm->matrix[4];
+		mat.g[2] = ctm->matrix[5];
+		mat.b[0] = ctm->matrix[6];
+		mat.b[1] = ctm->matrix[7];
+		mat.b[2] = ctm->matrix[8];
+
+		iomfb_set_matrix(dcp, false, &mat, do_swap, NULL);
+	} else
+		do_swap(dcp, NULL, NULL);
+}
+
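+/*
+ * Boot-time call chain: dcp_started() -> get_color_remap_mode -> init_1
+ * (video power savings) -> init_2 (first_client_open) -> init_3 ->
+ * is_main_display, which finally marks the DCP active and completes
+ * start_done.
+ */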
+static void res_is_main_display(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	struct apple_connector *connector;
+	int result = *(int *)out;
+
+	dev_info(dcp->dev, "DCP is_main_display: %d\n", result);
+
+	dcp->main_display = result != 0;
+
+	connector = dcp->connector;
+	if (connector) {
+		connector->connected = dcp->nr_modes > 0;
+		schedule_work(&connector->hotplug_wq);
+	}
+
+	dcp->active = true;
+	complete(&dcp->start_done);
+}
+
+static void init_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	dcp_is_main_display(dcp, false, res_is_main_display, NULL);
+}
+
+static void init_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	dcp_first_client_open(dcp, false, init_3, NULL);
+}
+
+static void init_1(struct apple_dcp *dcp, void *out, void *cookie)
+{
+	u32 val = 0;
+	dcp_enable_disable_video_power_savings(dcp, false, &val, init_2, NULL);
+}
+
+static void dcp_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+	struct iomfb_get_color_remap_mode_req color_remap =
+		(struct iomfb_get_color_remap_mode_req){
+			.mode = 6,
+		};
+
+	dev_info(dcp->dev, "DCP booted\n");
+
+	iomfb_get_color_remap_mode(dcp, false, &color_remap, init_1, cookie);
+}
+
+void DCP_FW_NAME(iomfb_shutdown)(struct apple_dcp *dcp)
+{
+	struct dcp_set_power_state_req req = {
+		/* defaults are ok */
+	};
+
+	dcp_set_power_state(dcp, false, &req, NULL, NULL);
+}
diff --git a/drivers/gpu/drm/apple/iomfb_template.h b/drivers/gpu/drm/apple/iomfb_template.h
new file mode 100644
index 00000000000000..f446a4d8f38b90
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_template.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+/*
+ * This file is intended to be included multiple times with DCP_FW_VER
+ * defined to declare DCP firmware version dependent structs.
+ */
+
+#ifdef DCP_FW_VER
+
+#include <drm/drm_crtc.h>
+
+#include <linux/types.h>
+
+#include "iomfb.h"
+#include "version_utils.h"
+
+struct DCP_FW_NAME(dcp_swap) {
+	u64 ts1;
+	u64 ts2;
+	u64 unk_10[6];
+	u64 flags1;
+	u64 flags2;
+
+	u32 swap_id;
+
+	u32 surf_ids[SWAP_SURFACES];
+	struct dcp_rect src_rect[SWAP_SURFACES];
+	u32 surf_flags[SWAP_SURFACES];
+	u32 surf_unk[SWAP_SURFACES];
+	struct dcp_rect dst_rect[SWAP_SURFACES];
+	u32 swap_enabled;
+	u32 swap_completed;
+
+	u32 bg_color;
+	u8 unk_110[0x1b8];
+	u32 unk_2c8;
+	u8 unk_2cc[0x14];
+	u32 unk_2e0;
+#if DCP_FW_VER < DCP_FW_VERSION(13, 2, 0)
+	u16 unk_2e2;
+#else
+	u8 unk_2e2[3];
+#endif
+	u64 bl_unk;
+	u32 bl_value; // min value is 0x10000000
+	u8  bl_power; // constant 0x40 for on
+	u8 unk_2f3[0x2d];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u8 unk_320[0x13f];
+	u64 unk_1;
+#endif
+} __packed;
+
+/* Information describing a surface */
+struct DCP_FW_NAME(dcp_surface) {
+	u8 is_tiled;
+	u8 is_tearing_allowed;
+	u8 is_premultiplied;
+	u32 plane_cnt;
+	u32 plane_cnt2;
+	u32 format; /* DCP fourcc */
+	u32 ycbcr_matrix;
+	u8 xfer_func;
+	u8 colorspace;
+	u32 stride;
+	u16 pix_size;
+	u8 pel_w;
+	u8 pel_h;
+	u32 offset;
+	u32 width;
+	u32 height;
+	u32 buf_size;
+	u64 protection_opts;
+	u32 surface_id;
+	struct dcp_component_types comp_types[MAX_PLANES];
+	u64 has_comp;
+	struct dcp_plane_info planes[MAX_PLANES];
+	u64 has_planes;
+	u32 compression_info[MAX_PLANES][13];
+	u64 has_compr_info;
+	u32 unk_num;
+	u32 unk_denom;
+#if DCP_FW_VER < DCP_FW_VERSION(13, 2, 0)
+	u8 padding[7];
+#else
+	u8 padding[47];
+#endif
+} __packed;
+
+/* Request/response structs */
+
+struct DCP_FW_NAME(dcp_swap_submit_req) {
+	struct DCP_FW_NAME(dcp_swap) swap;
+	struct DCP_FW_NAME(dcp_surface) surf[SWAP_SURFACES];
+	u64 surf_iova[SWAP_SURFACES];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u64 unk_u64_a[SWAP_SURFACES];
+	struct DCP_FW_NAME(dcp_surface) surf2[5];
+	u64 surf2_iova[5];
+#endif
+	u8 unkbool;
+	u64 unkdouble;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u64 unkU64;
+	u8 unkbool2;
+#endif
+	u32 clear; // or maybe switch to default fb?
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u32 unkU32Ptr;
+#endif
+	u8 swap_null;
+	u8 surf_null[SWAP_SURFACES];
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u8 surf2_null[5];
+#endif
+	u8 unkoutbool_null;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u8 unkU32Ptr_null;
+	u8 unkU32out_null;
+#endif
+	u8 padding[1];
+} __packed;
+
+struct DCP_FW_NAME(dcp_swap_submit_resp) {
+	u8 unkoutbool;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u32 unkU32out;
+#endif
+	u32 ret;
+	u8 padding[3];
+} __packed;
+
+struct DCP_FW_NAME(dc_swap_complete_resp) {
+	u32 swap_id;
+	u8 unkbool;
+	u64 swap_data;
+#if DCP_FW_VER < DCP_FW_VERSION(13, 2, 0)
+	u8 swap_info[0x6c4];
+#else
+	u8 swap_info[0x6c5];
+#endif
+	u32 unkint;
+	u8 swap_info_null;
+} __packed;
+
+struct DCP_FW_NAME(dcp_map_reg_req) {
+	char obj[4];
+	u32 index;
+	u32 flags;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u8 unk_u64_null;
+#endif
+	u8 addr_null;
+	u8 length_null;
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u8 padding[1];
+#else
+	u8 padding[2];
+#endif
+} __packed;
+
+struct DCP_FW_NAME(dcp_map_reg_resp) {
+#if DCP_FW_VER >= DCP_FW_VERSION(13, 2, 0)
+	u64 dva;
+#endif
+	u64 addr;
+	u64 length;
+	u32 ret;
+} __packed;
+
+struct apple_dcp;
+
+int DCP_FW_NAME(iomfb_modeset)(struct apple_dcp *dcp,
+			       struct drm_crtc_state *crtc_state);
+void DCP_FW_NAME(iomfb_flush)(struct apple_dcp *dcp, struct drm_crtc *crtc,
+			      struct drm_atomic_state *state);
+void DCP_FW_NAME(iomfb_poweron)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_poweroff)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_sleep)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_start)(struct apple_dcp *dcp);
+void DCP_FW_NAME(iomfb_shutdown)(struct apple_dcp *dcp);
+
+#endif
diff --git a/drivers/gpu/drm/apple/iomfb_v12_3.c b/drivers/gpu/drm/apple/iomfb_v12_3.c
new file mode 100644
index 00000000000000..0fe08c42d64659
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_v12_3.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#include "iomfb_v12_3.h"
+#include "iomfb_v13_3.h"
+#include "version_utils.h"
+
+static const struct dcp_method_entry dcp_methods[dcpep_num_methods] = {
+	IOMFB_METHOD("A000", dcpep_late_init_signal),
+	IOMFB_METHOD("A029", dcpep_setup_video_limits),
+	IOMFB_METHOD("A131", iomfbep_a131_pmu_service_matched),
+	IOMFB_METHOD("A132", iomfbep_a132_backlight_service_matched),
+	IOMFB_METHOD("A357", dcpep_set_create_dfb),
+	IOMFB_METHOD("A358", iomfbep_a358_vi_set_temperature_hint),
+	IOMFB_METHOD("A401", dcpep_start_signal),
+	IOMFB_METHOD("A407", dcpep_swap_start),
+	IOMFB_METHOD("A408", dcpep_swap_submit),
+	IOMFB_METHOD("A410", dcpep_set_display_device),
+	IOMFB_METHOD("A411", dcpep_is_main_display),
+	IOMFB_METHOD("A412", dcpep_set_digital_out_mode),
+	IOMFB_METHOD("A422", iomfbep_set_matrix),
+	IOMFB_METHOD("A426", iomfbep_get_color_remap_mode),
+	IOMFB_METHOD("A439", dcpep_set_parameter_dcp),
+	IOMFB_METHOD("A443", dcpep_create_default_fb),
+	IOMFB_METHOD("A447", dcpep_enable_disable_video_power_savings),
+	IOMFB_METHOD("A454", dcpep_first_client_open),
+	IOMFB_METHOD("A455", iomfbep_last_client_close),
+	IOMFB_METHOD("A460", dcpep_set_display_refresh_properties),
+	IOMFB_METHOD("A463", dcpep_flush_supports_power),
+	IOMFB_METHOD("A464", iomfbep_abort_swaps_dcp),
+	IOMFB_METHOD("A468", dcpep_set_power_state),
+};
+
+#define DCP_FW v12_3
+#define DCP_FW_VER DCP_FW_VERSION(12, 3, 0)
+
+#include "iomfb_template.c"
+
+static const iomfb_cb_handler cb_handlers[IOMFB_MAX_CB] = {
+	[0] = trampoline_true, /* did_boot_signal */
+	[1] = trampoline_true, /* did_power_on_signal */
+	[2] = trampoline_nop, /* will_power_off_signal */
+	[3] = trampoline_rt_bandwidth,
+	[100] = iomfbep_cb_match_pmu_service,
+	[101] = trampoline_zero, /* get_display_default_stride */
+	[102] = trampoline_nop, /* set_number_property */
+	[103] = trampoline_nop, /* set_boolean_property */
+	[106] = trampoline_nop, /* remove_property */
+	[107] = trampoline_true, /* create_provider_service */
+	[108] = trampoline_true, /* create_product_service */
+	[109] = trampoline_true, /* create_pmu_service */
+	[110] = trampoline_true, /* create_iomfb_service */
+	[111] = trampoline_create_backlight_service,
+	[116] = dcpep_cb_boot_1,
+	[117] = trampoline_false, /* is_dark_boot */
+	[118] = trampoline_false, /* is_dark_boot / is_waking_from_hibernate*/
+	[120] = trampoline_read_edt_data,
+	[122] = trampoline_prop_start,
+	[123] = trampoline_prop_chunk,
+	[124] = trampoline_prop_end,
+	[201] = trampoline_map_piodma,
+	[202] = trampoline_unmap_piodma,
+	[206] = iomfbep_cb_match_pmu_service_2,
+	[207] = iomfbep_cb_match_backlight_service,
+	[208] = trampoline_get_time,
+	[211] = trampoline_nop, /* update_backlight_factor_prop */
+	[300] = trampoline_pr_publish,
+	[401] = trampoline_get_uint_prop,
+	[404] = trampoline_nop, /* sr_set_uint_prop */
+	[406] = trampoline_set_fx_prop,
+	[408] = trampoline_get_frequency,
+	[411] = trampoline_map_reg,
+	[413] = trampoline_true, /* sr_set_property_dict */
+	[414] = trampoline_sr_set_property_int,
+	[415] = trampoline_true, /* sr_set_property_bool */
+	[451] = trampoline_allocate_buffer,
+	[452] = trampoline_map_physical,
+	[456] = trampoline_release_mem_desc,
+	[552] = trampoline_true, /* set_property_dict_0 */
+	[561] = trampoline_true, /* set_property_dict */
+	[563] = trampoline_true, /* set_property_int */
+	[565] = trampoline_true, /* set_property_bool */
+	[567] = trampoline_true, /* set_property_str */
+	[574] = trampoline_zero, /* power_up_dart */
+	[576] = trampoline_hotplug,
+	[577] = trampoline_nop, /* powerstate_notify */
+	[582] = trampoline_true, /* create_default_fb_surface */
+	[584] = trampoline_nop, /* IOMobileFramebufferAP::clear_default_surface */
+	[588] = trampoline_nop, /* resize_default_fb_surface_gated */
+	[589] = trampoline_swap_complete,
+	[591] = trampoline_swap_complete_intent_gated,
+	[592] = trampoline_abort_swap_ap_gated,
+	[593] = trampoline_enable_backlight_message_ap_gated,
+	[594] = trampoline_nop, /* IOMobileFramebufferAP::setSystemConsoleMode */
+	[596] = trampoline_false, /* IOMobileFramebufferAP::isDFBAllocated */
+	[597] = trampoline_false, /* IOMobileFramebufferAP::preserveContents */
+	[598] = trampoline_nop, /* find_swap_function_gated */
+};
+
+void DCP_FW_NAME(iomfb_start)(struct apple_dcp *dcp)
+{
+	dcp->cb_handlers = cb_handlers;
+
+	dcp_start_signal(dcp, false, dcp_started, NULL);
+}
+
+#undef DCP_FW_VER
+#undef DCP_FW
diff --git a/drivers/gpu/drm/apple/iomfb_v12_3.h b/drivers/gpu/drm/apple/iomfb_v12_3.h
new file mode 100644
index 00000000000000..7359685d981fe5
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_v12_3.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef __APPLE_IOMFB_V12_3_H__
+#define __APPLE_IOMFB_V12_3_H__
+
+#include "version_utils.h"
+
+#define DCP_FW v12_3
+#define DCP_FW_VER DCP_FW_VERSION(12, 3, 0)
+
+#include "iomfb_template.h"
+
+#undef DCP_FW_VER
+#undef DCP_FW
+
+#endif /* __APPLE_IOMFB_V12_3_H__ */
diff --git a/drivers/gpu/drm/apple/iomfb_v13_3.c b/drivers/gpu/drm/apple/iomfb_v13_3.c
new file mode 100644
index 00000000000000..0ac869d24eb01b
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_v13_3.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright The Asahi Linux Contributors */
+
+#include "iomfb_v12_3.h"
+#include "iomfb_v13_3.h"
+#include "version_utils.h"
+
+static const struct dcp_method_entry dcp_methods[dcpep_num_methods] = {
+	IOMFB_METHOD("A000", dcpep_late_init_signal),
+	IOMFB_METHOD("A029", dcpep_setup_video_limits),
+	IOMFB_METHOD("A131", iomfbep_a131_pmu_service_matched),
+	IOMFB_METHOD("A132", iomfbep_a132_backlight_service_matched),
+	IOMFB_METHOD("A373", dcpep_set_create_dfb),
+	IOMFB_METHOD("A374", iomfbep_a358_vi_set_temperature_hint),
+	IOMFB_METHOD("A401", dcpep_start_signal),
+	IOMFB_METHOD("A407", dcpep_swap_start),
+	IOMFB_METHOD("A408", dcpep_swap_submit),
+	IOMFB_METHOD("A410", dcpep_set_display_device),
+	IOMFB_METHOD("A411", dcpep_is_main_display),
+	IOMFB_METHOD("A412", dcpep_set_digital_out_mode),
+	IOMFB_METHOD("A422", iomfbep_set_matrix),
+	IOMFB_METHOD("A426", iomfbep_get_color_remap_mode),
+	IOMFB_METHOD("A441", dcpep_set_parameter_dcp),
+	IOMFB_METHOD("A445", dcpep_create_default_fb),
+	IOMFB_METHOD("A449", dcpep_enable_disable_video_power_savings),
+	IOMFB_METHOD("A456", dcpep_first_client_open),
+	IOMFB_METHOD("A457", iomfbep_last_client_close),
+	IOMFB_METHOD("A463", dcpep_set_display_refresh_properties),
+	IOMFB_METHOD("A466", dcpep_flush_supports_power),
+	IOMFB_METHOD("A467", iomfbep_abort_swaps_dcp),
+	IOMFB_METHOD("A472", dcpep_set_power_state),
+};
+
+#define DCP_FW v13_3
+#define DCP_FW_VER DCP_FW_VERSION(13, 3, 0)
+
+#include "iomfb_template.c"
+
+static const iomfb_cb_handler cb_handlers[IOMFB_MAX_CB] = {
+	[0] = trampoline_true, /* did_boot_signal */
+	[1] = trampoline_true, /* did_power_on_signal */
+	[2] = trampoline_nop, /* will_power_off_signal */
+	[3] = trampoline_rt_bandwidth,
+	[6] = trampoline_set_frame_sync_props,
+	[100] = iomfbep_cb_match_pmu_service,
+	[101] = trampoline_zero, /* get_display_default_stride */
+	[102] = trampoline_nop, /* set_number_property */
+	[103] = trampoline_nop, /* trigger_user_cal_loader */
+	[104] = trampoline_nop, /* set_boolean_property */
+	[107] = trampoline_nop, /* remove_property */
+	[108] = trampoline_true, /* create_provider_service */
+	[109] = trampoline_true, /* create_product_service */
+	[110] = trampoline_true, /* create_pmu_service */
+	[111] = trampoline_true, /* create_iomfb_service */
+	[112] = trampoline_create_backlight_service,
+	[113] = trampoline_true, /* create_nvram_service? */
+	[114] = trampoline_get_tiling_state,
+	[115] = trampoline_false, /* set_tiling_state */
+	[120] = dcpep_cb_boot_1,
+	[121] = trampoline_false, /* is_dark_boot */
+	[122] = trampoline_false, /* is_dark_boot / is_waking_from_hibernate*/
+	[124] = trampoline_read_edt_data,
+	[126] = trampoline_prop_start,
+	[127] = trampoline_prop_chunk,
+	[128] = trampoline_prop_end,
+	[129] = trampoline_allocate_bandwidth,
+	[201] = trampoline_map_piodma,
+	[202] = trampoline_unmap_piodma,
+	[206] = iomfbep_cb_match_pmu_service_2,
+	[207] = iomfbep_cb_match_backlight_service,
+	[208] = trampoline_nop, /* update_backlight_factor_prop */
+	[209] = trampoline_get_time,
+	[300] = trampoline_pr_publish,
+	[401] = trampoline_get_uint_prop,
+	[404] = trampoline_nop, /* sr_set_uint_prop */
+	[406] = trampoline_set_fx_prop,
+	[408] = trampoline_get_frequency,
+	[411] = trampoline_map_reg,
+	[413] = trampoline_true, /* sr_set_property_dict */
+	[414] = trampoline_sr_set_property_int,
+	[415] = trampoline_true, /* sr_set_property_bool */
+	[451] = trampoline_allocate_buffer,
+	[452] = trampoline_map_physical,
+	[454] = trampoline_release_mem_desc,
+	[552] = trampoline_true, /* set_property_dict_0 */
+	[561] = trampoline_true, /* set_property_dict */
+	[563] = trampoline_true, /* set_property_int */
+	[565] = trampoline_true, /* set_property_bool */
+	[567] = trampoline_true, /* set_property_str */
+	[574] = trampoline_zero, /* power_up_dart */
+	[576] = trampoline_hotplug,
+	[577] = trampoline_nop, /* powerstate_notify */
+	[582] = trampoline_true, /* create_default_fb_surface */
+	[584] = trampoline_nop, /* IOMobileFramebufferAP::clear_default_surface */
+	[588] = trampoline_nop, /* resize_default_fb_surface_gated */
+	[589] = trampoline_swap_complete,
+	[591] = trampoline_swap_complete_intent_gated,
+	[592] = trampoline_abort_swap_ap_gated,
+	[593] = trampoline_enable_backlight_message_ap_gated,
+	[594] = trampoline_nop, /* IOMobileFramebufferAP::setSystemConsoleMode */
+	[596] = trampoline_false, /* IOMobileFramebufferAP::isDFBAllocated */
+	[597] = trampoline_false, /* IOMobileFramebufferAP::preserveContents */
+	[598] = trampoline_nop, /* find_swap_function_gated */
+};
+
+void DCP_FW_NAME(iomfb_start)(struct apple_dcp *dcp)
+{
+	dcp->cb_handlers = cb_handlers;
+
+	dcp_start_signal(dcp, false, dcp_started, NULL);
+}
+
+#undef DCP_FW_VER
+#undef DCP_FW
diff --git a/drivers/gpu/drm/apple/iomfb_v13_3.h b/drivers/gpu/drm/apple/iomfb_v13_3.h
new file mode 100644
index 00000000000000..bbb3156b40f893
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb_v13_3.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef __APPLE_IOMFB_V13_3_H__
+#define __APPLE_IOMFB_V13_3_H__
+
+#include "version_utils.h"
+
+#define DCP_FW v13_3
+#define DCP_FW_VER DCP_FW_VERSION(13, 3, 0)
+
+#include "iomfb_template.h"
+
+#undef DCP_FW_VER
+#undef DCP_FW
+
+#endif /* __APPLE_IOMFB_V13_3_H__ */
diff --git a/drivers/gpu/drm/apple/parser.c b/drivers/gpu/drm/apple/parser.c
new file mode 100644
index 00000000000000..700a31883eac8e
--- /dev/null
+++ b/drivers/gpu/drm/apple/parser.c
@@ -0,0 +1,1042 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+#include <sound/pcm.h> // for sound format masks
+#endif
+
+#include "parser.h"
+#include "trace.h"
+
+#define DCP_PARSE_HEADER 0xd3
+
+enum dcp_parse_type {
+	DCP_TYPE_DICTIONARY = 1,
+	DCP_TYPE_ARRAY = 2,
+	DCP_TYPE_INT64 = 4,
+	DCP_TYPE_STRING = 9,
+	DCP_TYPE_BLOB = 10,
+	DCP_TYPE_BOOL = 11
+};
+
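+/*
+ * Serialized values are prefixed with a 32-bit tag: a 24-bit payload size,
+ * a 5-bit type, two padding bits (must be zero) and a "last" flag.
+ */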
+struct dcp_parse_tag {
+	unsigned int size : 24;
+	enum dcp_parse_type type : 5;
+	unsigned int padding : 2;
+	bool last : 1;
+} __packed;
+
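+/* Consume 'count' bytes from the blob, bounds-checked against its length */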
+static const void *parse_bytes(struct dcp_parse_ctx *ctx, size_t count)
+{
+	const void *ptr = ctx->blob + ctx->pos;
+
+	if (ctx->pos + count > ctx->len)
+		return ERR_PTR(-EINVAL);
+
+	ctx->pos += count;
+	return ptr;
+}
+
+static const u32 *parse_u32(struct dcp_parse_ctx *ctx)
+{
+	return parse_bytes(ctx, sizeof(u32));
+}
+
+static const struct dcp_parse_tag *parse_tag(struct dcp_parse_ctx *ctx)
+{
+	const struct dcp_parse_tag *tag;
+
+	/* Align to 32-bits */
+	ctx->pos = round_up(ctx->pos, 4);
+
+	tag = parse_bytes(ctx, sizeof(struct dcp_parse_tag));
+
+	if (IS_ERR(tag))
+		return tag;
+
+	if (tag->padding)
+		return ERR_PTR(-EINVAL);
+
+	return tag;
+}
+
+static const struct dcp_parse_tag *parse_tag_of_type(struct dcp_parse_ctx *ctx,
+					       enum dcp_parse_type type)
+{
+	const struct dcp_parse_tag *tag = parse_tag(ctx);
+
+	if (IS_ERR(tag))
+		return tag;
+
+	if (tag->type != type)
+		return ERR_PTR(-EINVAL);
+
+	return tag;
+}
+
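+/* Recursively skip a single value, including nested dictionaries and arrays */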
+static int skip(struct dcp_parse_ctx *handle)
+{
+	const struct dcp_parse_tag *tag = parse_tag(handle);
+	int ret = 0;
+	int i;
+
+	if (IS_ERR(tag))
+		return PTR_ERR(tag);
+
+	switch (tag->type) {
+	case DCP_TYPE_DICTIONARY:
+		for (i = 0; i < tag->size; ++i) {
+			ret |= skip(handle); /* key */
+			ret |= skip(handle); /* value */
+		}
+
+		return ret;
+
+	case DCP_TYPE_ARRAY:
+		for (i = 0; i < tag->size; ++i)
+			ret |= skip(handle);
+
+		return ret;
+
+	case DCP_TYPE_INT64:
+		handle->pos += sizeof(s64);
+		return 0;
+
+	case DCP_TYPE_STRING:
+	case DCP_TYPE_BLOB:
+		handle->pos += tag->size;
+		return 0;
+
+	case DCP_TYPE_BOOL:
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+static int skip_pair(struct dcp_parse_ctx *handle)
+{
+	int ret;
+
+	ret = skip(handle);
+	if (ret)
+		return ret;
+
+	return skip(handle);
+}
+
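+/*
+ * Consume the next value only if it is a string equal to 'specimen';
+ * otherwise leave it unconsumed and return false.
+ */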
+static bool consume_string(struct dcp_parse_ctx *ctx, const char *specimen)
+{
+	const struct dcp_parse_tag *tag;
+	const char *key;
+
+	ctx->pos = round_up(ctx->pos, 4);
+
+	if (ctx->pos + sizeof(*tag) + strlen(specimen) - 1 > ctx->len)
+		return false;
+	tag = ctx->blob + ctx->pos;
+	key = ctx->blob + ctx->pos + sizeof(*tag);
+	if (tag->padding)
+		return false;
+
+	if (tag->type != DCP_TYPE_STRING ||
+	    tag->size != strlen(specimen) ||
+	    strncmp(key, specimen, tag->size))
+		return false;
+
+	skip(ctx);
+	return true;
+}
+#endif
+
+/* Caller must free the result */
+static char *parse_string(struct dcp_parse_ctx *handle)
+{
+	const struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_STRING);
+	const char *in;
+	char *out;
+
+	if (IS_ERR(tag))
+		return (void *)tag;
+
+	in = parse_bytes(handle, tag->size);
+	if (IS_ERR(in))
+		return (void *)in;
+
+	out = kmalloc(tag->size + 1, GFP_KERNEL);
+	if (!out)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(out, in, tag->size);
+	out[tag->size] = '\0';
+	return out;
+}
+
+static int parse_int(struct dcp_parse_ctx *handle, s64 *value)
+{
+	const void *tag = parse_tag_of_type(handle, DCP_TYPE_INT64);
+	const s64 *in;
+
+	if (IS_ERR(tag))
+		return PTR_ERR(tag);
+
+	in = parse_bytes(handle, sizeof(s64));
+
+	if (IS_ERR(in))
+		return PTR_ERR(in);
+
+	memcpy(value, in, sizeof(*value));
+	return 0;
+}
+
+static int parse_bool(struct dcp_parse_ctx *handle, bool *b)
+{
+	const struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_BOOL);
+
+	if (IS_ERR(tag))
+		return PTR_ERR(tag);
+
+	*b = !!tag->size;
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+static int parse_blob(struct dcp_parse_ctx *handle, size_t size, u8 const **blob)
+{
+	const struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_BLOB);
+	const u8 *out;
+
+	if (IS_ERR(tag))
+		return PTR_ERR(tag);
+
+	if (tag->size < size)
+		return -EINVAL;
+
+	out = parse_bytes(handle, tag->size);
+
+	if (IS_ERR(out))
+		return PTR_ERR(out);
+
+	*blob = out;
+	return 0;
+}
+#endif
+
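+/* Iterator over the elements of a serialized array or dictionary */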
+struct iterator {
+	struct dcp_parse_ctx *handle;
+	u32 idx, len;
+};
+
+static int iterator_begin(struct dcp_parse_ctx *handle, struct iterator *it,
+			  bool dict)
+{
+	const struct dcp_parse_tag *tag;
+	enum dcp_parse_type type = dict ? DCP_TYPE_DICTIONARY : DCP_TYPE_ARRAY;
+
+	*it = (struct iterator) {
+		.handle = handle,
+		.idx = 0
+	};
+
+	tag = parse_tag_of_type(it->handle, type);
+	if (IS_ERR(tag))
+		return PTR_ERR(tag);
+
+	it->len = tag->size;
+	return 0;
+}
+
+#define dcp_parse_foreach_in_array(handle, it)                                 \
+	for (iterator_begin(handle, &it, false); it.idx < it.len; ++it.idx)
+#define dcp_parse_foreach_in_dict(handle, it)                                  \
+	for (iterator_begin(handle, &it, true); it.idx < it.len; ++it.idx)
+
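+/* Set up a parse context over 'blob' and validate the header magic */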
+int parse(const void *blob, size_t size, struct dcp_parse_ctx *ctx)
+{
+	const u32 *header;
+
+	*ctx = (struct dcp_parse_ctx) {
+		.blob = blob,
+		.len = size,
+		.pos = 0,
+	};
+
+	header = parse_u32(ctx);
+	if (IS_ERR(header))
+		return PTR_ERR(header);
+
+	if (*header != DCP_PARSE_HEADER)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int parse_dimension(struct dcp_parse_ctx *handle, struct dimension *dim)
+{
+	struct iterator it;
+	int ret = 0;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		char *key = parse_string(it.handle);
+
+		if (IS_ERR(key))
+			ret = PTR_ERR(key);
+		else if (!strcmp(key, "Active"))
+			ret = parse_int(it.handle, &dim->active);
+		else if (!strcmp(key, "Total"))
+			ret = parse_int(it.handle, &dim->total);
+		else if (!strcmp(key, "FrontPorch"))
+			ret = parse_int(it.handle, &dim->front_porch);
+		else if (!strcmp(key, "SyncWidth"))
+			ret = parse_int(it.handle, &dim->sync_width);
+		else if (!strcmp(key, "PreciseSyncRate"))
+			ret = parse_int(it.handle, &dim->precise_sync_rate);
+		else
+			skip(it.handle);
+
+		if (!IS_ERR_OR_NULL(key))
+			kfree(key);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+struct color_mode {
+	s64 colorimetry;
+	s64 depth;
+	s64 dynamic_range;
+	s64 eotf;
+	s64 id;
+	s64 pixel_encoding;
+	s64 score;
+};
+
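+/* Keep the highest-scored color mode that passes the validity checks */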
+static int fill_color_mode(struct dcp_color_mode *color,
+			   struct color_mode *cmode)
+{
+	if (color->score >= cmode->score)
+		return 0;
+
+	if (cmode->colorimetry < 0 || cmode->colorimetry >= DCP_COLORIMETRY_COUNT)
+		return -EINVAL;
+	if (cmode->depth < 8 || cmode->depth > 12)
+		return -EINVAL;
+	if (cmode->dynamic_range < 0 || cmode->dynamic_range >= DCP_COLOR_YCBCR_RANGE_COUNT)
+		return -EINVAL;
+	if (cmode->eotf < 0 || cmode->eotf >= DCP_EOTF_COUNT)
+		return -EINVAL;
+	if (cmode->pixel_encoding < 0 || cmode->pixel_encoding >= DCP_COLOR_FORMAT_COUNT)
+		return -EINVAL;
+
+	color->score = cmode->score;
+	color->id = cmode->id;
+	color->eotf = cmode->eotf;
+	color->format = cmode->pixel_encoding;
+	color->colorimetry = cmode->colorimetry;
+	color->range = cmode->dynamic_range;
+	color->depth = cmode->depth;
+
+	return 0;
+}
+
+static int parse_color_modes(struct dcp_parse_ctx *handle,
+			     struct dcp_display_mode *out)
+{
+	struct iterator outer_it;
+	int ret = 0;
+	out->sdr_444.score = -1;
+	out->sdr_rgb.score = -1;
+	out->sdr.score = -1;
+	out->best.score = -1;
+
+	dcp_parse_foreach_in_array(handle, outer_it) {
+		struct iterator it;
+		bool is_virtual = true;
+		struct color_mode cmode;
+
+		dcp_parse_foreach_in_dict(handle, it) {
+			char *key = parse_string(it.handle);
+
+			if (IS_ERR(key))
+				ret = PTR_ERR(key);
+			else if (!strcmp(key, "Colorimetry"))
+				ret = parse_int(it.handle, &cmode.colorimetry);
+			else if (!strcmp(key, "Depth"))
+				ret = parse_int(it.handle, &cmode.depth);
+			else if (!strcmp(key, "DynamicRange"))
+				ret = parse_int(it.handle, &cmode.dynamic_range);
+			else if (!strcmp(key, "EOTF"))
+				ret = parse_int(it.handle, &cmode.eotf);
+			else if (!strcmp(key, "ID"))
+				ret = parse_int(it.handle, &cmode.id);
+			else if (!strcmp(key, "IsVirtual"))
+				ret = parse_bool(it.handle, &is_virtual);
+			else if (!strcmp(key, "PixelEncoding"))
+				ret = parse_int(it.handle, &cmode.pixel_encoding);
+			else if (!strcmp(key, "Score"))
+				ret = parse_int(it.handle, &cmode.score);
+			else
+				skip(it.handle);
+
+			if (!IS_ERR_OR_NULL(key))
+				kfree(key);
+
+			if (ret)
+				return ret;
+		}
+
+		/* Skip virtual or partial entries */
+		if (is_virtual || cmode.score < 0 || cmode.id < 0)
+			continue;
+
+		trace_iomfb_color_mode(handle->dcp, cmode.id, cmode.score,
+				       cmode.depth, cmode.colorimetry,
+				       cmode.eotf, cmode.dynamic_range,
+				       cmode.pixel_encoding);
+
+		if (cmode.eotf == DCP_EOTF_SDR_GAMMA) {
+			if (cmode.pixel_encoding == DCP_COLOR_FORMAT_RGB &&
+				cmode.depth <= 10)
+				fill_color_mode(&out->sdr_rgb, &cmode);
+			else if (cmode.pixel_encoding == DCP_COLOR_FORMAT_YCBCR444 &&
+				cmode.depth <= 10)
+				fill_color_mode(&out->sdr_444, &cmode);
+			fill_color_mode(&out->sdr, &cmode);
+		}
+		fill_color_mode(&out->best, &cmode);
+	}
+
+	return 0;
+}
+
+/*
+ * Calculate the pixel clock for a mode given the 16:16 fixed-point refresh
+ * rate. The pixel clock is the refresh rate times the pixel count. DRM
+ * specifies the clock in kHz. The intermediate result may overflow a u32, so
+ * use a u64 where required.
+ */
+static u32 calculate_clock(struct dimension *horiz, struct dimension *vert)
+{
+	u32 pixels = horiz->total * vert->total;
+	u64 clock = mul_u32_u32(pixels, vert->precise_sync_rate);
+
+	return DIV_ROUND_CLOSEST_ULL(clock >> 16, 1000);
+}
+
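+/*
+ * Parse a single timing mode dictionary into a struct dcp_display_mode,
+ * rejecting virtual modes and modes without a usable color mode.
+ */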
+static int parse_mode(struct dcp_parse_ctx *handle,
+		      struct dcp_display_mode *out, s64 *score, int width_mm,
+		      int height_mm, unsigned notch_height)
+{
+	int ret = 0;
+	struct iterator it;
+	struct dimension horiz, vert;
+	s64 id = -1;
+	s64 best_color_mode = -1;
+	bool is_virtual = false;
+	struct drm_display_mode *mode = &out->mode;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		char *key = parse_string(it.handle);
+
+		if (IS_ERR(key))
+			ret = PTR_ERR(key);
+		else if (is_virtual)
+			skip(it.handle);
+		else if (!strcmp(key, "HorizontalAttributes"))
+			ret = parse_dimension(it.handle, &horiz);
+		else if (!strcmp(key, "VerticalAttributes"))
+			ret = parse_dimension(it.handle, &vert);
+		else if (!strcmp(key, "ColorModes"))
+			ret = parse_color_modes(it.handle, out);
+		else if (!strcmp(key, "ID"))
+			ret = parse_int(it.handle, &id);
+		else if (!strcmp(key, "IsVirtual"))
+			ret = parse_bool(it.handle, &is_virtual);
+		else if (!strcmp(key, "Score"))
+			ret = parse_int(it.handle, score);
+		else
+			skip(it.handle);
+
+		if (!IS_ERR_OR_NULL(key))
+			kfree(key);
+
+		if (ret) {
+			trace_iomfb_parse_mode_fail(id, &horiz, &vert, best_color_mode, is_virtual, *score);
+			return ret;
+		}
+	}
+	if (out->sdr_rgb.score >= 0)
+		best_color_mode = out->sdr_rgb.id;
+	else if (out->sdr_444.score >= 0)
+		best_color_mode = out->sdr_444.id;
+	else if (out->sdr.score >= 0)
+		best_color_mode = out->sdr.id;
+	else if (out->best.score >= 0)
+		best_color_mode = out->best.id;
+
+	trace_iomfb_parse_mode_success(id, &horiz, &vert, best_color_mode,
+				       is_virtual, *score);
+
+	/* Reject modes without a valid color mode */
+	if (best_color_mode < 0)
+		return -EINVAL;
+
+	/*
+	 * We need to skip virtual modes. In some cases, virtual modes are "too
+	 * big" for the monitor and can cause breakage. It is unclear why the
+	 * DCP reports these modes at all. Treat this as a recoverable error.
+	 */
+	if (is_virtual)
+		return -EINVAL;
+
+	/*
+	 * HACK:
+	 * Ignore the 120 Hz mode on j314/j316 (identified by resolution).
+	 * DCP limits normal swaps to 60 Hz anyway and the 120 Hz mode might
+	 * cause choppiness with X11. Just downscoring it to make the 60 Hz
+	 * mode the preferred one does not seem to be enough for some user
+	 * space.
+	 */
+	if (vert.precise_sync_rate >> 16 == 120 &&
+	    ((horiz.active == 3024 && vert.active == 1964) ||
+	     (horiz.active == 3456 && vert.active == 2234)))
+		return -EINVAL;
+
+	vert.active -= notch_height;
+	vert.sync_width += notch_height;
+
+	/* From here we must succeed. Start filling out the mode. */
+	*mode = (struct drm_display_mode) {
+		.type = DRM_MODE_TYPE_DRIVER,
+		.clock = calculate_clock(&horiz, &vert),
+
+		.vdisplay = vert.active,
+		.vsync_start = vert.active + vert.front_porch,
+		.vsync_end = vert.active + vert.front_porch + vert.sync_width,
+		.vtotal = vert.total,
+
+		.hdisplay = horiz.active,
+		.hsync_start = horiz.active + horiz.front_porch,
+		.hsync_end = horiz.active + horiz.front_porch +
+			     horiz.sync_width,
+		.htotal = horiz.total,
+
+		.width_mm = width_mm,
+		.height_mm = height_mm,
+	};
+
+	drm_mode_set_name(mode);
+
+	out->timing_mode_id = id;
+	out->color_mode_id = best_color_mode;
+
+	trace_iomfb_timing_mode(handle->dcp, id, *score, horiz.active,
+				vert.active, vert.precise_sync_rate,
+				best_color_mode);
+
+	return 0;
+}
+
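+/*
+ * Parse the array of timing modes reported by DCP. Entries which fail to
+ * parse are skipped; the highest-scored mode is marked as preferred.
+ */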
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+					 unsigned int *count, int width_mm,
+					 int height_mm, unsigned notch_height)
+{
+	struct iterator it;
+	int ret;
+	struct dcp_display_mode *mode, *modes;
+	struct dcp_display_mode *best_mode = NULL;
+	s64 score, best_score = -1;
+
+	ret = iterator_begin(handle, &it, false);
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	/* Start with a worst case allocation */
+	modes = kmalloc_array(it.len, sizeof(*modes), GFP_KERNEL);
+	*count = 0;
+
+	if (!modes)
+		return ERR_PTR(-ENOMEM);
+
+	for (; it.idx < it.len; ++it.idx) {
+		mode = &modes[*count];
+		ret = parse_mode(it.handle, mode, &score, width_mm, height_mm, notch_height);
+
+		/* Errors for a single mode are recoverable -- just skip it. */
+		if (ret)
+			continue;
+
+		/* Process a successful mode */
+		(*count)++;
+
+		if (score > best_score) {
+			best_score = score;
+			best_mode = mode;
+		}
+	}
+
+	if (best_mode != NULL)
+		best_mode->mode.type |= DRM_MODE_TYPE_PREFERRED;
+
+	return modes;
+}
+
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+			     int *height_mm)
+{
+	int ret = 0;
+	struct iterator it;
+	s64 width_cm = 0, height_cm = 0;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		char *key = parse_string(it.handle);
+
+		if (IS_ERR(key))
+			ret = PTR_ERR(key);
+		else if (!strcmp(key, "MaxHorizontalImageSize"))
+			ret = parse_int(it.handle, &width_cm);
+		else if (!strcmp(key, "MaxVerticalImageSize"))
+			ret = parse_int(it.handle, &height_cm);
+		else
+			skip(it.handle);
+
+		if (!IS_ERR_OR_NULL(key))
+			kfree(key);
+
+		if (ret)
+			return ret;
+	}
+
+	/* 1cm = 10mm */
+	*width_mm = 10 * width_cm;
+	*height_mm = 10 * height_cm;
+
+	return 0;
+}
+
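+/*
+ * Parse an EPIC service init announcement. On success *name and *class
+ * point to kmalloc'ed strings owned by the caller.
+ */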
+int parse_epic_service_init(struct dcp_parse_ctx *handle, const char **name,
+			    const char **class, s64 *unit)
+{
+	int ret = 0;
+	struct iterator it;
+	bool parsed_unit = false;
+	bool parsed_name = false;
+	bool parsed_class = false;
+
+	*name = ERR_PTR(-ENOENT);
+	*class = ERR_PTR(-ENOENT);
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		char *key = parse_string(it.handle);
+
+		if (IS_ERR(key)) {
+			ret = PTR_ERR(key);
+			break;
+		}
+
+		if (!strcmp(key, "EPICName")) {
+			*name = parse_string(it.handle);
+			if (IS_ERR(*name))
+				ret = PTR_ERR(*name);
+			else
+				parsed_name = true;
+		} else if (!strcmp(key, "EPICProviderClass")) {
+			*class = parse_string(it.handle);
+			if (IS_ERR(*class))
+				ret = PTR_ERR(*class);
+			else
+				parsed_class = true;
+		} else if (!strcmp(key, "EPICUnit")) {
+			ret = parse_int(it.handle, unit);
+			if (!ret)
+				parsed_unit = true;
+		} else {
+			skip(it.handle);
+		}
+
+		kfree(key);
+		if (ret)
+			break;
+	}
+
+	if (!parsed_unit || !parsed_name || !parsed_class)
+		ret = -ENOENT;
+
+	if (ret) {
+		if (!IS_ERR(*name)) {
+			kfree(*name);
+			*name = ERR_PTR(ret);
+		}
+		if (!IS_ERR(*class)) {
+			kfree(*class);
+			*class = ERR_PTR(ret);
+		}
+	}
+
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_DRM_APPLE_AUDIO)
+static int parse_sample_rate_bit(struct dcp_parse_ctx *handle, unsigned int *ratebit)
+{
+	s64 rate;
+	int ret = parse_int(handle, &rate);
+
+	if (ret)
+		return ret;
+
+	*ratebit = snd_pcm_rate_to_rate_bit(rate);
+	if (*ratebit == SNDRV_PCM_RATE_KNOT) {
+		/*
+		 * The rate wasn't recognized, and unless we supply
+		 * a supplementary constraint, the SNDRV_PCM_RATE_KNOT bit
+		 * will allow any rate. So clear it.
+		 */
+		*ratebit = 0;
+	}
+
+	return 0;
+}
+
+static int parse_sample_fmtbit(struct dcp_parse_ctx *handle, u64 *fmtbit)
+{
+	s64 sample_size;
+	int ret = parse_int(handle, &sample_size);
+
+	if (ret)
+		return ret;
+
+	switch (sample_size) {
+	case 16:
+		*fmtbit = SNDRV_PCM_FMTBIT_S16;
+		break;
+	case 20:
+		*fmtbit = SNDRV_PCM_FMTBIT_S20;
+		break;
+	case 24:
+		*fmtbit = SNDRV_PCM_FMTBIT_S24;
+		break;
+	case 32:
+		*fmtbit = SNDRV_PCM_FMTBIT_S32;
+		break;
+	default:
+		*fmtbit = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static struct {
+	const char *label;
+	u8 type;
+} chan_position_names[] = {
+	{ "Front Left", SNDRV_CHMAP_FL },
+	{ "Front Right", SNDRV_CHMAP_FR },
+	{ "Rear Left", SNDRV_CHMAP_RL },
+	{ "Rear Right", SNDRV_CHMAP_RR },
+	{ "Front Center", SNDRV_CHMAP_FC },
+	{ "Low Frequency Effects", SNDRV_CHMAP_LFE },
+	{ "Rear Center", SNDRV_CHMAP_RC },
+	{ "Front Left Center", SNDRV_CHMAP_FLC },
+	{ "Front Right Center", SNDRV_CHMAP_FRC },
+	{ "Rear Left Center", SNDRV_CHMAP_RLC },
+	{ "Rear Right Center", SNDRV_CHMAP_RRC },
+	{ "Front Left Wide", SNDRV_CHMAP_FLW },
+	{ "Front Right Wide", SNDRV_CHMAP_FRW },
+	{ "Front Left High", SNDRV_CHMAP_FLH },
+	{ "Front Center High", SNDRV_CHMAP_FCH },
+	{ "Front Right High", SNDRV_CHMAP_FRH },
+	{ "Top Center", SNDRV_CHMAP_TC },
+};
+
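+/* Append a channel position to the map, ignoring entries beyond its capacity */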
+static void append_chmap(struct snd_pcm_chmap_elem *chmap, u8 type)
+{
+	if (!chmap || chmap->channels >= ARRAY_SIZE(chmap->map))
+		return;
+
+	chmap->map[chmap->channels] = type;
+	chmap->channels++;
+}
+
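+/* Translate a ChannelLayout array of position names into an ALSA channel map */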
+static int parse_chmap(struct dcp_parse_ctx *handle, struct snd_pcm_chmap_elem *chmap)
+{
+	struct iterator it;
+	int i, ret;
+
+	if (!chmap) {
+		skip(handle);
+		return 0;
+	}
+
+	chmap->channels = 0;
+
+	dcp_parse_foreach_in_array(handle, it) {
+		for (i = 0; i < ARRAY_SIZE(chan_position_names); i++)
+			if (consume_string(it.handle, chan_position_names[i].label))
+				break;
+
+		if (i == ARRAY_SIZE(chan_position_names)) {
+			ret = skip(it.handle);
+			if (ret)
+				return ret;
+
+			append_chmap(chmap, SNDRV_CHMAP_UNKNOWN);
+			continue;
+		}
+
+		append_chmap(chmap, chan_position_names[i].type);
+	}
+
+	return 0;
+}
+
+static int parse_chan_layout_element(struct dcp_parse_ctx *handle,
+				     unsigned int *nchans_out,
+				     struct snd_pcm_chmap_elem *chmap)
+{
+	struct iterator it;
+	int ret;
+	s64 nchans = 0;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		if (consume_string(it.handle, "ActiveChannelCount"))
+			ret = parse_int(it.handle, &nchans);
+		else if (consume_string(it.handle, "ChannelLayout"))
+			ret = parse_chmap(it.handle, chmap);
+		else
+			ret = skip_pair(it.handle);
+
+		if (ret)
+			return ret;
+	}
+
+	if (nchans_out)
+		*nchans_out = nchans;
+
+	return 0;
+}
+
+static int parse_nchans_mask(struct dcp_parse_ctx *handle, unsigned int *mask)
+{
+	struct iterator it;
+	int ret;
+
+	*mask = 0;
+
+	dcp_parse_foreach_in_array(handle, it) {
+		unsigned int nchans;
+
+		ret = parse_chan_layout_element(it.handle, &nchans, NULL);
+		if (ret)
+			return ret;
+		*mask |= 1 << nchans;
+	}
+
+	return 0;
+}
+
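+/*
+ * Parse one audio element and match it against 'sieve'. Returns 1 on a
+ * match (accumulating into 'hits'), 0 on no match, or a negative error.
+ */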
+static int parse_avep_element(struct dcp_parse_ctx *handle,
+			      struct dcp_sound_format_mask *sieve,
+			      struct dcp_sound_format_mask *hits)
+{
+	struct dcp_sound_format_mask mask = {0, 0, 0};
+	struct iterator it;
+	int ret;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		if (consume_string(handle, "StreamSampleRate"))
+			ret = parse_sample_rate_bit(it.handle, &mask.rates);
+		else if (consume_string(handle, "SampleSize"))
+			ret = parse_sample_fmtbit(it.handle, &mask.formats);
+		else if (consume_string(handle, "AudioChannelLayoutElements"))
+			ret = parse_nchans_mask(it.handle, &mask.nchans);
+		else
+			ret = skip_pair(it.handle);
+
+		if (ret)
+			return ret;
+	}
+
+	trace_avep_sound_mode(handle->dcp, mask.rates, mask.formats, mask.nchans);
+
+	if (!(mask.rates & sieve->rates) || !(mask.formats & sieve->formats) ||
+	    !(mask.nchans & sieve->nchans))
+		return 0;
+
+	if (hits) {
+		hits->rates |= mask.rates;
+		hits->formats |= mask.formats;
+		hits->nchans |= mask.nchans;
+	}
+
+	return 1;
+}
+
+static int parse_mode_in_avep_element(struct dcp_parse_ctx *handle,
+				      unsigned int selected_nchans,
+				      struct snd_pcm_chmap_elem *chmap,
+				      struct dcp_sound_cookie *cookie)
+{
+	struct iterator it;
+	struct dcp_parse_ctx save_handle;
+	int ret;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		if (consume_string(it.handle, "AudioChannelLayoutElements")) {
+			struct iterator inner_it;
+			unsigned int nchans;
+
+			dcp_parse_foreach_in_array(it.handle, inner_it) {
+				save_handle = *it.handle;
+				ret = parse_chan_layout_element(inner_it.handle,
+								&nchans, NULL);
+				if (ret)
+					return ret;
+
+				if (nchans != selected_nchans)
+					continue;
+
+				/*
+				 * Now that we know this layout matches the
+				 * selected channel number, reread the element
+				 * and fill in the channel map.
+				 */
+				*inner_it.handle = save_handle;
+				ret = parse_chan_layout_element(inner_it.handle,
+								NULL, chmap);
+				if (ret)
+					return ret;
+			}
+		} else if (consume_string(it.handle, "ElementData")) {
+			const u8 *blob;
+
+			ret = parse_blob(it.handle, sizeof(*cookie), &blob);
+			if (ret)
+				return ret;
+
+			if (cookie)
+				memcpy(cookie, blob, sizeof(*cookie));
+		} else {
+			ret = skip_pair(it.handle);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int parse_sound_constraints(struct dcp_parse_ctx *handle,
+			    struct dcp_sound_format_mask *sieve,
+			    struct dcp_sound_format_mask *hits)
+{
+	int ret;
+	struct iterator it;
+
+	if (hits) {
+		hits->rates = 0;
+		hits->formats = 0;
+		hits->nchans = 0;
+	}
+
+	dcp_parse_foreach_in_array(handle, it) {
+		ret = parse_avep_element(it.handle, sieve, hits);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(parse_sound_constraints);
+
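+/*
+ * Select the first audio mode matching 'sieve' and extract its channel map
+ * and cookie. Returns 1 if a mode was selected, 0 if none matched.
+ */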
+int parse_sound_mode(struct dcp_parse_ctx *handle,
+		     struct dcp_sound_format_mask *sieve,
+		     struct snd_pcm_chmap_elem *chmap,
+		     struct dcp_sound_cookie *cookie)
+{
+	struct dcp_parse_ctx save_handle;
+	struct iterator it;
+	int ret;
+
+	dcp_parse_foreach_in_array(handle, it) {
+		save_handle = *it.handle;
+		ret = parse_avep_element(it.handle, sieve, NULL);
+
+		if (!ret)
+			continue;
+
+		if (ret < 0)
+			return ret;
+
+		ret = parse_mode_in_avep_element(&save_handle, __ffs(sieve->nchans),
+						 chmap, cookie);
+		if (ret < 0)
+			return ret;
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(parse_sound_mode);
+#endif
+
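+/*
+ * Parse a "Display (Event Forward)" system log event carrying the current
+ * brightness in millinits together with the DAC value and a timestamp.
+ */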
+int parse_system_log_mnits(struct dcp_parse_ctx *handle, struct dcp_system_ev_mnits *entry)
+{
+	struct iterator it;
+	int ret = 0;
+	s64 mnits = -1;
+	s64 idac = -1;
+	s64 timestamp = -1;
+	bool type_match = false;
+
+	dcp_parse_foreach_in_dict(handle, it) {
+		char *key = parse_string(it.handle);
+		if (IS_ERR(key)) {
+			ret = PTR_ERR(key);
+		} else if (!strcmp(key, "mNits")) {
+			ret = parse_int(it.handle, &mnits);
+		} else if (!strcmp(key, "iDAC")) {
+			ret = parse_int(it.handle, &idac);
+		} else if (!strcmp(key, "logEvent")) {
+			const char *value = parse_string(it.handle);
+			if (!IS_ERR_OR_NULL(value)) {
+				type_match = strcmp(value, "Display (Event Forward)") == 0;
+				kfree(value);
+			}
+		} else if (!strcmp(key, "timestamp")) {
+			ret = parse_int(it.handle, &timestamp);
+		} else {
+			skip(it.handle);
+		}
+
+		if (!IS_ERR_OR_NULL(key))
+			kfree(key);
+
+		if (ret) {
+			pr_err("dcp parser: failed to parse mNits sys event\n");
+			return ret;
+		}
+	}
+
+	if (!type_match || mnits < 0 || idac < 0 || timestamp < 0)
+		return -EINVAL;
+
+	entry->millinits = mnits;
+	entry->idac = idac;
+	entry->timestamp = timestamp;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/apple/parser.h b/drivers/gpu/drm/apple/parser.h
new file mode 100644
index 00000000000000..2f52e063bbd426
--- /dev/null
+++ b/drivers/gpu/drm/apple/parser.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_PARSER_H__
+#define __APPLE_DCP_PARSER_H__
+
+/* For mode parsing */
+#include <drm/drm_modes.h>
+
+struct apple_dcp;
+
+struct dcp_parse_ctx {
+	struct apple_dcp *dcp;
+	const void *blob;
+	u32 pos, len;
+};
+
+enum dcp_color_eotf {
+	DCP_EOTF_SDR_GAMMA = 0, // "SDR gamma"
+	DCP_EOTF_HDR_GAMMA = 1, // "HDR gamma"
+	DCP_EOTF_ST_2084   = 2, // "ST 2084 (PQ)"
+	DCP_EOTF_BT_2100   = 3, // "BT.2100 (HLG)"
+	DCP_EOTF_COUNT
+};
+
+enum dcp_color_format {
+	DCP_COLOR_FORMAT_RGB                 =  0, // "RGB"
+	DCP_COLOR_FORMAT_YCBCR420            =  1, // "YUV 4:2:0"
+	DCP_COLOR_FORMAT_YCBCR422            =  3, // "YUV 4:2:2"
+	DCP_COLOR_FORMAT_YCBCR444            =  2, // "YUV 4:4:4"
+	DCP_COLOR_FORMAT_DV_NATIVE           =  4, // "DolbyVision (native)"
+	DCP_COLOR_FORMAT_DV_HDMI             =  5, // "DolbyVision (HDMI)"
+	DCP_COLOR_FORMAT_YCBCR422_DP         =  6, // "YCbCr 4:2:2 (DP tunnel)"
+	DCP_COLOR_FORMAT_YCBCR422_HDMI       =  7, // "YCbCr 4:2:2 (HDMI tunnel)"
+	DCP_COLOR_FORMAT_DV_LL_YCBCR422      =  8, // "DolbyVision LL YCbCr 4:2:2"
+	DCP_COLOR_FORMAT_DV_LL_YCBCR422_DP   =  9, // "DolbyVision LL YCbCr 4:2:2 (DP)"
+	DCP_COLOR_FORMAT_DV_LL_YCBCR422_HDMI = 10, // "DolbyVision LL YCbCr 4:2:2 (HDMI)"
+	DCP_COLOR_FORMAT_DV_LL_YCBCR444      = 11, // "DolbyVision LL YCbCr 4:4:4"
+	DCP_COLOR_FORMAT_DV_LL_RGB422        = 12, // "DolbyVision LL RGB 4:2:2"
+	DCP_COLOR_FORMAT_GRGB_BLUE_422       = 13, // "GRGB as YCbCr422 (Even line blue)"
+	DCP_COLOR_FORMAT_GRGB_RED_422        = 14, // "GRGB as YCbCr422 (Even line red)"
+	DCP_COLOR_FORMAT_COUNT
+};
+
+enum dcp_colorimetry {
+	DCP_COLORIMETRY_BT601              =  0, // "SMPTE 170M/BT.601"
+	DCP_COLORIMETRY_BT709              =  1, // "BT.701"
+	DCP_COLORIMETRY_XVYCC_601          =  2, // "xvYCC601"
+	DCP_COLORIMETRY_XVYCC_709          =  3, // "xvYCC709"
+	DCP_COLORIMETRY_SYCC_601           =  4, // "sYCC601"
+	DCP_COLORIMETRY_ADOBE_YCC_601      =  5, // "AdobeYCC601"
+	DCP_COLORIMETRY_BT2020_CYCC        =  6, // "BT.2020 (c)"
+	DCP_COLORIMETRY_BT2020_YCC         =  7, // "BT.2020 (nc)"
+	DCP_COLORIMETRY_VSVDB              =  8, // "DolbyVision VSVDB"
+	DCP_COLORIMETRY_BT2020_RGB         =  9, // "BT.2020 (RGB)"
+	DCP_COLORIMETRY_SRGB               = 10, // "sRGB"
+	DCP_COLORIMETRY_SCRGB              = 11, // "scRGB"
+	DCP_COLORIMETRY_SCRGB_FIXED        = 12, // "scRGBfixed"
+	DCP_COLORIMETRY_ADOBE_RGB          = 13, // "AdobeRGB"
+	DCP_COLORIMETRY_DCI_P3_RGB_D65     = 14, // "DCI-P3 (D65)"
+	DCP_COLORIMETRY_DCI_P3_RGB_THEATER = 15, // "DCI-P3 (Theater)"
+	DCP_COLORIMETRY_RGB                = 16, // "Default RGB"
+	DCP_COLORIMETRY_COUNT
+};
+
+enum dcp_color_range {
+	DCP_COLOR_YCBCR_RANGE_FULL    = 0,
+	DCP_COLOR_YCBCR_RANGE_LIMITED = 1,
+	DCP_COLOR_YCBCR_RANGE_COUNT
+};
+
+struct dcp_color_mode {
+	s64 score;
+	u32 id;
+	enum dcp_color_eotf eotf;
+	enum dcp_color_format format;
+	enum dcp_colorimetry colorimetry;
+	enum dcp_color_range range;
+	u8 depth;
+};
+
+/*
+ * Represents a single display mode. These mode objects are populated at
+ * runtime based on the TimingElements dictionary sent by the DCP.
+ */
+struct dcp_display_mode {
+	struct drm_display_mode mode;
+	u32 color_mode_id;
+	u32 timing_mode_id;
+	struct dcp_color_mode sdr_rgb;
+	struct dcp_color_mode sdr_444;
+	struct dcp_color_mode sdr;
+	struct dcp_color_mode best;
+};
+
+struct dimension {
+	s64 total, front_porch, sync_width, active;
+	s64 precise_sync_rate;
+};
+
+int parse(const void *blob, size_t size, struct dcp_parse_ctx *ctx);
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+					 unsigned int *count, int width_mm,
+					 int height_mm, unsigned notch_height);
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+			     int *height_mm);
+int parse_epic_service_init(struct dcp_parse_ctx *handle, const char **name,
+			    const char **class, s64 *unit);
+
+struct dcp_sound_format_mask {
+	u64 formats;			/* SNDRV_PCM_FMTBIT_* */
+	unsigned int rates;		/* SNDRV_PCM_RATE_* */
+	unsigned int nchans;
+};
+
+struct dcp_sound_cookie {
+	u8 data[24];
+};
+
+struct snd_pcm_chmap_elem;
+int parse_sound_constraints(struct dcp_parse_ctx *handle,
+			    struct dcp_sound_format_mask *sieve,
+			    struct dcp_sound_format_mask *hits);
+int parse_sound_mode(struct dcp_parse_ctx *handle,
+		     struct dcp_sound_format_mask *sieve,
+		     struct snd_pcm_chmap_elem *chmap,
+		     struct dcp_sound_cookie *cookie);
+
+struct dcp_system_ev_mnits {
+	u32 timestamp;
+	u32 millinits;
+	u32 idac;
+};
+
+int parse_system_log_mnits(struct dcp_parse_ctx *handle,
+			   struct dcp_system_ev_mnits *entry);
+
+#endif
diff --git a/drivers/gpu/drm/apple/systemep.c b/drivers/gpu/drm/apple/systemep.c
new file mode 100644
index 00000000000000..9fe7a0ce495aab
--- /dev/null
+++ b/drivers/gpu/drm/apple/systemep.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2022 Sven Peter <sven@svenpeter.dev> */
+
+#include <linux/completion.h>
+
+#include "afk.h"
+#include "dcp.h"
+#include "parser.h"
+
+static bool enable_verbose_logging;
+module_param(enable_verbose_logging, bool, 0644);
+MODULE_PARM_DESC(enable_verbose_logging, "Enable DCP firmware verbose logging");
+
+/*
+ * Serialized setProperty("gAFKConfigLogMask", 0xffff) IPC call which
+ * will set the DCP firmware log level to the most verbose setting
+ */
+#define SYSTEM_SET_PROPERTY 0x43
+static const u8 setprop_gAFKConfigLogMask_ffff[] = {
+	0x14, 0x00, 0x00, 0x00, 0x67, 0x41, 0x46, 0x4b, 0x43, 0x6f,
+	0x6e, 0x66, 0x69, 0x67, 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x73,
+	0x6b, 0x00, 0x00, 0x00, 0xd3, 0x00, 0x00, 0x00, 0x40, 0x00,
+	0x00, 0x84, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
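+/* Context for deferring the verbose-logging command out of the RX handler */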
+struct systemep_work {
+	struct apple_epic_service *service;
+	struct work_struct work;
+};
+
+static void system_log_work(struct work_struct *work_)
+{
+	struct systemep_work *work =
+		container_of(work_, struct systemep_work, work);
+
+	afk_send_command(work->service, SYSTEM_SET_PROPERTY,
+			 setprop_gAFKConfigLogMask_ffff,
+			 sizeof(setprop_gAFKConfigLogMask_ffff), NULL,
+			 sizeof(setprop_gAFKConfigLogMask_ffff), NULL);
+	complete(&work->service->ep->dcp->systemep_done);
+	kfree(work);
+}
+
+static void system_init(struct apple_epic_service *service, const char *name,
+			const char *class, s64 unit)
+{
+	struct systemep_work *work;
+
+	if (!enable_verbose_logging)
+		return;
+
+	/*
+	 * We're called from the service message handler thread and can't
+	 * dispatch a blocking message from there.
+	 */
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return;
+
+	work->service = service;
+	INIT_WORK(&work->work, system_log_work);
+	schedule_work(&work->work);
+}
+
+static void powerlog_init(struct apple_epic_service *service, const char *name,
+			  const char *class, s64 unit)
+{
+}
+
+static int powerlog_report(struct apple_epic_service *service, enum epic_subtype type,
+			 const void *data, size_t data_size)
+{
+	struct dcp_system_ev_mnits mnits;
+	struct dcp_parse_ctx parse_ctx;
+	struct apple_dcp *dcp = service->ep->dcp;
+	int ret;
+
+	dev_dbg(dcp->dev, "systemep[ch:%u]: report type:%02x len:%zu\n",
+		service->channel, type, data_size);
+
+	if (type != EPIC_SUBTYPE_STD_SERVICE)
+		return 0;
+
+	ret = parse(data, data_size, &parse_ctx);
+	if (ret) {
+		dev_warn(service->ep->dcp->dev, "systemep: failed to parse report: %d\n", ret);
+		return ret;
+	}
+
+	ret = parse_system_log_mnits(&parse_ctx, &mnits);
+	if (ret) {
+		/* ignore parse errors in case the DCP sends unknown log events */
+		dev_dbg(dcp->dev, "systemep: failed to parse mNits event: %d\n", ret);
+		return 0;
+	}
+
+	dev_dbg(dcp->dev, "systemep: mNits event: Nits: %u.%03u, iDAC: %u\n",
+		mnits.millinits / 1000, mnits.millinits % 1000, mnits.idac);
+
+	dcp->brightness.nits = mnits.millinits / 1000;
+
+	return 0;
+}
+
+static const struct apple_epic_service_ops systemep_ops[] = {
+	{
+		.name = "system",
+		.init = system_init,
+	},
+	{
+		.name = "powerlog-service",
+		.init = powerlog_init,
+		.report = powerlog_report,
+	},
+	{}
+};
+
+int systemep_init(struct apple_dcp *dcp)
+{
+	init_completion(&dcp->systemep_done);
+
+	dcp->systemep = afk_init(dcp, SYSTEM_ENDPOINT, systemep_ops);
+	afk_start(dcp->systemep);
+
+	if (!enable_verbose_logging)
+		return 0;
+
+	/*
+	 * Timeouts aren't really fatal here: in the worst case we just weren't
+	 * able to enable additional debug prints inside DCP
+	 */
+	if (!wait_for_completion_timeout(&dcp->systemep_done,
+					 msecs_to_jiffies(MSEC_PER_SEC)))
+		dev_err(dcp->dev, "systemep: couldn't enable verbose logs\n");
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/apple/trace.c b/drivers/gpu/drm/apple/trace.c
new file mode 100644
index 00000000000000..6f40d5a583df01
--- /dev/null
+++ b/drivers/gpu/drm/apple/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Tracepoints for Apple DCP driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/gpu/drm/apple/trace.h b/drivers/gpu/drm/apple/trace.h
new file mode 100644
index 00000000000000..a13dd34fb7aab1
--- /dev/null
+++ b/drivers/gpu/drm/apple/trace.h
@@ -0,0 +1,608 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (C) The Asahi Linux Contributors */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dcp
+
+#if !defined(_TRACE_DCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCP_H
+
+#include "afk.h"
+#include "dptxep.h"
+#include "dcp-internal.h"
+#include "parser.h"
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#define show_dcp_endpoint(ep)                                      \
+	__print_symbolic(ep, { SYSTEM_ENDPOINT, "system" },        \
+			 { TEST_ENDPOINT, "test" },                \
+			 { DCP_EXPERT_ENDPOINT, "dcpexpert" },     \
+			 { DISP0_ENDPOINT, "disp0" },              \
+			 { DPTX_ENDPOINT, "dptxport" },            \
+			 { HDCP_ENDPOINT, "hdcp" },                \
+			 { REMOTE_ALLOC_ENDPOINT, "remotealloc" }, \
+			 { IOMFB_ENDPOINT, "iomfb" })
+#define print_epic_type(etype)                                  \
+	__print_symbolic(etype, { EPIC_TYPE_NOTIFY, "notify" }, \
+			 { EPIC_TYPE_COMMAND, "command" },      \
+			 { EPIC_TYPE_REPLY, "reply" },          \
+			 { EPIC_TYPE_NOTIFY_ACK, "notify-ack" })
+
+#define print_epic_category(ecat)                             \
+	__print_symbolic(ecat, { EPIC_CAT_REPORT, "report" }, \
+			 { EPIC_CAT_NOTIFY, "notify" },       \
+			 { EPIC_CAT_REPLY, "reply" },         \
+			 { EPIC_CAT_COMMAND, "command" })
+
+#define show_dptxport_apcall(idx)                                              \
+	__print_symbolic(                                                     \
+		idx, { DPTX_APCALL_ACTIVATE, "activate" },                    \
+		{ DPTX_APCALL_DEACTIVATE, "deactivate" },                     \
+		{ DPTX_APCALL_GET_MAX_DRIVE_SETTINGS,                         \
+		  "get_max_drive_settings" },                                 \
+		{ DPTX_APCALL_SET_DRIVE_SETTINGS, "set_drive_settings" },     \
+		{ DPTX_APCALL_GET_DRIVE_SETTINGS, "get_drive_settings" },     \
+		{ DPTX_APCALL_WILL_CHANGE_LINKG_CONFIG,                       \
+		  "will_change_link_config" },                                \
+		{ DPTX_APCALL_DID_CHANGE_LINK_CONFIG,                         \
+		  "did_change_link_config" },                                 \
+		{ DPTX_APCALL_GET_MAX_LINK_RATE, "get_max_link_rate" },       \
+		{ DPTX_APCALL_GET_LINK_RATE, "get_link_rate" },               \
+		{ DPTX_APCALL_SET_LINK_RATE, "set_link_rate" },               \
+		{ DPTX_APCALL_GET_MAX_LANE_COUNT,                             \
+		  "get_max_lane_count" },                                     \
+		{ DPTX_APCALL_GET_ACTIVE_LANE_COUNT,                          \
+		  "get_active_lane_count" },                                  \
+		{ DPTX_APCALL_SET_ACTIVE_LANE_COUNT,                          \
+		  "set_active_lane_count" },                                  \
+		{ DPTX_APCALL_GET_SUPPORTS_DOWN_SPREAD,                       \
+		  "get_supports_downspread" },                                \
+		{ DPTX_APCALL_GET_DOWN_SPREAD, "get_downspread" },            \
+		{ DPTX_APCALL_SET_DOWN_SPREAD, "set_downspread" },            \
+		{ DPTX_APCALL_GET_SUPPORTS_LANE_MAPPING,                      \
+		  "get_supports_lane_mapping" },                              \
+		{ DPTX_APCALL_SET_LANE_MAP, "set_lane_map" },                 \
+		{ DPTX_APCALL_GET_SUPPORTS_HPD, "get_supports_hpd" },         \
+		{ DPTX_APCALL_FORCE_HOTPLUG_DETECT, "force_hotplug_detect" }, \
+		{ DPTX_APCALL_INACTIVE_SINK_DETECTED,                         \
+		  "inactive_sink_detected" },                                 \
+		{ DPTX_APCALL_SET_TILED_DISPLAY_HINTS,                        \
+		  "set_tiled_display_hints" },                                \
+		{ DPTX_APCALL_DEVICE_NOT_RESPONDING,                          \
+		  "device_not_responding" },                                  \
+		{ DPTX_APCALL_DEVICE_BUSY_TIMEOUT, "device_busy_timeout" },   \
+		{ DPTX_APCALL_DEVICE_NOT_STARTED, "device_not_started" })
+
+TRACE_EVENT(dcp_recv_msg,
+	    TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+	    TP_ARGS(dcp, endpoint, message),
+
+	    TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+			     __field(u8, endpoint)
+			     __field(u64, message)),
+
+	    TP_fast_assign(__assign_str(devname);
+			   __entry->endpoint = endpoint;
+			   __entry->message = message;),
+
+	    TP_printk("%s: endpoint 0x%x (%s): received message 0x%016llx",
+		      __get_str(devname), __entry->endpoint,
+		      show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(dcp_send_msg,
+	    TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+	    TP_ARGS(dcp, endpoint, message),
+
+	    TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+			     __field(u8, endpoint)
+			     __field(u64, message)),
+
+	    TP_fast_assign(__assign_str(devname);
+			   __entry->endpoint = endpoint;
+			   __entry->message = message;),
+
+	    TP_printk("%s: endpoint 0x%x (%s): will send message 0x%016llx",
+		      __get_str(devname), __entry->endpoint,
+		      show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(
+	afk_getbuf, TP_PROTO(struct apple_dcp_afkep *ep, u16 size, u16 tag),
+	TP_ARGS(ep, size, tag),
+
+	TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev))
+				 __field(u8, endpoint) __field(u16, size)
+					 __field(u16, tag)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->endpoint = ep->endpoint; __entry->size = size;
+		       __entry->tag = tag;),
+
+	TP_printk(
+		"%s: endpoint 0x%x (%s): get buffer with size 0x%x and tag 0x%x",
+		__get_str(devname), __entry->endpoint,
+		show_dcp_endpoint(__entry->endpoint), __entry->size,
+		__entry->tag));
+
+DECLARE_EVENT_CLASS(afk_rwptr_template,
+	    TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+	    TP_ARGS(ep, rptr, wptr),
+
+	    TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev))
+				     __field(u8, endpoint) __field(u32, rptr)
+					     __field(u32, wptr)),
+
+	    TP_fast_assign(__assign_str(devname);
+			   __entry->endpoint = ep->endpoint;
+			   __entry->rptr = rptr; __entry->wptr = wptr;),
+
+	    TP_printk("%s: endpoint 0x%x (%s): rptr 0x%x, wptr 0x%x",
+		      __get_str(devname), __entry->endpoint,
+		      show_dcp_endpoint(__entry->endpoint), __entry->rptr,
+		      __entry->wptr));
+
+DEFINE_EVENT(afk_rwptr_template, afk_recv_rwptr_pre,
+	TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+	TP_ARGS(ep, rptr, wptr));
+DEFINE_EVENT(afk_rwptr_template, afk_recv_rwptr_post,
+	TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+	TP_ARGS(ep, rptr, wptr));
+DEFINE_EVENT(afk_rwptr_template, afk_send_rwptr_pre,
+	TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+	TP_ARGS(ep, rptr, wptr));
+DEFINE_EVENT(afk_rwptr_template, afk_send_rwptr_post,
+	TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 wptr),
+	TP_ARGS(ep, rptr, wptr));
+
+TRACE_EVENT(
+	afk_recv_qe,
+	TP_PROTO(struct apple_dcp_afkep *ep, u32 rptr, u32 magic, u32 size),
+	TP_ARGS(ep, rptr, magic, size),
+
+	TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev))
+				 __field(u8, endpoint) __field(u32, rptr)
+					 __field(u32, magic)
+						 __field(u32, size)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->endpoint = ep->endpoint; __entry->rptr = rptr;
+		       __entry->magic = magic; __entry->size = size;),
+
+	TP_printk("%s: endpoint 0x%x (%s): QE rptr 0x%x, magic 0x%x, size 0x%x",
+		  __get_str(devname), __entry->endpoint,
+		  show_dcp_endpoint(__entry->endpoint), __entry->rptr,
+		  __entry->magic, __entry->size));
+
+TRACE_EVENT(
+	afk_recv_handle,
+	TP_PROTO(struct apple_dcp_afkep *ep, u32 channel, u32 type,
+		 u32 data_size, struct epic_hdr *ehdr,
+		 struct epic_sub_hdr *eshdr),
+	TP_ARGS(ep, channel, type, data_size, ehdr, eshdr),
+
+	TP_STRUCT__entry(__string(devname, dev_name(ep->dcp->dev)) __field(
+		u8, endpoint) __field(u32, channel) __field(u32, type)
+				 __field(u32, data_size) __field(u8, category)
+					 __field(u16, subtype)
+						 __field(u16, tag)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->endpoint = ep->endpoint;
+		       __entry->channel = channel; __entry->type = type;
+		       __entry->data_size = data_size;
+		       __entry->category = eshdr->category;
+		       __entry->subtype = le16_to_cpu(eshdr->type);
+		       __entry->tag = le16_to_cpu(eshdr->tag);),
+
+	TP_printk(
+		"%s: endpoint 0x%x (%s): channel 0x%x, type 0x%x (%s), data_size 0x%x, category: 0x%x (%s), subtype: 0x%x, seq: 0x%x",
+		__get_str(devname), __entry->endpoint,
+		show_dcp_endpoint(__entry->endpoint), __entry->channel,
+		__entry->type, print_epic_type(__entry->type),
+		__entry->data_size, __entry->category,
+		print_epic_category(__entry->category), __entry->subtype,
+		__entry->tag));
+
+TRACE_EVENT(iomfb_callback,
+	    TP_PROTO(struct apple_dcp *dcp, int tag, const char *name),
+	    TP_ARGS(dcp, tag, name),
+
+	    TP_STRUCT__entry(
+				__string(devname, dev_name(dcp->dev))
+				__field(int, tag)
+				__field(const char *, name)
+			),
+
+	    TP_fast_assign(
+				__assign_str(devname);
+				__entry->tag = tag; __entry->name = name;
+			),
+
+	    TP_printk("%s: Callback D%03d %s", __get_str(devname), __entry->tag,
+		      __entry->name));
+
+TRACE_EVENT(iomfb_push,
+	    TP_PROTO(struct apple_dcp *dcp,
+		     const struct dcp_method_entry *method, int context,
+		     int offset, int depth),
+	    TP_ARGS(dcp, method, context, offset, depth),
+
+	    TP_STRUCT__entry(
+				__string(devname, dev_name(dcp->dev))
+				__string(name, method->name)
+				__field(int, context)
+				__field(int, offset)
+				__field(int, depth)),
+
+	    TP_fast_assign(
+				__assign_str(devname);
+				__assign_str(name);
+				__entry->context = context; __entry->offset = offset;
+				__entry->depth = depth;
+			),
+
+	    TP_printk("%s: Method %s: context %u, offset %u, depth %u",
+		      __get_str(devname), __get_str(name), __entry->context,
+		      __entry->offset, __entry->depth));
+
+TRACE_EVENT(iomfb_swap_submit,
+	    TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+	    TP_ARGS(dcp, swap_id),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, swap_id)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->swap_id = swap_id;
+	    ),
+	    TP_printk("dcp=%llx, swap_id=%d",
+		      __entry->dcp,
+		      __entry->swap_id)
+);
+
+TRACE_EVENT(iomfb_swap_complete,
+	    TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+	    TP_ARGS(dcp, swap_id),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, swap_id)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->swap_id = swap_id;
+	    ),
+	    TP_printk("dcp=%llx, swap_id=%d",
+		      __entry->dcp,
+		      __entry->swap_id
+	    )
+);
+
+TRACE_EVENT(iomfb_swap_complete_intent_gated,
+	    TP_PROTO(struct apple_dcp *dcp, u32 swap_id, u32 width, u32 height),
+	    TP_ARGS(dcp, swap_id, width, height),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, swap_id)
+			     __field(u32, width)
+			     __field(u32, height)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->swap_id = swap_id;
+			   __entry->height = height;
+			   __entry->width = width;
+	    ),
+	    TP_printk("dcp=%llx, swap_id=%u %ux%u",
+		      __entry->dcp,
+		      __entry->swap_id,
+		      __entry->width,
+		      __entry->height
+	    )
+);
+
+TRACE_EVENT(iomfb_abort_swap_ap_gated,
+	    TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+	    TP_ARGS(dcp, swap_id),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, swap_id)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->swap_id = swap_id;
+	    ),
+	    TP_printk("dcp=%llx, swap_id=%u",
+		      __entry->dcp,
+		      __entry->swap_id
+	    )
+);
+
+DECLARE_EVENT_CLASS(iomfb_parse_mode_template,
+	    TP_PROTO(s64 id, struct dimension *horiz, struct dimension *vert, s64 best_color_mode, bool is_virtual, s64 score),
+	    TP_ARGS(id, horiz, vert, best_color_mode, is_virtual, score),
+
+	    TP_STRUCT__entry(__field(s64, id)
+			     __field_struct(struct dimension, horiz)
+			     __field_struct(struct dimension, vert)
+			     __field(s64, best_color_mode)
+			     __field(bool, is_virtual)
+			     __field(s64, score)),
+
+	    TP_fast_assign(__entry->id = id;
+			   __entry->horiz = *horiz;
+			   __entry->vert = *vert;
+			   __entry->best_color_mode = best_color_mode;
+			   __entry->is_virtual = is_virtual;
+			   __entry->score = score;),
+
+	    TP_printk("id: %lld, best_color_mode: %lld, resolution:%lldx%lld virtual: %d, score: %lld",
+		      __entry->id, __entry->best_color_mode,
+		      __entry->horiz.active, __entry->vert.active,
+		      __entry->is_virtual, __entry->score));
+
+DEFINE_EVENT(iomfb_parse_mode_template, iomfb_parse_mode_success,
+	    TP_PROTO(s64 id, struct dimension *horiz, struct dimension *vert, s64 best_color_mode, bool is_virtual, s64 score),
+	    TP_ARGS(id, horiz, vert, best_color_mode, is_virtual, score));
+
+DEFINE_EVENT(iomfb_parse_mode_template, iomfb_parse_mode_fail,
+	    TP_PROTO(s64 id, struct dimension *horiz, struct dimension *vert, s64 best_color_mode, bool is_virtual, s64 score),
+	    TP_ARGS(id, horiz, vert, best_color_mode, is_virtual, score));
+
+TRACE_EVENT(dcpavserv_init, TP_PROTO(struct apple_dcp *dcp, u64 unit),
+	    TP_ARGS(dcp, unit),
+
+	    TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+				     __field(u64, unit)),
+
+	    TP_fast_assign(__assign_str(devname);
+			   __entry->unit = unit;),
+
+	    TP_printk("%s: dcpav-service unit %lld initialized", __get_str(devname),
+		      __entry->unit));
+
+TRACE_EVENT(dptxport_init, TP_PROTO(struct apple_dcp *dcp, u64 unit),
+	    TP_ARGS(dcp, unit),
+
+	    TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+				     __field(u64, unit)),
+
+	    TP_fast_assign(__assign_str(devname);
+			   __entry->unit = unit;),
+
+	    TP_printk("%s: dptxport unit %lld initialized", __get_str(devname),
+		      __entry->unit));
+
+TRACE_EVENT(
+	dptxport_apcall,
+	TP_PROTO(struct dptx_port *dptx, int idx, size_t len),
+	TP_ARGS(dptx, idx, len),
+
+	TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+			__field(u32, unit) __field(int, idx) __field(size_t, len)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->unit = dptx->unit; __entry->idx = idx; __entry->len = len;),
+
+	TP_printk("%s: dptx%d: AP Call %d (%s) with len %lu", __get_str(devname),
+		  __entry->unit,
+		  __entry->idx, show_dptxport_apcall(__entry->idx), __entry->len));
+
+TRACE_EVENT(
+	dptxport_validate_connection,
+	TP_PROTO(struct dptx_port *dptx, u8 core, u8 atc, u8 die),
+	TP_ARGS(dptx, core, atc, die),
+
+	TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+			 __field(u32, unit) __field(u8, core) __field(u8, atc) __field(u8, die)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->unit = dptx->unit; __entry->core = core; __entry->atc = atc; __entry->die = die;),
+
+	TP_printk("%s: dptx%d: core %d, atc %d, die %d", __get_str(devname),
+		  __entry->unit, __entry->core, __entry->atc, __entry->die));
+
+TRACE_EVENT(
+	dptxport_connect,
+	TP_PROTO(struct dptx_port *dptx, u8 core, u8 atc, u8 die),
+	TP_ARGS(dptx, core, atc, die),
+
+	TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+			 __field(u32, unit) __field(u8, core) __field(u8, atc) __field(u8, die)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->unit = dptx->unit; __entry->core = core; __entry->atc = atc; __entry->die = die;),
+
+	TP_printk("%s: dptx%d: core %d, atc %d, die %d", __get_str(devname),
+		  __entry->unit, __entry->core, __entry->atc, __entry->die));
+
+TRACE_EVENT(
+	dptxport_call_set_link_rate,
+	TP_PROTO(struct dptx_port *dptx, u32 link_rate),
+	TP_ARGS(dptx, link_rate),
+
+	TP_STRUCT__entry(__string(devname, dev_name(dptx->service->ep->dcp->dev))
+			 __field(u32, unit)
+			 __field(u32, link_rate)),
+
+	TP_fast_assign(__assign_str(devname);
+		       __entry->unit = dptx->unit;
+		       __entry->link_rate = link_rate;),
+
+	TP_printk("%s: dptx%d: link rate 0x%x", __get_str(devname), __entry->unit,
+		  __entry->link_rate));
+
+TRACE_EVENT(iomfb_brightness,
+	    TP_PROTO(struct apple_dcp *dcp, u32 nits),
+	    TP_ARGS(dcp, nits),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, nits)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->nits = nits;
+	    ),
+	    TP_printk("dcp=%llx, nits=%u (raw=0x%05x)",
+		      __entry->dcp,
+		      __entry->nits >> 16,
+		      __entry->nits
+	    )
+);
+
+#define show_eotf(eotf)					\
+	__print_symbolic(eotf, { 0, "SDR gamma"},	\
+			       { 1, "HDR gamma"},	\
+			       { 2, "ST 2084 (PQ)"},	\
+			       { 3, "BT.2100 (HLG)"},	\
+			       { 4, "unexpected"})
+
+#define show_encoding(enc)							\
+	__print_symbolic(enc, { 0, "RGB"},					\
+			      { 1, "YUV 4:2:0"},				\
+			      { 3, "YUV 4:2:2"},				\
+			      { 2, "YUV 4:4:4"},				\
+			      { 4, "DolbyVision (native)"},			\
+			      { 5, "DolbyVision (HDMI)"},			\
+			      { 6, "YCbCr 4:2:2 (DP tunnel)"},			\
+			      { 7, "YCbCr 4:2:2 (HDMI tunnel)"},		\
+			      { 8, "DolbyVision LL YCbCr 4:2:2"},		\
+			      { 9, "DolbyVision LL YCbCr 4:2:2 (DP)"},		\
+			      {10, "DolbyVision LL YCbCr 4:2:2 (HDMI)"},	\
+			      {11, "DolbyVision LL YCbCr 4:4:4"},		\
+			      {12, "DolbyVision LL RGB 4:2:2"},			\
+			      {13, "GRGB as YCbCr422 (Even line blue)"},	\
+			      {14, "GRGB as YCbCr422 (Even line red)"},		\
+			      {15, "unexpected"})
+
+#define show_colorimetry(col)					\
+	__print_symbolic(col, { 0, "SMPTE 170M/BT.601"},	\
+			      { 1, "BT.701"},			\
+			      { 2, "xvYCC601"},			\
+			      { 3, "xvYCC709"},			\
+			      { 4, "sYCC601"},			\
+			      { 5, "AdobeYCC601"},		\
+			      { 6, "BT.2020 (c)"},		\
+			      { 7, "BT.2020 (nc)"},		\
+			      { 8, "DolbyVision VSVDB"},	\
+			      { 9, "BT.2020 (RGB)"},		\
+			      {10, "sRGB"},			\
+			      {11, "scRGB"},			\
+			      {12, "scRGBfixed"},		\
+			      {13, "AdobeRGB"},			\
+			      {14, "DCI-P3 (D65)"},		\
+			      {15, "DCI-P3 (Theater)"},		\
+			      {16, "Default RGB"},		\
+			      {17, "unexpected"})
+
+#define show_range(range)				\
+	__print_symbolic(range, { 0, "Full"},		\
+				{ 1, "Limited"},	\
+				{ 2, "unexpected"})
+
+TRACE_EVENT(iomfb_color_mode,
+	    TP_PROTO(struct apple_dcp *dcp, u32 id, u32 score, u32 depth,
+		     u32 colorimetry, u32 eotf, u32 range, u32 pixel_enc),
+	    TP_ARGS(dcp, id, score, depth, colorimetry, eotf, range, pixel_enc),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, id)
+			     __field(u32, score)
+			     __field(u32, depth)
+			     __field(u32, colorimetry)
+			     __field(u32, eotf)
+			     __field(u32, range)
+			     __field(u32, pixel_enc)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->id = id;
+			   __entry->score = score;
+			   __entry->depth = depth;
+			   __entry->colorimetry = min_t(u32, colorimetry, 17U);
+			   __entry->eotf = min_t(u32, eotf, 4U);
+			   __entry->range = min_t(u32, range, 2U);
+			   __entry->pixel_enc = min_t(u32, pixel_enc, 15U);
+	    ),
+	    TP_printk("dcp=%llx, id=%u, score=%u,  depth=%u, colorimetry=%s, eotf=%s, range=%s, pixel_enc=%s",
+		      __entry->dcp,
+		      __entry->id,
+		      __entry->score,
+		      __entry->depth,
+		      show_colorimetry(__entry->colorimetry),
+		      show_eotf(__entry->eotf),
+		      show_range(__entry->range),
+		      show_encoding(__entry->pixel_enc)
+	    )
+);
+
+TRACE_EVENT(iomfb_timing_mode,
+	    TP_PROTO(struct apple_dcp *dcp, u32 id, u32 score, u32 width,
+		     u32 height, u32 clock, u32 color_mode),
+	    TP_ARGS(dcp, id, score, width, height, clock, color_mode),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, id)
+			     __field(u32, score)
+			     __field(u32, width)
+			     __field(u32, height)
+			     __field(u32, clock)
+			     __field(u32, color_mode)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->id = id;
+			   __entry->score = score;
+			   __entry->width = width;
+			   __entry->height = height;
+			   __entry->clock = clock;
+			   __entry->color_mode = color_mode;
+	    ),
+	    TP_printk("dcp=%llx, id=%u, score=%u,  %ux%u@%u.%u, color_mode=%u",
+		      __entry->dcp,
+		      __entry->id,
+		      __entry->score,
+		      __entry->width,
+		      __entry->height,
+		      __entry->clock >> 16,
+		      ((__entry->clock & 0xffff) * 1000) >> 16,
+		      __entry->color_mode
+	    )
+);
+
+TRACE_EVENT(avep_sound_mode,
+	    TP_PROTO(struct apple_dcp *dcp, u32 rates, u64 formats, unsigned int nchans),
+	    TP_ARGS(dcp, rates, formats, nchans),
+	    TP_STRUCT__entry(
+			     __field(u64, dcp)
+			     __field(u32, rates)
+			     __field(u64, formats)
+			     __field(unsigned int, nchans)
+	    ),
+	    TP_fast_assign(
+			   __entry->dcp = (u64)dcp;
+			   __entry->rates = rates;
+			   __entry->formats = formats;
+			   __entry->nchans = nchans;
+	    ),
+	    TP_printk("dcp=%llx, rates=%#x, formats=%#llx, nchans=%#x",
+		      __entry->dcp,
+		      __entry->rates,
+		      __entry->formats,
+		      __entry->nchans
+	    )
+);
+
+#endif /* _TRACE_DCP_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/apple/version_utils.h b/drivers/gpu/drm/apple/version_utils.h
new file mode 100644
index 00000000000000..5a33ce1db61c47
--- /dev/null
+++ b/drivers/gpu/drm/apple/version_utils.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright The Asahi Linux Contributors */
+
+#ifndef __APPLE_VERSION_UTILS_H__
+#define __APPLE_VERSION_UTILS_H__
+
+#include <linux/kernel.h>
+#include <linux/args.h>
+
+#define DCP_FW_UNION(u) (u).DCP_FW
+#define DCP_FW_SUFFIX CONCATENATE(_, DCP_FW)
+#define DCP_FW_NAME(name) CONCATENATE(name, DCP_FW_SUFFIX)
+#define DCP_FW_VERSION(x, y, z) ( ((x) << 16) | ((y) << 8) | (z) )
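+
+/*
+ * Illustrative expansion, assuming the build defines DCP_FW per firmware version (the
+ * "v13_5" token below is only an example): DCP_FW_NAME(dcp_probe) expands to dcp_probe_v13_5,
+ * DCP_FW_UNION(u) to (u).v13_5, and DCP_FW_VERSION(13, 5, 0) to 0xd0500.
+ */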
+
+#endif /*__APPLE_VERSION_UTILS_H__*/
diff --git a/drivers/gpu/drm/asahi/Kconfig b/drivers/gpu/drm/asahi/Kconfig
new file mode 100644
index 00000000000000..ee2e9a85cfb5bd
--- /dev/null
+++ b/drivers/gpu/drm/asahi/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config RUST_DRM_SCHED
+	bool
+	select DRM_SCHED
+
+config RUST_DRM_GEM_SHMEM_HELPER
+	bool
+	select DRM_GEM_SHMEM_HELPER
+
+config RUST_DRM_GPUVM
+	bool
+	select DRM_GPUVM
+
+config DRM_ASAHI
+	tristate "Asahi (DRM support for Apple AGX GPUs)"
+	depends on RUST
+	depends on DRM=y
+	depends on (ARM64 && ARCH_APPLE) || (COMPILE_TEST && !GENERIC_ATOMIC64)
+	depends on MMU
+	depends on IOMMU_SUPPORT
+	depends on PAGE_SIZE_16KB
+	select RUST_DRM_SCHED
+	select IOMMU_IO_PGTABLE_LPAE
+	select RUST_DRM_GEM_SHMEM_HELPER
+	select RUST_DRM_GPUVM
+	select RUST_APPLE_RTKIT
+	select WANT_DEV_COREDUMP
+	help
+	  DRM driver for Apple AGX GPUs (G13x, found in the M1 SoC family)
+
+config DRM_ASAHI_DEBUG_ALLOCATOR
+	bool "Use debug allocator"
+	depends on DRM_ASAHI
+	help
+	  Use an alternate, simpler allocator which significantly reduces
+	  performance, but can help find firmware- or GPU-side memory safety
+	  issues. However, it can also trigger firmware bugs more easily,
+	  so expect GPU crashes.
+
+	  Say N unless you are debugging firmware structures or porting to a
+	  new firmware version.
diff --git a/drivers/gpu/drm/asahi/Makefile b/drivers/gpu/drm/asahi/Makefile
new file mode 100644
index 00000000000000..e6724866798760
--- /dev/null
+++ b/drivers/gpu/drm/asahi/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_ASAHI) += asahi.o
diff --git a/drivers/gpu/drm/asahi/alloc.rs b/drivers/gpu/drm/asahi/alloc.rs
new file mode 100644
index 00000000000000..0db55f72c1c5f1
--- /dev/null
+++ b/drivers/gpu/drm/asahi/alloc.rs
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU kernel object allocator.
+//!
+//! This kernel driver needs to manage a large number of GPU objects, in both firmware/kernel
+//! address space and user address space. This module implements a simple grow-only heap allocator
+//! based on the DRM MM range allocator, and a debug allocator that allocates each object as a
+//! separate GEM object.
+//!
+//! Allocations may optionally have debugging enabled, which adds preambles that store metadata
+//! about the allocation. This is useful for live debugging using the hypervisor or postmortem
+//! debugging with a GPU memory snapshot, since it makes it easier to identify use-after-free and
+//! caching issues.
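+//!
+//! Illustrative usage sketch (names such as `dev`, `vm`, `prot`, the address range, block size
+//! and `SomeGpuStruct` are placeholders, not items defined by this module):
+//!
+//! ```text
+//! // Growable kernel heap: 0x80-byte minimum alignment, 0x4000-byte backing blocks,
+//! // CPU-mapped, not keeping garbage.
+//! let mut heap = HeapAllocator::new(&dev, &vm, 0x10_0000..0x100_0000, 0x80, prot,
+//!                                   0x4000, true, fmt!("Example heap"), false)?;
+//! // Allocate and default-initialize a firmware object through the Allocator trait.
+//! let obj = heap.new_default::<SomeGpuStruct>()?;
+//! ```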
+
+use kernel::{drm::mm, error::Result, prelude::*, str::CString};
+
+use crate::debug::*;
+use crate::driver::{AsahiDevRef, AsahiDevice};
+use crate::fw::types::Zeroable;
+use crate::mmu;
+use crate::object::{GpuArray, GpuObject, GpuOnlyArray, GpuStruct, GpuWeakPointer};
+use crate::util::RangeExt;
+
+use core::cmp::Ordering;
+use core::fmt;
+use core::fmt::{Debug, Formatter};
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::Range;
+use core::ptr::NonNull;
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Alloc;
+
+#[cfg(not(CONFIG_DRM_ASAHI_DEBUG_ALLOCATOR))]
+/// The driver-global allocator type
+pub(crate) type DefaultAllocator = HeapAllocator;
+
+#[cfg(not(CONFIG_DRM_ASAHI_DEBUG_ALLOCATOR))]
+/// The driver-global allocation type
+pub(crate) type DefaultAllocation = HeapAllocation;
+
+#[cfg(CONFIG_DRM_ASAHI_DEBUG_ALLOCATOR)]
+/// The driver-global allocator type
+pub(crate) type DefaultAllocator = SimpleAllocator;
+
+#[cfg(CONFIG_DRM_ASAHI_DEBUG_ALLOCATOR)]
+/// The driver-global allocation type
+pub(crate) type DefaultAllocation = SimpleAllocation;
+
+/// Represents a raw allocation (without any type information).
+pub(crate) trait RawAllocation {
+    /// Returns the CPU-side pointer (if CPU mapping is enabled) as a byte non-null pointer.
+    fn ptr(&self) -> Option<NonNull<u8>>;
+    /// Returns the GPU VA pointer as a u64.
+    fn gpu_ptr(&self) -> u64;
+    /// Returns the AsahiDevice that owns this allocation.
+    fn device(&self) -> &AsahiDevice;
+}
+
+/// Represents a typed allocation.
+pub(crate) trait Allocation<T>: Debug {
+    /// Returns the typed CPU-side pointer (if CPU mapping is enabled).
+    fn ptr(&self) -> Option<NonNull<T>>;
+    /// Returns the GPU VA pointer as a u64.
+    fn gpu_ptr(&self) -> u64;
+    /// Returns the size of the allocation in bytes.
+    fn size(&self) -> usize;
+    /// Returns the AsahiDevice that owns this allocation.
+    fn device(&self) -> &AsahiDevice;
+}
+
+/// A generic typed allocation wrapping a RawAllocation.
+///
+/// This is currently the only Allocation implementation, since it is shared by all allocators.
+///
+/// # Invariants
+/// The allocation at `alloc` must have a size of at least `alloc_size` plus `debug_offset` plus `padding`.
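+///
+/// With the debug features enabled, the raw allocation is laid out roughly as follows (sketch;
+/// see `Allocator::alloc_generic` for the exact offset computation):
+///
+/// ```text
+/// | fill | previous AllocDebugData | current AllocDebugData | object (alloc_size) | guard padding |
+/// ^ alloc.gpu_ptr()                                         ^ gpu_ptr() = alloc.gpu_ptr() + debug_offset
+/// ```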
+pub(crate) struct GenericAlloc<T, U: RawAllocation> {
+    alloc: U,
+    alloc_size: usize,
+    debug_offset: usize,
+    padding: usize,
+    tag: u32,
+    pad_word: u32,
+    _p: PhantomData<T>,
+}
+
+impl<T, U: RawAllocation> Allocation<T> for GenericAlloc<T, U> {
+    /// Returns a pointer to the inner (usable) part of the allocation.
+    fn ptr(&self) -> Option<NonNull<T>> {
+        // SAFETY: self.debug_offset is always within the allocation per the invariant, so it is
+        // safe to add to the base pointer.
+        unsafe { self.alloc.ptr().map(|p| p.add(self.debug_offset).cast()) }
+    }
+    /// Returns the GPU pointer to the inner (usable) part of the allocation.
+    fn gpu_ptr(&self) -> u64 {
+        self.alloc.gpu_ptr() + self.debug_offset as u64
+    }
+    /// Returns the size of the inner (usable) part of the allocation.
+    fn size(&self) -> usize {
+        self.alloc_size
+    }
+    fn device(&self) -> &AsahiDevice {
+        self.alloc.device()
+    }
+}
+
+impl<T, U: RawAllocation> Debug for GenericAlloc<T, U> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct(core::any::type_name::<GenericAlloc<T, U>>())
+            .field("ptr", &format_args!("{:?}", self.ptr()))
+            .field("gpu_ptr", &format_args!("{:#X?}", self.gpu_ptr()))
+            .field("size", &format_args!("{:#X?}", self.size()))
+            .finish()
+    }
+}
+
+/// Debugging data associated with an allocation, when debugging is enabled.
+#[repr(C)]
+struct AllocDebugData {
+    state: u32,
+    tag: u32,
+    size: u64,
+    base_gpuva: u64,
+    obj_gpuva: u64,
+    name: [u8; 0x20],
+}
+
+/// Magic flag indicating a live allocation.
+const STATE_LIVE: u32 = u32::from_le_bytes(*b"LIVE");
+/// Magic flag indicating a freed allocation.
+const STATE_DEAD: u32 = u32::from_le_bytes(*b"DEAD");
+
+/// Marker byte to identify when firmware/GPU write beyond the end of an allocation.
+const GUARD_MARKER: u32 = 0x93939393;
+
+impl<T, U: RawAllocation> Drop for GenericAlloc<T, U> {
+    fn drop(&mut self) {
+        let debug_len = mem::size_of::<AllocDebugData>();
+        if self.debug_offset >= debug_len {
+            if let Some(p) = self.alloc.ptr() {
+                // SAFETY: Per the invariant, the allocation is at least `debug_offset` bytes
+                // long, and `debug_offset >= debug_len` as checked above, so this write stays
+                // within the allocation.
+                unsafe {
+                    let p = p.as_ptr().add(self.debug_offset - debug_len);
+                    (p as *mut u32).write(STATE_DEAD);
+                }
+            }
+        }
+        if debug_enabled(DebugFlags::FillAllocations) {
+            if let Some(p) = self.ptr() {
+                // SAFETY: Writing to our inner base pointer with our known inner size is safe.
+                unsafe { (p.as_ptr() as *mut u8).write_bytes(0xde, self.size()) };
+            }
+        }
+        if self.padding != 0 {
+            if let Some(p) = self.ptr() {
+                // SAFETY: Per the invariant, we have at least `self.padding` bytes trailing
+                // the inner base pointer, after `size()` bytes.
+                let guard = unsafe {
+                    core::slice::from_raw_parts(
+                        (p.as_ptr() as *mut u8 as *const u8).add(self.size()),
+                        self.padding,
+                    )
+                };
+                let mut first_err = None;
+                let mut last_err = 0;
+                for (i, p) in guard.iter().enumerate() {
+                    if *p != (self.pad_word >> (8 * (i & 3))) as u8 {
+                        if first_err.is_none() {
+                            first_err = Some(i);
+                        }
+                        last_err = i;
+                    }
+                }
+                if let Some(start) = first_err {
+                    dev_warn!(
+                        self.device().as_ref(),
+                        "Allocator: Corruption after object of type {}/{:#x} at {:#x}:{:#x} + {:#x}..={:#x}\n",
+                        core::any::type_name::<T>(),
+                        self.tag,
+                        self.gpu_ptr(),
+                        self.size(),
+                        start,
+                        last_err,
+                    );
+                }
+            }
+        }
+    }
+}
+
+static_assert!(mem::size_of::<AllocDebugData>() == 0x40);
+
+/// A trait representing an allocator.
+pub(crate) trait Allocator {
+    /// The raw allocation type used by this allocator.
+    type Raw: RawAllocation;
+    // TODO: Needs associated_type_defaults
+    // type Allocation<T> = GenericAlloc<T, Self::Raw>;
+
+    /// Returns whether CPU-side mapping is enabled.
+    fn cpu_maps(&self) -> bool;
+    /// Returns the minimum alignment for allocations.
+    fn min_align(&self) -> usize;
+    /// Allocate an object of the given size in bytes with the given alignment.
+    fn alloc(&mut self, size: usize, align: usize) -> Result<Self::Raw>;
+
+    /// Returns a tuple of (count, size) of how much garbage (freed but not yet reusable objects)
+    /// exists in this allocator. Optional.
+    fn garbage(&self) -> (usize, usize) {
+        (0, 0)
+    }
+    /// Collect garbage for this allocator, up to the given object count. Optional.
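+    ///
+    /// Illustrative call pattern from code that owns the allocator (sketch only):
+    ///
+    /// ```text
+    /// let (count, _bytes) = alloc.garbage();
+    /// alloc.collect_garbage(count);
+    /// ```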
+    fn collect_garbage(&mut self, _count: usize) {}
+
+    /// Allocate a new GpuStruct object. See [`GpuObject::new`].
+    #[inline(never)]
+    fn new_object<T: GpuStruct>(
+        &mut self,
+        inner: T,
+        callback: impl for<'a> FnOnce(&'a T) -> T::Raw<'a>,
+    ) -> Result<GpuObject<T, GenericAlloc<T, Self::Raw>>> {
+        GpuObject::<T, GenericAlloc<T, Self::Raw>>::new(self.alloc_object()?, inner, callback)
+    }
+
+    /// Allocate a new GpuStruct object. See [`GpuObject::new_default`].
+    #[inline(never)]
+    fn new_default<T: GpuStruct + Default>(
+        &mut self,
+    ) -> Result<GpuObject<T, GenericAlloc<T, Self::Raw>>>
+    where
+        for<'a> <T as GpuStruct>::Raw<'a>: Default + Zeroable,
+    {
+        GpuObject::<T, GenericAlloc<T, Self::Raw>>::new_default(self.alloc_object()?)
+    }
+
+    /// Allocate a new GpuStruct object. See [`GpuObject::new_init`].
+    #[inline(never)]
+    fn new_init<'a, T: GpuStruct, R: PinInit<T::Raw<'a>, F>, E, F>(
+        &mut self,
+        inner_init: impl Init<T, E>,
+        raw_init: impl FnOnce(&'a T, GpuWeakPointer<T>) -> R,
+    ) -> Result<GpuObject<T, GenericAlloc<T, Self::Raw>>>
+    where
+        kernel::error::Error: core::convert::From<E>,
+        kernel::error::Error: core::convert::From<F>,
+    {
+        GpuObject::<T, GenericAlloc<T, Self::Raw>>::new_init_prealloc(
+            self.alloc_object()?,
+            |_p| inner_init,
+            raw_init,
+        )
+    }
+
+    /// Allocate a generic buffer of the given size and alignment, applying the debug features if
+    /// enabled to tag it and detect overflows.
+    fn alloc_generic<T>(
+        &mut self,
+        size: usize,
+        align: usize,
+        tag: Option<u32>,
+    ) -> Result<GenericAlloc<T, Self::Raw>> {
+        let padding = if debug_enabled(DebugFlags::DetectOverflows) {
+            size
+        } else {
+            0
+        };
+
+        let ret: GenericAlloc<T, Self::Raw> =
+            if self.cpu_maps() && debug_enabled(debug::DebugFlags::DebugAllocations) {
+                let debug_align = self.min_align().max(align);
+                let debug_len = mem::size_of::<AllocDebugData>();
+                let debug_offset = (debug_len * 2 + debug_align - 1) & !(debug_align - 1);
+
+                let alloc = self.alloc(size + debug_offset + padding, align)?;
+
+                let mut debug = AllocDebugData {
+                    state: STATE_LIVE,
+                    tag: tag.unwrap_or(0),
+                    size: size as u64,
+                    base_gpuva: alloc.gpu_ptr(),
+                    obj_gpuva: alloc.gpu_ptr() + debug_offset as u64,
+                    name: [0; 0x20],
+                };
+
+                let name = core::any::type_name::<T>().as_bytes();
+                let len = name.len().min(debug.name.len() - 1);
+                debug.name[..len].copy_from_slice(&name[..len]);
+
+                if let Some(p) = alloc.ptr() {
+                    // SAFETY: Per the size calculations above, this pointer math and the
+                    // writes never exceed the allocation size.
+                    unsafe {
+                        let p = p.as_ptr();
+                        p.write_bytes(0x42, debug_offset - 2 * debug_len);
+                        let cur = p.add(debug_offset - debug_len) as *mut AllocDebugData;
+                        let prev = p.add(debug_offset - 2 * debug_len) as *mut AllocDebugData;
+                        prev.copy_from(cur, 1);
+                        cur.copy_from(&debug, 1);
+                    };
+                }
+
+                GenericAlloc {
+                    alloc,
+                    alloc_size: size,
+                    debug_offset,
+                    tag: tag.unwrap_or(0),
+                    pad_word: tag.unwrap_or(GUARD_MARKER) | 0x81818181,
+                    padding,
+                    _p: PhantomData,
+                }
+            } else {
+                GenericAlloc {
+                    alloc: self.alloc(size + padding, align)?,
+                    alloc_size: size,
+                    debug_offset: 0,
+                    tag: tag.unwrap_or(0),
+                    pad_word: tag.unwrap_or(GUARD_MARKER) | 0x81818181,
+                    padding,
+                    _p: PhantomData,
+                }
+            };
+
+        if debug_enabled(DebugFlags::FillAllocations) {
+            if let Some(p) = ret.ptr() {
+                // SAFETY: Writing to our inner base pointer with our known inner size is safe.
+                unsafe { (p.as_ptr() as *mut u8).write_bytes(0xaa, ret.size()) };
+            }
+        }
+
+        if padding != 0 {
+            if let Some(p) = ret.ptr() {
+                // SAFETY: Per the invariant, we have at least `self.padding` bytes trailing
+                // the inner base pointer, after `size()` bytes.
+                let guard = unsafe {
+                    core::slice::from_raw_parts_mut(
+                        (p.as_ptr() as *mut u8).add(ret.size()),
+                        padding,
+                    )
+                };
+                for (i, p) in guard.iter_mut().enumerate() {
+                    *p = (ret.pad_word >> (8 * (i & 3))) as u8;
+                }
+            }
+        }
+
+        Ok(ret)
+    }
+
+    /// Allocate an object of a given type, without actually initializing the allocation.
+    ///
+    /// This is useful to directly call [`GpuObject::new_*`], without borrowing a reference to the
+    /// allocator for the entire duration (e.g. if further allocations need to happen inside the
+    /// callbacks).
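+    ///
+    /// Sketch (hypothetical `SomeGpuStruct`, `inner` and `callback`):
+    ///
+    /// ```text
+    /// let prealloc = alloc.alloc_object::<SomeGpuStruct>()?;
+    /// // `alloc` may be borrowed again here, e.g. by the init callback.
+    /// let obj = GpuObject::new(prealloc, inner, callback)?;
+    /// ```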
+    fn alloc_object<T: GpuStruct>(&mut self) -> Result<GenericAlloc<T, Self::Raw>> {
+        let size = mem::size_of::<T::Raw<'static>>();
+        let align = mem::align_of::<T::Raw<'static>>();
+
+        self.alloc_generic(size, align, None)
+    }
+
+    /// Allocate an empty `GpuArray` of a given type and length.
+    fn array_empty<T: Sized + Default>(
+        &mut self,
+        count: usize,
+    ) -> Result<GpuArray<T, GenericAlloc<T, Self::Raw>>> {
+        let size = mem::size_of::<T>() * count;
+        let align = mem::align_of::<T>();
+
+        let alloc = self.alloc_generic(size, align, None)?;
+        GpuArray::<T, GenericAlloc<T, Self::Raw>>::empty(alloc, count)
+    }
+
+    /// Allocate an empty `GpuArray` of a given type and length, tagging the allocation with a
+    /// FourCC for debugging.
+    fn array_empty_tagged<T: Sized + Default>(
+        &mut self,
+        count: usize,
+        tag: &[u8; 4],
+    ) -> Result<GpuArray<T, GenericAlloc<T, Self::Raw>>> {
+        let size = mem::size_of::<T>() * count;
+        let align = mem::align_of::<T>();
+
+        let alloc = self.alloc_generic(size, align, Some(u32::from_le_bytes(*tag)))?;
+        GpuArray::<T, GenericAlloc<T, Self::Raw>>::empty(alloc, count)
+    }
+
+    /// Allocate an empty `GpuOnlyArray` of a given type and length.
+    fn array_gpuonly<T: Sized + Default>(
+        &mut self,
+        count: usize,
+    ) -> Result<GpuOnlyArray<T, GenericAlloc<T, Self::Raw>>> {
+        let size = mem::size_of::<T>() * count;
+        let align = mem::align_of::<T>();
+
+        let alloc = self.alloc_generic(size, align, None)?;
+        GpuOnlyArray::<T, GenericAlloc<T, Self::Raw>>::new(alloc, count)
+    }
+}
+
+/// A simple allocation backed by a separate GEM object.
+///
+/// # Invariants
+/// `ptr` is either None or a valid, non-null pointer to the CPU view of the object.
+/// `gpu_ptr` is the GPU-side VA of the object.
+pub(crate) struct SimpleAllocation {
+    dev: AsahiDevRef,
+    ptr: Option<NonNull<u8>>,
+    gpu_ptr: u64,
+    _mapping: mmu::KernelMapping,
+    obj: crate::gem::ObjectRef,
+}
+
+/// SAFETY: `SimpleAllocation` just points to raw memory and should be safe to send across threads.
+unsafe impl Send for SimpleAllocation {}
+/// SAFETY: `SimpleAllocation` just points to raw memory and should be safe to share across threads.
+unsafe impl Sync for SimpleAllocation {}
+
+impl Drop for SimpleAllocation {
+    fn drop(&mut self) {
+        mod_dev_dbg!(
+            self.device(),
+            "SimpleAllocator: drop object @ {:#x}\n",
+            self.gpu_ptr()
+        );
+        if debug_enabled(DebugFlags::FillAllocations) {
+            if let Ok(vmap) = self.obj.vmap() {
+                vmap.as_mut_slice().fill(0x42);
+            }
+        }
+    }
+}
+
+impl RawAllocation for SimpleAllocation {
+    fn ptr(&self) -> Option<NonNull<u8>> {
+        self.ptr
+    }
+    fn gpu_ptr(&self) -> u64 {
+        self.gpu_ptr
+    }
+    fn device(&self) -> &AsahiDevice {
+        &self.dev
+    }
+}
+
+/// A simple allocator that allocates each object as its own GEM object, aligned to the end of a
+/// page.
+///
+/// This is very slow, but it has the advantage that over-reads by the firmware or GPU will fault on
+/// the guard page after the allocation, which can be useful to validate that the firmware's or
+/// GPU's idea of the object size matches what we expect.
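+///
+/// For example (illustrative numbers, assuming this driver's 16 KiB UAT page size): a 0x1234-byte
+/// allocation with 0x80-byte alignment is backed by a single 0x4000-byte page and placed at offset
+/// (0x4000 - 0x1234) & !0x7f = 0x2d80, so it ends 0x4c bytes before the unmapped guard page and
+/// any larger overrun faults.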
+pub(crate) struct SimpleAllocator {
+    dev: AsahiDevRef,
+    range: Range<u64>,
+    prot: mmu::Prot,
+    vm: mmu::Vm,
+    min_align: usize,
+    cpu_maps: bool,
+}
+
+impl SimpleAllocator {
+    /// Create a new `SimpleAllocator` for a given address range and `Vm`.
+    #[allow(dead_code)]
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        vm: &mmu::Vm,
+        range: Range<u64>,
+        min_align: usize,
+        prot: mmu::Prot,
+        _block_size: usize,
+        mut cpu_maps: bool,
+        _name: fmt::Arguments<'_>,
+        _keep_garbage: bool,
+    ) -> Result<SimpleAllocator> {
+        if debug_enabled(DebugFlags::ForceCPUMaps) {
+            cpu_maps = true;
+        }
+        Ok(SimpleAllocator {
+            dev: dev.into(),
+            vm: vm.clone(),
+            range,
+            prot,
+            min_align,
+            cpu_maps,
+        })
+    }
+}
+
+impl Allocator for SimpleAllocator {
+    type Raw = SimpleAllocation;
+
+    fn cpu_maps(&self) -> bool {
+        self.cpu_maps
+    }
+
+    fn min_align(&self) -> usize {
+        self.min_align
+    }
+
+    #[inline(never)]
+    fn alloc(&mut self, size: usize, align: usize) -> Result<SimpleAllocation> {
+        let size_aligned = (size + mmu::UAT_PGSZ - 1) & !mmu::UAT_PGMSK;
+        let align = self.min_align.max(align);
+        let offset = (size_aligned - size) & !(align - 1);
+
+        mod_dev_dbg!(
+            &self.dev,
+            "SimpleAllocator::new: size={:#x} size_al={:#x} al={:#x} off={:#x}\n",
+            size,
+            size_aligned,
+            align,
+            offset
+        );
+
+        let mut obj = crate::gem::new_kernel_object(&self.dev, size_aligned)?;
+        let p = obj.vmap()?.as_mut_ptr() as *mut u8;
+        if debug_enabled(DebugFlags::FillAllocations) {
+            obj.vmap()?.as_mut_slice().fill(0xde);
+        }
+        let mapping = obj.map_into_range(
+            &self.vm,
+            self.range.clone(),
+            self.min_align.max(mmu::UAT_PGSZ) as u64,
+            self.prot,
+            true,
+        )?;
+
+        let iova = mapping.iova();
+
+        // SAFETY: Per the math above to calculate `size_aligned`, this can never overflow.
+        let ptr = unsafe { p.add(offset) };
+        let gpu_ptr = iova + offset as u64;
+
+        mod_dev_dbg!(
+            &self.dev,
+            "SimpleAllocator::new -> {:#?} / {:#?} | {:#x} / {:#x}\n",
+            p,
+            ptr,
+            iova,
+            gpu_ptr
+        );
+
+        Ok(SimpleAllocation {
+            dev: self.dev.clone(),
+            ptr: NonNull::new(ptr),
+            gpu_ptr,
+            _mapping: mapping,
+            obj,
+        })
+    }
+}
+
+/// Inner data for an allocation from the heap allocator.
+///
+/// This is wrapped in an `mm::Node`.
+pub(crate) struct HeapAllocationInner {
+    dev: AsahiDevRef,
+    ptr: Option<NonNull<u8>>,
+    real_size: usize,
+}
+
+/// SAFETY: `HeapAllocationInner` just points to raw memory and should be safe to send across threads.
+unsafe impl Send for HeapAllocationInner {}
+/// SAFETY: `HeapAllocationInner` just points to raw memory and should be safe to share between threads.
+unsafe impl Sync for HeapAllocationInner {}
+
+/// Outer view of a heap allocation.
+///
+/// This uses an Option<> so we can move the internal `Node` into the garbage pool when it gets
+/// dropped.
+///
+/// # Invariants
+/// The `Option` must always be `Some(...)` while this object is alive.
+pub(crate) struct HeapAllocation(Option<mm::Node<HeapAllocatorInner, HeapAllocationInner>>);
+
+impl Drop for HeapAllocation {
+    fn drop(&mut self) {
+        let node = self.0.take().unwrap();
+        let size = node.size();
+        let alloc = node.alloc_ref();
+
+        alloc.with(|a| {
+            if let Some(garbage) = a.garbage.as_mut() {
+                if garbage.push(node, GFP_KERNEL).is_err() {
+                    dev_err!(
+                        &a.dev.as_ref(),
+                        "HeapAllocation[{}]::drop: Failed to keep garbage\n",
+                        &*a.name,
+                    );
+                }
+                a.total_garbage += size as usize;
+                None
+            } else {
+                // We need to ensure node survives this scope, since dropping it
+                // will try to take the mm lock and deadlock us
+                Some(node)
+            }
+        });
+    }
+}
+
+impl mm::AllocInner<HeapAllocationInner> for HeapAllocatorInner {
+    fn drop_object(
+        &mut self,
+        start: u64,
+        _size: u64,
+        _color: usize,
+        obj: &mut HeapAllocationInner,
+    ) {
+        /* real_size == 0 means it's a guard node */
+        if obj.real_size > 0 {
+            mod_dev_dbg!(
+                obj.dev,
+                "HeapAllocator[{}]: drop object @ {:#x} ({} bytes)\n",
+                &*self.name,
+                start,
+                obj.real_size,
+            );
+            self.allocated -= obj.real_size;
+        }
+    }
+}
+
+impl RawAllocation for HeapAllocation {
+    // SAFETY: This function must always return a valid pointer.
+    // Since the HeapAllocation contains a reference to the
+    // backing_objects array that contains the object backing this pointer,
+    // and objects are only ever added to it, this pointer is guaranteed to
+    // remain valid for the lifetime of the HeapAllocation.
+    fn ptr(&self) -> Option<NonNull<u8>> {
+        self.0.as_ref().unwrap().ptr
+    }
+    // SAFETY: This function must always return a valid GPU pointer.
+    // See the explanation in ptr().
+    fn gpu_ptr(&self) -> u64 {
+        self.0.as_ref().unwrap().start()
+    }
+    fn device(&self) -> &AsahiDevice {
+        &self.0.as_ref().unwrap().dev
+    }
+}
+
+/// Inner data for a heap allocator which uses the DRM MM range allocator to manage the heap.
+///
+/// This is wrapped by an `mm::Allocator`.
+struct HeapAllocatorInner {
+    dev: AsahiDevRef,
+    allocated: usize,
+    backing_objects: KVec<(crate::gem::ObjectRef, mmu::KernelMapping, u64)>,
+    garbage: Option<KVec<mm::Node<HeapAllocatorInner, HeapAllocationInner>>>,
+    total_garbage: usize,
+    name: CString,
+}
+
+/// A heap allocator which uses the DRM MM range allocator to manage its objects.
+///
+/// The heap is composed of a series of GEM objects. This implementation only ever grows the heap,
+/// never shrinks it.
+pub(crate) struct HeapAllocator {
+    dev: AsahiDevRef,
+    range: Range<u64>,
+    top: u64,
+    prot: mmu::Prot,
+    vm: mmu::Vm,
+    min_align: usize,
+    block_size: usize,
+    cpu_maps: bool,
+    guard_nodes: KVec<mm::Node<HeapAllocatorInner, HeapAllocationInner>>,
+    mm: mm::Allocator<HeapAllocatorInner, HeapAllocationInner>,
+    name: CString,
+    garbage: Option<KVec<mm::Node<HeapAllocatorInner, HeapAllocationInner>>>,
+}
+
+impl HeapAllocator {
+    /// Create a new HeapAllocator for a given `Vm` and address range.
+    #[allow(dead_code)]
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        vm: &mmu::Vm,
+        range: Range<u64>,
+        min_align: usize,
+        prot: mmu::Prot,
+        block_size: usize,
+        mut cpu_maps: bool,
+        name: fmt::Arguments<'_>,
+        keep_garbage: bool,
+    ) -> Result<HeapAllocator> {
+        if !min_align.is_power_of_two() {
+            return Err(EINVAL);
+        }
+        if debug_enabled(DebugFlags::ForceCPUMaps) {
+            cpu_maps = true;
+        }
+
+        let name = CString::try_from_fmt(name)?;
+
+        let inner = HeapAllocatorInner {
+            dev: dev.into(),
+            allocated: 0,
+            backing_objects: KVec::new(),
+            // TODO: This clearly needs a try_clone() or similar
+            name: CString::try_from_fmt(fmt!("{}", &*name))?,
+            garbage: if keep_garbage {
+                Some(KVec::new())
+            } else {
+                None
+            },
+            total_garbage: 0,
+        };
+
+        let mm = mm::Allocator::new(range.start, range.range(), inner)?;
+
+        Ok(HeapAllocator {
+            dev: dev.into(),
+            vm: vm.clone(),
+            top: range.start,
+            range,
+            prot,
+            min_align,
+            block_size: block_size.max(min_align),
+            cpu_maps,
+            guard_nodes: KVec::new(),
+            mm,
+            name,
+            garbage: if keep_garbage {
+                Some({
+                    let mut v = KVec::new();
+                    v.reserve(128, GFP_KERNEL)?;
+                    v
+                })
+            } else {
+                None
+            },
+        })
+    }
+
+    /// Add a new backing block of the given size to this heap.
+    ///
+    /// If CPU mapping is enabled, this also adds a guard node to the range allocator to ensure that
+    /// objects cannot straddle backing block boundaries, since we cannot easily create a contiguous
+    /// CPU VA mapping for them. This can create some fragmentation. If CPU mapping is disabled, we
+    /// skip the guard blocks, since the GPU view of the heap is always contiguous.
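+    ///
+    /// Resulting VA layout with CPU maps enabled (illustrative sketch):
+    ///
+    /// ```text
+    /// | block 0 | guard | block 1 | guard | ... | block N | guard |  unallocated VA
+    /// ^ range.start                                               ^ top
+    /// ```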
+    #[inline(never)]
+    fn add_block(&mut self, size: usize) -> Result {
+        let size_aligned = (size + mmu::UAT_PGSZ - 1) & !mmu::UAT_PGMSK;
+
+        mod_dev_dbg!(
+            &self.dev,
+            "HeapAllocator[{}]::add_block: size={:#x} size_al={:#x}\n",
+            &*self.name,
+            size,
+            size_aligned,
+        );
+
+        if self.top.saturating_add(size_aligned as u64) > self.range.end {
+            dev_err!(
+                self.dev.as_ref(),
+                "HeapAllocator[{}]::add_block: Exhausted VA space\n",
+                &*self.name,
+            );
+        }
+
+        let mut obj = crate::gem::new_kernel_object(&self.dev, size_aligned)?;
+        if self.cpu_maps && debug_enabled(DebugFlags::FillAllocations) {
+            obj.vmap()?.as_mut_slice().fill(0xde);
+        }
+
+        let gpu_ptr = self.top;
+        let mapping = obj
+            .map_at(&self.vm, gpu_ptr, self.prot, self.cpu_maps)
+            .inspect_err(|err| {
+                dev_err!(
+                    self.dev.as_ref(),
+                    "HeapAllocator[{}]::add_block: Failed to map at {:#x} ({:?})\n",
+                    &*self.name,
+                    gpu_ptr,
+                    err
+                );
+            })?;
+
+        self.mm
+            .with_inner(|inner| inner.backing_objects.reserve(1, GFP_KERNEL))?;
+
+        let mut new_top = self.top + size_aligned as u64;
+        if self.cpu_maps {
+            let guard = self.min_align.max(mmu::UAT_PGSZ);
+            mod_dev_dbg!(
+                self.dev,
+                "HeapAllocator[{}]::add_block: Adding guard node {:#x}:{:#x}\n",
+                &*self.name,
+                new_top,
+                guard
+            );
+
+            let inner = HeapAllocationInner {
+                dev: self.dev.clone(),
+                ptr: None,
+                real_size: 0,
+            };
+
+            let node = match self.mm.reserve_node(inner, new_top, guard as u64, 0) {
+                Ok(a) => a,
+                Err(a) => {
+                    dev_err!(
+                        self.dev.as_ref(),
+                        "HeapAllocator[{}]::add_block: Failed to reserve guard node {:#x}:{:#x}: {:?}\n",
+                        &*self.name,
+                        guard,
+                        new_top,
+                        a
+                    );
+                    return Err(EIO);
+                }
+            };
+
+            self.guard_nodes.push(node, GFP_KERNEL)?;
+
+            new_top += guard as u64;
+        }
+        mod_dev_dbg!(
+            &self.dev,
+            "HeapAllocator[{}]::add_block: top={:#x}\n",
+            &*self.name,
+            new_top
+        );
+
+        self.mm.with_inner(|inner| {
+            inner
+                .backing_objects
+                .push((obj, mapping, gpu_ptr), GFP_KERNEL)
+        })?;
+
+        self.top = new_top;
+
+        cls_dev_dbg!(
+            MemStats,
+            &self.dev,
+            "{} Heap: grow to {} bytes\n",
+            &*self.name,
+            self.top - self.range.start
+        );
+
+        Ok(())
+    }
+
+    /// Find the backing object index that backs a given GPU address.
+    fn find_obj(&mut self, addr: u64) -> Result<usize> {
+        self.mm.with_inner(|inner| {
+            inner
+                .backing_objects
+                .binary_search_by(|obj| {
+                    let start = obj.2;
+                    let end = obj.2 + obj.0.size() as u64;
+                    if start > addr {
+                        Ordering::Greater
+                    } else if end <= addr {
+                        Ordering::Less
+                    } else {
+                        Ordering::Equal
+                    }
+                })
+                .or(Err(ENOENT))
+        })
+    }
+
+    fn alloc_inner(&mut self, size: usize, align: usize) -> Result<HeapAllocation> {
+        if align != 0 && !align.is_power_of_two() {
+            return Err(EINVAL);
+        }
+        let align = self.min_align.max(align);
+        let size_aligned = (size + align - 1) & !(align - 1);
+
+        mod_dev_dbg!(
+            &self.dev,
+            "HeapAllocator[{}]::new: size={:#x} size_al={:#x}\n",
+            &*self.name,
+            size,
+            size_aligned,
+        );
+
+        let inner = HeapAllocationInner {
+            dev: self.dev.clone(),
+            ptr: None,
+            real_size: size,
+        };
+
+        let mut node = match self.mm.insert_node_generic(
+            inner,
+            size_aligned as u64,
+            align as u64,
+            0,
+            mm::InsertMode::Best,
+        ) {
+            Ok(a) => a,
+            Err(a) => {
+                dev_err!(
+                    &self.dev.as_ref(),
+                    "HeapAllocator[{}]::new: Failed to insert node of size {:#x} / align {:#x}: {:?}\n",
+                    &*self.name, size_aligned, align, a
+                );
+                return Err(a);
+            }
+        };
+
+        self.mm.with_inner(|inner| inner.allocated += size);
+
+        let mut new_object = false;
+        let start = node.start();
+        let end = start + node.size();
+        if end > self.top {
+            if start > self.top {
+                dev_warn!(
+                    self.dev.as_ref(),
+                    "HeapAllocator[{}]::alloc: top={:#x}, start={:#x}\n",
+                    &*self.name,
+                    self.top,
+                    start
+                );
+            }
+            let block_size = self.block_size.max((end - self.top) as usize);
+            self.add_block(block_size)?;
+            new_object = true;
+        }
+        assert!(end <= self.top);
+
+        if self.cpu_maps {
+            mod_dev_dbg!(
+                self.dev,
+                "HeapAllocator[{}]::alloc: mapping to CPU\n",
+                &*self.name
+            );
+
+            let idx = if new_object {
+                None
+            } else {
+                Some(match self.find_obj(start) {
+                    Ok(a) => a,
+                    Err(_) => {
+                        dev_warn!(
+                            self.dev.as_ref(),
+                            "HeapAllocator[{}]::alloc: Failed to find object at {:#x}\n",
+                            &*self.name,
+                            start
+                        );
+                        return Err(EIO);
+                    }
+                })
+            };
+            let (obj_start, obj_size, p) = self.mm.with_inner(|inner| -> Result<_> {
+                let idx = idx.unwrap_or(inner.backing_objects.len() - 1);
+                let obj = &mut inner.backing_objects[idx];
+                let p = obj.0.vmap()?.as_mut_ptr() as *mut u8;
+                Ok((obj.2, obj.0.size(), p))
+            })?;
+            assert!(obj_start <= start);
+            assert!(obj_start + obj_size as u64 >= end);
+            node.as_mut().inner_mut().ptr =
+                // SAFETY: Per the asserts above, this offset is always within the allocation.
+                NonNull::new(unsafe { p.add((start - obj_start) as usize) });
+            mod_dev_dbg!(
+                self.dev,
+                "HeapAllocator[{}]::alloc: CPU pointer = {:?}\n",
+                &*self.name,
+                node.ptr
+            );
+        }
+
+        mod_dev_dbg!(
+            self.dev,
+            "HeapAllocator[{}]::alloc: Allocated {:#x} bytes @ {:#x}\n",
+            &*self.name,
+            end - start,
+            start
+        );
+
+        Ok(HeapAllocation(Some(node)))
+    }
+}
+
+impl Allocator for HeapAllocator {
+    type Raw = HeapAllocation;
+
+    fn cpu_maps(&self) -> bool {
+        self.cpu_maps
+    }
+
+    fn min_align(&self) -> usize {
+        self.min_align
+    }
+
+    fn alloc(&mut self, size: usize, align: usize) -> Result<HeapAllocation> {
+        let ret = self.alloc_inner(size, align);
+
+        if ret.is_err() {
+            dev_warn!(
+                self.dev.as_ref(),
+                "HeapAllocator[{}]::alloc: Allocation of {:#x}({:#x}) size object failed\n",
+                &*self.name,
+                size,
+                align
+            );
+        }
+        ret
+    }
+
+    fn garbage(&self) -> (usize, usize) {
+        self.mm.with_inner(|inner| {
+            if let Some(g) = inner.garbage.as_ref() {
+                (g.len(), inner.total_garbage)
+            } else {
+                (0, 0)
+            }
+        })
+    }
+
+    fn collect_garbage(&mut self, mut count: usize) {
+        if let Some(garbage) = self.garbage.as_mut() {
+            garbage.clear();
+
+            while count > 0 {
+                let block = count.min(garbage.capacity());
+                assert!(block > 0);
+
+                // Take the garbage out of the inner block, so we can safely drop it without deadlocking
+                self.mm.with_inner(|inner| {
+                    if let Some(g) = inner.garbage.as_mut() {
+                        for node in g.drain(0..block) {
+                            inner.total_garbage -= node.size() as usize;
+                            garbage
+                                .push(node, GFP_KERNEL)
+                                .expect("push() failed after reserve()");
+                        }
+                    }
+                });
+
+                count -= block;
+                // Now drop it
+                garbage.clear();
+            }
+        }
+    }
+}
+
+impl Drop for HeapAllocatorInner {
+    fn drop(&mut self) {
+        mod_dev_dbg!(
+            self.dev,
+            "HeapAllocator[{}]: dropping allocator\n",
+            &*self.name
+        );
+        if self.allocated > 0 {
+            // This should never happen
+            dev_crit!(
+                self.dev.as_ref(),
+                "HeapAllocator[{}]: dropping with {} bytes allocated\n",
+                &*self.name,
+                self.allocated
+            );
+        }
+    }
+}
diff --git a/drivers/gpu/drm/asahi/asahi.rs b/drivers/gpu/drm/asahi/asahi.rs
new file mode 100644
index 00000000000000..3f79700a4a81ca
--- /dev/null
+++ b/drivers/gpu/drm/asahi/asahi.rs
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![recursion_limit = "2048"]
+
+//! Driver for the Apple AGX GPUs found in Apple Silicon SoCs.
+
+mod alloc;
+mod buffer;
+mod channel;
+#[cfg(CONFIG_DEV_COREDUMP)]
+mod crashdump;
+mod debug;
+mod driver;
+mod event;
+mod file;
+mod float;
+mod fw;
+mod gem;
+mod gpu;
+mod hw;
+mod initdata;
+mod mem;
+mod microseq;
+mod mmu;
+mod object;
+mod pgtable;
+mod queue;
+mod regs;
+mod slotalloc;
+mod util;
+mod workqueue;
+
+kernel::module_platform_driver! {
+    type: driver::AsahiDriver,
+    name: "asahi",
+    license: "Dual MIT/GPL",
+    params: {
+        debug_flags: u64 {
+            default: 0,
+            // permissions: 0o644,
+            description: "Debug flags",
+        },
+        fault_control: u32 {
+            default: 0xb,
+            // permissions: 0,
+            description: "Fault control (0x0: hard faults, 0xb: macOS default)",
+        },
+        initial_tvb_size: usize {
+            default: 0x8,
+            // permissions: 0o644,
+            description: "Initial TVB size in blocks",
+        },
+        robust_isolation: u32 {
+            default: 0,
+            // permissions: 0o644,
+            description: "Fully isolate GPU contexts (limits performance)",
+        },
+        starlight_debug: u32 {
+            default: 0,
+            // permissions: 0o644,
+            description: "Compare devicetree supplied initdata with computed one",
+        },
+    },
+}
diff --git a/drivers/gpu/drm/asahi/buffer.rs b/drivers/gpu/drm/asahi/buffer.rs
new file mode 100644
index 00000000000000..ca51d3ef6c2bd1
--- /dev/null
+++ b/drivers/gpu/drm/asahi/buffer.rs
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Tiled Vertex Buffer management
+//!
+//! This module manages the Tiled Vertex Buffer, also known as the Parameter Buffer (in imgtec
+//! parlance) or the tiler heap (on other architectures). This buffer holds transformed primitive
+//! data between the vertex/tiling stage and the fragment stage.
+//!
+//! On AGX, the buffer is a heap of 128K blocks split into 32K pages (which must be aligned to a
+//! multiple of 32K in VA space). The buffer can be shared between multiple render jobs, and each
+//! will allocate pages from it during vertex processing and return them during fragment processing.
+//!
+//! If the buffer runs out of free pages, the vertex pass stops and a partial fragment pass occurs,
+//! spilling the intermediate render target state to RAM (a partial render). This is all managed
+//! transparently by the firmware. Since partial renders are less efficient, the kernel must grow
+//! the heap in response to feedback from the firmware to avoid partial renders in the future.
+//! Currently, we only ever grow the heap, and never shrink it.
+//!
+//! AGX also supports memoryless render targets, which can be used for intermediate results within
+//! a render pass. To support partial renders, it seems the GPU/firmware has the ability to borrow
+//! pages from the TVB buffer as a temporary render target buffer. Since this happens during a
+//! partial render itself, if the buffer runs out of space, it requires synchronous growth in
+//! response to a firmware interrupt. This is not currently supported, but may be in the future,
+//! though it is unclear whether it is worth the effort.
+//!
+//! This module is also in charge of managing the temporary objects associated with a single render
+//! pass, which includes the top-level tile array, the tail pointer cache, preemption buffers, and
+//! other miscellaneous structures collectively managed as a "scene".
+//!
+//! To avoid runaway memory usage, there is a maximum size for buffers (at that point it's unlikely
+//! that partial renders will incur much overhead over the buffer data access itself). This is
+//! different depending on whether memoryless render targets are in use, and is currently hardcoded
+//! to the most common value used by macOS.
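+//!
+//! A rough sketch of the sizes involved, derived from the page/block constants and the hardcoded
+//! macOS maximum used in `Buffer::new()` below:
+//!
+//! ```text
+//! PAGE_SIZE  = 1 << 15       = 32 KiB
+//! BLOCK_SIZE = 4 * PAGE_SIZE = 128 KiB
+//! max_blocks           = 862_322_688 / BLOCK_SIZE       = 6579 blocks
+//! max_blocks_nomemless = (862_322_688 / 3) / BLOCK_SIZE = 2193 blocks
+//! ```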
+
+use crate::debug::*;
+use crate::fw::buffer;
+use crate::fw::types::*;
+use crate::util::*;
+use crate::{alloc, fw, gpu, hw, mmu, slotalloc};
+use core::sync::atomic::Ordering;
+use kernel::prelude::*;
+use kernel::sync::{Arc, Mutex};
+use kernel::{c_str, static_lock_class};
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Buffer;
+
+/// There are 127 GPU/firmware-side buffer manager slots (yes, 127, not 128).
+const NUM_BUFFERS: u32 = 127;
+
+/// Page size bits for buffer pages (32K). VAs must be aligned to this size.
+pub(crate) const PAGE_SHIFT: usize = 15;
+/// Page size for buffer pages.
+pub(crate) const PAGE_SIZE: usize = 1 << PAGE_SHIFT;
+/// Number of pages in a buffer block, which should be contiguous in VA space.
+pub(crate) const PAGES_PER_BLOCK: usize = 4;
+/// Size of a buffer block.
+pub(crate) const BLOCK_SIZE: usize = PAGE_SIZE * PAGES_PER_BLOCK;
+
+/// Metadata about the tiling configuration for a scene. This is computed in the `render` module
+/// based on dimensions, tile size, and other info.
+pub(crate) struct TileInfo {
+    /// Tile count in the X dimension. Tiles are always 32x32.
+    pub(crate) tiles_x: u32,
+    /// Tile count in the Y dimension. Tiles are always 32x32.
+    pub(crate) tiles_y: u32,
+    /// Total tile count.
+    pub(crate) tiles: u32,
+    /// Micro-tile width (16 or 32).
+    pub(crate) utile_width: u32,
+    /// Micro-tile height (16 or 32).
+    pub(crate) utile_height: u32,
+    // Macro-tiles in the X dimension. Always 4.
+    //pub(crate) mtiles_x: u32,
+    // Macro-tiles in the Y dimension. Always 4.
+    //pub(crate) mtiles_y: u32,
+    /// Tiles per macro-tile in the X dimension.
+    pub(crate) tiles_per_mtile_x: u32,
+    /// Tiles per macro-tile in the Y dimension.
+    pub(crate) tiles_per_mtile_y: u32,
+    // Total tiles per macro-tile.
+    //pub(crate) tiles_per_mtile: u32,
+    /// Micro-tiles per macro-tile in the X dimension.
+    pub(crate) utiles_per_mtile_x: u32,
+    /// Micro-tiles per macro-tile in the Y dimension.
+    pub(crate) utiles_per_mtile_y: u32,
+    // Total micro-tiles per macro-tile.
+    //pub(crate) utiles_per_mtile: u32,
+    /// Size of the top-level tilemap, in bytes (for all layers, one cluster).
+    pub(crate) tilemap_size: usize,
+    /// Size of the Tail Pointer Cache, in bytes (for all layers * clusters).
+    pub(crate) tpc_size: usize,
+    /// Number of blocks in the clustering meta buffer (for clustering) per layer.
+    pub(crate) meta1_layer_stride: u32,
+    /// Number of blocks in the clustering meta buffer (for clustering).
+    pub(crate) meta1_blocks: u32,
+    /// Layering metadata size.
+    pub(crate) layermeta_size: usize,
+    /// Minimum number of TVB blocks for this render.
+    pub(crate) min_tvb_blocks: usize,
+    /// Tiling parameter structure passed to firmware.
+    pub(crate) params: fw::vertex::raw::TilingParameters,
+}
+
+/// A single scene, representing a render pass and its required buffers.
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct Scene {
+    object: GpuObject<buffer::Scene::ver>,
+    slot: u32,
+    rebind: bool,
+    preempt2_off: usize,
+    preempt3_off: usize,
+    // Note: these are dead code only on some version variants.
+    // It's easier to do this than to propagate the version conditionals everywhere.
+    #[allow(dead_code)]
+    meta1_off: usize,
+    #[allow(dead_code)]
+    meta2_off: usize,
+    #[allow(dead_code)]
+    meta3_off: usize,
+    #[allow(dead_code)]
+    meta4_off: usize,
+}
+
+#[versions(AGX)]
+impl Scene::ver {
+    /// Returns true if the buffer was bound to a fresh manager slot, and therefore needs an init
+    /// command before a render.
+    pub(crate) fn rebind(&self) -> bool {
+        self.rebind
+    }
+
+    /// Returns the buffer manager slot this scene's buffer was bound to.
+    pub(crate) fn slot(&self) -> u32 {
+        self.slot
+    }
+
+    /// Returns the GPU pointer to the [`buffer::Scene::ver`].
+    pub(crate) fn gpu_pointer(&self) -> GpuPointer<'_, buffer::Scene::ver> {
+        self.object.gpu_pointer()
+    }
+
+    /// Returns the GPU weak pointer to the [`buffer::Scene::ver`].
+    pub(crate) fn weak_pointer(&self) -> GpuWeakPointer<buffer::Scene::ver> {
+        self.object.weak_pointer()
+    }
+
+    /// Returns the GPU weak pointer to the kernel-side temp buffer.
+    /// (purpose unknown...)
+    pub(crate) fn kernel_buffer_pointer(&self) -> GpuWeakPointer<[u8]> {
+        self.object.buffer.inner.lock().kernel_buffer.weak_pointer()
+    }
+
+    /// Returns the GPU pointer to the `buffer::Info::ver` object associated with this Scene.
+    pub(crate) fn buffer_pointer(&self) -> GpuPointer<'_, buffer::Info::ver> {
+        // SAFETY: We can't return the strong pointer directly since its lifetime crosses a lock,
+        // but we know its lifetime will be valid as long as &self since we hold a reference to the
+        // buffer, so just construct the strong pointer with the right lifetime here.
+        unsafe { self.weak_buffer_pointer().upgrade() }
+    }
+
+    /// Returns the GPU weak pointer to the `buffer::Info::ver` object associated with this Scene.
+    pub(crate) fn weak_buffer_pointer(&self) -> GpuWeakPointer<buffer::Info::ver> {
+        self.object.buffer.inner.lock().info.weak_pointer()
+    }
+
+    /// Returns the GPU pointer to the TVB heap metadata buffer.
+    pub(crate) fn tvb_heapmeta_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object.tvb_heapmeta.gpu_pointer()
+    }
+
+    /// Returns the GPU pointer to the layer metadata buffer.
+    pub(crate) fn tvb_layermeta_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object.tvb_heapmeta.gpu_offset_pointer(0x200)
+    }
+
+    /// Returns the GPU pointer to the top-level TVB tilemap buffer.
+    pub(crate) fn tvb_tilemap_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object.tvb_tilemap.gpu_pointer()
+    }
+
+    /// Returns the GPU pointer to the Tail Pointer Cache buffer.
+    pub(crate) fn tpc_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object.tpc.gpu_pointer()
+    }
+
+    /// Returns the GPU pointer to the first preemption scratch buffer.
+    pub(crate) fn preempt_buf_1_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object.preempt_buf.gpu_pointer()
+    }
+
+    /// Returns the GPU pointer to the second preemption scratch buffer.
+    pub(crate) fn preempt_buf_2_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object
+            .preempt_buf
+            .gpu_offset_pointer(self.preempt2_off)
+    }
+
+    /// Returns the GPU pointer to the third preemption scratch buffer.
+    pub(crate) fn preempt_buf_3_pointer(&self) -> GpuPointer<'_, &'_ [u8]> {
+        self.object
+            .preempt_buf
+            .gpu_offset_pointer(self.preempt3_off)
+    }
+
+    /// Returns the GPU pointer to the per-cluster tilemap buffer, if clustering is enabled.
+    #[allow(dead_code)]
+    pub(crate) fn cluster_tilemaps_pointer(&self) -> Option<GpuPointer<'_, &'_ [u8]>> {
+        self.object
+            .clustering
+            .as_ref()
+            .map(|c| c.tilemaps.gpu_pointer())
+    }
+
+    /// Returns the GPU pointer to the clustering layer metadata buffer, if clustering is enabled.
+    #[allow(dead_code)]
+    pub(crate) fn tvb_cluster_layermeta_pointer(&self) -> Option<GpuPointer<'_, &'_ [u8]>> {
+        self.object
+            .clustering
+            .as_ref()
+            .map(|c| c.meta.gpu_pointer())
+    }
+
+    /// Returns the GPU pointer to the clustering metadata 1 buffer, if clustering is enabled.
+    #[allow(dead_code)]
+    pub(crate) fn meta_1_pointer(&self) -> Option<GpuPointer<'_, &'_ [u8]>> {
+        self.object
+            .clustering
+            .as_ref()
+            .map(|c| c.meta.gpu_offset_pointer(self.meta1_off))
+    }
+
+    /// Returns the GPU pointer to the clustering metadata 2 buffer, if clustering is enabled.
+    #[allow(dead_code)]
+    pub(crate) fn meta_2_pointer(&self) -> Option<GpuPointer<'_, &'_ [u8]>> {
+        self.object
+            .clustering
+            .as_ref()
+            .map(|c| c.meta.gpu_offset_pointer(self.meta2_off))
+    }
+
+    /// Returns the GPU pointer to the clustering metadata 3 buffer, if clustering is enabled.
+    #[allow(dead_code)]
+    pub(crate) fn meta_3_pointer(&self) -> Option<GpuPointer<'_, &'_ [u8]>> {
+        self.object
+            .clustering
+            .as_ref()
+            .map(|c| c.meta.gpu_offset_pointer(self.meta3_off))
+    }
+
+    /// Returns the GPU pointer to the clustering metadata 4 buffer, if clustering is enabled.
+    #[allow(dead_code)]
+    pub(crate) fn meta_4_pointer(&self) -> Option<GpuPointer<'_, &'_ [u8]>> {
+        self.object
+            .clustering
+            .as_ref()
+            .map(|c| c.meta.gpu_offset_pointer(self.meta4_off))
+    }
+}
+
+#[versions(AGX)]
+impl Drop for Scene::ver {
+    fn drop(&mut self) {
+        let mut inner = self.object.buffer.inner.lock();
+        assert_ne!(inner.active_scenes, 0);
+        inner.active_scenes -= 1;
+
+        if inner.active_scenes == 0 {
+            mod_pr_debug!(
+                "Buffer: no scenes left, dropping slot {}",
+                inner.active_slot.take().unwrap().slot()
+            );
+            inner.active_slot = None;
+        }
+    }
+}
+
+/// Inner data for a single TVB buffer object.
+#[versions(AGX)]
+struct BufferInner {
+    info: GpuObject<buffer::Info::ver>,
+    ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+    ualloc_priv: Arc<Mutex<alloc::DefaultAllocator>>,
+    blocks: KVec<GpuOnlyArray<u8>>,
+    max_blocks: usize,
+    max_blocks_nomemless: usize,
+    mgr: BufferManager::ver,
+    active_scenes: usize,
+    active_slot: Option<slotalloc::Guard<BufferSlotInner::ver>>,
+    last_token: Option<slotalloc::SlotToken>,
+    tpc: Option<Arc<GpuArray<u8>>>,
+    kernel_buffer: GpuArray<u8>,
+    stats: GpuObject<buffer::Stats>,
+    cfg: &'static hw::HwConfig,
+    preempt1_size: usize,
+    preempt2_size: usize,
+    preempt3_size: usize,
+    num_clusters: usize,
+}
+
+/// Locked and reference counted TVB buffer.
+#[versions(AGX)]
+pub(crate) struct Buffer {
+    inner: Arc<Mutex<BufferInner::ver>>,
+}
+
+#[versions(AGX)]
+impl Buffer::ver {
+    /// Create a new Buffer for a given VM, given the per-VM allocators.
+    pub(crate) fn new(
+        gpu: &dyn gpu::GpuManager,
+        alloc: &mut gpu::KernelAllocators,
+        ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+        ualloc_priv: Arc<Mutex<alloc::DefaultAllocator>>,
+        mgr: &BufferManager::ver,
+    ) -> Result<Buffer::ver> {
+        // These are the typical max numbers on macOS.
+        // 8GB machines have this halved.
+        let max_size: usize = 862_322_688; // bytes
+        let max_size_nomemless = max_size / 3;
+
+        let max_blocks = max_size / BLOCK_SIZE;
+        let max_blocks_nomemless = max_size_nomemless / BLOCK_SIZE;
+        let max_pages = max_blocks * PAGES_PER_BLOCK;
+        let max_pages_nomemless = max_blocks_nomemless * PAGES_PER_BLOCK;
+
+        let num_clusters = gpu.get_dyncfg().id.num_clusters as usize;
+        let num_clusters_adj = if num_clusters > 1 {
+            num_clusters + 1
+        } else {
+            1
+        };
+
+        let preempt1_size = num_clusters_adj * gpu.get_cfg().preempt1_size;
+        let preempt2_size = num_clusters_adj * gpu.get_cfg().preempt2_size;
+        let preempt3_size = num_clusters_adj * gpu.get_cfg().preempt3_size;
+
+        let shared = &mut alloc.shared;
+        let info = alloc.private.new_init(
+            {
+                let ualloc_priv = &ualloc_priv;
+                try_init!(buffer::Info::ver {
+                    block_ctl: shared.new_default::<buffer::BlockControl>()?,
+                    counter: shared.new_default::<buffer::Counter>()?,
+                    page_list: ualloc_priv.lock().array_empty_tagged(max_pages, b"PLST")?,
+                    block_list: ualloc_priv
+                        .lock()
+                        .array_empty_tagged(max_blocks * 2, b"BLST")?,
+                })
+            },
+            |inner, _p| {
+                try_init!(buffer::raw::Info::ver {
+                    gpu_counter: 0x0,
+                    unk_4: 0,
+                    last_id: 0x0,
+                    cur_id: -1,
+                    unk_10: 0x0,
+                    gpu_counter2: 0x0,
+                    unk_18: 0x0,
+                    #[ver(V < V13_0B4 || G >= G14X)]
+                    unk_1c: 0x0,
+                    page_list: inner.page_list.gpu_pointer(),
+                    page_list_size: (4 * max_pages).try_into()?,
+                    page_count: AtomicU32::new(0),
+                    max_blocks: max_blocks.try_into()?,
+                    block_count: AtomicU32::new(0),
+                    unk_38: 0x0,
+                    block_list: inner.block_list.gpu_pointer(),
+                    block_ctl: inner.block_ctl.gpu_pointer(),
+                    last_page: AtomicU32::new(0),
+                    gpu_page_ptr1: 0x0,
+                    gpu_page_ptr2: 0x0,
+                    unk_58: 0x0,
+                    block_size: BLOCK_SIZE as u32,
+                    unk_60: U64(0x0),
+                    counter: inner.counter.gpu_pointer(),
+                    unk_70: 0x0,
+                    unk_74: 0x0,
+                    unk_78: 0x0,
+                    unk_7c: 0x0,
+                    unk_80: 0x1,
+                    max_pages: max_pages.try_into()?,
+                    max_pages_nomemless: max_pages_nomemless.try_into()?,
+                    unk_8c: 0x0,
+                    unk_90: Default::default(),
+                })
+            },
+        )?;
+
+        // Technically this is similar to the Scene below, so let's play it safe.
+        let kernel_buffer = alloc.shared.array_empty_tagged(0x40, b"KBUF")?;
+        let stats = alloc
+            .shared
+            .new_object(Default::default(), |_inner| buffer::raw::Stats {
+                reset: AtomicU32::from(1),
+                ..Default::default()
+            })?;
+
+        Ok(Buffer::ver {
+            inner: Arc::pin_init(
+                Mutex::new(BufferInner::ver {
+                    info,
+                    ualloc,
+                    ualloc_priv,
+                    blocks: KVec::new(),
+                    max_blocks,
+                    max_blocks_nomemless,
+                    mgr: mgr.clone(),
+                    active_scenes: 0,
+                    active_slot: None,
+                    last_token: None,
+                    tpc: None,
+                    kernel_buffer,
+                    stats,
+                    cfg: gpu.get_cfg(),
+                    preempt1_size,
+                    preempt2_size,
+                    preempt3_size,
+                    num_clusters,
+                }),
+                GFP_KERNEL,
+            )?,
+        })
+    }
+
+    /// Returns the total block count allocated to this Buffer.
+    pub(crate) fn block_count(&self) -> u32 {
+        self.inner.lock().blocks.len() as u32
+    }
+
+    /// Automatically grow the Buffer based on feedback from the statistics.
+    pub(crate) fn auto_grow(&self) -> Result<bool> {
+        let inner = self.inner.lock();
+
+        let used_pages = inner.stats.with(|raw, _inner| {
+            let used = raw.max_pages.load(Ordering::Relaxed);
+            raw.reset.store(1, Ordering::Release);
+            used as usize
+        });
+
+        let need_blocks = (used_pages * 2)
+            .div_ceil(PAGES_PER_BLOCK)
+            .min(inner.max_blocks_nomemless);
+        let want_blocks = (used_pages * 3)
+            .div_ceil(PAGES_PER_BLOCK)
+            .min(inner.max_blocks_nomemless);
+
+        let cur_count = inner.blocks.len();
+
+        if need_blocks <= cur_count {
+            Ok(false)
+        } else {
+            // Grow to 3x requested size (same logic as macOS)
+            core::mem::drop(inner);
+            self.ensure_blocks(want_blocks)?;
+            Ok(true)
+        }
+    }
+
+    /// Synchronously grow the Buffer.
+    pub(crate) fn sync_grow(&self) {
+        let inner = self.inner.lock();
+
+        let cur_count = inner.blocks.len();
+        core::mem::drop(inner);
+        if self.ensure_blocks(cur_count + 10).is_err() {
+            pr_err!("BufferManager: Failed to grow buffer synchronously\n");
+        }
+    }
+
+    /// Ensure that the buffer has at least a certain minimum size in blocks.
+    pub(crate) fn ensure_blocks(&self, min_blocks: usize) -> Result<bool> {
+        let mut inner = self.inner.lock();
+
+        let cur_count = inner.blocks.len();
+        if cur_count >= min_blocks {
+            return Ok(false);
+        }
+        if min_blocks > inner.max_blocks {
+            return Err(ENOMEM);
+        }
+
+        let add_blocks = min_blocks - cur_count;
+        let new_count = min_blocks;
+
+        let mut new_blocks: KVec<GpuOnlyArray<u8>> = KVec::new();
+
+        // Allocate the new blocks first, so if it fails they will be dropped
+        let mut ualloc = inner.ualloc.lock();
+        for _i in 0..add_blocks {
+            new_blocks.push(ualloc.array_gpuonly(BLOCK_SIZE)?, GFP_KERNEL)?;
+        }
+        core::mem::drop(ualloc);
+
+        // Then actually commit them
+        inner.blocks.reserve(add_blocks, GFP_KERNEL)?;
+
+        for (i, block) in new_blocks.into_iter().enumerate() {
+            let page_num = (block.gpu_va().get() >> PAGE_SHIFT) as u32;
+
+            inner
+                .blocks
+                .push(block, GFP_KERNEL)
+                .expect("push() failed after reserve()");
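+            // block_list appears to hold two entries per block; only the first one (the page
+            // number) is filled in here.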
+            inner.info.block_list[2 * (cur_count + i)] = page_num;
+            for j in 0..PAGES_PER_BLOCK {
+                inner.info.page_list[(cur_count + i) * PAGES_PER_BLOCK + j] = page_num + j as u32;
+            }
+        }
+
+        inner.info.block_ctl.with(|raw, _inner| {
+            raw.total.store(new_count as u32, Ordering::SeqCst);
+            raw.wptr.store(new_count as u32, Ordering::SeqCst);
+        });
+
+        /* Only do this update if the buffer manager is idle (which means we own it) */
+        if inner.active_scenes == 0 {
+            let page_count = (new_count * PAGES_PER_BLOCK) as u32;
+            inner.info.with(|raw, _inner| {
+                raw.page_count.store(page_count, Ordering::Relaxed);
+                raw.block_count.store(new_count as u32, Ordering::Relaxed);
+                raw.last_page.store(page_count - 1, Ordering::Relaxed);
+            });
+        }
+
+        Ok(true)
+    }
+
+    /// Create a new [`Scene::ver`] (render pass) using this buffer.
+    pub(crate) fn new_scene(
+        &self,
+        alloc: &mut gpu::KernelAllocators,
+        tile_info: &TileInfo,
+    ) -> Result<Scene::ver> {
+        let mut inner = self.inner.lock();
+
+        let tilemap_size = tile_info.tilemap_size;
+        let tpc_size = tile_info.tpc_size;
+
+        // TODO: what is this exactly?
+        mod_pr_debug!("Buffer: Allocating TVB buffers\n");
+
+        // This seems to be a list, with 4x2 bytes of headers and 8 bytes per entry.
+        // On single-cluster devices, the used length always seems to be 1.
+        // On M1 Ultra, it can grow and usually doesn't exceed 64 entries.
+        // macOS allocates a whole 64K * 0x80 for this, so let's go with
+        // that to be safe...
+        let user_buffer = inner.ualloc.lock().array_empty_tagged(
+            if inner.num_clusters > 1 {
+                0x10080
+            } else {
+                0x80
+            },
+            b"UBUF",
+        )?;
+
+        let tvb_heapmeta = inner
+            .ualloc
+            .lock()
+            .array_empty_tagged(0x200 + tile_info.layermeta_size, b"HMTA")?;
+        let tvb_tilemap = inner
+            .ualloc
+            .lock()
+            .array_empty_tagged(tilemap_size, b"TMAP")?;
+
+        mod_pr_debug!("Buffer: Allocating misc buffers\n");
+        let preempt_buf = inner.ualloc.lock().array_empty_tagged(
+            inner.preempt1_size + inner.preempt2_size + inner.preempt3_size,
+            b"PRMT",
+        )?;
+
+        let tpc = match inner.tpc.as_ref() {
+            Some(buf) if buf.len() >= tpc_size => buf.clone(),
+            _ => {
+                // macOS allocates this as shared GPU+FW, but
+                // priv seems to work and might be faster?
+                // Needs to be FW-writable anyway, so ualloc
+                // won't work.
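+                // Round the TPC buffer size up to a whole number of UAT pages.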
+                let buf = Arc::new(
+                    inner.ualloc_priv.lock().array_empty_tagged(
+                        (tpc_size + mmu::UAT_PGMSK) & !mmu::UAT_PGMSK,
+                        b"TPC ",
+                    )?,
+                    GFP_KERNEL,
+                )?;
+                inner.tpc = Some(buf.clone());
+                buf
+            }
+        };
+
+        let mut clmeta_size = 0;
+        let mut meta1_size = 0;
+        let mut meta2_size = 0;
+        let mut meta3_size = 0;
+
+        let clustering = if inner.num_clusters > 1 {
+            let cfg = inner.cfg.clustering.as_ref().unwrap();
+
+            clmeta_size = tile_info.layermeta_size * cfg.max_splits;
+            // Maybe: (4x4 macro tiles + 1 global page)*n, 32bit each (17*4*n)
+            // Unused on t602x?
+            meta1_size = align(tile_info.meta1_blocks as usize * cfg.meta1_blocksize, 0x80);
+            meta2_size = align(cfg.meta2_size, 0x80);
+            meta3_size = align(cfg.meta3_size, 0x80);
+            let meta4_size = cfg.meta4_size;
+
+            let meta_size = clmeta_size + meta1_size + meta2_size + meta3_size + meta4_size;
+
+            mod_pr_debug!("Buffer: Allocating clustering buffers\n");
+            let tilemaps = inner
+                .ualloc
+                .lock()
+                .array_empty_tagged(cfg.max_splits * tilemap_size, b"CTMP")?;
+            let meta = inner.ualloc.lock().array_empty_tagged(meta_size, b"CMTA")?;
+            Some(buffer::ClusterBuffers { tilemaps, meta })
+        } else {
+            None
+        };
+
+        // Could be made strong, but we wind up with a deadlock if we try to grab the
+        // pointer through the inner.buffer path inside the closure.
+        let stats_pointer = inner.stats.weak_pointer();
+
+        let _gpu = &mut alloc.gpu;
+
+        // macOS allocates this as private. However, the firmware does not
+        // DC CIVAC this before reading it (like it does most other things),
+        // which causes odd cache incoherency bugs when combined with
+        // speculation on the firmware side (maybe). This doesn't happen
+        // on macOS because these structs are a circular pool that is mapped
+        // already initialized. Just mark this shared for now.
+        let scene = alloc.shared.new_init(
+            try_init!(buffer::Scene::ver {
+                user_buffer: user_buffer,
+                buffer: self.clone(),
+                tvb_heapmeta: tvb_heapmeta,
+                tvb_tilemap: tvb_tilemap,
+                tpc: tpc,
+                clustering: clustering,
+                preempt_buf: preempt_buf,
+                #[ver(G >= G14X)]
+                control_word: _gpu.array_empty_tagged(1, b"CWRD")?,
+            }),
+            |inner, _p| {
+                try_init!(buffer::raw::Scene::ver {
+                    #[ver(G >= G14X)]
+                    control_word: inner.control_word.gpu_pointer(),
+                    #[ver(G >= G14X)]
+                    control_word2: inner.control_word.gpu_pointer(),
+                    pass_page_count: AtomicU32::new(0),
+                    unk_4: 0,
+                    unk_8: U64(0),
+                    unk_10: U64(0),
+                    user_buffer: inner.user_buffer.gpu_pointer(),
+                    unk_20: 0,
+                    #[ver(V >= V13_3)]
+                    unk_28: U64(0),
+                    stats: stats_pointer,
+                    total_page_count: AtomicU32::new(0),
+                    #[ver(G < G14X)]
+                    unk_30: U64(0),
+                    #[ver(G < G14X)]
+                    unk_38: U64(0),
+                })
+            },
+        )?;
+
+        let mut rebind = false;
+
+        if inner.active_slot.is_none() {
+            assert_eq!(inner.active_scenes, 0);
+
+            let slot = inner.mgr.0.get_inner(inner.last_token, |inner, mgr| {
+                inner.owners[mgr.slot() as usize] = Some(self.clone());
+                Ok(())
+            })?;
+            rebind = slot.changed();
+
+            mod_pr_debug!("Buffer: assigning slot {} (rebind={})", slot.slot(), rebind);
+
+            inner.last_token = Some(slot.token());
+            inner.active_slot = Some(slot);
+        }
+
+        inner.active_scenes += 1;
+
+        Ok(Scene::ver {
+            object: scene,
+            slot: inner.active_slot.as_ref().unwrap().slot(),
+            rebind,
+            preempt2_off: inner.preempt1_size,
+            preempt3_off: inner.preempt1_size + inner.preempt2_size,
+            meta1_off: clmeta_size,
+            meta2_off: clmeta_size + meta1_size,
+            meta3_off: clmeta_size + meta1_size + meta2_size,
+            meta4_off: clmeta_size + meta1_size + meta2_size + meta3_size,
+        })
+    }
+
+    /// Increment the buffer manager usage count. Should be done once we know the Scene is ready
+    /// to be committed and used in commands submitted to the GPU.
+    pub(crate) fn increment(&self) {
+        let inner = self.inner.lock();
+        inner.info.counter.with(|raw, _inner| {
+            // We could use fetch_add, but the non-LSE atomic
+            // sequence Rust produces confuses the hypervisor.
+            // We have inner locked anyway, so this is not racy.
+            let v = raw.count.load(Ordering::Relaxed);
+            raw.count.store(v + 1, Ordering::Relaxed);
+        });
+    }
+
+    pub(crate) fn any_ref(&self) -> Arc<dyn core::any::Any + Send + Sync> {
+        self.inner.clone()
+    }
+}
+
+#[versions(AGX)]
+impl Clone for Buffer::ver {
+    fn clone(&self) -> Self {
+        Buffer::ver {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+#[versions(AGX)]
+struct BufferSlotInner();
+
+#[versions(AGX)]
+impl slotalloc::SlotItem for BufferSlotInner::ver {
+    type Data = BufferManagerInner::ver;
+
+    fn release(&mut self, data: &mut Self::Data, slot: u32) {
+        mod_pr_debug!("EventManager: Released slot {}\n", slot);
+        data.owners[slot as usize] = None;
+    }
+}
+
+/// Inner data for the event manager, to be protected by the SlotAllocator lock.
+#[versions(AGX)]
+pub(crate) struct BufferManagerInner {
+    owners: KVec<Option<Buffer::ver>>,
+}
+
+/// The GPU-global buffer manager, used to allocate and release buffer slots from the pool.
+#[versions(AGX)]
+pub(crate) struct BufferManager(slotalloc::SlotAllocator<BufferSlotInner::ver>);
+
+#[versions(AGX)]
+impl BufferManager::ver {
+    pub(crate) fn new() -> Result<BufferManager::ver> {
+        let mut owners = KVec::new();
+        for _i in 0..(NUM_BUFFERS as usize) {
+            owners.push(None, GFP_KERNEL)?;
+        }
+        Ok(BufferManager::ver(slotalloc::SlotAllocator::new(
+            NUM_BUFFERS,
+            BufferManagerInner::ver { owners },
+            |_inner, _slot| Some(BufferSlotInner::ver()),
+            c_str!("BufferManager::SlotAllocator"),
+            static_lock_class!(),
+            static_lock_class!(),
+        )?))
+    }
+
+    /// Signals a Buffer to synchronously grow.
+    pub(crate) fn grow(&self, slot: u32) {
+        match self
+            .0
+            .with_inner(|inner| inner.owners[slot as usize].as_ref().cloned())
+        {
+            Some(owner) => {
+                pr_err!(
+                    "BufferManager: Unexpected grow request for slot {}. This might deadlock. Please report this bug.\n",
+                    slot
+                );
+                owner.sync_grow();
+            }
+            None => {
+                pr_err!(
+                    "BufferManager: Received grow request for empty slot {}\n",
+                    slot
+                );
+            }
+        }
+    }
+}
+
+#[versions(AGX)]
+impl Clone for BufferManager::ver {
+    fn clone(&self) -> Self {
+        BufferManager::ver(self.0.clone())
+    }
+}
diff --git a/drivers/gpu/drm/asahi/channel.rs b/drivers/gpu/drm/asahi/channel.rs
new file mode 100644
index 00000000000000..fb0a03a75209ab
--- /dev/null
+++ b/drivers/gpu/drm/asahi/channel.rs
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU ring buffer channels
+//!
+//! The GPU firmware uses a set of ring buffer channels to receive commands from the driver and to
+//! send notifications and status messages back to it.
+//!
+//! These ring buffers mostly follow uniform conventions, so they share the same base
+//! implementation.
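+//!
+//! A rough sketch of the shared convention, as implemented by `TxChannel::put()` and
+//! `RxChannel::get_or_peek()` below:
+//!
+//! ```text
+//! empty:   rptr == wptr
+//! full:    (wptr + 1) % count == rptr
+//! send:    write ring[wptr], barrier, then publish the incremented wptr
+//! receive: read ring[rptr], then advance rptr
+//! ```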
+
+use crate::debug::*;
+use crate::driver::{AsahiDevRef, AsahiDevice};
+use crate::fw::channels::*;
+use crate::fw::initdata::{raw, ChannelRing};
+use crate::fw::types::*;
+use crate::{buffer, event, gpu, mem};
+use core::time::Duration;
+use kernel::{
+    c_str,
+    delay::coarse_sleep,
+    prelude::*,
+    sync::Arc,
+    time::{clock, Now},
+};
+
+pub(crate) use crate::fw::channels::PipeType;
+
+/// A receive (FW->driver) channel.
+pub(crate) struct RxChannel<T: RxChannelState, U: Copy + Default>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Debug + Default + Zeroable,
+{
+    ring: ChannelRing<T, U>,
+    // FIXME: needs feature(generic_const_exprs)
+    //rptr: [u32; T::SUB_CHANNELS],
+    rptr: [u32; 6],
+    count: u32,
+}
+
+impl<T: RxChannelState, U: Copy + Default> RxChannel<T, U>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Debug + Default + Zeroable,
+{
+    /// Allocates a new receive channel with a given message count.
+    pub(crate) fn new(alloc: &mut gpu::KernelAllocators, count: usize) -> Result<RxChannel<T, U>> {
+        Ok(RxChannel {
+            ring: ChannelRing {
+                state: alloc.shared.new_default()?,
+                ring: alloc.shared.array_empty(T::SUB_CHANNELS * count)?,
+            },
+            rptr: Default::default(),
+            count: count as u32,
+        })
+    }
+
+    /// Receives a message on the specified sub-channel index, optionally leaving it in the ring
+    /// buffer.
+    ///
+    /// Returns None if the channel is empty.
+    fn get_or_peek(&mut self, index: usize, peek: bool) -> Option<U> {
+        self.ring.state.with(|raw, _inner| {
+            let wptr = T::wptr(raw, index);
+            let rptr = &mut self.rptr[index];
+            if wptr == *rptr {
+                None
+            } else {
+                let off = self.count as usize * index;
+                let msg = self.ring.ring[off + *rptr as usize];
+                if !peek {
+                    *rptr = (*rptr + 1) % self.count;
+                    T::set_rptr(raw, index, *rptr);
+                }
+                Some(msg)
+            }
+        })
+    }
+
+    /// Receives a message on the specified sub-channel index, and dequeues it from the ring buffer.
+    ///
+    /// Returns None if the channel is empty.
+    pub(crate) fn get(&mut self, index: usize) -> Option<U> {
+        self.get_or_peek(index, false)
+    }
+
+    /// Peeks a message on the specified sub-channel index, leaving it in the ring buffer.
+    ///
+    /// Returns None if the channel is empty.
+    pub(crate) fn peek(&mut self, index: usize) -> Option<U> {
+        self.get_or_peek(index, true)
+    }
+}
+
+/// A transmit (driver->FW) channel.
+pub(crate) struct TxChannel<T: TxChannelState, U: Copy + Default>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Debug + Default + Zeroable,
+{
+    ring: ChannelRing<T, U>,
+    wptr: u32,
+    count: u32,
+}
+
+impl<T: TxChannelState, U: Copy + Default> TxChannel<T, U>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Debug + Default + Zeroable,
+{
+    /// Allocates a new cached transmit channel with a given message count.
+    pub(crate) fn new(alloc: &mut gpu::KernelAllocators, count: usize) -> Result<TxChannel<T, U>> {
+        Ok(TxChannel {
+            ring: ChannelRing {
+                state: alloc.shared.new_default()?,
+                ring: alloc.private.array_empty(count)?,
+            },
+            wptr: 0,
+            count: count as u32,
+        })
+    }
+
+    /// Allocates a new uncached transmit channel with a given message count.
+    pub(crate) fn new_uncached(
+        alloc: &mut gpu::KernelAllocators,
+        count: usize,
+    ) -> Result<TxChannel<T, U>> {
+        Ok(TxChannel {
+            ring: ChannelRing {
+                state: alloc.shared.new_default()?,
+                ring: alloc.shared.array_empty(count)?,
+            },
+            wptr: 0,
+            count: count as u32,
+        })
+    }
+
+    /// Send a message to the ring, returning a cookie with the ring buffer position.
+    ///
+    /// This will poll/block if the ring is full, which we don't really expect to happen.
+    pub(crate) fn put(&mut self, msg: &U) -> u32 {
+        self.ring.state.with(|raw, _inner| {
+            let next_wptr = (self.wptr + 1) % self.count;
+            let mut rptr = T::rptr(raw);
+            if next_wptr == rptr {
+                pr_err!(
+                    "TX ring buffer is full! Waiting... ({}, {})\n",
+                    next_wptr,
+                    rptr
+                );
+                // TODO: block properly on incoming messages?
+                while next_wptr == rptr {
+                    coarse_sleep(Duration::from_millis(8));
+                    rptr = T::rptr(raw);
+                }
+            }
+            self.ring.ring[self.wptr as usize] = *msg;
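+            // Make sure the message body is visible before publishing the new write pointer.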
+            mem::sync();
+            T::set_wptr(raw, next_wptr);
+            self.wptr = next_wptr;
+        });
+        self.wptr
+    }
+
+    /// Wait for a previously submitted message to be popped off of the ring by the GPU firmware.
+    ///
+    /// This busy-loops, and is intended to be used for rare cases when we need to block for
+    /// completion of a cache management or invalidation operation synchronously (which
+    /// the firmware normally completes fast enough not to be worth sleeping for).
+    /// If the poll takes longer than 10ms, this switches to sleeping between polls.
+    pub(crate) fn wait_for(&mut self, wptr: u32, timeout_ms: u64) -> Result {
+        const MAX_FAST_POLL: u64 = 10;
+        let start = clock::KernelTime::now();
+        let timeout_fast = Duration::from_millis(timeout_ms.min(MAX_FAST_POLL));
+        let timeout_slow = Duration::from_millis(timeout_ms);
+        self.ring.state.with(|raw, _inner| {
+            while start.elapsed() < timeout_fast {
+                if T::rptr(raw) == wptr {
+                    return Ok(());
+                }
+                mem::sync();
+            }
+            while start.elapsed() < timeout_slow {
+                if T::rptr(raw) == wptr {
+                    return Ok(());
+                }
+                coarse_sleep(Duration::from_millis(5));
+                mem::sync();
+            }
+            Err(ETIMEDOUT)
+        })
+    }
+}
+
+/// Device Control channel for global device management commands.
+#[versions(AGX)]
+pub(crate) struct DeviceControlChannel {
+    dev: AsahiDevRef,
+    ch: TxChannel<ChannelState, DeviceControlMsg::ver>,
+}
+
+#[versions(AGX)]
+impl DeviceControlChannel::ver {
+    const COMMAND_TIMEOUT_MS: u64 = 1000;
+
+    /// Allocate a new Device Control channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+    ) -> Result<DeviceControlChannel::ver> {
+        Ok(DeviceControlChannel::ver {
+            dev: dev.into(),
+            ch: TxChannel::<ChannelState, DeviceControlMsg::ver>::new(alloc, 0x100)?,
+        })
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<ChannelState, DeviceControlMsg::ver> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Submits a Device Control command.
+    pub(crate) fn send(&mut self, msg: &DeviceControlMsg::ver) -> u32 {
+        cls_dev_dbg!(DeviceControlCh, self.dev, "DeviceControl: {:?}\n", msg);
+        self.ch.put(msg)
+    }
+
+    /// Waits for a previously submitted Device Control command to complete.
+    pub(crate) fn wait_for(&mut self, wptr: u32) -> Result {
+        self.ch.wait_for(wptr, Self::COMMAND_TIMEOUT_MS)
+    }
+}
+
+/// Pipe channel to submit WorkQueue execution requests.
+#[versions(AGX)]
+pub(crate) struct PipeChannel {
+    dev: AsahiDevRef,
+    ch: TxChannel<ChannelState, PipeMsg::ver>,
+}
+
+#[versions(AGX)]
+impl PipeChannel::ver {
+    /// Allocate a new Pipe submission channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+    ) -> Result<PipeChannel::ver> {
+        Ok(PipeChannel::ver {
+            dev: dev.into(),
+            ch: TxChannel::<ChannelState, PipeMsg::ver>::new(alloc, 0x100)?,
+        })
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<ChannelState, PipeMsg::ver> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Submits a Pipe kick command to the firmware.
+    pub(crate) fn send(&mut self, msg: &PipeMsg::ver) {
+        cls_dev_dbg!(PipeCh, self.dev, "Pipe: {:?}\n", msg);
+        self.ch.put(msg);
+    }
+}
+
+/// Firmware Control channel, used for secure cache flush requests.
+pub(crate) struct FwCtlChannel {
+    dev: AsahiDevRef,
+    ch: TxChannel<FwCtlChannelState, FwCtlMsg>,
+}
+
+impl FwCtlChannel {
+    const COMMAND_TIMEOUT_MS: u64 = 1000;
+
+    /// Allocate a new Firmware Control channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+    ) -> Result<FwCtlChannel> {
+        Ok(FwCtlChannel {
+            dev: dev.into(),
+            ch: TxChannel::<FwCtlChannelState, FwCtlMsg>::new_uncached(alloc, 0x100)?,
+        })
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<FwCtlChannelState, FwCtlMsg> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Submits a Firmware Control command to the firmware.
+    pub(crate) fn send(&mut self, msg: &FwCtlMsg) -> u32 {
+        cls_dev_dbg!(FwCtlCh, self.dev, "FwCtl: {:?}\n", msg);
+        self.ch.put(msg)
+    }
+
+    /// Waits for a previously submitted Firmware Control command to complete.
+    pub(crate) fn wait_for(&mut self, wptr: u32) -> Result {
+        self.ch.wait_for(wptr, Self::COMMAND_TIMEOUT_MS)
+    }
+}
+
+/// Event channel, used to notify the driver of command completions, GPU faults and errors, and
+/// other events.
+#[versions(AGX)]
+pub(crate) struct EventChannel {
+    dev: AsahiDevRef,
+    ch: RxChannel<ChannelState, RawEventMsg>,
+    ev_mgr: Arc<event::EventManager>,
+    buf_mgr: buffer::BufferManager::ver,
+    gpu: Option<Arc<dyn gpu::GpuManager>>,
+}
+
+#[versions(AGX)]
+impl EventChannel::ver {
+    /// Allocate a new Event channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+        ev_mgr: Arc<event::EventManager>,
+        buf_mgr: buffer::BufferManager::ver,
+    ) -> Result<EventChannel::ver> {
+        Ok(EventChannel::ver {
+            dev: dev.into(),
+            ch: RxChannel::<ChannelState, RawEventMsg>::new(alloc, 0x100)?,
+            ev_mgr,
+            buf_mgr,
+            gpu: None,
+        })
+    }
+
+    /// Registers the managing `Gpu` instance that will handle events on this channel.
+    pub(crate) fn set_manager(&mut self, gpu: Arc<dyn gpu::GpuManager>) {
+        self.gpu = Some(gpu);
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<ChannelState, RawEventMsg> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Polls for new Event messages on this ring.
+    pub(crate) fn poll(&mut self) {
+        while let Some(msg) = self.ch.get(0) {
+            // SAFETY: The raw view is always valid for all bit patterns.
+            let tag = unsafe { msg.raw.0 };
+            match tag {
+                0..=EVENT_MAX => {
+                    // SAFETY: Since we have checked the tag to be in range,
+                    // accessing the enum view is valid.
+                    let msg = unsafe { msg.msg };
+
+                    cls_dev_dbg!(EventCh, self.dev, "Event: {:?}\n", msg);
+                    match msg {
+                        EventMsg::Fault => match self.gpu.as_ref() {
+                            Some(gpu) => gpu.handle_fault(),
+                            None => {
+                                dev_crit!(
+                                    self.dev.as_ref(),
+                                    "EventChannel: No GPU manager available!\n"
+                                )
+                            }
+                        },
+                        EventMsg::Timeout {
+                            counter,
+                            unk_8,
+                            event_slot,
+                        } => match self.gpu.as_ref() {
+                            Some(gpu) => gpu.handle_timeout(counter, event_slot, unk_8),
+                            None => {
+                                dev_crit!(
+                                    self.dev.as_ref(),
+                                    "EventChannel: No GPU manager available!\n"
+                                )
+                            }
+                        },
+                        EventMsg::Flag { firing, .. } => {
+                            for (i, flags) in firing.iter().enumerate() {
+                                for j in 0..32 {
+                                    if flags & (1u32 << j) != 0 {
+                                        self.ev_mgr.signal((i * 32 + j) as u32);
+                                    }
+                                }
+                            }
+                        }
+                        EventMsg::GrowTVB {
+                            vm_slot,
+                            buffer_slot,
+                            counter,
+                        } => match self.gpu.as_ref() {
+                            Some(gpu) => {
+                                self.buf_mgr.grow(buffer_slot);
+                                gpu.ack_grow(buffer_slot, vm_slot, counter);
+                            }
+                            None => {
+                                dev_crit!(
+                                    self.dev.as_ref(),
+                                    "EventChannel: No GPU manager available!\n"
+                                )
+                            }
+                        },
+                        EventMsg::ChannelError {
+                            error_type,
+                            pipe_type,
+                            event_slot,
+                            event_value,
+                        } => match self.gpu.as_ref() {
+                            Some(gpu) => {
+                                let error_type = match error_type {
+                                    0 => ChannelErrorType::MemoryError,
+                                    1 => ChannelErrorType::DMKill,
+                                    2 => ChannelErrorType::Aborted,
+                                    3 => ChannelErrorType::Unk3,
+                                    a => ChannelErrorType::Unknown(a),
+                                };
+                                gpu.handle_channel_error(
+                                    error_type,
+                                    pipe_type,
+                                    event_slot,
+                                    event_value,
+                                );
+                            }
+                            None => {
+                                dev_crit!(
+                                    self.dev.as_ref(),
+                                    "EventChannel: No GPU manager available!\n"
+                                )
+                            }
+                        },
+                        msg => {
+                            dev_crit!(self.dev.as_ref(), "Unknown event message: {:?}\n", msg);
+                        }
+                    }
+                }
+                _ => {
+                    // SAFETY: The raw view is always valid for all bit patterns.
+                    dev_warn!(self.dev.as_ref(), "Unknown event message: {:?}\n", unsafe {
+                        msg.raw
+                    });
+                }
+            }
+        }
+    }
+}
+
+/// Firmware Log channel. This one is pretty special, since it has 6 sub-channels (for different log
+/// levels), and it also uses a side buffer to actually hold the log messages, only passing around
+/// pointers in the main buffer.
+pub(crate) struct FwLogChannel {
+    dev: AsahiDevRef,
+    ch: RxChannel<FwLogChannelState, RawFwLogMsg>,
+    payload_buf: GpuArray<RawFwLogPayloadMsg>,
+}
+
+impl FwLogChannel {
+    const RING_SIZE: usize = 0x100;
+    const BUF_SIZE: usize = 0x100;
+
+    /// Allocate a new Firmware Log channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+    ) -> Result<FwLogChannel> {
+        Ok(FwLogChannel {
+            dev: dev.into(),
+            ch: RxChannel::<FwLogChannelState, RawFwLogMsg>::new(alloc, Self::RING_SIZE)?,
+            payload_buf: alloc
+                .shared
+                .array_empty(Self::BUF_SIZE * FwLogChannelState::SUB_CHANNELS)?,
+        })
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<FwLogChannelState, RawFwLogMsg> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Returns the GPU pointers to the firmware log payload buffer.
+    pub(crate) fn get_buf(&self) -> GpuWeakPointer<[RawFwLogPayloadMsg]> {
+        self.payload_buf.weak_pointer()
+    }
+
+    /// Polls for new log messages on all sub-rings.
+    pub(crate) fn poll(&mut self) {
+        for i in 0..FwLogChannelState::SUB_CHANNELS {
+            while let Some(msg) = self.ch.peek(i) {
+                cls_dev_dbg!(FwLogCh, self.dev, "FwLog{}: {:?}\n", i, msg);
+                if msg.msg_type != 2 {
+                    dev_warn!(self.dev.as_ref(), "Unknown FWLog{} message: {:?}\n", i, msg);
+                    self.ch.get(i);
+                    continue;
+                }
+                if msg.msg_index.0 as usize >= Self::BUF_SIZE {
+                    dev_warn!(
+                        self.dev.as_ref(),
+                        "FWLog{} message index out of bounds: {:?}\n",
+                        i,
+                        msg
+                    );
+                    self.ch.get(i);
+                    continue;
+                }
+                let index = Self::BUF_SIZE * i + msg.msg_index.0 as usize;
+                let payload = &self.payload_buf.as_slice()[index];
+                if payload.msg_type != 3 {
+                    dev_warn!(
+                        self.dev.as_ref(),
+                        "Unknown FWLog{} payload: {:?}\n",
+                        i,
+                        payload
+                    );
+                    self.ch.get(i);
+                    continue;
+                }
+                let msg = if let Some(end) = payload.msg.iter().position(|&r| r == 0) {
+                    CStr::from_bytes_with_nul(&(*payload.msg)[..end + 1])
+                        .unwrap_or(c_str!("cstr_err"))
+                } else {
+                    dev_warn!(
+                        self.dev.as_ref(),
+                        "FWLog{} payload not NUL-terminated: {:?}\n",
+                        i,
+                        payload
+                    );
+                    self.ch.get(i);
+                    continue;
+                };
+                match i {
+                    0 => dev_dbg!(self.dev.as_ref(), "FWLog: {}\n", msg),
+                    1 => dev_info!(self.dev.as_ref(), "FWLog: {}\n", msg),
+                    2 => dev_notice!(self.dev.as_ref(), "FWLog: {}\n", msg),
+                    3 => dev_warn!(self.dev.as_ref(), "FWLog: {}\n", msg),
+                    4 => dev_err!(self.dev.as_ref(), "FWLog: {}\n", msg),
+                    5 => dev_crit!(self.dev.as_ref(), "FWLog: {}\n", msg),
+                    _ => (),
+                };
+                self.ch.get(i);
+            }
+        }
+    }
+}
+
+/// KTrace channel, used to receive detailed execution trace markers from the firmware.
+/// We currently disable this in initdata, so no messages are expected here at this time.
+pub(crate) struct KTraceChannel {
+    dev: AsahiDevRef,
+    ch: RxChannel<ChannelState, RawKTraceMsg>,
+}
+
+impl KTraceChannel {
+    /// Allocate a new KTrace channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+    ) -> Result<KTraceChannel> {
+        Ok(KTraceChannel {
+            dev: dev.into(),
+            ch: RxChannel::<ChannelState, RawKTraceMsg>::new(alloc, 0x200)?,
+        })
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<ChannelState, RawKTraceMsg> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Polls for new KTrace messages on this ring.
+    pub(crate) fn poll(&mut self) {
+        while let Some(msg) = self.ch.get(0) {
+            cls_dev_dbg!(KTraceCh, self.dev, "KTrace: {:?}\n", msg);
+        }
+    }
+}
+
+/// Statistics channel, reporting power-related statistics to the driver.
+/// Not really implemented other than debug logs yet...
+#[versions(AGX)]
+pub(crate) struct StatsChannel {
+    dev: AsahiDevRef,
+    ch: RxChannel<ChannelState, RawStatsMsg::ver>,
+}
+
+#[versions(AGX)]
+impl StatsChannel::ver {
+    /// Allocate a new Statistics channel.
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+    ) -> Result<StatsChannel::ver> {
+        Ok(StatsChannel::ver {
+            dev: dev.into(),
+            ch: RxChannel::<ChannelState, RawStatsMsg::ver>::new(alloc, 0x100)?,
+        })
+    }
+
+    /// Returns the raw `ChannelRing` structure to pass to firmware.
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<ChannelState, RawStatsMsg::ver> {
+        self.ch.ring.to_raw()
+    }
+
+    /// Polls for new statistics messages on this ring.
+    pub(crate) fn poll(&mut self) {
+        while let Some(msg) = self.ch.get(0) {
+            // SAFETY: The raw view is always valid for all bit patterns.
+            let tag = unsafe { msg.raw.0 };
+            match tag {
+                0..=STATS_MAX::ver => {
+                    // SAFETY: Since we have checked the tag to be in range,
+                    // accessing the enum view is valid.
+                    let msg = unsafe { msg.msg };
+                    cls_dev_dbg!(StatsCh, self.dev, "Stats: {:?}\n", msg);
+                }
+                _ => {
+                    // SAFETY: The raw view is always valid for all bit patterns.
+                    pr_warn!("Unknown stats message: {:?}\n", unsafe { msg.raw });
+                }
+            }
+        }
+    }
+}
diff --git a/drivers/gpu/drm/asahi/crashdump.rs b/drivers/gpu/drm/asahi/crashdump.rs
new file mode 100644
index 00000000000000..bd9f2f1649584f
--- /dev/null
+++ b/drivers/gpu/drm/asahi/crashdump.rs
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU crash dump formatter
+//!
+//! Takes a raw dump of firmware/kernel mapped pages from `pgtable` and formats it into
+//! an ELF core dump suitable for exposing to userspace via the devcoredump interface.
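+//!
+//! The resulting core file is laid out roughly as: the ELF header, a PT_NOTE program
+//! header plus one PT_LOAD program header per contiguous run of dumped pages, the note
+//! data (e.g. AGX dump info and the RTKit crashlog), and finally the page contents,
+//! aligned to the UAT page size.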
+
+use core::mem::size_of;
+
+use kernel::{
+    devcoredump::DevCoreDump,
+    error::Result,
+    page::{Page, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE},
+    prelude::*,
+    types::Owned,
+    uapi,
+};
+
+use crate::hw;
+use crate::pgtable::{self, DumpedPage, Prot, UAT_PGSZ};
+use crate::util::align;
+
+pub(crate) struct CrashDump {
+    headers: KVVec<u8>,
+    pages: KVVec<Owned<Page>>,
+}
+
+const NOTE_NAME_AGX: &str = "AGX";
+const NOTE_AGX_DUMP_INFO: u32 = 1;
+
+const NOTE_NAME_RTKIT: &str = "RTKIT";
+const NOTE_RTKIT_CRASHLOG: u32 = 1;
+
+#[repr(C)]
+pub(crate) struct AGXDumpInfo {
+    initdata_address: u64,
+    chip_id: u32,
+    gpu_gen: hw::GpuGen,
+    gpu_variant: hw::GpuVariant,
+    gpu_rev: hw::GpuRevision,
+    total_active_cores: u32,
+    firmware_version: [u32; 6],
+}
+
+struct ELFNote {
+    name: &'static str,
+    ty: u32,
+    data: KVVec<u8>,
+}
+
+pub(crate) struct CrashDumpBuilder {
+    page_dump: KVVec<DumpedPage>,
+    notes: KVec<ELFNote>,
+}
+
+/// Helper to convert ELF headers into byte slices
+/// TODO: Hook this up into kernel::AsBytes somehow
+///
+/// # Safety
+///
+/// Types implementing this trait must have no padding bytes.
+unsafe trait AsBytes: Sized {
+    fn as_bytes(&self) -> &[u8] {
+        // SAFETY: This trait is only implemented for types with no padding bytes
+        unsafe { core::slice::from_raw_parts(self as *const _ as *const u8, size_of::<Self>()) }
+    }
+    fn slice_as_bytes(slice: &[Self]) -> &[u8] {
+        // SAFETY: This trait is only implemented for types with no padding bytes
+        unsafe {
+            core::slice::from_raw_parts(slice.as_ptr() as *const u8, core::mem::size_of_val(slice))
+        }
+    }
+}
+
+// SAFETY: This type has no padding
+unsafe impl AsBytes for uapi::Elf64_Ehdr {}
+// SAFETY: This type has no padding
+unsafe impl AsBytes for uapi::Elf64_Phdr {}
+// SAFETY: This type has no padding
+unsafe impl AsBytes for uapi::Elf64_Nhdr {}
+// SAFETY: This type has no padding
+unsafe impl AsBytes for AGXDumpInfo {}
+
+const FIRMWARE_ENTRYPOINT: u64 = 0xFFFFFF8000000000u64;
+
+impl CrashDumpBuilder {
+    pub(crate) fn new(page_dump: KVVec<DumpedPage>) -> Result<CrashDumpBuilder> {
+        Ok(CrashDumpBuilder {
+            page_dump,
+            notes: KVec::new(),
+        })
+    }
+
+    pub(crate) fn add_agx_info(
+        &mut self,
+        cfg: &hw::HwConfig,
+        dyncfg: &hw::DynConfig,
+        initdata_address: u64,
+    ) -> Result {
+        let mut info = AGXDumpInfo {
+            chip_id: cfg.chip_id,
+            gpu_gen: dyncfg.id.gpu_gen,
+            gpu_variant: dyncfg.id.gpu_variant,
+            gpu_rev: dyncfg.id.gpu_rev,
+            total_active_cores: dyncfg.id.total_active_cores,
+            firmware_version: [0; 6],
+            initdata_address,
+        };
+        info.firmware_version[..dyncfg.firmware_version.len().min(6)]
+            .copy_from_slice(&dyncfg.firmware_version);
+
+        let mut data = KVVec::new();
+        data.extend_from_slice(info.as_bytes(), GFP_KERNEL)?;
+
+        self.notes.push(
+            ELFNote {
+                name: NOTE_NAME_AGX,
+                ty: NOTE_AGX_DUMP_INFO,
+                data,
+            },
+            GFP_KERNEL,
+        )?;
+        Ok(())
+    }
+
+    pub(crate) fn add_crashlog(&mut self, crashlog: &[u8]) -> Result {
+        let mut data = KVVec::new();
+        data.extend_from_slice(crashlog, GFP_KERNEL)?;
+
+        self.notes.push(
+            ELFNote {
+                name: NOTE_NAME_RTKIT,
+                ty: NOTE_RTKIT_CRASHLOG,
+                data,
+            },
+            GFP_KERNEL,
+        )?;
+
+        Ok(())
+    }
+
+    pub(crate) fn finalize(self) -> Result<CrashDump> {
+        let CrashDumpBuilder { page_dump, notes } = self;
+
+        let mut ehdr: uapi::Elf64_Ehdr = Default::default();
+
+        ehdr.e_ident[uapi::EI_MAG0 as usize..=uapi::EI_MAG3 as usize].copy_from_slice(b"\x7fELF");
+        ehdr.e_ident[uapi::EI_CLASS as usize] = uapi::ELFCLASS64 as u8;
+        ehdr.e_ident[uapi::EI_DATA as usize] = uapi::ELFDATA2LSB as u8;
+        ehdr.e_ident[uapi::EI_VERSION as usize] = uapi::EV_CURRENT as u8;
+        ehdr.e_type = uapi::ET_CORE as u16;
+        ehdr.e_machine = uapi::EM_AARCH64 as u16;
+        ehdr.e_version = uapi::EV_CURRENT;
+        ehdr.e_entry = FIRMWARE_ENTRYPOINT;
+        ehdr.e_ehsize = core::mem::size_of::<uapi::Elf64_Ehdr>() as u16;
+        ehdr.e_phentsize = core::mem::size_of::<uapi::Elf64_Phdr>() as u16;
+
+        let phdr_offset = core::mem::size_of::<uapi::Elf64_Ehdr>();
+
+        // PHDRs come after the ELF header
+        ehdr.e_phoff = phdr_offset as u64;
+
+        let mut phdrs = KVVec::new();
+
+        // First PHDR is the NOTE section
+        phdrs.push(
+            uapi::Elf64_Phdr {
+                p_type: uapi::PT_NOTE,
+                p_flags: uapi::PF_R,
+                p_align: 1,
+                ..Default::default()
+            },
+            GFP_KERNEL,
+        )?;
+
+        // Generate the page phdrs. The offset will be fixed up later.
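+        // Contiguous pages with identical protection flags and validity are coalesced
+        // into a single PT_LOAD segment; `next` tracks where the previous segment would
+        // continue.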
+        let mut off: usize = 0;
+        let mut next = None;
+        let mut pages: KVVec<Owned<Page>> = KVVec::new();
+
+        for mut page in page_dump {
+            let vaddr = page.iova;
+            let paddr = page.pte & pgtable::PTE_ADDR_BITS;
+            let flags = Prot::from_pte(page.pte).elf_flags();
+            let valid = page.data.is_some();
+            let cur = (vaddr, paddr, flags, valid);
+            if Some(cur) != next {
+                phdrs.push(
+                    uapi::Elf64_Phdr {
+                        p_type: uapi::PT_LOAD,
+                        p_offset: if valid { off as u64 } else { 0 },
+                        p_vaddr: vaddr,
+                        p_paddr: paddr,
+                        p_filesz: if valid { UAT_PGSZ as u64 } else { 0 },
+                        p_memsz: UAT_PGSZ as u64,
+                        p_flags: flags,
+                        p_align: UAT_PGSZ as u64,
+                    },
+                    GFP_KERNEL,
+                )?;
+                if valid {
+                    off += UAT_PGSZ;
+                }
+            } else {
+                let ph = phdrs.last_mut().unwrap();
+                ph.p_memsz += UAT_PGSZ as u64;
+                if valid {
+                    ph.p_filesz += UAT_PGSZ as u64;
+                    off += UAT_PGSZ;
+                }
+            }
+            if let Some(data_page) = page.data.take() {
+                pages.push(data_page, GFP_KERNEL)?;
+            }
+            next = Some((
+                vaddr + UAT_PGSZ as u64,
+                paddr + UAT_PGSZ as u64,
+                flags,
+                valid,
+            ));
+        }
+
+        ehdr.e_phnum = phdrs.len() as u16;
+
+        let note_offset = phdr_offset + size_of::<uapi::Elf64_Phdr>() * phdrs.len();
+
+        let mut note_data: KVVec<u8> = KVVec::new();
+
+        for note in notes {
+            let hdr = uapi::Elf64_Nhdr {
+                n_namesz: note.name.len() as u32 + 1,
+                n_descsz: note.data.len() as u32,
+                n_type: note.ty,
+            };
+            note_data.extend_from_slice(hdr.as_bytes(), GFP_KERNEL)?;
+            note_data.extend_from_slice(note.name.as_bytes(), GFP_KERNEL)?;
+            note_data.push(0, GFP_KERNEL)?;
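+            // The ELF note format requires the name (including its NUL terminator) and
+            // the descriptor to each be padded to 4-byte alignment.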
+            while note_data.len() & 3 != 0 {
+                note_data.push(0, GFP_KERNEL)?;
+            }
+            note_data.extend_from_slice(&note.data, GFP_KERNEL)?;
+            while note_data.len() & 3 != 0 {
+                note_data.push(0, GFP_KERNEL)?;
+            }
+        }
+
+        // NOTE section comes after the PHDRs
+        phdrs[0].p_offset = note_offset as u64;
+        phdrs[0].p_filesz = note_data.len() as u64;
+
+        // Align data section to the page size
+        let data_offset = align(note_offset + note_data.len(), UAT_PGSZ);
+
+        // Fix up data PHDR offsets
+        for phdr in &mut phdrs[1..] {
+            phdr.p_offset += data_offset as u64;
+        }
+
+        // Build ELF header buffer
+        let mut headers: KVVec<u8> = KVVec::from_elem(0, data_offset, GFP_KERNEL)?;
+
+        headers[0..size_of::<uapi::Elf64_Ehdr>()].copy_from_slice(ehdr.as_bytes());
+        headers[phdr_offset..phdr_offset + phdrs.len() * size_of::<uapi::Elf64_Phdr>()]
+            .copy_from_slice(AsBytes::slice_as_bytes(&phdrs));
+        headers[note_offset..note_offset + note_data.len()].copy_from_slice(&note_data);
+
+        Ok(CrashDump { headers, pages })
+    }
+}
+
+impl DevCoreDump for CrashDump {
+    fn read(&self, buf: &mut [u8], mut offset: usize) -> Result<usize> {
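+        // The dump is served as the pre-built header/note block followed by the raw page
+        // contents; copy out of the headers first, then translate the remaining offset
+        // into a (page index, offset within page) pair.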
+        let mut read = 0;
+        let mut left = buf.len();
+        if offset < self.headers.len() {
+            let block = left.min(self.headers.len() - offset);
+            buf[..block].copy_from_slice(&self.headers[offset..offset + block]);
+            read += block;
+            offset += block;
+            left -= block;
+        }
+        if left == 0 {
+            return Ok(read);
+        }
+        offset -= self.headers.len(); // Offset from the page area
+
+        while left > 0 {
+            let page_index = offset >> PAGE_SHIFT;
+            let page_offset = offset & !PAGE_MASK;
+            let block = left.min(PAGE_SIZE - page_offset);
+            let Some(page) = self.pages.get(page_index) else {
+                break;
+            };
+            let slice = &mut buf[read..read + block];
+            // SAFETY: We own the page, and the slice guarantees the
+            // dst length is sufficient.
+            unsafe { page.read_raw(slice.as_mut_ptr(), page_offset, slice.len())? };
+            read += block;
+            offset += block;
+            left -= block;
+        }
+
+        Ok(read)
+    }
+}
diff --git a/drivers/gpu/drm/asahi/debug.rs b/drivers/gpu/drm/asahi/debug.rs
new file mode 100644
index 00000000000000..5348bff7df8196
--- /dev/null
+++ b/drivers/gpu/drm/asahi/debug.rs
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![allow(dead_code)]
+
+//! Debug enable/disable flags and convenience macros
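+//!
+//! The `debug_flags` module parameter is a bitmask indexed by [`DebugFlags`]. For
+//! example, setting bit 8 (0x100) enables the `Gpu` class, so `cls_dev_dbg!(Gpu, dev, ...)`
+//! and the `mod_dev_dbg!`/`mod_pr_debug!` macros in modules whose `DEBUG_CLASS` is
+//! `DebugFlags::Gpu` start producing output.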
+
+#[allow(unused_imports)]
+pub(crate) use super::{cls_dev_dbg, cls_pr_debug, debug, mod_dev_dbg, mod_pr_debug};
+use crate::module_parameters;
+use core::sync::atomic::{AtomicU64, Ordering};
+
+static DEBUG_FLAGS: AtomicU64 = AtomicU64::new(0);
+
+/// Debug flag bit indices
+pub(crate) enum DebugFlags {
+    // 0-4: Memory-related debug
+    Mmu = 0,
+    PgTable = 1,
+    Alloc = 2,
+    Gem = 3,
+    Object = 4,
+
+    // 5-7: Firmware objects and resources
+    Event = 5,
+    Buffer = 6,
+    WorkQueue = 7,
+
+    // 8-13: DRM interface, rendering, compute, GPU globals
+    Gpu = 8,
+    File = 9,
+    Queue = 10,
+    Render = 11,
+    Compute = 12,
+    Errors = 13,
+
+    // 14-15: Misc stats
+    MemStats = 14,
+    TVBStats = 15,
+
+    // 16-22: Channels
+    FwLogCh = 16,
+    KTraceCh = 17,
+    StatsCh = 18,
+    EventCh = 19,
+    PipeCh = 20,
+    DeviceControlCh = 21,
+    FwCtlCh = 22,
+
+    // 32-35: Allocator debugging
+    FillAllocations = 32,
+    DebugAllocations = 33,
+    DetectOverflows = 34,
+    ForceCPUMaps = 35,
+
+    // 36-: Behavior flags
+    ConservativeTlbi = 36,
+    KeepGpuPowered = 37,
+    WaitForPowerOff = 38,
+    NoGpuRecovery = 39,
+    DisableClustering = 40,
+
+    // 48-: Misc
+    Debug0 = 48,
+    Debug1 = 49,
+    Debug2 = 50,
+    Debug3 = 51,
+    Debug4 = 52,
+    Debug5 = 53,
+    Debug6 = 54,
+    Debug7 = 55,
+
+    VerboseFaults = 61,
+    AllowUnknownOverrides = 62,
+    OopsOnGpuCrash = 63,
+}
+
+/// Update the cached global debug flags from the module parameter
+pub(crate) fn update_debug_flags() {
+    let flags = *module_parameters::debug_flags.get();
+
+    DEBUG_FLAGS.store(flags, Ordering::Relaxed);
+}
+
+/// Check whether debug is enabled for a given flag
+#[inline(always)]
+pub(crate) fn debug_enabled(flag: DebugFlags) -> bool {
+    DEBUG_FLAGS.load(Ordering::Relaxed) & 1 << (flag as usize) != 0
+}
+
+/// Run some code only if debug is enabled for the calling module
+#[macro_export]
+macro_rules! debug {
+    ($($arg:tt)*) => {
+        if $crate::debug::debug_enabled(DEBUG_CLASS) {
+            $($arg)*
+        }
+    };
+}
+
+/// pr_info!() if debug is enabled for the calling module
+#[macro_export]
+macro_rules! mod_pr_debug (
+    ($($arg:tt)*) => (
+        $crate::debug! { ::kernel::pr_info! ( $($arg)* ); }
+    )
+);
+
+/// dev_info!() if debug is enabled for the calling module
+#[macro_export]
+macro_rules! mod_dev_dbg (
+    ($dev:expr, $($arg:tt)*) => (
+        $crate::debug! { ::kernel::dev_info! ( $dev.as_ref(), $($arg)* ); }
+    )
+);
+
+/// pr_info!() if debug is enabled for a specific debug class
+#[macro_export]
+macro_rules! cls_pr_debug (
+    ($cls:ident, $($arg:tt)*) => (
+        if $crate::debug::debug_enabled($crate::debug::DebugFlags::$cls) {
+            ::kernel::pr_info! ( $($arg)* );
+        }
+    )
+);
+
+/// dev_info!() if debug is enabled for a specific debug class
+#[macro_export]
+macro_rules! cls_dev_dbg (
+    ($cls:ident, $dev:expr, $($arg:tt)*) => (
+        if $crate::debug::debug_enabled($crate::debug::DebugFlags::$cls) {
+            ::kernel::dev_info! ( $dev.as_ref(), $($arg)* );
+        }
+    )
+);
diff --git a/drivers/gpu/drm/asahi/driver.rs b/drivers/gpu/drm/asahi/driver.rs
new file mode 100644
index 00000000000000..2aa7bb7ca6869f
--- /dev/null
+++ b/drivers/gpu/drm/asahi/driver.rs
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Top-level GPU driver implementation.
+
+use kernel::{
+    c_str, drm, drm::drv, drm::ioctl, error::Result, of, platform, prelude::*, sync::Arc,
+};
+
+use crate::{debug, file, gem, gpu, hw, regs};
+
+use kernel::dma::Device;
+use kernel::macros::vtable;
+use kernel::types::ARef;
+
+/// Per-device driver data, holding a reference to the top-level `GpuManager`
+/// object along with the platform device and its register (MMIO) resources.
+#[pin_data]
+pub(crate) struct AsahiData {
+    #[pin]
+    pub(crate) gpu: Arc<dyn gpu::GpuManager>,
+    pub(crate) pdev: platform::Device,
+    pub(crate) resources: regs::Resources,
+}
+
+pub(crate) struct AsahiDriver {
+    _reg: drm::drv::Registration<Self>,
+    pub(crate) data: Arc<AsahiData>,
+}
+
+/// Convenience type alias for the DRM device type for this driver.
+pub(crate) type AsahiDevice = kernel::drm::device::Device<AsahiDriver>;
+pub(crate) type AsahiDevRef = ARef<AsahiDevice>;
+
+/// DRM Driver metadata
+const INFO: drv::DriverInfo = drv::DriverInfo {
+    major: 0,
+    minor: 0,
+    patchlevel: 0,
+    name: c_str!("asahi"),
+    desc: c_str!("Apple AGX Graphics"),
+    date: c_str!("20220831"),
+};
+
+/// DRM Driver implementation for `AsahiDriver`.
+#[vtable]
+impl drv::Driver for AsahiDriver {
+    /// Our `DeviceData` type, reference-counted
+    type Data = Arc<AsahiData>;
+    /// Our `File` type.
+    type File = file::File;
+    /// Our `Object` type.
+    type Object = gem::Object;
+
+    const INFO: drv::DriverInfo = INFO;
+    const FEATURES: u32 = drv::FEAT_GEM
+        | drv::FEAT_RENDER
+        | drv::FEAT_SYNCOBJ
+        | drv::FEAT_SYNCOBJ_TIMELINE
+        | drv::FEAT_GEM_GPUVA;
+
+    kernel::declare_drm_ioctls! {
+        (ASAHI_GET_PARAMS,      drm_asahi_get_params,
+                          ioctl::RENDER_ALLOW, crate::file::File::get_params),
+        (ASAHI_GET_TIME,        drm_asahi_get_time,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::get_time),
+        (ASAHI_VM_CREATE,       drm_asahi_vm_create,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::vm_create),
+        (ASAHI_VM_DESTROY,      drm_asahi_vm_destroy,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::vm_destroy),
+        (ASAHI_VM_BIND,         drm_asahi_vm_bind,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::vm_bind),
+        (ASAHI_GEM_CREATE,      drm_asahi_gem_create,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::gem_create),
+        (ASAHI_GEM_MMAP_OFFSET, drm_asahi_gem_mmap_offset,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::gem_mmap_offset),
+        (ASAHI_GEM_BIND_OBJECT, drm_asahi_gem_bind_object,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::gem_bind_object),
+        (ASAHI_QUEUE_CREATE,    drm_asahi_queue_create,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::queue_create),
+        (ASAHI_QUEUE_DESTROY,   drm_asahi_queue_destroy,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::queue_destroy),
+        (ASAHI_SUBMIT,          drm_asahi_submit,
+            ioctl::AUTH | ioctl::RENDER_ALLOW, crate::file::File::submit),
+    }
+}
+
+// OF Device ID table.
+kernel::of_device_table!(
+    OF_TABLE,
+    MODULE_OF_TABLE,
+    <AsahiDriver as platform::Driver>::IdInfo,
+    [
+        (
+            of::DeviceId::new(c_str!("apple,agx-t8103")),
+            &hw::t8103::HWCONFIG
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t8112")),
+            &hw::t8112::HWCONFIG
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t6000")),
+            &hw::t600x::HWCONFIG_T6000
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t6001")),
+            &hw::t600x::HWCONFIG_T6001
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t6002")),
+            &hw::t600x::HWCONFIG_T6002
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t6020")),
+            &hw::t602x::HWCONFIG_T6020
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t6021")),
+            &hw::t602x::HWCONFIG_T6021
+        ),
+        (
+            of::DeviceId::new(c_str!("apple,agx-t6022")),
+            &hw::t602x::HWCONFIG_T6022
+        ),
+    ]
+);
+
+/// Platform Driver implementation for `AsahiDriver`.
+impl platform::Driver for AsahiDriver {
+    type IdInfo = &'static hw::HwConfig;
+    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+    /// Device probe function.
+    fn probe(pdev: &mut platform::Device, info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>> {
+        debug::update_debug_flags();
+
+        let dev = pdev.clone();
+
+        dev_info!(pdev.as_ref(), "Probing...\n");
+
+        let cfg = info.ok_or(ENODEV)?;
+
+        pdev.dma_set_mask_and_coherent((1 << cfg.uat_oas) - 1)?;
+
+        let res = regs::Resources::new(pdev)?;
+
+        // Initialize misc MMIO
+        res.init_mmio()?;
+
+        // Start the coprocessor CPU, so UAT can initialize the handoff
+        res.start_cpu()?;
+
+        let node = pdev.as_ref().of_node().ok_or(EIO)?;
+        let compat: KVec<u32> = node.get_property(c_str!("apple,firmware-compat"))?;
+
+        let drm = drm::device::Device::<AsahiDriver>::new(dev.as_ref())?;
+
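+        // Select the GpuManager implementation matching this GPU generation/variant and
+        // the firmware version advertised by the `apple,firmware-compat` DT property.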
+        let gpu = match (cfg.gpu_gen, cfg.gpu_variant, compat.as_slice()) {
+            (hw::GpuGen::G13, _, &[12, 3, 0]) => {
+                gpu::GpuManagerG13V12_3::new(&drm, &res, cfg)? as Arc<dyn gpu::GpuManager>
+            }
+            (hw::GpuGen::G14, hw::GpuVariant::G, &[12, 4, 0]) => {
+                gpu::GpuManagerG14V12_4::new(&drm, &res, cfg)? as Arc<dyn gpu::GpuManager>
+            }
+            (hw::GpuGen::G13, _, &[13, 5, 0]) => {
+                gpu::GpuManagerG13V13_5::new(&drm, &res, cfg)? as Arc<dyn gpu::GpuManager>
+            }
+            (hw::GpuGen::G14, hw::GpuVariant::G, &[13, 5, 0]) => {
+                gpu::GpuManagerG14V13_5::new(&drm, &res, cfg)? as Arc<dyn gpu::GpuManager>
+            }
+            (hw::GpuGen::G14, _, &[13, 5, 0]) => {
+                gpu::GpuManagerG14XV13_5::new(&drm, &res, cfg)? as Arc<dyn gpu::GpuManager>
+            }
+            _ => {
+                dev_info!(
+                    pdev.as_ref(),
+                    "Unsupported GPU/firmware combination ({:?}, {:?}, {:?})\n",
+                    cfg.gpu_gen,
+                    cfg.gpu_variant,
+                    compat
+                );
+                return Err(ENODEV);
+            }
+        };
+
+        let data = Arc::pin_init(
+            try_pin_init!(AsahiData {
+                gpu,
+                pdev: pdev.clone(),
+                resources: res,
+            }),
+            GFP_KERNEL,
+        )?;
+
+        data.gpu.init()?;
+
+        let reg = drm::drv::Registration::new(drm, data.clone(), 0)?;
+
+        Ok(KBox::new(Self { _reg: reg, data }, GFP_KERNEL)?.into())
+    }
+}
diff --git a/drivers/gpu/drm/asahi/event.rs b/drivers/gpu/drm/asahi/event.rs
new file mode 100644
index 00000000000000..57fda8c1ea91e2
--- /dev/null
+++ b/drivers/gpu/drm/asahi/event.rs
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU event manager
+//!
+//! The GPU firmware manages work completion by using event objects (Apple calls them "stamps"),
+//! which are monotonically incrementing counters. There are a fixed number of objects, and
+//! they are managed with a `SlotAllocator`.
+//!
+//! This module manages the set of available events and lets users compute expected values.
+//! It also manages signaling owners when the GPU firmware reports that an event fired.
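+//!
+//! Each stamp is a 32-bit value seeded with its slot index in the top byte; expected
+//! completion values advance in steps of 0x100, and the low 8 bits are ignored when
+//! comparing values (see [`EventValue`]).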
+
+use crate::debug::*;
+use crate::fw::types::*;
+use crate::{gpu, slotalloc, workqueue};
+use core::cmp;
+use core::sync::atomic::Ordering;
+use kernel::prelude::*;
+use kernel::sync::Arc;
+use kernel::{c_str, static_lock_class};
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Event;
+
+/// Number of events managed by the firmware.
+const NUM_EVENTS: u32 = 128;
+
+/// Inner data associated with a given event slot.
+pub(crate) struct EventInner {
+    /// CPU pointer to the driver notification event stamp
+    stamp: *const AtomicU32,
+    /// GPU pointer to the driver notification event stamp
+    gpu_stamp: GpuWeakPointer<Stamp>,
+    /// GPU pointer to the firmware-internal event stamp
+    gpu_fw_stamp: GpuWeakPointer<FwStamp>,
+}
+
+// SAFETY: The event slots are safe to send across threads.
+unsafe impl Send for EventInner {}
+
+/// Alias for an event token, which allows requesting the same event.
+pub(crate) type Token = slotalloc::SlotToken;
+/// Alias for an allocated `Event` that has a slot.
+pub(crate) type Event = slotalloc::Guard<EventInner>;
+
+/// Represents a given stamp value for an event.
+#[derive(Eq, PartialEq, Copy, Clone, Debug)]
+#[repr(transparent)]
+pub(crate) struct EventValue(u32);
+
+impl EventValue {
+    /// Returns the `EventValue` that succeeds this one.
+    pub(crate) fn next(&self) -> EventValue {
+        EventValue(self.0.wrapping_add(0x100))
+    }
+
+    /// Increments this `EventValue` in place.
+    pub(crate) fn increment(&mut self) {
+        self.0 = self.0.wrapping_add(0x100);
+    }
+
+    /* Not used
+    /// Increments this `EventValue` in place by a certain count.
+    pub(crate) fn add(&mut self, val: u32) {
+        self.0 = self
+            .0
+            .wrapping_add(val.checked_mul(0x100).expect("Adding too many events"));
+    }
+    */
+
+    /// Decrements this `EventValue` in place by a certain count.
+    pub(crate) fn sub(&mut self, val: u32) {
+        self.0 = self
+            .0
+            .wrapping_sub(val.checked_mul(0x100).expect("Subtracting too many events"));
+    }
+
+    /// Computes the delta between this event and another event.
+    pub(crate) fn delta(&self, other: &EventValue) -> i32 {
+        (self.0.wrapping_sub(other.0) as i32) >> 8
+    }
+}
+
+impl PartialOrd for EventValue {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for EventValue {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        self.delta(other).cmp(&0)
+    }
+}
+
+impl EventInner {
+    /// Returns the GPU pointer to the driver notification stamp
+    pub(crate) fn stamp_pointer(&self) -> GpuWeakPointer<Stamp> {
+        self.gpu_stamp
+    }
+
+    /// Returns the GPU pointer to the firmware internal stamp
+    pub(crate) fn fw_stamp_pointer(&self) -> GpuWeakPointer<FwStamp> {
+        self.gpu_fw_stamp
+    }
+
+    /// Fetches the current event value from shared memory
+    pub(crate) fn current(&self) -> EventValue {
+        // SAFETY: The pointer is always valid as constructed in
+        // EventManager below, and outside users cannot construct
+        // new EventInners, nor move or copy them, and Guards as
+        // returned by the SlotAllocator hold a reference to the
+        // SlotAllocator containing the EventManagerInner, which
+        // keeps the GpuObject the stamp is contained within alive.
+        EventValue(unsafe { &*self.stamp }.load(Ordering::Acquire))
+    }
+}
+
+impl slotalloc::SlotItem for EventInner {
+    type Data = EventManagerInner;
+
+    fn release(&mut self, data: &mut Self::Data, slot: u32) {
+        mod_pr_debug!("EventManager: Released slot {}\n", slot);
+        data.owners[slot as usize] = None;
+    }
+}
+
+/// Inner data for the event manager, to be protected by the SlotAllocator lock.
+pub(crate) struct EventManagerInner {
+    stamps: GpuArray<Stamp>,
+    fw_stamps: GpuArray<FwStamp>,
+    // Note: Use dyn to avoid having to version this entire module.
+    owners: KVec<Option<Arc<dyn workqueue::WorkQueue + Send + Sync>>>,
+}
+
+/// Top-level EventManager object.
+pub(crate) struct EventManager {
+    alloc: slotalloc::SlotAllocator<EventInner>,
+}
+
+impl EventManager {
+    /// Create a new EventManager.
+    #[inline(never)]
+    pub(crate) fn new(alloc: &mut gpu::KernelAllocators) -> Result<EventManager> {
+        let mut owners = KVec::new();
+        for _i in 0..(NUM_EVENTS as usize) {
+            owners.push(None, GFP_KERNEL)?;
+        }
+        let inner = EventManagerInner {
+            stamps: alloc.shared.array_empty(NUM_EVENTS as usize)?,
+            fw_stamps: alloc.private.array_empty(NUM_EVENTS as usize)?,
+            owners,
+        };
+
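+        // Seed each stamp with its slot index in the top byte so that initial values are
+        // distinct across slots.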
+        for slot in 0..NUM_EVENTS {
+            inner.stamps[slot as usize]
+                .0
+                .store(slot << 24, Ordering::Relaxed);
+        }
+
+        Ok(EventManager {
+            alloc: slotalloc::SlotAllocator::new(
+                NUM_EVENTS,
+                inner,
+                |inner: &mut EventManagerInner, slot| {
+                    Some(EventInner {
+                        stamp: &inner.stamps[slot as usize].0,
+                        gpu_stamp: inner.stamps.weak_item_pointer(slot as usize),
+                        gpu_fw_stamp: inner.fw_stamps.weak_item_pointer(slot as usize),
+                    })
+                },
+                c_str!("EventManager::SlotAllocator"),
+                static_lock_class!(),
+                static_lock_class!(),
+            )?,
+        })
+    }
+
+    /// Gets a free `Event`, optionally trying to reuse the last one allocated by this caller.
+    pub(crate) fn get(
+        &self,
+        token: Option<Token>,
+        owner: Arc<dyn workqueue::WorkQueue + Send + Sync>,
+    ) -> Result<Event> {
+        let ev = self.alloc.get_inner(token, |inner, ev| {
+            mod_pr_debug!(
+                "EventManager: Registered owner {:p} on slot {}\n",
+                &*owner,
+                ev.slot()
+            );
+            inner.owners[ev.slot() as usize] = Some(owner);
+            Ok(())
+        })?;
+        Ok(ev)
+    }
+
+    /// Signals an event by slot, indicating completion (of one or more commands).
+    pub(crate) fn signal(&self, slot: u32) {
+        match self
+            .alloc
+            .with_inner(|inner| inner.owners[slot as usize].as_ref().cloned())
+        {
+            Some(owner) => {
+                owner.signal();
+            }
+            None => {
+                mod_pr_debug!("EventManager: Received event for empty slot {}\n", slot);
+            }
+        }
+    }
+
+    /// Marks the owner of an event as having lost its work due to a GPU error.
+    pub(crate) fn mark_error(&self, slot: u32, wait_value: u32, error: workqueue::WorkError) {
+        match self
+            .alloc
+            .with_inner(|inner| inner.owners[slot as usize].as_ref().cloned())
+        {
+            Some(owner) => {
+                owner.mark_error(EventValue(wait_value), error);
+            }
+            None => {
+                pr_err!("Received error for empty slot {}\n", slot);
+            }
+        }
+    }
+
+    /// Returns a reference to the workqueue owning an event.
+    pub(crate) fn get_owner(
+        &self,
+        slot: u32,
+    ) -> Option<Arc<dyn workqueue::WorkQueue + Send + Sync>> {
+        self.alloc
+            .with_inner(|inner| inner.owners[slot as usize].as_ref().cloned())
+    }
+
+    /// Fail all commands, used when the GPU crashes.
+    pub(crate) fn fail_all(&self, error: workqueue::WorkError) {
+        let mut owners: KVec<Arc<dyn workqueue::WorkQueue + Send + Sync>> = KVec::new();
+
+        self.alloc.with_inner(|inner| {
+            for wq in inner.owners.iter().filter_map(|o| o.as_ref()).cloned() {
+                if owners.push(wq, GFP_KERNEL).is_err() {
+                    pr_err!("Failed to signal failure to WorkQueue\n");
+                }
+            }
+        });
+
+        for wq in owners {
+            wq.fail_all(error);
+        }
+    }
+}
diff --git a/drivers/gpu/drm/asahi/file.rs b/drivers/gpu/drm/asahi/file.rs
new file mode 100644
index 00000000000000..a398cedfa66cfe
--- /dev/null
+++ b/drivers/gpu/drm/asahi/file.rs
@@ -0,0 +1,1050 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![allow(clippy::unusual_byte_groupings)]
+
+//! File implementation, which represents a single DRM client.
+//!
+//! This is in charge of managing the resources associated with one GPU client, including an
+//! arbitrary number of submission queues and `Vm` objects. It also reports hardware/driver
+//! information to userspace and accepts work submissions.
+
+use crate::debug::*;
+use crate::driver::AsahiDevice;
+use crate::{
+    alloc, buffer, driver, gem, mmu, module_parameters, queue,
+    util::{align, align_down, gcd, AnyBitPattern, RangeExt, Reader},
+};
+use core::mem::MaybeUninit;
+use core::ops::Range;
+use kernel::dma_fence::RawDmaFence;
+use kernel::drm::gem::BaseObject;
+use kernel::error::code::*;
+use kernel::prelude::*;
+use kernel::sync::{Arc, Mutex};
+use kernel::time::NSEC_PER_SEC;
+use kernel::types::ForeignOwnable;
+use kernel::uaccess::{UserPtr, UserSlice};
+use kernel::{dma_fence, drm, uapi, xarray};
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::File;
+
+pub(crate) const MAX_COMMANDS_PER_SUBMISSION: u32 = 64;
+
+/// A client instance of an `mmu::Vm` address space.
+struct Vm {
+    ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+    ualloc_priv: Arc<Mutex<alloc::DefaultAllocator>>,
+    vm: mmu::Vm,
+    kernel_range: Range<u64>,
+    _dummy_mapping: mmu::KernelMapping,
+}
+
+impl Drop for Vm {
+    fn drop(&mut self) {
+        // When the user Vm is dropped, unmap everything in the user range
+        let left_range = VM_USER_RANGE.start..self.kernel_range.start;
+        let right_range = self.kernel_range.end..VM_USER_RANGE.end;
+
+        if !left_range.is_empty()
+            && self
+                .vm
+                .unmap_range(left_range.start, left_range.range())
+                .is_err()
+        {
+            pr_err!("Vm::Drop: vm.unmap_range() failed\n");
+        }
+        if !right_range.is_empty()
+            && self
+                .vm
+                .unmap_range(right_range.start, right_range.range())
+                .is_err()
+        {
+            pr_err!("Vm::Drop: vm.unmap_range() failed\n");
+        }
+    }
+}
+
+/// Sync object from userspace.
+pub(crate) struct SyncItem {
+    pub(crate) syncobj: drm::syncobj::SyncObj,
+    pub(crate) fence: Option<dma_fence::Fence>,
+    pub(crate) chain_fence: Option<dma_fence::FenceChain>,
+    pub(crate) timeline_value: u64,
+}
+
+impl SyncItem {
+    fn parse_one(file: &DrmFile, data: uapi::drm_asahi_sync, out: bool) -> Result<SyncItem> {
+        match data.sync_type {
+            uapi::drm_asahi_sync_type_DRM_ASAHI_SYNC_SYNCOBJ => {
+                if data.timeline_value != 0 {
+                    cls_pr_debug!(Errors, "Non-timeline sync object with a nonzero value\n");
+                    return Err(EINVAL);
+                }
+                let syncobj = drm::syncobj::SyncObj::lookup_handle(file, data.handle)?;
+
+                Ok(SyncItem {
+                    fence: if out {
+                        None
+                    } else {
+                        Some(syncobj.fence_get().ok_or_else(|| {
+                            cls_pr_debug!(Errors, "Failed to get fence from sync object\n");
+                            EINVAL
+                        })?)
+                    },
+                    syncobj,
+                    chain_fence: None,
+                    timeline_value: data.timeline_value,
+                })
+            }
+            uapi::drm_asahi_sync_type_DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ => {
+                let syncobj = drm::syncobj::SyncObj::lookup_handle(file, data.handle)?;
+                let fence = if out {
+                    None
+                } else {
+                    syncobj
+                        .fence_get()
+                        .ok_or_else(|| {
+                            cls_pr_debug!(
+                                Errors,
+                                "Failed to get fence from timeline sync object\n"
+                            );
+                            EINVAL
+                        })?
+                        .chain_find_seqno(data.timeline_value)?
+                };
+
+                Ok(SyncItem {
+                    fence,
+                    syncobj,
+                    chain_fence: if out {
+                        Some(dma_fence::FenceChain::new()?)
+                    } else {
+                        None
+                    },
+                    timeline_value: data.timeline_value,
+                })
+            }
+            _ => {
+                cls_pr_debug!(Errors, "Invalid sync type {}\n", data.sync_type);
+                Err(EINVAL)
+            }
+        }
+    }
+
+    fn parse_array(
+        file: &DrmFile,
+        ptr: u64,
+        in_count: u32,
+        out_count: u32,
+    ) -> Result<KVec<SyncItem>> {
+        let count = in_count + out_count;
+        let mut vec = KVec::with_capacity(count as usize, GFP_KERNEL)?;
+
+        const STRIDE: usize = core::mem::size_of::<uapi::drm_asahi_sync>();
+        let size = STRIDE * count as usize;
+
+        // SAFETY: We only read this once, so there are no TOCTOU issues.
+        let mut reader = UserSlice::new(ptr as UserPtr, size).reader();
+
+        for i in 0..count {
+            let mut sync: MaybeUninit<uapi::drm_asahi_sync> = MaybeUninit::uninit();
+
+            // SAFETY: The size of `sync` is STRIDE
+            reader.read_raw(unsafe {
+                core::slice::from_raw_parts_mut(sync.as_mut_ptr() as *mut MaybeUninit<u8>, STRIDE)
+            })?;
+
+            // SAFETY: All bit patterns in the struct are valid
+            let sync = unsafe { sync.assume_init() };
+
+            vec.push(SyncItem::parse_one(file, sync, i >= in_count)?, GFP_KERNEL)?;
+        }
+
+        Ok(vec)
+    }
+}
+
+pub(crate) enum Object {
+    TimestampBuffer(Arc<mmu::KernelMapping>),
+}
+
+/// State associated with a client.
+pub(crate) struct File {
+    id: u64,
+    vms: xarray::XArray<KBox<Vm>>,
+    queues: xarray::XArray<Arc<Mutex<KBox<dyn queue::Queue>>>>,
+    objects: xarray::XArray<KBox<Object>>,
+}
+
+/// Convenience type alias for our DRM `File` type.
+pub(crate) type DrmFile = drm::file::File<File>;
+
+/// Available VM range for the user
+const VM_USER_RANGE: Range<u64> = mmu::IOVA_USER_USABLE_RANGE;
+
+/// Minimum size of the address space range reserved for kernel mappings
+const VM_KERNEL_MIN_SIZE: u64 = 0x20000000;
+
+impl drm::file::DriverFile for File {
+    kernel::define_driver_file_types!(driver::AsahiDriver);
+
+    /// Create a new `File` instance for a fresh client.
+    fn open(
+        device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+    ) -> Result<Pin<KBox<Self>>> {
+        debug::update_debug_flags();
+
+        let gpu = &dev_data.gpu;
+        let id = gpu.ids().file.next();
+
+        mod_dev_dbg!(device, "[File {}]: DRM device opened\n", id);
+        Ok(KBox::pin(
+            Self {
+                id,
+                vms: xarray::XArray::new(xarray::flags::ALLOC1),
+                queues: xarray::XArray::new(xarray::flags::ALLOC1),
+                objects: xarray::XArray::new(xarray::flags::ALLOC1),
+            },
+            GFP_KERNEL,
+        )?)
+    }
+}
+
+// SAFETY: All bit patterns are valid by construction.
+unsafe impl AnyBitPattern for uapi::drm_asahi_gem_bind_op {}
+
+impl File {
+    fn vms(self: Pin<&Self>) -> Pin<&xarray::XArray<KBox<Vm>>> {
+        // SAFETY: Structural pinned projection for vms.
+        // We never move out of this field.
+        unsafe { self.map_unchecked(|s| &s.vms) }
+    }
+
+    #[allow(clippy::type_complexity)]
+    fn queues(self: Pin<&Self>) -> Pin<&xarray::XArray<Arc<Mutex<KBox<dyn queue::Queue>>>>> {
+        // SAFETY: Structural pinned projection for queues.
+        // We never move out of this field.
+        unsafe { self.map_unchecked(|s| &s.queues) }
+    }
+
+    fn objects(self: Pin<&Self>) -> Pin<&xarray::XArray<KBox<Object>>> {
+        // SAFETY: Structural pinned projection for objects.
+        // We never move out of this field.
+        unsafe { self.map_unchecked(|s| &s.objects) }
+    }
+
+    /// IOCTL: get_params: Get global GPU/driver parameters.
+    pub(crate) fn get_params(
+        device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &uapi::drm_asahi_get_params,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        mod_dev_dbg!(device, "[File {}]: IOCTL: get_params\n", file.inner().id);
+
+        let gpu = &dev_data.gpu;
+
+        if data.param_group != 0 || data.pad != 0 {
+            cls_pr_debug!(Errors, "get_params: Invalid arguments\n");
+            return Err(EINVAL);
+        }
+
+        if gpu.is_crashed() {
+            return Err(ENODEV);
+        }
+
+        let mut params = uapi::drm_asahi_params_global {
+            features: 0,
+
+            gpu_generation: gpu.get_dyncfg().id.gpu_gen as u32,
+            gpu_variant: gpu.get_dyncfg().id.gpu_variant as u32,
+            gpu_revision: gpu.get_dyncfg().id.gpu_rev as u32,
+            chip_id: gpu.get_cfg().chip_id,
+
+            num_dies: gpu.get_cfg().num_dies,
+            num_clusters_total: gpu.get_dyncfg().id.num_clusters,
+            num_cores_per_cluster: gpu.get_dyncfg().id.num_cores,
+            core_masks: [0; uapi::DRM_ASAHI_MAX_CLUSTERS as usize],
+
+            vm_start: VM_USER_RANGE.start,
+            vm_end: VM_USER_RANGE.end,
+            vm_kernel_min_size: VM_KERNEL_MIN_SIZE,
+
+            max_commands_per_submission: MAX_COMMANDS_PER_SUBMISSION,
+            max_attachments: crate::microseq::MAX_ATTACHMENTS as u32,
+            max_frequency_khz: gpu.get_dyncfg().pwr.max_frequency_khz(),
+
+            command_timestamp_frequency_hz: 1_000_000_000, // User timestamps always in nanoseconds
+        };
+
+        for (i, mask) in gpu.get_dyncfg().id.core_masks.iter().enumerate() {
+            *(params.core_masks.get_mut(i).ok_or(EIO)?) = (*mask).into();
+        }
+
+        if *module_parameters::fault_control.get() == 0xb {
+            params.features |= uapi::drm_asahi_feature_DRM_ASAHI_FEATURE_SOFT_FAULTS as u64;
+        }
+
+        let size = core::mem::size_of::<uapi::drm_asahi_params_global>().min(data.size.try_into()?);
+
+        // SAFETY: We only write to this userptr once, so there are no TOCTOU issues.
+        let mut params_writer = UserSlice::new(data.pointer as UserPtr, size).writer();
+
+        // SAFETY: `size` is at most the sizeof of `params`
+        params_writer.write_slice(unsafe {
+            core::slice::from_raw_parts(&params as *const _ as *const u8, size)
+        })?;
+
+        Ok(0)
+    }
+
+    /// IOCTL: vm_create: Create a new `Vm`.
+    pub(crate) fn vm_create(
+        device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_vm_create,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        let kernel_range = data.kernel_start..data.kernel_end;
+
+        // Validate requested kernel range
+        if !VM_USER_RANGE.is_superset(kernel_range.clone())
+            || kernel_range.range() < VM_KERNEL_MIN_SIZE
+            || kernel_range.start & (mmu::UAT_PGMSK as u64) != 0
+            || kernel_range.end & (mmu::UAT_PGMSK as u64) != 0
+        {
+            cls_pr_debug!(Errors, "vm_create: Invalid kernel range\n");
+            return Err(EINVAL);
+        }
+
+        // Align to buffer::PAGE_SIZE so the allocators are happy
+        let kernel_range = align(kernel_range.start, buffer::PAGE_SIZE as u64)
+            ..align_down(kernel_range.end, buffer::PAGE_SIZE as u64);
+
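+        // Split the kernel-reserved range in half: the lower half backs GPU-shared kernel
+        // allocations (`ualloc`) and the upper half backs firmware-private allocations
+        // (`ualloc_priv`).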
+        let kernel_half_size = align_down(kernel_range.range() >> 1, buffer::PAGE_SIZE as u64);
+        let kernel_gpu_range = kernel_range.start..(kernel_range.start + kernel_half_size);
+        let kernel_gpufw_range = kernel_gpu_range.end..kernel_range.end;
+
+        let gpu = &dev_data.gpu;
+        let file_id = file.inner().id;
+        let vm = gpu.new_vm(kernel_range.clone())?;
+
+        let resv = file.inner().vms().reserve()?;
+        let id: u32 = resv.index().try_into()?;
+
+        mod_dev_dbg!(device, "[File {} VM {}]: VM Create\n", file_id, id);
+        mod_dev_dbg!(
+            device,
+            "[File {} VM {}]: Creating allocators\n",
+            file_id,
+            id
+        );
+        let ualloc = Arc::pin_init(
+            Mutex::new(alloc::DefaultAllocator::new(
+                device,
+                &vm,
+                kernel_gpu_range,
+                buffer::PAGE_SIZE,
+                mmu::PROT_GPU_SHARED_RW,
+                512 * 1024,
+                true,
+                fmt!("File {} VM {} GPU Shared", file_id, id),
+                false,
+            )?),
+            GFP_KERNEL,
+        )?;
+        let ualloc_priv = Arc::pin_init(
+            Mutex::new(alloc::DefaultAllocator::new(
+                device,
+                &vm,
+                kernel_gpufw_range,
+                buffer::PAGE_SIZE,
+                mmu::PROT_GPU_FW_PRIV_RW,
+                64 * 1024,
+                true,
+                fmt!("File {} VM {} GPU FW Private", file_id, id),
+                false,
+            )?),
+            GFP_KERNEL,
+        )?;
+
+        mod_dev_dbg!(
+            device,
+            "[File {} VM {}]: Creating dummy object\n",
+            file_id,
+            id
+        );
+        let mut dummy_obj = gem::new_kernel_object(device, 0x4000)?;
+        dummy_obj.vmap()?.as_mut_slice().fill(0);
+        let dummy_mapping =
+            dummy_obj.map_at(&vm, mmu::IOVA_UNK_PAGE, mmu::PROT_GPU_SHARED_RW, true)?;
+
+        mod_dev_dbg!(device, "[File {} VM {}]: VM created\n", file_id, id);
+        resv.store(KBox::new(
+            Vm {
+                ualloc,
+                ualloc_priv,
+                vm,
+                kernel_range,
+                _dummy_mapping: dummy_mapping,
+            },
+            GFP_KERNEL,
+        )?)?;
+
+        data.vm_id = id;
+
+        Ok(0)
+    }
+
+    /// IOCTL: vm_destroy: Destroy a `Vm`.
+    pub(crate) fn vm_destroy(
+        _device: &AsahiDevice,
+        _dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_vm_destroy,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if file.inner().vms().remove(data.vm_id as usize).is_none() {
+            Err(ENOENT)
+        } else {
+            Ok(0)
+        }
+    }
+
+    /// IOCTL: gem_create: Create a new GEM object.
+    pub(crate) fn gem_create(
+        device: &AsahiDevice,
+        _dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_gem_create,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        mod_dev_dbg!(
+            device,
+            "[File {}]: IOCTL: gem_create size={:#x?}\n",
+            file.inner().id,
+            data.size
+        );
+
+        if (data.flags
+            & !(uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_WRITEBACK
+                | uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_VM_PRIVATE))
+            != 0
+            || (data.flags & uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_VM_PRIVATE == 0
+                && data.vm_id != 0)
+        {
+            cls_pr_debug!(Errors, "gem_create: Invalid arguments\n");
+            return Err(EINVAL);
+        }
+
+        let resv_obj = if data.flags & uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_VM_PRIVATE != 0 {
+            Some(
+                file.inner()
+                    .vms()
+                    .get(data.vm_id.try_into()?)
+                    .ok_or(ENOENT)?
+                    .borrow()
+                    .vm
+                    .get_resv_obj(),
+            )
+        } else {
+            None
+        };
+
+        let bo = gem::new_object(device, data.size.try_into()?, data.flags, resv_obj.as_ref())?;
+
+        let handle = bo.gem.create_handle(file)?;
+        data.handle = handle;
+
+        mod_dev_dbg!(
+            device,
+            "[File {}]: IOCTL: gem_create size={:#x} handle={:#x?}\n",
+            file.inner().id,
+            data.size,
+            data.handle
+        );
+
+        Ok(0)
+    }
+
+    /// IOCTL: gem_mmap_offset: Assign an mmap offset to a GEM object.
+    pub(crate) fn gem_mmap_offset(
+        device: &AsahiDevice,
+        _dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_gem_mmap_offset,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        mod_dev_dbg!(
+            device,
+            "[File {}]: IOCTL: gem_mmap_offset handle={:#x?}\n",
+            file.inner().id,
+            data.handle
+        );
+
+        if data.flags != 0 {
+            cls_pr_debug!(Errors, "gem_mmap_offset: Unexpected flags\n");
+            return Err(EINVAL);
+        }
+
+        let bo = gem::lookup_handle(file, data.handle)?;
+        data.offset = bo.gem.create_mmap_offset()?;
+        Ok(0)
+    }
+
+    /// IOCTL: vm_bind: Map or unmap memory into a Vm.
+    pub(crate) fn vm_bind(
+        device: &AsahiDevice,
+        _dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &uapi::drm_asahi_vm_bind,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        mod_dev_dbg!(
+            device,
+            "[File {} VM {}]: IOCTL: vm_bind\n",
+            file.inner().id,
+            data.vm_id,
+        );
+
+        if data.stride == 0 || data.pad != 0 {
+            cls_pr_debug!(Errors, "vm_bind: Unexpected headers\n");
+            return Err(EINVAL);
+        }
+
+        let vm_id = data.vm_id.try_into()?;
+
+        let mut vec = KVec::new();
+        let size = (data.stride * data.num_binds) as usize;
+        let reader = UserSlice::new(data.userptr as UserPtr, size).reader();
+        reader.read_all(&mut vec, GFP_KERNEL)?;
+        let mut reader = Reader::new(&vec);
+
+        for _i in 0..data.num_binds {
+            let bind: uapi::drm_asahi_gem_bind_op = reader.read_up_to(data.stride as usize)?;
+            Self::do_gem_bind_unbind(vm_id, &bind, file)?;
+        }
+
+        Ok(0)
+    }
+
+    pub(crate) fn do_gem_bind_unbind(
+        vm_id: usize,
+        data: &uapi::drm_asahi_gem_bind_op,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if (data.flags & uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_UNBIND) != 0 {
+            Self::do_gem_unbind(vm_id, data, file)
+        } else {
+            Self::do_gem_bind(vm_id, data, file)
+        }
+    }
+
+    pub(crate) fn do_gem_bind(
+        vm_id: usize,
+        data: &uapi::drm_asahi_gem_bind_op,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if (data.addr | data.range | data.offset) as usize & mmu::UAT_PGMSK != 0 {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Addr/range/offset not page aligned: {:#x} {:#x}\n",
+                data.addr,
+                data.range
+            );
+            return Err(EINVAL); // Must be page aligned
+        }
+
+        if (data.flags
+            & !(uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_READ
+                | uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_WRITE
+                | uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_SINGLE_PAGE))
+            != 0
+        {
+            cls_pr_debug!(Errors, "gem_bind: Invalid flags {:#x}\n", data.flags);
+            return Err(EINVAL);
+        }
+
+        let single_page = data.flags & uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_SINGLE_PAGE != 0;
+
+        let bo = gem::lookup_handle(file, data.handle)?;
+
+        let start = data.addr;
+        let end = data.addr.checked_add(data.range).ok_or(EINVAL)?;
+        let range = start..end;
+
+        let bo_accessed_size = if single_page {
+            mmu::UAT_PGMSK as u64
+        } else {
+            data.range
+        };
+        let end_off = data.offset.checked_add(bo_accessed_size).ok_or(EINVAL)?;
+        if end_off as usize > bo.size() {
+            return Err(EINVAL);
+        }
+
+        if !VM_USER_RANGE.is_superset(range.clone()) {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Invalid map range {:#x}..{:#x} (not contained in user range)\n",
+                start,
+                end
+            );
+            return Err(EINVAL); // Invalid map range
+        }
+
+        let prot = if data.flags & uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_READ != 0 {
+            if data.flags & uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_WRITE != 0 {
+                mmu::PROT_GPU_SHARED_RW
+            } else {
+                mmu::PROT_GPU_SHARED_RO
+            }
+        } else if data.flags & uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_WRITE != 0 {
+            mmu::PROT_GPU_SHARED_WO
+        } else {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Must specify read or write (flags: {:#x})\n",
+                data.flags
+            );
+            return Err(EINVAL); // Must specify one of DRM_ASAHI_BIND_{READ,WRITE}
+        };
+
+        let guard = file.inner().vms().get(vm_id).ok_or(ENOENT)?;
+
+        // Clone it immediately so we aren't holding the XArray lock
+        let vm = guard.borrow().vm.clone();
+        let kernel_range = guard.borrow().kernel_range.clone();
+        core::mem::drop(guard);
+
+        if kernel_range.overlaps(range) {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Invalid map range {:#x}..{:#x} (intrudes in kernel range)\n",
+                start,
+                end
+            );
+            return Err(EINVAL);
+        }
+
+        vm.bind_object(
+            &bo.gem,
+            data.addr,
+            data.range,
+            data.offset,
+            prot,
+            single_page,
+        )?;
+
+        Ok(0)
+    }
+
+    pub(crate) fn do_gem_unbind(
+        vm_id: usize,
+        data: &uapi::drm_asahi_gem_bind_op,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if data.offset != 0
+            || data.flags != uapi::drm_asahi_bind_flags_DRM_ASAHI_BIND_UNBIND
+            || data.handle != 0
+        {
+            cls_pr_debug!(Errors, "gem_unbind: offset/flags/handle not zero\n");
+            return Err(EINVAL);
+        }
+
+        if (data.addr | data.range) as usize & mmu::UAT_PGMSK != 0 {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Addr/range/offset not page aligned: {:#x} {:#x}\n",
+                data.addr,
+                data.range
+            );
+            return Err(EINVAL); // Must be page aligned
+        }
+
+        let start = data.addr;
+        let end = data.addr.checked_add(data.range).ok_or(EINVAL)?;
+        let range = start..end;
+
+        if !VM_USER_RANGE.is_superset(range.clone()) {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Invalid unmap range {:#x}..{:#x} (not contained in user range)\n",
+                start,
+                end
+            );
+            return Err(EINVAL); // Invalid map range
+        }
+
+        let guard = file.inner().vms().get(vm_id).ok_or(ENOENT)?;
+
+        // Clone it immediately so we aren't holding the XArray lock
+        let vm = guard.borrow().vm.clone();
+        let kernel_range = guard.borrow().kernel_range.clone();
+        core::mem::drop(guard);
+
+        if kernel_range.overlaps(range.clone()) {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind: Invalid unmap range {:#x}..{:#x} (intrudes in kernel range)\n",
+                start,
+                end
+            );
+            return Err(EINVAL);
+        }
+
+        vm.unmap_range(range.start, range.range())?;
+
+        Ok(0)
+    }
+
+    pub(crate) fn unbind_gem_object(file: &DrmFile, bo: &gem::Object) -> Result {
+        let mut index = 0;
+        loop {
+            let item = file
+                .inner()
+                .vms()
+                .find(index, xarray::XArray::<KBox<Vm>>::MAX);
+            match item {
+                Some((idx, file_vm)) => {
+                    // Clone since we can't hold the xarray spinlock while
+                    // calling drop_mappings()
+                    let vm = file_vm.borrow().vm.clone();
+                    core::mem::drop(file_vm);
+                    vm.drop_mappings(bo)?;
+                    if idx == xarray::XArray::<KBox<Vm>>::MAX {
+                        break;
+                    }
+                    index = idx + 1;
+                }
+                None => break,
+            }
+        }
+        Ok(())
+    }
+
+    /// IOCTL: gem_bind_object: Map or unmap a GEM object as a special object.
+    pub(crate) fn gem_bind_object(
+        device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_gem_bind_object,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        mod_dev_dbg!(
+            device,
+            "[File {} VM {}]: IOCTL: gem_bind_object op={:?} handle={:#x?} flags={:#x?} {:#x?}:{:#x?} object_handle={:#x?}\n",
+            file.inner().id,
+            data.vm_id,
+            data.op,
+            data.handle,
+            data.flags,
+            data.offset,
+            data.range,
+            data.object_handle
+        );
+
+        if data.pad != 0 {
+            cls_pr_debug!(Errors, "gem_bind_object: Unexpected pad\n");
+            return Err(EINVAL);
+        }
+
+        if data.vm_id != 0 {
+            cls_pr_debug!(Errors, "gem_bind_object: Unexpected vm_id\n");
+            return Err(EINVAL);
+        }
+
+        match data.op {
+            uapi::drm_asahi_bind_object_op_DRM_ASAHI_BIND_OBJECT_OP_BIND => {
+                Self::do_gem_bind_object(device, dev_data, data, file)
+            }
+            uapi::drm_asahi_bind_object_op_DRM_ASAHI_BIND_OBJECT_OP_UNBIND => {
+                Self::do_gem_unbind_object(device, dev_data, data, file)
+            }
+            _ => {
+                cls_pr_debug!(Errors, "gem_bind_object: Invalid op {}\n", data.op);
+                Err(EINVAL)
+            }
+        }
+    }
+
+    pub(crate) fn do_gem_bind_object(
+        _device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_gem_bind_object,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if (data.range | data.offset) as usize & mmu::UAT_PGMSK != 0 {
+            cls_pr_debug!(
+                Errors,
+                "gem_bind_object: Range/offset not page aligned: {:#x} {:#x}\n",
+                data.range,
+                data.offset
+            );
+            return Err(EINVAL); // Must be page aligned
+        }
+
+        if data.flags != uapi::drm_asahi_bind_object_flags_DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS {
+            cls_pr_debug!(Errors, "gem_bind_object: Invalid flags {:#x}\n", data.flags);
+            return Err(EINVAL);
+        }
+
+        let offset = data.offset.try_into()?;
+        let end_offset = data
+            .offset
+            .checked_add(data.range)
+            .ok_or(EINVAL)?
+            .try_into()?;
+        let bo = gem::lookup_handle(file, data.handle)?;
+
+        let mapping = Arc::new(
+            dev_data.gpu.map_timestamp_buffer(bo, offset..end_offset)?,
+            GFP_KERNEL,
+        )?;
+        let obj = KBox::new(Object::TimestampBuffer(mapping), GFP_KERNEL)?;
+        let handle = file.inner().objects().alloc(obj)? as u64;
+
+        data.object_handle = handle as u32;
+        Ok(0)
+    }
+
+    pub(crate) fn do_gem_unbind_object(
+        _device: &AsahiDevice,
+        _dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_gem_bind_object,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if data.range != 0 || data.offset != 0 {
+            cls_pr_debug!(
+                Errors,
+                "gem_unbind_object: Range/offset not zero: {:#x} {:#x}\n",
+                data.range,
+                data.offset
+            );
+            return Err(EINVAL);
+        }
+
+        if data.flags != 0 {
+            cls_pr_debug!(
+                Errors,
+                "gem_unbind_object: Invalid flags {:#x}\n",
+                data.flags
+            );
+            return Err(EINVAL);
+        }
+
+        if data.handle != 0 {
+            cls_pr_debug!(
+                Errors,
+                "gem_unbind_object: Invalid handle {}\n",
+                data.handle
+            );
+            return Err(EINVAL);
+        }
+
+        if file
+            .inner()
+            .objects()
+            .remove(data.object_handle as usize)
+            .is_none()
+        {
+            Err(ENOENT)
+        } else {
+            Ok(0)
+        }
+    }
+
+    /// IOCTL: queue_create: Create a new command submission queue of a given type.
+    pub(crate) fn queue_create(
+        device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_queue_create,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        let file_id = file.inner().id;
+
+        mod_dev_dbg!(
+            device,
+            "[File {} VM {}]: Creating queue prio={:?} flags={:#x?}\n",
+            file_id,
+            data.vm_id,
+            data.priority,
+            data.flags,
+        );
+
+        if data.flags != 0 || data.priority > uapi::drm_asahi_priority_DRM_ASAHI_PRIORITY_REALTIME {
+            cls_pr_debug!(Errors, "queue_create: Invalid arguments\n");
+            return Err(EINVAL);
+        }
+
+        // TODO: Allow with CAP_SYS_NICE
+        if data.priority >= uapi::drm_asahi_priority_DRM_ASAHI_PRIORITY_HIGH {
+            cls_pr_debug!(Errors, "queue_create: Invalid priority\n");
+            return Err(EINVAL);
+        }
+
+        let resv = file.inner().queues().reserve()?;
+        let file_vm = file
+            .inner()
+            .vms()
+            .get(data.vm_id.try_into()?)
+            .ok_or(ENOENT)?;
+        let vm = file_vm.borrow().vm.clone();
+        let ualloc = file_vm.borrow().ualloc.clone();
+        let ualloc_priv = file_vm.borrow().ualloc_priv.clone();
+        // Drop the vms lock eagerly
+        core::mem::drop(file_vm);
+
+        let queue = dev_data.gpu.new_queue(
+            vm,
+            ualloc,
+            ualloc_priv,
+            // TODO: Plumb the priority enum deeper instead of converting it here
+            uapi::drm_asahi_priority_DRM_ASAHI_PRIORITY_REALTIME - data.priority,
+            data.usc_exec_base,
+        )?;
+
+        data.queue_id = resv.index().try_into()?;
+        resv.store(Arc::pin_init(Mutex::new(queue), GFP_KERNEL)?)?;
+
+        Ok(0)
+    }
+
+    /// IOCTL: queue_destroy: Destroy a command submission queue.
+    pub(crate) fn queue_destroy(
+        _device: &AsahiDevice,
+        _dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_queue_destroy,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        if file
+            .inner()
+            .queues()
+            .remove(data.queue_id as usize)
+            .is_none()
+        {
+            Err(ENOENT)
+        } else {
+            Ok(0)
+        }
+    }
+
+    /// IOCTL: submit: Submit GPU work to a command submission queue.
+    pub(crate) fn submit(
+        device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_submit,
+        file: &DrmFile,
+    ) -> Result<u32> {
+        debug::update_debug_flags();
+
+        if data.flags != 0 || data.pad != 0 {
+            cls_pr_debug!(Errors, "submit: Invalid arguments\n");
+            return Err(EINVAL);
+        }
+
+        let gpu = &dev_data.gpu;
+        gpu.update_globals();
+
+        // Upgrade to Arc<T> to drop the XArray lock early
+        let queue: Arc<Mutex<KBox<dyn queue::Queue>>> = file
+            .inner()
+            .queues()
+            .get(data.queue_id.try_into()?)
+            .ok_or(ENOENT)?
+            .borrow()
+            .into();
+
+        let id = gpu.ids().submission.next();
+        mod_dev_dbg!(
+            device,
+            "[File {} Queue {}]: IOCTL: submit (submission ID: {})\n",
+            file.inner().id,
+            data.queue_id,
+            id
+        );
+
+        mod_dev_dbg!(
+            device,
+            "[File {} Queue {}]: IOCTL: submit({}): Parsing syncs\n",
+            file.inner().id,
+            data.queue_id,
+            id
+        );
+        let syncs =
+            SyncItem::parse_array(file, data.syncs, data.in_sync_count, data.out_sync_count)?;
+
+        mod_dev_dbg!(
+            device,
+            "[File {} Queue {}]: IOCTL: submit({}): Parsing commands\n",
+            file.inner().id,
+            data.queue_id,
+            id
+        );
+
+        let mut vec = KVec::new();
+
+        // Copy the command buffer into the kernel. Because we need to iterate
+        // the command buffer twice, we do this in one big copy_from_user to
+        // avoid TOCTOU issues.
+        let reader = UserSlice::new(data.cmdbuf as UserPtr, data.cmdbuf_size as usize).reader();
+        reader.read_all(&mut vec, GFP_KERNEL)?;
+
+        let objects = file.inner().objects();
+        let ret = queue
+            .lock()
+            .submit(id, syncs, data.in_sync_count as usize, &vec, objects);
+
+        match ret {
+            Err(ERESTARTSYS) => Err(ERESTARTSYS),
+            Err(e) => {
+                dev_info!(
+                    device.as_ref(),
+                    "[File {} Queue {}]: IOCTL: submit failed! (submission ID: {} err: {:?})\n",
+                    file.inner().id,
+                    data.queue_id,
+                    id,
+                    e
+                );
+                Err(e)
+            }
+            Ok(()) => Ok(0),
+        }
+    }
+
+    /// IOCTL: get_time: Get the current GPU timer value.
+    pub(crate) fn get_time(
+        _device: &AsahiDevice,
+        dev_data: <Self as drm::file::DriverFile>::BorrowedData<'_>,
+        data: &mut uapi::drm_asahi_get_time,
+        _file: &DrmFile,
+    ) -> Result<u32> {
+        if data.flags != 0 {
+            cls_pr_debug!(Errors, "get_time: Unexpected flags\n");
+            return Err(EINVAL);
+        }
+
+        // TODO: Do this on device-init for perf.
+        let gpu = &dev_data.gpu;
+        let frequency_hz = gpu.get_cfg().base_clock_hz as u64;
+        let ts_gcd = gcd(frequency_hz, NSEC_PER_SEC as u64);
+
+        let num = (NSEC_PER_SEC as u64) / ts_gcd;
+        let den = frequency_hz / ts_gcd;
+
+        let raw: u64;
+
+        // SAFETY: The inline assembly only reads the CNTPCT_EL0 counter and has no other effects.
+        unsafe {
+            core::arch::asm!(
+                "mrs {x}, CNTPCT_EL0",
+                x = out(reg) raw
+            );
+        }
+
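+        // The conversion is raw * NSEC_PER_SEC / base_clock_hz; the ratio was reduced by its
+        // GCD above so the intermediate 64-bit multiply is less likely to overflow.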
+        data.gpu_timestamp = (raw * num) / den;
+
+        Ok(0)
+    }
+}
+
+impl Drop for File {
+    fn drop(&mut self) {
+        mod_pr_debug!("[File {}]: Closing...\n", self.id);
+    }
+}
diff --git a/drivers/gpu/drm/asahi/float.rs b/drivers/gpu/drm/asahi/float.rs
new file mode 100644
index 00000000000000..6feddd399ad9e6
--- /dev/null
+++ b/drivers/gpu/drm/asahi/float.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Basic soft floating-point support
+//!
+//! The GPU firmware requires a large number of power-related configuration values, many of which
+//! are IEEE 754 32-bit floating point values. These values change not only between GPU/SoC
+//! variants, but also between specific hardware platforms using these SoCs, so they must be
+//! derived from device tree properties. There are many redundant values computed from the same
+//! inputs with simple add/sub/mul/div calculations, plus a few values that are actually specific
+//! to each individual device depending on its binning and fused voltage configuration, so it
+//! doesn't make sense to store the final values to be passed to the firmware in the device tree.
+//!
+//! Therefore, we need a way to perform floating-point calculations in the kernel.
+//!
+//! Using the actual FPU from kernel mode is asking for trouble, since there is no way to bound
+//! the execution of FPU instructions to a controlled section of code without outright putting it
+//! in its own compilation unit, which is quite painful for Rust. Since these calculations only
+//! have to happen at initialization time and performance is not a concern, let's use a simple
+//! software float implementation instead.
+//!
+//! This implementation makes no attempt to be fully IEEE754 compliant, but it's good enough and
+//! gives bit-identical results to macOS in the vast majority of cases, with one or two exceptions
+//! related to slightly non-compliant rounding.
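+//!
+//! Values are constructed with the `f32!{}` macro (for compile-time constants) or via the
+//! `From` impls for integer types below, and combined with the usual `+`, `-`, `*` and `/`
+//! operators, e.g. `f32!(1.5) * F32::from(100u32)`.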
+
+use core::ops;
+use kernel::{init::Zeroable, of, prelude::*};
+
+/// An IEEE754-compatible floating point number implemented in software.
+#[derive(Default, Debug, Copy, Clone)]
+#[repr(transparent)]
+pub(crate) struct F32(u32);
+
+// SAFETY: F32 is a transparent repr of `u32` and therefore zeroable
+unsafe impl Zeroable for F32 {}
+
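+/// Unpacked intermediate representation: `sign`, unbiased exponent `exp`, and significand
+/// `frac`, kept left-shifted by 9 bits (normalized so its top bit sits at bit 32) to leave
+/// headroom for rounding when packing back into an `F32`.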
+#[derive(Default, Debug, Copy, Clone)]
+struct F32U {
+    sign: bool,
+    exp: i32,
+    frac: i64,
+}
+
+impl F32 {
+    /// Convert a raw 32-bit representation into an F32
+    pub(crate) const fn from_bits(u: u32) -> F32 {
+        F32(u)
+    }
+
+    // Convert an `f32` value into an `F32`
+    //
+    // This must ONLY be used in const context. Use the `f32!{}` macro to do it safely.
+    #[doc(hidden)]
+    pub(crate) const fn from_f32(v: f32) -> F32 {
+        // Replace with to_bits() once the kernel's minimum Rust version is >= 1.83.0
+        #[allow(clippy::transmute_float_to_int)]
+        // SAFETY: Transmuting f32 to u32 is always safe
+        F32(unsafe { core::mem::transmute::<f32, u32>(v) })
+    }
+
+    // Convert an `F32` into an `f32` value
+    //
+    // For testing only.
+    #[doc(hidden)]
+    #[cfg(test)]
+    pub(crate) fn to_f32(self) -> f32 {
+        f32::from_bits(self.0)
+    }
+
+    const fn unpack(&self) -> F32U {
+        F32U {
+            sign: self.0 & (1 << 31) != 0,
+            exp: ((self.0 >> 23) & 0xff) as i32 - 127,
+            frac: (((self.0 & 0x7fffff) | 0x800000) as i64) << 9,
+        }
+        .norm()
+    }
+}
+
+/// Safely construct an `F32` out of a constant floating-point value.
+///
+/// This ensures that the conversion happens in const context, so no floating point operations are
+/// emitted.
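+///
+/// For example, `f32!(1.5)` yields an `F32` constant, and `f32!([1.0, 2.0, 3.0])` yields an
+/// array of `F32` constants.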
+#[macro_export]
+macro_rules! f32 {
+    ([$($val:expr),*]) => {{
+        [$(f32!($val)),*]
+    }};
+    ($val:expr) => {{
+        const _K: $crate::float::F32 = $crate::float::F32::from_f32($val);
+        _K
+    }};
+}
+
+impl ops::Neg for F32 {
+    type Output = F32;
+
+    fn neg(self) -> F32 {
+        F32(self.0 ^ (1 << 31))
+    }
+}
+
+impl ops::Add<F32> for F32 {
+    type Output = F32;
+
+    fn add(self, rhs: F32) -> F32 {
+        self.unpack().add(rhs.unpack()).pack()
+    }
+}
+
+impl ops::Sub<F32> for F32 {
+    type Output = F32;
+
+    fn sub(self, rhs: F32) -> F32 {
+        self.unpack().add((-rhs).unpack()).pack()
+    }
+}
+
+impl ops::Mul<F32> for F32 {
+    type Output = F32;
+
+    fn mul(self, rhs: F32) -> F32 {
+        self.unpack().mul(rhs.unpack()).pack()
+    }
+}
+
+impl ops::Div<F32> for F32 {
+    type Output = F32;
+
+    fn div(self, rhs: F32) -> F32 {
+        self.unpack().div(rhs.unpack()).pack()
+    }
+}
+
+macro_rules! from_ints {
+    ($u:ty, $i:ty) => {
+        impl From<$i> for F32 {
+            fn from(v: $i) -> F32 {
+                F32U::from_i64(v as i64).pack()
+            }
+        }
+        impl From<$u> for F32 {
+            fn from(v: $u) -> F32 {
+                F32U::from_u64(v as u64).pack()
+            }
+        }
+    };
+}
+
+from_ints!(u8, i8);
+from_ints!(u16, i16);
+from_ints!(u32, i32);
+from_ints!(u64, i64);
+
+impl F32U {
+    const INFINITY: F32U = f32!(f32::INFINITY).unpack();
+    const NEG_INFINITY: F32U = f32!(f32::NEG_INFINITY).unpack();
+
+    fn from_i64(v: i64) -> F32U {
+        F32U {
+            sign: v < 0,
+            exp: 32,
+            frac: v.abs(),
+        }
+        .norm()
+    }
+
+    fn from_u64(mut v: u64) -> F32U {
+        let mut exp = 32;
+        if v >= (1 << 63) {
+            // Drop one bit so the value fits in the signed `frac` field, and bump the exponent
+            // to compensate (an F32U represents `frac * 2^(exp - 32)`).
+            exp = 33;
+            v >>= 1;
+        }
+        F32U {
+            sign: false,
+            exp,
+            frac: v as i64,
+        }
+        .norm()
+    }
+
+    fn shr(&mut self, shift: i32) {
+        if shift > 63 {
+            self.exp = 0;
+            self.frac = 0;
+        } else {
+            self.frac >>= shift;
+        }
+    }
+
+    fn align(a: &mut F32U, b: &mut F32U) {
+        if a.exp > b.exp {
+            b.shr(a.exp - b.exp);
+            b.exp = a.exp;
+        } else {
+            a.shr(b.exp - a.exp);
+            a.exp = b.exp;
+        }
+    }
+
+    fn mul(self, other: F32U) -> F32U {
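+        // Both significands carry a 2^32 scale; shifting each down by 8 before multiplying keeps
+        // the product within i64, and the final >> 16 removes the duplicated scale factor.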
+        F32U {
+            sign: self.sign != other.sign,
+            exp: self.exp + other.exp,
+            frac: ((self.frac >> 8) * (other.frac >> 8)) >> 16,
+        }
+    }
+
+    fn div(self, other: F32U) -> F32U {
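+        // Division by zero (or of an infinity) saturates to an infinity with the dividend's
+        // sign; otherwise the numerator is pre-shifted so the quotient keeps the 2^32 scale.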
+        if other.frac == 0 || self.is_inf() {
+            if self.sign {
+                F32U::NEG_INFINITY
+            } else {
+                F32U::INFINITY
+            }
+        } else {
+            F32U {
+                sign: self.sign != other.sign,
+                exp: self.exp - other.exp,
+                frac: ((self.frac << 24) / (other.frac >> 8)),
+            }
+        }
+    }
+
+    fn add(mut self, mut other: F32U) -> F32U {
+        F32U::align(&mut self, &mut other);
+        if self.sign == other.sign {
+            self.frac += other.frac;
+        } else {
+            self.frac -= other.frac;
+        }
+        if self.frac < 0 {
+            self.sign = !self.sign;
+            self.frac = -self.frac;
+        }
+        self
+    }
+
+    const fn norm(mut self) -> F32U {
+        let lz = self.frac.leading_zeros() as i32;
+        if lz > 31 {
+            self.frac <<= lz - 31;
+            self.exp -= lz - 31;
+        } else if lz < 31 {
+            self.frac >>= 31 - lz;
+            self.exp += 31 - lz;
+        }
+
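+        // Anything that underflows the minimum normal exponent is flushed to a canonical zero.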
+        if self.is_zero() {
+            return F32U {
+                sign: self.sign,
+                frac: 0,
+                exp: 0,
+            };
+        }
+        self
+    }
+
+    const fn is_zero(&self) -> bool {
+        self.frac == 0 || self.exp < -126
+    }
+
+    const fn is_inf(&self) -> bool {
+        self.exp > 127
+    }
+
+    const fn pack(mut self) -> F32 {
+        self = self.norm();
+        if !self.is_zero() {
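+            // Round to nearest (ties up): the packed fraction is `frac >> 9`, so add half of
+            // that LSB before truncating.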
+            self.frac += 0x100;
+            self = self.norm();
+        }
+
+        if self.is_inf() {
+            if self.sign {
+                return f32!(f32::NEG_INFINITY);
+            } else {
+                return f32!(f32::INFINITY);
+            }
+        } else if self.is_zero() {
+            if self.sign {
+                return f32!(-0.0);
+            } else {
+                return f32!(0.0);
+            }
+        }
+
+        F32(if self.sign { 1u32 << 31 } else { 0u32 }
+            | ((self.exp + 127) as u32) << 23
+            | ((self.frac >> 9) & 0x7fffff) as u32)
+    }
+}
+
+impl<'a> TryFrom<of::Property<'a>> for F32 {
+    type Error = Error;
+
+    fn try_from(p: of::Property<'_>) -> core::result::Result<F32, Self::Error> {
+        let bits: u32 = p.try_into()?;
+        Ok(F32::from_bits(bits))
+    }
+}
+
+impl of::PropertyUnit for F32 {
+    const UNIT_SIZE: usize = 4;
+
+    fn from_bytes(data: &[u8]) -> Result<Self> {
+        Ok(F32::from_bits(<u32 as of::PropertyUnit>::from_bytes(data)?))
+    }
+}
+
+// TODO: Make this an actual test and figure out how to make it run.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_all() {
+        fn add(a: f32, b: f32) {
+            println!(
+                "{} + {} = {} {}",
+                a,
+                b,
+                (F32::from_f32(a) + F32::from_f32(b)).to_f32(),
+                a + b
+            );
+        }
+        fn sub(a: f32, b: f32) {
+            println!(
+                "{} - {} = {} {}",
+                a,
+                b,
+                (F32::from_f32(a) - F32::from_f32(b)).to_f32(),
+                a - b
+            );
+        }
+        fn mul(a: f32, b: f32) {
+            println!(
+                "{} * {} = {} {}",
+                a,
+                b,
+                (F32::from_f32(a) * F32::from_f32(b)).to_f32(),
+                a * b
+            );
+        }
+        fn div(a: f32, b: f32) {
+            println!(
+                "{} / {} = {} {}",
+                a,
+                b,
+                (F32::from_f32(a) / F32::from_f32(b)).to_f32(),
+                a / b
+            );
+        }
+
+        fn test(a: f32, b: f32) {
+            add(a, b);
+            sub(a, b);
+            mul(a, b);
+            div(a, b);
+        }
+
+        test(1.123, 7.567);
+        test(1.123, 1.456);
+        test(7.567, 1.123);
+        test(1.123, -7.567);
+        test(1.123, -1.456);
+        test(7.567, -1.123);
+        test(-1.123, -7.567);
+        test(-1.123, -1.456);
+        test(-7.567, -1.123);
+        test(1000.123, 0.001);
+        test(1000.123, 0.0000001);
+        test(0.0012, 1000.123);
+        test(0.0000001, 1000.123);
+        test(0., 0.);
+        test(0., 1.);
+        test(1., 0.);
+        test(1., 1.);
+        test(2., f32::INFINITY);
+        test(2., f32::NEG_INFINITY);
+        test(f32::INFINITY, 2.);
+        test(f32::NEG_INFINITY, 2.);
+        test(f32::MAX, 2.);
+        test(f32::MIN, 2.);
+        test(f32::MIN_POSITIVE, 2.);
+        test(2., f32::MAX);
+        test(2., f32::MIN);
+        test(2., f32::MIN_POSITIVE);
+    }
+}
diff --git a/drivers/gpu/drm/asahi/fw/buffer.rs b/drivers/gpu/drm/asahi/fw/buffer.rs
new file mode 100644
index 00000000000000..fafee8357a4fb2
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/buffer.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU tiled vertex buffer control firmware structures
+
+use super::types::*;
+use super::workqueue;
+use crate::{default_zeroed, no_debug, trivial_gpustruct};
+use kernel::sync::Arc;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct BlockControl {
+        pub(crate) total: AtomicU32,
+        pub(crate) wptr: AtomicU32,
+        pub(crate) unk: AtomicU32,
+        pub(crate) pad: Pad<0x34>,
+    }
+    default_zeroed!(BlockControl);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Counter {
+        pub(crate) count: AtomicU32,
+        __pad: Pad<0x3c>,
+    }
+    default_zeroed!(Counter);
+
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct Stats {
+        pub(crate) max_pages: AtomicU32,
+        pub(crate) max_b: AtomicU32,
+        pub(crate) overflow_count: AtomicU32,
+        pub(crate) gpu_c: AtomicU32,
+        pub(crate) __pad0: Pad<0x10>,
+        pub(crate) reset: AtomicU32,
+        pub(crate) __pad1: Pad<0x1c>,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Info<'a> {
+        pub(crate) gpu_counter: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) last_id: i32,
+        pub(crate) cur_id: i32,
+        pub(crate) unk_10: u32,
+        pub(crate) gpu_counter2: u32,
+        pub(crate) unk_18: u32,
+
+        #[ver(V < V13_0B4 || G >= G14X)]
+        pub(crate) unk_1c: u32,
+
+        pub(crate) page_list: GpuPointer<'a, &'a [u32]>,
+        pub(crate) page_list_size: u32,
+        pub(crate) page_count: AtomicU32,
+        pub(crate) max_blocks: u32,
+        pub(crate) block_count: AtomicU32,
+        pub(crate) unk_38: u32,
+        pub(crate) block_list: GpuPointer<'a, &'a [u32]>,
+        pub(crate) block_ctl: GpuPointer<'a, super::BlockControl>,
+        pub(crate) last_page: AtomicU32,
+        pub(crate) gpu_page_ptr1: u32,
+        pub(crate) gpu_page_ptr2: u32,
+        pub(crate) unk_58: u32,
+        pub(crate) block_size: u32,
+        pub(crate) unk_60: U64,
+        pub(crate) counter: GpuPointer<'a, super::Counter>,
+        pub(crate) unk_70: u32,
+        pub(crate) unk_74: u32,
+        pub(crate) unk_78: u32,
+        pub(crate) unk_7c: u32,
+        pub(crate) unk_80: u32,
+        pub(crate) max_pages: u32,
+        pub(crate) max_pages_nomemless: u32,
+        pub(crate) unk_8c: u32,
+        pub(crate) unk_90: Array<0x30, u8>,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Scene<'a> {
+        #[ver(G >= G14X)]
+        pub(crate) control_word: GpuPointer<'a, &'a [u32]>,
+        #[ver(G >= G14X)]
+        pub(crate) control_word2: GpuPointer<'a, &'a [u32]>,
+        pub(crate) pass_page_count: AtomicU32,
+        pub(crate) unk_4: u32,
+        pub(crate) unk_8: U64,
+        pub(crate) unk_10: U64,
+        pub(crate) user_buffer: GpuPointer<'a, &'a [u8]>,
+        pub(crate) unk_20: u32,
+        #[ver(V >= V13_3)]
+        pub(crate) unk_28: U64,
+        pub(crate) stats: GpuWeakPointer<super::Stats>,
+        pub(crate) total_page_count: AtomicU32,
+        #[ver(G < G14X)]
+        pub(crate) unk_30: U64, // pad
+        #[ver(G < G14X)]
+        pub(crate) unk_38: U64, // pad
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct InitBuffer<'a> {
+        pub(crate) tag: workqueue::CommandType,
+        pub(crate) vm_slot: u32,
+        pub(crate) buffer_slot: u32,
+        pub(crate) unk_c: u32,
+        pub(crate) block_count: u32,
+        pub(crate) buffer: GpuPointer<'a, super::Info::ver>,
+        pub(crate) stamp_value: EventValue,
+    }
+}
+
+trivial_gpustruct!(BlockControl);
+trivial_gpustruct!(Counter);
+trivial_gpustruct!(Stats);
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct Info {
+    pub(crate) block_ctl: GpuObject<BlockControl>,
+    pub(crate) counter: GpuObject<Counter>,
+    pub(crate) page_list: GpuArray<u32>,
+    pub(crate) block_list: GpuArray<u32>,
+}
+
+#[versions(AGX)]
+impl GpuStruct for Info::ver {
+    type Raw<'a> = raw::Info::ver<'a>;
+}
+
+pub(crate) struct ClusterBuffers {
+    pub(crate) tilemaps: GpuArray<u8>,
+    pub(crate) meta: GpuArray<u8>,
+}
+
+#[versions(AGX)]
+pub(crate) struct Scene {
+    pub(crate) user_buffer: GpuArray<u8>,
+    pub(crate) buffer: crate::buffer::Buffer::ver,
+    pub(crate) tvb_heapmeta: GpuArray<u8>,
+    pub(crate) tvb_tilemap: GpuArray<u8>,
+    pub(crate) tpc: Arc<GpuArray<u8>>,
+    pub(crate) clustering: Option<ClusterBuffers>,
+    pub(crate) preempt_buf: GpuArray<u8>,
+    #[ver(G >= G14X)]
+    pub(crate) control_word: GpuArray<u32>,
+}
+
+#[versions(AGX)]
+no_debug!(Scene::ver);
+
+#[versions(AGX)]
+impl GpuStruct for Scene::ver {
+    type Raw<'a> = raw::Scene::ver<'a>;
+}
+
+#[versions(AGX)]
+pub(crate) struct InitBuffer {
+    pub(crate) scene: Arc<crate::buffer::Scene::ver>,
+}
+
+#[versions(AGX)]
+no_debug!(InitBuffer::ver);
+
+#[versions(AGX)]
+impl workqueue::Command for InitBuffer::ver {}
+
+#[versions(AGX)]
+impl GpuStruct for InitBuffer::ver {
+    type Raw<'a> = raw::InitBuffer::ver<'a>;
+}
diff --git a/drivers/gpu/drm/asahi/fw/channels.rs b/drivers/gpu/drm/asahi/fw/channels.rs
new file mode 100644
index 00000000000000..c1a7ec82aad1e2
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/channels.rs
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU communication channel firmware structures (ring buffers)
+
+use super::types::*;
+use crate::default_zeroed;
+use core::sync::atomic::Ordering;
+use kernel::static_assert;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct ChannelState<'a> {
+        pub(crate) read_ptr: AtomicU32,
+        __pad0: Pad<0x1c>,
+        pub(crate) write_ptr: AtomicU32,
+        __pad1: Pad<0xc>,
+        _p: PhantomData<&'a ()>,
+    }
+    default_zeroed!(<'a>, ChannelState<'a>);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct FwCtlChannelState<'a> {
+        pub(crate) read_ptr: AtomicU32,
+        __pad0: Pad<0xc>,
+        pub(crate) write_ptr: AtomicU32,
+        __pad1: Pad<0xc>,
+        _p: PhantomData<&'a ()>,
+    }
+    default_zeroed!(<'a>, FwCtlChannelState<'a>);
+}
+
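+/// Interface to receive (firmware-to-driver) ring buffer state: the driver reads the write
+/// pointer advanced by the firmware and publishes its read pointer back.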
+pub(crate) trait RxChannelState: GpuStruct + Debug + Default
+where
+    for<'a> <Self as GpuStruct>::Raw<'a>: Default + Zeroable,
+{
+    const SUB_CHANNELS: usize;
+
+    fn wptr(raw: &Self::Raw<'_>, index: usize) -> u32;
+    fn set_rptr(raw: &Self::Raw<'_>, index: usize, rptr: u32);
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct ChannelState {}
+
+impl GpuStruct for ChannelState {
+    type Raw<'a> = raw::ChannelState<'a>;
+}
+
+impl RxChannelState for ChannelState {
+    const SUB_CHANNELS: usize = 1;
+
+    fn wptr(raw: &Self::Raw<'_>, _index: usize) -> u32 {
+        raw.write_ptr.load(Ordering::Acquire)
+    }
+
+    fn set_rptr(raw: &Self::Raw<'_>, _index: usize, rptr: u32) {
+        raw.read_ptr.store(rptr, Ordering::Release);
+    }
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct FwLogChannelState {}
+
+impl GpuStruct for FwLogChannelState {
+    type Raw<'a> = Array<6, raw::ChannelState<'a>>;
+}
+
+impl RxChannelState for FwLogChannelState {
+    const SUB_CHANNELS: usize = 6;
+
+    fn wptr(raw: &Self::Raw<'_>, index: usize) -> u32 {
+        raw[index].write_ptr.load(Ordering::Acquire)
+    }
+
+    fn set_rptr(raw: &Self::Raw<'_>, index: usize, rptr: u32) {
+        raw[index].read_ptr.store(rptr, Ordering::Release);
+    }
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct FwCtlChannelState {}
+
+impl GpuStruct for FwCtlChannelState {
+    type Raw<'a> = raw::FwCtlChannelState<'a>;
+}
+
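+/// Interface to transmit (driver-to-firmware) ring buffer state: the driver advances the write
+/// pointer and reads back the consumer's read pointer.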
+pub(crate) trait TxChannelState: GpuStruct + Debug + Default {
+    fn rptr(raw: &Self::Raw<'_>) -> u32;
+    fn set_wptr(raw: &Self::Raw<'_>, wptr: u32);
+}
+
+impl TxChannelState for ChannelState {
+    fn rptr(raw: &Self::Raw<'_>) -> u32 {
+        raw.read_ptr.load(Ordering::Acquire)
+    }
+
+    fn set_wptr(raw: &Self::Raw<'_>, wptr: u32) {
+        raw.write_ptr.store(wptr, Ordering::Release);
+    }
+}
+
+impl TxChannelState for FwCtlChannelState {
+    fn rptr(raw: &Self::Raw<'_>) -> u32 {
+        raw.read_ptr.load(Ordering::Acquire)
+    }
+
+    fn set_wptr(raw: &Self::Raw<'_>, wptr: u32) {
+        raw.write_ptr.store(wptr, Ordering::Release);
+    }
+}
+
+#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)]
+#[repr(u32)]
+pub(crate) enum PipeType {
+    #[default]
+    Vertex = 0,
+    Fragment = 1,
+    Compute = 2,
+}
+
+#[versions(AGX)]
+#[derive(Debug, Copy, Clone, Default)]
+#[repr(C)]
+pub(crate) struct RunWorkQueueMsg {
+    pub(crate) pipe_type: PipeType,
+    pub(crate) work_queue: Option<GpuWeakPointer<super::workqueue::QueueInfo::ver>>,
+    pub(crate) wptr: u32,
+    pub(crate) event_slot: u32,
+    pub(crate) is_new: bool,
+    #[ver(V >= V13_2 && G == G14)]
+    pub(crate) __pad: Pad<0x2b>,
+    #[ver(V < V13_2 || G != G14)]
+    pub(crate) __pad: Pad<0x1b>,
+}
+
+#[versions(AGX)]
+pub(crate) type PipeMsg = RunWorkQueueMsg::ver;
+
+#[versions(AGX)]
+pub(crate) const DEVICECONTROL_SZ: usize = {
+    #[ver(V < V13_2 || G != G14)]
+    {
+        0x2c
+    }
+    #[ver(V >= V13_2 && G == G14)]
+    {
+        0x3c
+    }
+};
+
+// TODO: clean up when arbitrary_enum_discriminant is stable
+// https://github.com/rust-lang/rust/issues/60553
+
+#[versions(AGX)]
+#[derive(Debug, Copy, Clone)]
+#[repr(C, u32)]
+#[allow(dead_code)]
+pub(crate) enum DeviceControlMsg {
+    Unk00(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk01(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk02(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk03(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk04(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk05(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk06(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk07(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk08(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk09(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk0a(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk0b(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk0c(Array<DEVICECONTROL_SZ::ver, u8>),
+    #[ver(V >= V13_3)]
+    Unk0d(Array<DEVICECONTROL_SZ::ver, u8>),
+    GrowTVBAck {
+        unk_4: u32,
+        buffer_slot: u32,
+        vm_slot: u32,
+        counter: u32,
+        subpipe: u32,
+        halt_count: U64,
+        __pad: Pad<{ DEVICECONTROL_SZ::ver - 0x1c }>,
+    },
+    RecoverChannel {
+        pipe_type: u32,
+        work_queue: GpuWeakPointer<super::workqueue::QueueInfo::ver>,
+        event_value: u32,
+        __pad: Pad<{ DEVICECONTROL_SZ::ver - 0x10 }>,
+    },
+    IdlePowerOff {
+        val: u32,
+        __pad: Pad<{ DEVICECONTROL_SZ::ver - 0x4 }>,
+    },
+    Unk10(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk11(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk12(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk13(Array<DEVICECONTROL_SZ::ver, u8>),
+    Unk14(Array<DEVICECONTROL_SZ::ver, u8>), // Init?
+    Unk15(Array<DEVICECONTROL_SZ::ver, u8>), // Enable something
+    Unk16(Array<DEVICECONTROL_SZ::ver, u8>), // Disable something
+    DestroyContext {
+        unk_4: u32,
+        ctx_23: u8,
+        #[ver(V < V13_3)]
+        __pad0: Pad<3>,
+        unk_c: U32,
+        unk_10: U32,
+        ctx_0: u8,
+        ctx_1: u8,
+        ctx_4: u8,
+        #[ver(V < V13_3)]
+        __pad1: Pad<1>,
+        #[ver(V < V13_3)]
+        unk_18: u32,
+        gpu_context: Option<GpuWeakPointer<super::workqueue::GpuContextData>>,
+        #[ver(V < V13_3)]
+        __pad2: Pad<{ DEVICECONTROL_SZ::ver - 0x20 }>,
+        #[ver(V >= V13_3)]
+        __pad2: Pad<{ DEVICECONTROL_SZ::ver - 0x18 }>,
+    },
+    Unk18(Array<DEVICECONTROL_SZ::ver, u8>),
+    Initialize(Pad<DEVICECONTROL_SZ::ver>), // Update RegionC
+}
+
+#[versions(AGX)]
+static_assert!(core::mem::size_of::<DeviceControlMsg::ver>() == 4 + DEVICECONTROL_SZ::ver);
+
+#[versions(AGX)]
+default_zeroed!(DeviceControlMsg::ver);
+
+#[derive(Copy, Clone, Default, Debug)]
+#[repr(C)]
+#[allow(dead_code)]
+pub(crate) struct FwCtlMsg {
+    pub(crate) addr: U64,
+    pub(crate) unk_8: u32,
+    pub(crate) slot: u32,
+    pub(crate) page_count: u16,
+    pub(crate) unk_12: u16,
+}
+
+pub(crate) const EVENT_SZ: usize = 0x34;
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[repr(C, u32)]
+#[allow(dead_code)]
+pub(crate) enum ChannelErrorType {
+    MemoryError,
+    DMKill,
+    Aborted,
+    Unk3,
+    Unknown(u32),
+}
+
+#[derive(Debug, Copy, Clone)]
+#[repr(C, u32)]
+#[allow(dead_code)]
+pub(crate) enum EventMsg {
+    Fault,
+    Flag {
+        firing: [u32; 4],
+        unk_14: u16,
+    },
+    Unk2(Array<EVENT_SZ, u8>),
+    Unk3(Array<EVENT_SZ, u8>),
+    Timeout {
+        counter: u32,
+        unk_8: u32,
+        event_slot: i32,
+    },
+    Unk5(Array<EVENT_SZ, u8>),
+    Unk6(Array<EVENT_SZ, u8>),
+    GrowTVB {
+        vm_slot: u32,
+        buffer_slot: u32,
+        counter: u32,
+    },
+    ChannelError {
+        error_type: u32,
+        pipe_type: u32,
+        event_slot: u32,
+        event_value: u32,
+    },
+    // Max discriminant: 0x8
+}
+
+static_assert!(core::mem::size_of::<EventMsg>() == 4 + EVENT_SZ);
+
+pub(crate) const EVENT_MAX: u32 = 0x8;
+
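+/// Raw event ring entry: the same storage viewed either as a (tag, payload) pair or as a decoded
+/// [`EventMsg`].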
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub(crate) union RawEventMsg {
+    pub(crate) raw: (u32, Array<EVENT_SZ, u8>),
+    pub(crate) msg: EventMsg,
+}
+
+default_zeroed!(RawEventMsg);
+
+#[derive(Debug, Copy, Clone, Default)]
+#[repr(C)]
+pub(crate) struct RawFwLogMsg {
+    pub(crate) msg_type: u32,
+    __pad0: u32,
+    pub(crate) msg_index: U64,
+    __pad1: Pad<0x28>,
+}
+
+#[derive(Debug, Copy, Clone, Default)]
+#[repr(C)]
+pub(crate) struct RawFwLogPayloadMsg {
+    pub(crate) msg_type: u32,
+    pub(crate) seq_no: u32,
+    pub(crate) timestamp: U64,
+    pub(crate) msg: Array<0xc8, u8>,
+}
+
+#[derive(Debug, Copy, Clone, Default)]
+#[repr(C)]
+pub(crate) struct RawKTraceMsg {
+    pub(crate) msg_type: u32,
+    pub(crate) timestamp: U64,
+    pub(crate) args: Array<4, U64>,
+    pub(crate) code: u8,
+    pub(crate) channel: u8,
+    __pad: Pad<1>,
+    pub(crate) thread: u8,
+    pub(crate) unk_flag: U64,
+}
+
+#[versions(AGX)]
+pub(crate) const STATS_SZ: usize = {
+    #[ver(V < V13_0B4)]
+    {
+        0x2c
+    }
+    #[ver(V >= V13_0B4)]
+    {
+        0x3c
+    }
+};
+
+#[versions(AGX)]
+#[derive(Debug, Copy, Clone)]
+#[repr(C, u32)]
+#[allow(dead_code)]
+pub(crate) enum StatsMsg {
+    Power {
+        // 0x00
+        __pad: Pad<0x18>,
+        power: U64,
+    },
+    Unk1(Array<{ STATS_SZ::ver }, u8>),
+    PowerOn {
+        // 0x02
+        off_time: U64,
+    },
+    PowerOff {
+        // 0x03
+        on_time: U64,
+    },
+    Utilization {
+        // 0x04
+        timestamp: U64,
+        util1: u32,
+        util2: u32,
+        util3: u32,
+        util4: u32,
+    },
+    Unk5(Array<{ STATS_SZ::ver }, u8>),
+    Unk6(Array<{ STATS_SZ::ver }, u8>),
+    Unk7(Array<{ STATS_SZ::ver }, u8>),
+    Unk8(Array<{ STATS_SZ::ver }, u8>),
+    AvgPower {
+        // 0x09
+        active_cs: U64,
+        unk2: u32,
+        unk3: u32,
+        unk4: u32,
+        avg_power: u32,
+    },
+    Temperature {
+        // 0x0a
+        __pad: Pad<0x8>,
+        raw_value: u32,
+        scale: u32,
+        tmin: u32,
+        tmax: u32,
+    },
+    PowerState {
+        // 0x0b
+        timestamp: U64,
+        last_busy_ts: U64,
+        active: u32,
+        poweroff: u32,
+        unk1: u32,
+        pstate: u32,
+        unk2: u32,
+        unk3: u32,
+    },
+    FwBusy {
+        // 0x0c
+        timestamp: U64,
+        busy: u32,
+    },
+    PState {
+        // 0x0d
+        __pad: Pad<0x8>,
+        ps_min: u32,
+        unk1: u32,
+        ps_max: u32,
+        unk2: u32,
+    },
+    TempSensor {
+        // 0x0e
+        __pad: Pad<0x4>,
+        sensor_id: u32,
+        raw_value: u32,
+        scale: u32,
+        tmin: u32,
+        tmax: u32,
+    }, // Max discriminant: 0xe
+}
+
+#[versions(AGX)]
+static_assert!(core::mem::size_of::<StatsMsg::ver>() == 4 + STATS_SZ::ver);
+
+#[versions(AGX)]
+pub(crate) const STATS_MAX: u32 = 0xe;
+
+#[versions(AGX)]
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub(crate) union RawStatsMsg {
+    pub(crate) raw: (u32, Array<{ STATS_SZ::ver }, u8>),
+    pub(crate) msg: StatsMsg::ver,
+}
+
+#[versions(AGX)]
+default_zeroed!(RawStatsMsg::ver);
diff --git a/drivers/gpu/drm/asahi/fw/compute.rs b/drivers/gpu/drm/asahi/fw/compute.rs
new file mode 100644
index 00000000000000..ae98dbbd09a964
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/compute.rs
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU compute job firmware structures
+
+use super::types::*;
+use super::{event, job, workqueue};
+use crate::{microseq, mmu};
+use kernel::sync::Arc;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters1<'a> {
+        pub(crate) preempt_buf1: GpuPointer<'a, &'a [u8]>,
+        pub(crate) cdm_ctrl_stream_base: U64,
+        pub(crate) preempt_buf2: GpuPointer<'a, &'a [u8]>,
+        pub(crate) preempt_buf3: GpuPointer<'a, &'a [u8]>,
+        pub(crate) preempt_buf4: GpuPointer<'a, &'a [u8]>,
+        pub(crate) preempt_buf5: GpuPointer<'a, &'a [u8]>,
+        pub(crate) usc_exec_base_cp: U64,
+        pub(crate) unk_38: U64,
+        pub(crate) helper_program: u32,
+        pub(crate) unk_44: u32,
+        pub(crate) helper_arg: U64,
+        pub(crate) helper_cfg: u32,
+        pub(crate) unk_54: u32,
+        pub(crate) unk_58: u32,
+        pub(crate) unk_5c: u32,
+        pub(crate) iogpu_unk_40: u32,
+        pub(crate) __pad: Pad<0xfc>,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters2<'a> {
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_0_0: u32,
+        pub(crate) unk_0: Array<0x24, u8>,
+        pub(crate) preempt_buf1: GpuPointer<'a, &'a [u8]>,
+        pub(crate) cdm_ctrl_stream_end: U64,
+        pub(crate) unk_34: Array<0x20, u8>,
+        pub(crate) unk_g14x: u32,
+        pub(crate) unk_58: u32,
+        #[ver(V < V13_0B4)]
+        pub(crate) unk_5c: u32,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RunCompute<'a> {
+        pub(crate) tag: workqueue::CommandType,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) counter: U64,
+
+        pub(crate) unk_4: u32,
+        pub(crate) vm_slot: u32,
+        pub(crate) notifier: GpuPointer<'a, event::Notifier::ver>,
+        pub(crate) unk_pointee: u32,
+        #[ver(G < G14X)]
+        pub(crate) __pad0: Array<0x50, u8>,
+        #[ver(G < G14X)]
+        pub(crate) job_params1: JobParameters1<'a>,
+        #[ver(G >= G14X)]
+        pub(crate) registers: job::raw::RegisterArray,
+        pub(crate) __pad1: Array<0x20, u8>,
+        pub(crate) microsequence: GpuPointer<'a, &'a [u8]>,
+        pub(crate) microsequence_size: u32,
+        pub(crate) job_params2: JobParameters2::ver<'a>,
+        pub(crate) encoder_params: job::raw::EncoderParams,
+        pub(crate) meta: job::raw::JobMeta,
+        pub(crate) command_time: U64,
+        pub(crate) timestamp_pointers: job::raw::TimestampPointers<'a>,
+        pub(crate) user_timestamp_pointers: job::raw::TimestampPointers<'a>,
+        pub(crate) client_sequence: u8,
+        pub(crate) pad_2d1: Array<3, u8>,
+        pub(crate) unk_2d4: u32,
+        pub(crate) unk_2d8: u8,
+        #[ver(V >= V13_0B4)]
+        pub(crate) context_store_req: U64,
+        #[ver(V >= V13_0B4)]
+        pub(crate) context_store_compl: U64,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_2e9: Array<0x14, u8>,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_flag: U32,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_pad: Array<0x10, u8>,
+    }
+}
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct RunCompute {
+    pub(crate) notifier: Arc<GpuObject<event::Notifier::ver>>,
+    pub(crate) preempt_buf: GpuArray<u8>,
+    pub(crate) micro_seq: microseq::MicroSequence,
+    pub(crate) vm_bind: mmu::VmBind,
+    pub(crate) timestamps: Arc<GpuObject<job::JobTimestamps>>,
+    pub(crate) user_timestamps: job::UserTimestamps,
+}
+
+#[versions(AGX)]
+impl GpuStruct for RunCompute::ver {
+    type Raw<'a> = raw::RunCompute::ver<'a>;
+}
+
+#[versions(AGX)]
+impl workqueue::Command for RunCompute::ver {}
diff --git a/drivers/gpu/drm/asahi/fw/event.rs b/drivers/gpu/drm/asahi/fw/event.rs
new file mode 100644
index 00000000000000..66f78fa170ba77
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/event.rs
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU events control structures & stamps
+
+use super::types::*;
+use crate::{default_zeroed, trivial_gpustruct};
+use core::sync::atomic::Ordering;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug, Clone, Copy, Default)]
+    #[repr(C)]
+    pub(crate) struct LinkedListHead {
+        pub(crate) prev: Option<GpuWeakPointer<LinkedListHead>>,
+        pub(crate) next: Option<GpuWeakPointer<LinkedListHead>>,
+    }
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct NotifierList {
+        pub(crate) list_head: LinkedListHead,
+        pub(crate) unkptr_10: U64,
+    }
+    default_zeroed!(NotifierList);
+
+    #[versions(AGX)]
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct NotifierState {
+        unk_14: u32,
+        unk_18: U64,
+        unk_20: u32,
+        vm_slot: u32,
+        has_vtx: u32,
+        pstamp_vtx: Array<4, U64>,
+        has_frag: u32,
+        pstamp_frag: Array<4, U64>,
+        has_comp: u32,
+        pstamp_comp: Array<4, U64>,
+        #[ver(G >= G14 && V < V13_0B4)]
+        unk_98_g14_0: Array<0x14, u8>,
+        in_list: u32,
+        list_head: LinkedListHead,
+        #[ver(G >= G14 && V < V13_0B4)]
+        unk_a8_g14_0: Pad<4>,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_buf: Array<0x8, u8>, // Init to all-ff
+    }
+
+    #[versions(AGX)]
+    impl Default for NotifierState::ver {
+        fn default() -> Self {
+            #[allow(unused_mut)]
+            // SAFETY: All bit patterns are valid for this type.
+            let mut s: Self = unsafe { core::mem::zeroed() };
+            #[ver(V >= V13_0B4)]
+            s.unk_buf = Array::new([0xff; 0x8]);
+            s
+        }
+    }
+
+    #[derive(Debug)]
+    #[repr(transparent)]
+    pub(crate) struct Threshold(AtomicU64);
+    default_zeroed!(Threshold);
+
+    impl Threshold {
+        pub(crate) fn increase(&self, amount: u32) {
+            // We could use fetch_add, but the non-LSE atomic
+            // sequence Rust produces confuses the hypervisor.
+            let v = self.0.load(Ordering::Relaxed);
+            self.0.store(v + (amount as u64), Ordering::Relaxed);
+        }
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Notifier<'a> {
+        pub(crate) threshold: GpuPointer<'a, super::Threshold>,
+        pub(crate) generation: AtomicU32,
+        pub(crate) cur_count: AtomicU32,
+        pub(crate) unk_10: AtomicU32,
+        pub(crate) state: NotifierState::ver,
+    }
+}
+
+trivial_gpustruct!(Threshold);
+trivial_gpustruct!(NotifierList);
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct Notifier {
+    pub(crate) threshold: GpuObject<Threshold>,
+}
+
+#[versions(AGX)]
+impl GpuStruct for Notifier::ver {
+    type Raw<'a> = raw::Notifier::ver<'a>;
+}
diff --git a/drivers/gpu/drm/asahi/fw/fragment.rs b/drivers/gpu/drm/asahi/fw/fragment.rs
new file mode 100644
index 00000000000000..95373d8cc137b8
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/fragment.rs
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU fragment job firmware structures
+
+use super::types::*;
+use super::{event, job, workqueue};
+use crate::{buffer, fw, microseq, mmu};
+use kernel::sync::Arc;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct BackgroundProgram {
+        pub(crate) rsrc_spec: U64,
+        pub(crate) address: U64,
+    }
+
+    #[derive(Debug, Clone, Copy, Default)]
+    #[repr(C)]
+    pub(crate) struct EotProgram {
+        pub(crate) unk_0: U64,
+        pub(crate) unk_8: u32,
+        pub(crate) rsrc_spec: u32,
+        pub(crate) unk_10: u32,
+        pub(crate) address: u32,
+        pub(crate) unk_18: u32,
+        pub(crate) unk_1c_padding: u32,
+    }
+
+    impl EotProgram {
+        pub(crate) fn new(rsrc_spec: u32, address: u32) -> EotProgram {
+            EotProgram {
+                rsrc_spec,
+                address,
+                ..Default::default()
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct ArrayAddr {
+        pub(crate) ptr: U64,
+        pub(crate) unk_padding: U64,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct AuxFBInfo {
+        pub(crate) isp_ctl: u32,
+        pub(crate) unk2: u32,
+        pub(crate) width: u32,
+        pub(crate) height: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk3: U64,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters1<'a> {
+        pub(crate) utile_config: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) bg: BackgroundProgram,
+        pub(crate) ppp_multisamplectl: U64,
+        pub(crate) isp_scissor_base: U64,
+        pub(crate) isp_dbias_base: U64,
+        pub(crate) aux_fb_info: AuxFBInfo::ver,
+        pub(crate) isp_zls_pixels: U64,
+        pub(crate) isp_oclqry_base: U64,
+        pub(crate) zls_ctrl: U64,
+
+        #[ver(G >= G14)]
+        pub(crate) unk_58_g14_0: U64,
+        #[ver(G >= G14)]
+        pub(crate) unk_58_g14_8: U64,
+
+        pub(crate) z_load: U64,
+        pub(crate) z_store: U64,
+        pub(crate) s_load: U64,
+        pub(crate) s_store: U64,
+
+        #[ver(G >= G14)]
+        pub(crate) unk_68_g14_0: Array<0x20, u8>,
+
+        pub(crate) z_load_stride: U64,
+        pub(crate) z_store_stride: U64,
+        pub(crate) s_load_stride: U64,
+        pub(crate) s_store_stride: U64,
+        pub(crate) z_load_comp: U64,
+        pub(crate) z_load_comp_stride: U64,
+        pub(crate) z_store_comp: U64,
+        pub(crate) z_store_comp_stride: U64,
+        pub(crate) s_load_comp: U64,
+        pub(crate) s_load_comp_stride: U64,
+        pub(crate) s_store_comp: U64,
+        pub(crate) s_store_comp_stride: U64,
+        pub(crate) tvb_tilemap: GpuPointer<'a, &'a [u8]>,
+        pub(crate) tvb_layermeta: GpuPointer<'a, &'a [u8]>,
+        pub(crate) mtile_stride_dwords: U64,
+        pub(crate) tvb_heapmeta: GpuPointer<'a, &'a [u8]>,
+        pub(crate) tile_config: U64,
+        pub(crate) aux_fb: GpuPointer<'a, &'a [u8]>,
+        pub(crate) unk_108: Array<0x6, U64>,
+        pub(crate) usc_exec_base_isp: U64,
+        pub(crate) unk_140: U64,
+        pub(crate) helper_program: u32,
+        pub(crate) unk_14c: u32,
+        pub(crate) helper_arg: U64,
+        pub(crate) unk_158: U64,
+        pub(crate) unk_160: U64,
+
+        #[ver(G < G14)]
+        pub(crate) __pad: Pad<0x1d8>,
+        #[ver(G >= G14)]
+        pub(crate) __pad: Pad<0x1a8>,
+        #[ver(V < V13_0B4)]
+        pub(crate) __pad1: Pad<0x8>,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters2 {
+        pub(crate) eot_rsrc_spec: u32,
+        pub(crate) eot_usc: u32,
+        pub(crate) unk_8: u32,
+        pub(crate) unk_c: u32,
+        pub(crate) isp_merge_upper_x: F32,
+        pub(crate) isp_merge_upper_y: F32,
+        pub(crate) unk_18: U64,
+        pub(crate) utiles_per_mtile_y: u16,
+        pub(crate) utiles_per_mtile_x: u16,
+        pub(crate) unk_24: u32,
+        pub(crate) tile_counts: u32,
+        pub(crate) tib_blocks: u32,
+        pub(crate) isp_bgobjdepth: u32,
+        pub(crate) isp_bgobjvals: u32,
+        pub(crate) unk_38: u32,
+        pub(crate) unk_3c: u32,
+        pub(crate) helper_cfg: u32,
+        pub(crate) __pad: Pad<0xac>,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters3 {
+        pub(crate) isp_dbias_base: ArrayAddr,
+        pub(crate) isp_scissor_base: ArrayAddr,
+        pub(crate) isp_oclqry_base: U64,
+        pub(crate) unk_118: U64,
+        pub(crate) unk_120: Array<0x25, U64>,
+        pub(crate) unk_partial_bg: BackgroundProgram,
+        pub(crate) unk_258: U64,
+        pub(crate) unk_260: U64,
+        pub(crate) unk_268: U64,
+        pub(crate) unk_270: U64,
+        pub(crate) partial_bg: BackgroundProgram,
+        pub(crate) zls_ctrl: U64,
+        pub(crate) unk_290: U64,
+        pub(crate) z_load: U64,
+        pub(crate) z_partial_stride: U64,
+        pub(crate) z_partial_comp_stride: U64,
+        pub(crate) z_store: U64,
+        pub(crate) z_partial: U64,
+        pub(crate) z_partial_comp: U64,
+        pub(crate) s_load: U64,
+        pub(crate) s_partial_stride: U64,
+        pub(crate) s_partial_comp_stride: U64,
+        pub(crate) s_store: U64,
+        pub(crate) s_partial: U64,
+        pub(crate) s_partial_comp: U64,
+        pub(crate) unk_2f8: Array<2, U64>,
+        pub(crate) tib_blocks: u32,
+        pub(crate) unk_30c: u32,
+        pub(crate) aux_fb_info: AuxFBInfo::ver,
+        pub(crate) tile_config: U64,
+        pub(crate) unk_328_padding: Array<0x8, u8>,
+        pub(crate) unk_partial_eot: EotProgram,
+        pub(crate) partial_eot: EotProgram,
+        pub(crate) isp_bgobjdepth: u32,
+        pub(crate) isp_bgobjvals: u32,
+        pub(crate) sample_size: u32,
+        pub(crate) unk_37c: u32,
+        pub(crate) unk_380: U64,
+        pub(crate) unk_388: U64,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_390_0: U64,
+
+        pub(crate) isp_zls_pixels: U64,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RunFragment<'a> {
+        pub(crate) tag: workqueue::CommandType,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) counter: U64,
+
+        pub(crate) vm_slot: u32,
+        pub(crate) unk_8: u32,
+        pub(crate) microsequence: GpuPointer<'a, &'a [u8]>,
+        pub(crate) microsequence_size: u32,
+        pub(crate) notifier: GpuPointer<'a, event::Notifier::ver>,
+        pub(crate) buffer: GpuPointer<'a, fw::buffer::Info::ver>,
+        pub(crate) scene: GpuPointer<'a, fw::buffer::Scene::ver>,
+        pub(crate) unk_buffer_buf: GpuWeakPointer<[u8]>,
+        pub(crate) tvb_tilemap: GpuPointer<'a, &'a [u8]>,
+        pub(crate) ppp_multisamplectl: U64,
+        pub(crate) samples: u32,
+        pub(crate) tiles_per_mtile_y: u16,
+        pub(crate) tiles_per_mtile_x: u16,
+        pub(crate) unk_50: U64,
+        pub(crate) unk_58: U64,
+        pub(crate) isp_merge_upper_x: F32,
+        pub(crate) isp_merge_upper_y: F32,
+        pub(crate) unk_68: U64,
+        pub(crate) tile_count: U64,
+
+        #[ver(G < G14X)]
+        pub(crate) job_params1: JobParameters1::ver<'a>,
+        #[ver(G < G14X)]
+        pub(crate) job_params2: JobParameters2,
+        #[ver(G >= G14X)]
+        pub(crate) registers: job::raw::RegisterArray,
+
+        pub(crate) job_params3: JobParameters3::ver,
+        pub(crate) unk_758_flag: u32,
+        pub(crate) unk_75c_flag: u32,
+        pub(crate) unk_buf: Array<0x110, u8>,
+        pub(crate) busy_flag: u32,
+        pub(crate) tvb_overflow_count: u32,
+        pub(crate) unk_878: u32,
+        pub(crate) encoder_params: job::raw::EncoderParams,
+        pub(crate) process_empty_tiles: u32,
+        pub(crate) no_clear_pipeline_textures: u32,
+        pub(crate) msaa_zs: u32,
+        pub(crate) unk_pointee: u32,
+        #[ver(V >= V13_3)]
+        pub(crate) unk_v13_3: u32,
+        pub(crate) meta: job::raw::JobMeta,
+        pub(crate) unk_after_meta: u32,
+        pub(crate) unk_buf_0: U64,
+        pub(crate) unk_buf_8: U64,
+        pub(crate) unk_buf_10: U64,
+        pub(crate) command_time: U64,
+        pub(crate) timestamp_pointers: job::raw::TimestampPointers<'a>,
+        pub(crate) user_timestamp_pointers: job::raw::TimestampPointers<'a>,
+        pub(crate) client_sequence: u8,
+        pub(crate) pad_925: Array<3, u8>,
+        pub(crate) unk_928: u32,
+        pub(crate) unk_92c: u8,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_ts: U64,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_92d_8: Array<0x1b, u8>,
+    }
+}
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct RunFragment {
+    pub(crate) notifier: Arc<GpuObject<event::Notifier::ver>>,
+    pub(crate) scene: Arc<buffer::Scene::ver>,
+    pub(crate) micro_seq: microseq::MicroSequence,
+    pub(crate) vm_bind: mmu::VmBind,
+    pub(crate) aux_fb: GpuArray<u8>,
+    pub(crate) timestamps: Arc<GpuObject<job::RenderTimestamps>>,
+    pub(crate) user_timestamps: job::UserTimestamps,
+}
+
+#[versions(AGX)]
+impl GpuStruct for RunFragment::ver {
+    type Raw<'a> = raw::RunFragment::ver<'a>;
+}
+
+#[versions(AGX)]
+impl workqueue::Command for RunFragment::ver {}
diff --git a/drivers/gpu/drm/asahi/fw/initdata.rs b/drivers/gpu/drm/asahi/fw/initdata.rs
new file mode 100644
index 00000000000000..358123bfa96f66
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/initdata.rs
@@ -0,0 +1,1353 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU initialization / global structures
+
+use super::channels;
+use super::types::*;
+use crate::{default_zeroed, gem, mmu, no_debug, trivial_gpustruct};
+
+pub(crate) mod raw {
+    use super::*;
+
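+    /// Weak pointers to a channel's shared state structure and to its ring buffer storage.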
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct ChannelRing<T: GpuStruct + Debug + Default, U: Copy> {
+        pub(crate) state: Option<GpuWeakPointer<T>>,
+        pub(crate) ring: Option<GpuWeakPointer<[U]>>,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct PipeChannels {
+        pub(crate) vtx: ChannelRing<channels::ChannelState, channels::PipeMsg::ver>,
+        pub(crate) frag: ChannelRing<channels::ChannelState, channels::PipeMsg::ver>,
+        pub(crate) comp: ChannelRing<channels::ChannelState, channels::PipeMsg::ver>,
+    }
+    #[versions(AGX)]
+    default_zeroed!(PipeChannels::ver);
+
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct FwStatusFlags {
+        pub(crate) halt_count: AtomicU64,
+        __pad0: Pad<0x8>,
+        pub(crate) halted: AtomicU32,
+        __pad1: Pad<0xc>,
+        pub(crate) resume: AtomicU32,
+        __pad2: Pad<0xc>,
+        pub(crate) unk_40: u32,
+        __pad3: Pad<0xc>,
+        pub(crate) unk_ctr: u32,
+        __pad4: Pad<0xc>,
+        pub(crate) unk_60: u32,
+        __pad5: Pad<0xc>,
+        pub(crate) unk_70: u32,
+        __pad6: Pad<0xc>,
+    }
+
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct FwStatus {
+        pub(crate) fwctl_channel: ChannelRing<channels::FwCtlChannelState, channels::FwCtlMsg>,
+        pub(crate) flags: FwStatusFlags,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct HwDataShared1 {
+        pub(crate) table: Array<16, i32>,
+        pub(crate) unk_44: Array<0x60, u8>,
+        pub(crate) unk_a4: u32,
+        pub(crate) unk_a8: u32,
+    }
+    default_zeroed!(HwDataShared1);
+
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct HwDataShared2Curve {
+        pub(crate) unk_0: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) t1: Array<16, u16>,
+        pub(crate) t2: Array<16, i16>,
+        pub(crate) t3: Array<8, Array<16, i32>>,
+    }
+
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct HwDataShared2G14 {
+        pub(crate) unk_0: Array<5, u32>,
+        pub(crate) unk_14: u32,
+        pub(crate) unk_18: Array<8, u32>,
+        pub(crate) curve1: HwDataShared2Curve,
+        pub(crate) curve2: HwDataShared2Curve,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct HwDataShared2 {
+        pub(crate) table: Array<10, i32>,
+        pub(crate) unk_28: Array<0x10, u8>,
+        pub(crate) g14: HwDataShared2G14,
+        pub(crate) unk_500: u32,
+        pub(crate) unk_504: u32,
+        pub(crate) unk_508: u32,
+        pub(crate) unk_50c: u32,
+    }
+    default_zeroed!(HwDataShared2);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct HwDataShared3 {
+        pub(crate) unk_0: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) unk_8: u32,
+        pub(crate) table: Array<16, u32>,
+        pub(crate) unk_4c: u32,
+    }
+    default_zeroed!(HwDataShared3);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct HwDataA130Extra {
+        pub(crate) unk_0: Array<0x38, u8>,
+        pub(crate) unk_38: u32,
+        pub(crate) unk_3c: u32,
+        pub(crate) gpu_se_inactive_threshold: u32,
+        pub(crate) unk_44: u32,
+        pub(crate) gpu_se_engagement_criteria: i32,
+        pub(crate) gpu_se_reset_criteria: u32,
+        pub(crate) unk_50: u32,
+        pub(crate) unk_54: u32,
+        pub(crate) unk_58: u32,
+        pub(crate) unk_5c: u32,
+        pub(crate) gpu_se_filter_a_neg: F32,
+        pub(crate) gpu_se_filter_1_a_neg: F32,
+        pub(crate) gpu_se_filter_a: F32,
+        pub(crate) gpu_se_filter_1_a: F32,
+        pub(crate) gpu_se_ki_dt: F32,
+        pub(crate) gpu_se_ki_1_dt: F32,
+        pub(crate) unk_78: F32,
+        pub(crate) unk_7c: F32,
+        pub(crate) gpu_se_kp: F32,
+        pub(crate) gpu_se_kp_1: F32,
+        pub(crate) unk_88: u32,
+        pub(crate) unk_8c: u32,
+        pub(crate) max_pstate_scaled_1: u32,
+        pub(crate) unk_94: u32,
+        pub(crate) unk_98: u32,
+        pub(crate) unk_9c: F32,
+        pub(crate) unk_a0: u32,
+        pub(crate) unk_a4: u32,
+        pub(crate) gpu_se_filter_time_constant_ms: u32,
+        pub(crate) gpu_se_filter_time_constant_1_ms: u32,
+        pub(crate) gpu_se_filter_time_constant_clks: U64,
+        pub(crate) gpu_se_filter_time_constant_1_clks: U64,
+        pub(crate) unk_c0: u32,
+        pub(crate) unk_c4: F32,
+        pub(crate) unk_c8: Array<0x4c, u8>,
+        pub(crate) unk_114: F32,
+        pub(crate) unk_118: u32,
+        pub(crate) unk_11c: u32,
+        pub(crate) unk_120: u32,
+        pub(crate) unk_124: u32,
+        pub(crate) max_pstate_scaled_2: u32,
+        pub(crate) unk_12c: Array<0x8c, u8>,
+    }
+    default_zeroed!(HwDataA130Extra);
+
+    #[repr(C)]
+    pub(crate) struct T81xxData {
+        pub(crate) unk_d8c: u32,
+        pub(crate) unk_d90: u32,
+        pub(crate) unk_d94: u32,
+        pub(crate) unk_d98: u32,
+        pub(crate) unk_d9c: F32,
+        pub(crate) unk_da0: u32,
+        pub(crate) unk_da4: F32,
+        pub(crate) unk_da8: u32,
+        pub(crate) unk_dac: F32,
+        pub(crate) unk_db0: u32,
+        pub(crate) unk_db4: u32,
+        pub(crate) unk_db8: F32,
+        pub(crate) unk_dbc: F32,
+        pub(crate) unk_dc0: u32,
+        pub(crate) unk_dc4: u32,
+        pub(crate) unk_dc8: u32,
+        pub(crate) max_pstate_scaled: u32,
+    }
+    default_zeroed!(T81xxData);
+
+    #[versions(AGX)]
+    #[derive(Default, Copy, Clone)]
+    #[repr(C)]
+    pub(crate) struct PowerZone {
+        pub(crate) val: F32,
+        pub(crate) target: u32,
+        pub(crate) target_off: u32,
+        pub(crate) filter_tc_x4: u32,
+        pub(crate) filter_tc_xperiod: u32,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_10: u32,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_14: u32,
+        pub(crate) filter_a_neg: F32,
+        pub(crate) filter_a: F32,
+        pub(crate) pad: u32,
+    }
+
+    #[versions(AGX)]
+    const MAX_CORES_PER_CLUSTER: usize = {
+        #[ver(G >= G14X)]
+        {
+            16
+        }
+        #[ver(G < G14X)]
+        {
+            8
+        }
+    };
+
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct AuxLeakCoef {
+        pub(crate) afr_1: Array<2, F32>,
+        pub(crate) cs_1: Array<2, F32>,
+        pub(crate) afr_2: Array<2, F32>,
+        pub(crate) cs_2: Array<2, F32>,
+    }
+
+    #[versions(AGX)]
+    #[repr(C)]
+    pub(crate) struct HwDataA {
+        pub(crate) unk_0: u32,
+        pub(crate) clocks_per_period: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) clocks_per_period_2: u32,
+
+        pub(crate) unk_8: u32,
+        pub(crate) pwr_status: AtomicU32,
+        pub(crate) unk_10: F32,
+        pub(crate) unk_14: u32,
+        pub(crate) unk_18: u32,
+        pub(crate) unk_1c: u32,
+        pub(crate) unk_20: u32,
+        pub(crate) unk_24: u32,
+        pub(crate) actual_pstate: u32,
+        pub(crate) tgt_pstate: u32,
+        pub(crate) unk_30: u32,
+        pub(crate) cur_pstate: u32,
+        pub(crate) unk_38: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_3c_0: u32,
+
+        pub(crate) base_pstate_scaled: u32,
+        pub(crate) unk_40: u32,
+        pub(crate) max_pstate_scaled: u32,
+        pub(crate) unk_48: u32,
+        pub(crate) min_pstate_scaled: u32,
+        pub(crate) freq_mhz: F32,
+        pub(crate) unk_54: Array<0x20, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_74_0: u32,
+
+        pub(crate) sram_k: Array<0x10, F32>,
+        pub(crate) unk_b4: Array<0x100, u8>,
+        pub(crate) unk_1b4: u32,
+        pub(crate) temp_c: u32,
+        pub(crate) avg_power_mw: u32,
+        pub(crate) update_ts: U64,
+        pub(crate) unk_1c8: u32,
+        pub(crate) unk_1cc: Array<0x478, u8>,
+        pub(crate) pad_644: Pad<0x8>,
+        pub(crate) unk_64c: u32,
+        pub(crate) unk_650: u32,
+        pub(crate) pad_654: u32,
+        pub(crate) pwr_filter_a_neg: F32,
+        pub(crate) pad_65c: u32,
+        pub(crate) pwr_filter_a: F32,
+        pub(crate) pad_664: u32,
+        pub(crate) pwr_integral_gain: F32,
+        pub(crate) pad_66c: u32,
+        pub(crate) pwr_integral_min_clamp: F32,
+        pub(crate) max_power_1: F32,
+        pub(crate) pwr_proportional_gain: F32,
+        pub(crate) pad_67c: u32,
+        pub(crate) pwr_pstate_related_k: F32,
+        pub(crate) pwr_pstate_max_dc_offset: i32,
+        pub(crate) unk_688: u32,
+        pub(crate) max_pstate_scaled_2: u32,
+        pub(crate) pad_690: u32,
+        pub(crate) unk_694: u32,
+        pub(crate) max_power_2: u32,
+        pub(crate) pad_69c: Pad<0x18>,
+        pub(crate) unk_6b4: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_6b8_0: Array<0x10, u8>,
+
+        pub(crate) max_pstate_scaled_3: u32,
+        pub(crate) unk_6bc: u32,
+        pub(crate) pad_6c0: Pad<0x14>,
+        pub(crate) ppm_filter_tc_periods_x4: u32,
+        pub(crate) unk_6d8: u32,
+        pub(crate) pad_6dc: u32,
+        pub(crate) ppm_filter_a_neg: F32,
+        pub(crate) pad_6e4: u32,
+        pub(crate) ppm_filter_a: F32,
+        pub(crate) pad_6ec: u32,
+        pub(crate) ppm_ki_dt: F32,
+        pub(crate) pad_6f4: u32,
+        pub(crate) pwr_integral_min_clamp_2: u32,
+        pub(crate) unk_6fc: F32,
+        pub(crate) ppm_kp: F32,
+        pub(crate) pad_704: u32,
+        pub(crate) unk_708: u32,
+        pub(crate) pwr_min_duty_cycle: u32,
+        pub(crate) max_pstate_scaled_4: u32,
+        pub(crate) unk_714: u32,
+        pub(crate) pad_718: u32,
+        pub(crate) unk_71c: F32,
+        pub(crate) max_power_3: u32,
+        pub(crate) cur_power_mw_2: u32,
+        pub(crate) ppm_filter_tc_ms: u32,
+        pub(crate) unk_72c: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) ppm_filter_tc_clks: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_730_4: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_730_8: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_730_c: u32,
+
+        pub(crate) unk_730: F32,
+        pub(crate) unk_734: u32,
+        pub(crate) unk_738: u32,
+        pub(crate) unk_73c: u32,
+        pub(crate) unk_740: u32,
+        pub(crate) unk_744: u32,
+        pub(crate) unk_748: Array<0x4, F32>,
+        pub(crate) unk_758: u32,
+        pub(crate) perf_tgt_utilization: u32,
+        pub(crate) pad_760: u32,
+        pub(crate) perf_boost_min_util: u32,
+        pub(crate) perf_boost_ce_step: u32,
+        pub(crate) perf_reset_iters: u32,
+        pub(crate) pad_770: u32,
+        pub(crate) unk_774: u32,
+        pub(crate) unk_778: u32,
+        pub(crate) perf_filter_drop_threshold: u32,
+        pub(crate) perf_filter_a_neg: F32,
+        pub(crate) perf_filter_a2_neg: F32,
+        pub(crate) perf_filter_a: F32,
+        pub(crate) perf_filter_a2: F32,
+        pub(crate) perf_ki: F32,
+        pub(crate) perf_ki2: F32,
+        pub(crate) perf_integral_min_clamp: F32,
+        pub(crate) unk_79c: F32,
+        pub(crate) perf_kp: F32,
+        pub(crate) perf_kp2: F32,
+        pub(crate) boost_state_unk_k: F32,
+        pub(crate) base_pstate_scaled_2: u32,
+        pub(crate) max_pstate_scaled_5: u32,
+        pub(crate) base_pstate_scaled_3: u32,
+        pub(crate) pad_7b8: u32,
+        pub(crate) perf_cur_utilization: F32,
+        pub(crate) perf_tgt_utilization_2: u32,
+        pub(crate) pad_7c4: Pad<0x18>,
+        pub(crate) unk_7dc: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_7e0_0: Array<0x10, u8>,
+
+        pub(crate) base_pstate_scaled_4: u32,
+        pub(crate) pad_7e4: u32,
+        pub(crate) unk_7e8: Array<0x14, u8>,
+        pub(crate) unk_7fc: F32,
+        pub(crate) pwr_min_duty_cycle_2: F32,
+        pub(crate) max_pstate_scaled_6: F32,
+        pub(crate) max_freq_mhz: u32,
+        pub(crate) pad_80c: u32,
+        pub(crate) unk_810: u32,
+        pub(crate) pad_814: u32,
+        pub(crate) pwr_min_duty_cycle_3: u32,
+        pub(crate) unk_81c: u32,
+        pub(crate) pad_820: u32,
+        pub(crate) min_pstate_scaled_4: F32,
+        pub(crate) max_pstate_scaled_7: u32,
+        pub(crate) unk_82c: u32,
+        pub(crate) unk_alpha_neg: F32,
+        pub(crate) unk_alpha: F32,
+        pub(crate) unk_838: u32,
+        pub(crate) unk_83c: u32,
+        pub(crate) pad_840: Pad<0x2c>,
+        pub(crate) unk_86c: u32,
+        pub(crate) fast_die0_sensor_mask: U64,
+        #[ver(G >= G14X)]
+        pub(crate) fast_die1_sensor_mask: U64,
+        pub(crate) fast_die0_release_temp_cc: u32,
+        pub(crate) unk_87c: i32,
+        pub(crate) unk_880: u32,
+        pub(crate) unk_884: u32,
+        pub(crate) pad_888: u32,
+        pub(crate) unk_88c: u32,
+        pub(crate) pad_890: u32,
+        pub(crate) unk_894: F32,
+        pub(crate) pad_898: u32,
+        pub(crate) fast_die0_ki_dt: F32,
+        pub(crate) pad_8a0: u32,
+        pub(crate) unk_8a4: u32,
+        pub(crate) unk_8a8: F32,
+        pub(crate) fast_die0_kp: F32,
+        pub(crate) pad_8b0: u32,
+        pub(crate) unk_8b4: u32,
+        pub(crate) pwr_min_duty_cycle_4: u32,
+        pub(crate) max_pstate_scaled_8: u32,
+        pub(crate) max_pstate_scaled_9: u32,
+        pub(crate) fast_die0_prop_tgt_delta: u32,
+        pub(crate) unk_8c8: u32,
+        pub(crate) unk_8cc: u32,
+        pub(crate) pad_8d0: Pad<0x14>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_8e4_0: Array<0x10, u8>,
+
+        pub(crate) unk_8e4: u32,
+        pub(crate) unk_8e8: u32,
+        pub(crate) max_pstate_scaled_10: u32,
+        pub(crate) unk_8f0: u32,
+        pub(crate) unk_8f4: u32,
+        pub(crate) pad_8f8: u32,
+        pub(crate) pad_8fc: u32,
+        pub(crate) unk_900: Array<0x24, u8>,
+
+        pub(crate) unk_coef_a1: Array<8, Array<MAX_CORES_PER_CLUSTER::ver, F32>>,
+        pub(crate) unk_coef_a2: Array<8, Array<MAX_CORES_PER_CLUSTER::ver, F32>>,
+
+        pub(crate) pad_b24: Pad<0x70>,
+        pub(crate) max_pstate_scaled_11: u32,
+        pub(crate) freq_with_off: u32,
+        pub(crate) unk_b9c: u32,
+        pub(crate) unk_ba0: U64,
+        pub(crate) unk_ba8: U64,
+        pub(crate) unk_bb0: u32,
+        pub(crate) unk_bb4: u32,
+
+        #[ver(V >= V13_3)]
+        pub(crate) pad_bb8_0: Pad<0x200>,
+        #[ver(V >= V13_5)]
+        pub(crate) pad_bb8_200: Pad<0x8>,
+
+        pub(crate) pad_bb8: Pad<0x74>,
+        pub(crate) unk_c2c: u32,
+        pub(crate) power_zone_count: u32,
+        pub(crate) max_power_4: u32,
+        pub(crate) max_power_5: u32,
+        pub(crate) max_power_6: u32,
+        pub(crate) unk_c40: u32,
+        pub(crate) unk_c44: F32,
+        pub(crate) avg_power_target_filter_a_neg: F32,
+        pub(crate) avg_power_target_filter_a: F32,
+        pub(crate) avg_power_target_filter_tc_x4: u32,
+        pub(crate) avg_power_target_filter_tc_xperiod: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) avg_power_target_filter_tc_clks: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_c58_4: u32,
+
+        pub(crate) power_zones: Array<5, PowerZone::ver>,
+        pub(crate) avg_power_filter_tc_periods_x4: u32,
+        pub(crate) unk_cfc: u32,
+        pub(crate) unk_d00: u32,
+        pub(crate) avg_power_filter_a_neg: F32,
+        pub(crate) unk_d08: u32,
+        pub(crate) avg_power_filter_a: F32,
+        pub(crate) unk_d10: u32,
+        pub(crate) avg_power_ki_dt: F32,
+        pub(crate) unk_d18: u32,
+        pub(crate) unk_d1c: u32,
+        pub(crate) unk_d20: F32,
+        pub(crate) avg_power_kp: F32,
+        pub(crate) unk_d28: u32,
+        pub(crate) unk_d2c: u32,
+        pub(crate) avg_power_min_duty_cycle: u32,
+        pub(crate) max_pstate_scaled_12: u32,
+        pub(crate) max_pstate_scaled_13: u32,
+        pub(crate) unk_d3c: u32,
+        pub(crate) max_power_7: F32,
+        pub(crate) max_power_8: u32,
+        pub(crate) unk_d48: u32,
+        pub(crate) avg_power_filter_tc_ms: u32,
+        pub(crate) unk_d50: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) avg_power_filter_tc_clks: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_d54_4: Array<0xc, u8>,
+
+        pub(crate) unk_d54: Array<0x10, u8>,
+        pub(crate) max_pstate_scaled_14: u32,
+        pub(crate) unk_d68: Array<0x24, u8>,
+
+        pub(crate) t81xx_data: T81xxData,
+
+        pub(crate) unk_dd0: Array<0x40, u8>,
+
+        #[ver(V >= V13_2)]
+        pub(crate) unk_e10_pad: Array<0x10, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_e10_0: HwDataA130Extra,
+
+        pub(crate) unk_e10: Array<0xc, u8>,
+
+        pub(crate) fast_die0_sensor_mask_2: U64,
+        #[ver(G >= G14X)]
+        pub(crate) fast_die1_sensor_mask_2: U64,
+
+        pub(crate) unk_e24: u32,
+        pub(crate) unk_e28: u32,
+        pub(crate) unk_e2c: Pad<0x1c>,
+        pub(crate) unk_coef_b1: Array<8, Array<MAX_CORES_PER_CLUSTER::ver, F32>>,
+        pub(crate) unk_coef_b2: Array<8, Array<MAX_CORES_PER_CLUSTER::ver, F32>>,
+
+        #[ver(G >= G14X)]
+        pub(crate) pad_1048_0: Pad<0x600>,
+
+        pub(crate) pad_1048: Pad<0x5e4>,
+
+        pub(crate) fast_die0_sensor_mask_alt: U64,
+        #[ver(G >= G14X)]
+        pub(crate) fast_die1_sensor_mask_alt: U64,
+        #[ver(V < V13_0B4)]
+        pub(crate) fast_die0_sensor_present: U64,
+
+        pub(crate) unk_163c: u32,
+
+        pub(crate) unk_1640: Array<0x2000, u8>,
+
+        #[ver(G >= G14X)]
+        pub(crate) unk_3640_0: Array<0x2000, u8>,
+
+        pub(crate) unk_3640: u32,
+        pub(crate) unk_3644: u32,
+        pub(crate) hws1: HwDataShared1,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_hws2: Array<16, u16>,
+
+        pub(crate) hws2: HwDataShared2,
+        pub(crate) unk_3c00: u32,
+        pub(crate) unk_3c04: u32,
+        pub(crate) hws3: HwDataShared3,
+        pub(crate) unk_3c58: Array<0x3c, u8>,
+        pub(crate) unk_3c94: u32,
+        pub(crate) unk_3c98: U64,
+        pub(crate) unk_3ca0: U64,
+        pub(crate) unk_3ca8: U64,
+        pub(crate) unk_3cb0: U64,
+        pub(crate) ts_last_idle: U64,
+        pub(crate) ts_last_poweron: U64,
+        pub(crate) ts_last_poweroff: U64,
+        pub(crate) unk_3cd0: U64,
+        pub(crate) unk_3cd8: U64,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_3ce0_0: u32,
+
+        pub(crate) unk_3ce0: u32,
+        pub(crate) unk_3ce4: u32,
+        pub(crate) unk_3ce8: u32,
+        pub(crate) unk_3cec: u32,
+        pub(crate) unk_3cf0: u32,
+        pub(crate) core_leak_coef: Array<8, F32>,
+        pub(crate) sram_leak_coef: Array<8, F32>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) aux_leak_coef: AuxLeakCoef,
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_3d34_0: Array<0x18, u8>,
+
+        pub(crate) unk_3d34: Array<0x38, u8>,
+    }
+    #[versions(AGX)]
+    default_zeroed!(HwDataA::ver);
+    #[versions(AGX)]
+    no_debug!(HwDataA::ver);
+
+    #[derive(Debug, Default, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct IOMapping {
+        pub(crate) phys_addr: U64,
+        pub(crate) virt_addr: U64,
+        pub(crate) total_size: u32,
+        pub(crate) element_size: u32,
+        pub(crate) readwrite: U64,
+    }
+
+    #[versions(AGX)]
+    const IO_MAPPING_COUNT: usize = {
+        #[ver(V < V13_0B4)]
+        {
+            0x14
+        }
+        #[ver(V >= V13_0B4 && V < V13_3)]
+        {
+            0x17
+        }
+        #[ver(V >= V13_3 && V < V13_5)]
+        {
+            0x18
+        }
+        #[ver(V >= V13_5)]
+        {
+            0x19
+        }
+    };
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct HwDataBAuxPStates {
+        pub(crate) cs_max_pstate: u32,
+        pub(crate) cs_frequencies: Array<0x10, u32>,
+        pub(crate) cs_voltages: Array<0x10, Array<0x2, u32>>,
+        pub(crate) cs_voltages_sram: Array<0x10, Array<0x2, u32>>,
+        pub(crate) cs_unkpad: u32,
+        pub(crate) afr_max_pstate: u32,
+        pub(crate) afr_frequencies: Array<0x8, u32>,
+        pub(crate) afr_voltages: Array<0x8, Array<0x2, u32>>,
+        pub(crate) afr_voltages_sram: Array<0x8, Array<0x2, u32>>,
+        pub(crate) afr_unkpad: u32,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct HwDataB {
+        #[ver(V < V13_0B4)]
+        pub(crate) unk_0: U64,
+
+        pub(crate) unk_8: U64,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) unk_10: U64,
+
+        pub(crate) unk_18: U64,
+        pub(crate) unk_20: U64,
+        pub(crate) unk_28: U64,
+        pub(crate) unk_30: U64,
+        pub(crate) timestamp_area_base: U64,
+        pub(crate) pad_40: Pad<0x20>,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) yuv_matrices: Array<0xf, Array<3, Array<4, i16>>>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) yuv_matrices: Array<0x3f, Array<3, Array<4, i16>>>,
+
+        pub(crate) pad_1c8: Pad<0x8>,
+        pub(crate) io_mappings: Array<IO_MAPPING_COUNT::ver, IOMapping>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) sgx_sram_ptr: U64,
+
+        pub(crate) chip_id: u32,
+        pub(crate) unk_454: u32,
+        pub(crate) unk_458: u32,
+        pub(crate) unk_45c: u32,
+        pub(crate) unk_460: u32,
+        pub(crate) unk_464: u32,
+        pub(crate) unk_468: u32,
+        pub(crate) unk_46c: u32,
+        pub(crate) unk_470: u32,
+        pub(crate) unk_474: u32,
+        pub(crate) unk_478: u32,
+        pub(crate) unk_47c: u32,
+        pub(crate) unk_480: u32,
+        pub(crate) unk_484: u32,
+        pub(crate) unk_488: u32,
+        pub(crate) unk_48c: u32,
+        pub(crate) base_clock_khz: u32,
+        pub(crate) power_sample_period: u32,
+        pub(crate) pad_498: Pad<0x4>,
+        pub(crate) unk_49c: u32,
+        pub(crate) unk_4a0: u32,
+        pub(crate) unk_4a4: u32,
+        pub(crate) pad_4a8: Pad<0x4>,
+        pub(crate) unk_4ac: u32,
+        pub(crate) pad_4b0: Pad<0x8>,
+        pub(crate) unk_4b8: u32,
+        pub(crate) unk_4bc: Array<0x4, u8>,
+        pub(crate) unk_4c0: u32,
+        pub(crate) unk_4c4: u32,
+        pub(crate) unk_4c8: u32,
+        pub(crate) unk_4cc: u32,
+        pub(crate) unk_4d0: u32,
+        pub(crate) unk_4d4: u32,
+        pub(crate) unk_4d8: Array<0x4, u8>,
+        pub(crate) unk_4dc: u32,
+        pub(crate) unk_4e0: U64,
+        pub(crate) unk_4e8: u32,
+        pub(crate) unk_4ec: u32,
+        pub(crate) unk_4f0: u32,
+        pub(crate) unk_4f4: u32,
+        pub(crate) unk_4f8: u32,
+        pub(crate) unk_4fc: u32,
+        pub(crate) unk_500: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_504_0: u32,
+
+        pub(crate) unk_504: u32,
+        pub(crate) unk_508: u32,
+        pub(crate) unk_50c: u32,
+        pub(crate) unk_510: u32,
+        pub(crate) unk_514: u32,
+        pub(crate) unk_518: u32,
+        pub(crate) unk_51c: u32,
+        pub(crate) unk_520: u32,
+        pub(crate) unk_524: u32,
+        pub(crate) unk_528: u32,
+        pub(crate) unk_52c: u32,
+        pub(crate) unk_530: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_534_0: u32,
+
+        pub(crate) unk_534: u32,
+        pub(crate) unk_538: u32,
+
+        pub(crate) num_frags: u32,
+        pub(crate) unk_540: u32,
+        pub(crate) unk_544: u32,
+        pub(crate) unk_548: u32,
+        pub(crate) unk_54c: u32,
+        pub(crate) unk_550: u32,
+        pub(crate) unk_554: u32,
+        pub(crate) uat_ttb_base: U64,
+        pub(crate) gpu_core_id: u32,
+        pub(crate) gpu_rev_id: u32,
+        pub(crate) num_cores: u32,
+        pub(crate) max_pstate: u32,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) num_pstates: u32,
+
+        pub(crate) frequencies: Array<0x10, u32>,
+        pub(crate) voltages: Array<0x10, [u32; 0x8]>,
+        pub(crate) voltages_sram: Array<0x10, [u32; 0x8]>,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_9f4_0: Pad<64>,
+
+        pub(crate) sram_k: Array<0x10, F32>,
+        pub(crate) unk_9f4: Array<0x10, u32>,
+        pub(crate) rel_max_powers: Array<0x10, u32>,
+        pub(crate) rel_boost_freqs: Array<0x10, u32>,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_arr_0: Array<32, u32>,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) min_sram_volt: u32,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) unk_ab8: u32,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) unk_abc: u32,
+
+        #[ver(V < V13_0B4)]
+        pub(crate) unk_ac0: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) aux_ps: HwDataBAuxPStates,
+
+        #[ver(V >= V13_3)]
+        pub(crate) pad_ac4_0: Array<0x44c, u8>,
+
+        pub(crate) pad_ac4: Pad<0x8>,
+        pub(crate) unk_acc: u32,
+        pub(crate) unk_ad0: u32,
+        pub(crate) pad_ad4: Pad<0x10>,
+        pub(crate) unk_ae4: Array<0x4, u32>,
+        pub(crate) pad_af4: Pad<0x4>,
+        pub(crate) unk_af8: u32,
+        pub(crate) pad_afc: Pad<0x8>,
+        pub(crate) unk_b04: u32,
+        pub(crate) unk_b08: u32,
+        pub(crate) unk_b0c: u32,
+
+        #[ver(G >= G14X)]
+        pub(crate) pad_b10_0: Array<0x8, u8>,
+
+        pub(crate) unk_b10: u32,
+        pub(crate) timer_offset: U64,
+        pub(crate) unk_b1c: u32,
+        pub(crate) unk_b20: u32,
+        pub(crate) unk_b24: u32,
+        pub(crate) unk_b28: u32,
+        pub(crate) unk_b2c: u32,
+        pub(crate) unk_b30: u32,
+        pub(crate) unk_b34: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_b38_0: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_b38_4: u32,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_b38_8: u32,
+
+        pub(crate) unk_b38: Array<0xc, u32>,
+        pub(crate) unk_b68: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_b6c: Array<0xd0, u8>,
+
+        #[ver(G >= G14X)]
+        pub(crate) unk_c3c_0: Array<0x8, u8>,
+
+        #[ver(G < G14X && V >= V13_5)]
+        pub(crate) unk_c3c_8: Array<0x10, u8>,
+
+        #[ver(V >= V13_5)]
+        pub(crate) unk_c3c_18: Array<0x20, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_c3c: u32,
+    }
+    #[versions(AGX)]
+    default_zeroed!(HwDataB::ver);
+
+    #[derive(Debug)]
+    #[repr(C, packed)]
+    pub(crate) struct GpuStatsVtx {
+        // This changes all the time and we don't use it, so let's just make it a big buffer.
+        pub(crate) opaque: Array<0x3000, u8>,
+    }
+    default_zeroed!(GpuStatsVtx);
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct GpuStatsFrag {
+        // This changes all the time and we don't use it, so let's just make it a big buffer,
+        // except for the two fields below, which may need initialization.
+        #[ver(G >= G14X)]
+        pub(crate) unk1_0: Array<0x910, u8>,
+        pub(crate) unk1: Array<0x100, u8>,
+        pub(crate) cur_stamp_id: i32,
+        pub(crate) unk2: Array<0x14, u8>,
+        pub(crate) unk_id: i32,
+        pub(crate) unk3: Array<0x1000, u8>,
+    }
+
+    #[versions(AGX)]
+    impl Default for GpuStatsFrag::ver {
+        fn default() -> Self {
+            Self {
+                #[ver(G >= G14X)]
+                unk1_0: Default::default(),
+                unk1: Default::default(),
+                cur_stamp_id: -1,
+                unk2: Default::default(),
+                unk_id: -1,
+                unk3: Default::default(),
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct GpuGlobalStatsVtx {
+        pub(crate) total_cmds: u32,
+        pub(crate) stats: GpuStatsVtx,
+    }
+    default_zeroed!(GpuGlobalStatsVtx);
+
+    #[versions(AGX)]
+    #[derive(Debug, Default)]
+    #[repr(C)]
+    pub(crate) struct GpuGlobalStatsFrag {
+        pub(crate) total_cmds: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) stats: GpuStatsFrag::ver,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct GpuStatsComp {
+        // This changes all the time and we don't use it, so let's just make it a big buffer.
+        pub(crate) opaque: Array<0x3000, u8>,
+    }
+    default_zeroed!(GpuStatsComp);
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RuntimeScratch {
+        pub(crate) unk_280: Array<0x6800, u8>,
+        pub(crate) unk_6a80: u32,
+        pub(crate) gpu_idle: u32,
+        pub(crate) unkpad_6a88: Pad<0x14>,
+        pub(crate) unk_6a9c: u32,
+        pub(crate) unk_ctr0: u32,
+        pub(crate) unk_ctr1: u32,
+        pub(crate) unk_6aa8: u32,
+        pub(crate) unk_6aac: u32,
+        pub(crate) unk_ctr2: u32,
+        pub(crate) unk_6ab4: u32,
+        pub(crate) unk_6ab8: u32,
+        pub(crate) unk_6abc: u32,
+        pub(crate) unk_6ac0: u32,
+        pub(crate) unk_6ac4: u32,
+        pub(crate) unk_ctr3: u32,
+        pub(crate) unk_6acc: u32,
+        pub(crate) unk_6ad0: u32,
+        pub(crate) unk_6ad4: u32,
+        pub(crate) unk_6ad8: u32,
+        pub(crate) unk_6adc: u32,
+        pub(crate) unk_6ae0: u32,
+        pub(crate) unk_6ae4: u32,
+        pub(crate) unk_6ae8: u32,
+        pub(crate) unk_6aec: u32,
+        pub(crate) unk_6af0: u32,
+        pub(crate) unk_ctr4: u32,
+        pub(crate) unk_ctr5: u32,
+        pub(crate) unk_6afc: u32,
+        pub(crate) pad_6b00: Pad<0x38>,
+
+        #[ver(G >= G14X)]
+        pub(crate) pad_6b00_extra: Array<0x4800, u8>,
+
+        pub(crate) unk_6b38: u32,
+        pub(crate) pad_6b3c: Pad<0x84>,
+    }
+    #[versions(AGX)]
+    default_zeroed!(RuntimeScratch::ver);
+
+    #[versions(AGX)]
+    #[repr(C)]
+    pub(crate) struct RuntimePointers<'a> {
+        pub(crate) pipes: Array<4, PipeChannels::ver>,
+
+        pub(crate) device_control:
+            ChannelRing<channels::ChannelState, channels::DeviceControlMsg::ver>,
+        pub(crate) event: ChannelRing<channels::ChannelState, channels::RawEventMsg>,
+        pub(crate) fw_log: ChannelRing<channels::FwLogChannelState, channels::RawFwLogMsg>,
+        pub(crate) ktrace: ChannelRing<channels::ChannelState, channels::RawKTraceMsg>,
+        pub(crate) stats: ChannelRing<channels::ChannelState, channels::RawStatsMsg::ver>,
+
+        pub(crate) __pad0: Pad<0x50>,
+        pub(crate) unk_160: U64,
+        pub(crate) unk_168: U64,
+        pub(crate) stats_vtx: GpuPointer<'a, super::GpuGlobalStatsVtx>,
+        pub(crate) stats_frag: GpuPointer<'a, super::GpuGlobalStatsFrag::ver>,
+        pub(crate) stats_comp: GpuPointer<'a, super::GpuStatsComp>,
+        pub(crate) hwdata_a: GpuPointer<'a, super::HwDataA::ver>,
+        pub(crate) unkptr_190: GpuPointer<'a, &'a [u8]>,
+        pub(crate) unkptr_198: GpuPointer<'a, &'a [u8]>,
+        pub(crate) hwdata_b: GpuPointer<'a, super::HwDataB::ver>,
+        pub(crate) hwdata_b_2: GpuPointer<'a, super::HwDataB::ver>,
+        pub(crate) fwlog_buf: Option<GpuWeakPointer<[channels::RawFwLogPayloadMsg]>>,
+        pub(crate) unkptr_1b8: GpuPointer<'a, &'a [u8]>,
+
+        #[ver(G < G14X)]
+        pub(crate) unkptr_1c0: GpuPointer<'a, &'a [u8]>,
+        #[ver(G < G14X)]
+        pub(crate) unkptr_1c8: GpuPointer<'a, &'a [u8]>,
+
+        pub(crate) unk_1d0: u32,
+        pub(crate) unk_1d4: u32,
+        pub(crate) unk_1d8: Array<0x3c, u8>,
+        pub(crate) buffer_mgr_ctl_gpu_addr: U64,
+        pub(crate) buffer_mgr_ctl_fw_addr: U64,
+        pub(crate) __pad1: Pad<0x5c>,
+        pub(crate) gpu_scratch: RuntimeScratch::ver,
+    }
+    #[versions(AGX)]
+    no_debug!(RuntimePointers::ver<'_>);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct PendingStamp {
+        pub(crate) info: AtomicU32,
+        pub(crate) wait_value: AtomicU32,
+    }
+    default_zeroed!(PendingStamp);
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C, packed)]
+    pub(crate) struct FaultInfo {
+        pub(crate) unk_0: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) queue_uuid: u32,
+        pub(crate) unk_c: u32,
+        pub(crate) unk_10: u32,
+        pub(crate) unk_14: u32,
+    }
+    default_zeroed!(FaultInfo);
+
+    #[versions(AGX)]
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C, packed)]
+    pub(crate) struct GlobalsSub {
+        pub(crate) unk_54: u16,
+        pub(crate) unk_56: u16,
+        pub(crate) unk_58: u16,
+        pub(crate) unk_5a: U32,
+        pub(crate) unk_5e: U32,
+        pub(crate) unk_62: U32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_66_0: Array<0xc, u8>,
+
+        pub(crate) unk_66: U32,
+        pub(crate) unk_6a: Array<0x16, u8>,
+    }
+    #[versions(AGX)]
+    default_zeroed!(GlobalsSub::ver);
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct PowerZoneGlobal {
+        pub(crate) target: u32,
+        pub(crate) target_off: u32,
+        pub(crate) filter_tc: u32,
+    }
+    default_zeroed!(PowerZoneGlobal);
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Globals {
+        pub(crate) ktrace_enable: u32,
+        pub(crate) unk_4: Array<0x20, u8>,
+
+        #[ver(V >= V13_2)]
+        pub(crate) unk_24_0: u32,
+
+        pub(crate) unk_24: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) debug: u32,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_28_4: u32,
+
+        pub(crate) unk_28: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_2c_0: u32,
+
+        pub(crate) unk_2c: u32,
+        pub(crate) unk_30: u32,
+        pub(crate) unk_34: u32,
+        pub(crate) unk_38: Array<0x1c, u8>,
+
+        pub(crate) sub: GlobalsSub::ver,
+
+        pub(crate) unk_80: Array<0xf80, u8>,
+        pub(crate) unk_1000: Array<0x7000, u8>,
+        pub(crate) unk_8000: Array<0x900, u8>,
+
+        #[ver(G >= G14X)]
+        pub(crate) unk_8900_pad: Array<0x484c, u8>,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_8900_pad2: Array<0x54, u8>,
+
+        pub(crate) unk_8900: u32,
+        pub(crate) pending_submissions: AtomicU32,
+        pub(crate) max_power: u32,
+        pub(crate) max_pstate_scaled: u32,
+        pub(crate) max_pstate_scaled_2: u32,
+        pub(crate) unk_8914: u32,
+        pub(crate) unk_8918: u32,
+        pub(crate) max_pstate_scaled_3: u32,
+        pub(crate) unk_8920: u32,
+        pub(crate) power_zone_count: u32,
+        pub(crate) avg_power_filter_tc_periods: u32,
+        pub(crate) avg_power_ki_dt: F32,
+        pub(crate) avg_power_kp: F32,
+        pub(crate) avg_power_min_duty_cycle: u32,
+        pub(crate) avg_power_target_filter_tc: u32,
+        pub(crate) power_zones: Array<5, PowerZoneGlobal>,
+        pub(crate) unk_8978: Array<0x44, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_89bc_0: Array<0x3c, u8>,
+
+        pub(crate) unk_89bc: u32,
+        pub(crate) fast_die0_release_temp: u32,
+        pub(crate) unk_89c4: i32,
+        pub(crate) fast_die0_prop_tgt_delta: u32,
+        pub(crate) fast_die0_kp: F32,
+        pub(crate) fast_die0_ki_dt: F32,
+        pub(crate) unk_89d4: Array<0xc, u8>,
+        pub(crate) unk_89e0: u32,
+        pub(crate) max_power_2: u32,
+        pub(crate) ppm_kp: F32,
+        pub(crate) ppm_ki_dt: F32,
+        pub(crate) unk_89f0: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_89f4_0: Array<0x8, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_89f4_8: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_89f4_c: Array<0x50, u8>,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_89f4_5c: Array<0xc, u8>,
+
+        pub(crate) unk_89f4: u32,
+        pub(crate) hws1: HwDataShared1,
+        pub(crate) hws2: HwDataShared2,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) idle_off_standby_timer: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_hws2_4: Array<0x8, F32>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_hws2_24: u32,
+
+        pub(crate) unk_hws2_28: u32,
+
+        pub(crate) hws3: HwDataShared3,
+        pub(crate) unk_9004: Array<8, u8>,
+        pub(crate) unk_900c: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_9010_0: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_9010_4: Array<0x14, u8>,
+
+        pub(crate) unk_9010: Array<0x2c, u8>,
+        pub(crate) unk_903c: u32,
+        pub(crate) unk_9040: Array<0xc0, u8>,
+        pub(crate) unk_9100: Array<0x6f00, u8>,
+        pub(crate) unk_10000: Array<0xe50, u8>,
+        pub(crate) unk_10e50: u32,
+        pub(crate) unk_10e54: Array<0x2c, u8>,
+
+        #[ver((G >= G14X && V < V13_3) || (G <= G14 && V >= V13_3))]
+        pub(crate) unk_x_pad: Array<0x4, u8>,
+
+        // bit 0: sets sgx_reg 0x17620
+        // bit 1: sets sgx_reg 0x17630
+        pub(crate) fault_control: u32,
+        pub(crate) do_init: u32,
+        pub(crate) unk_10e88: Array<0x188, u8>,
+        pub(crate) idle_ts: U64,
+        pub(crate) idle_unk: U64,
+        pub(crate) progress_check_interval_3d: u32,
+        pub(crate) progress_check_interval_ta: u32,
+        pub(crate) progress_check_interval_cl: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_1102c_0: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_1102c_4: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_1102c_8: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_1102c_c: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_1102c_10: u32,
+
+        pub(crate) unk_1102c: u32,
+        pub(crate) idle_off_delay_ms: AtomicU32,
+        pub(crate) fender_idle_off_delay_ms: u32,
+        pub(crate) fw_early_wake_timeout_ms: u32,
+        #[ver(V == V13_3)]
+        pub(crate) ps_pad_0: Pad<0x8>,
+        pub(crate) pending_stamps: Array<0x100, PendingStamp>,
+        #[ver(V != V13_3)]
+        pub(crate) ps_pad_0: Pad<0x8>,
+        pub(crate) unkpad_ps: Pad<0x78>,
+        pub(crate) unk_117bc: u32,
+        pub(crate) fault_info: FaultInfo,
+        pub(crate) counter: u32,
+        pub(crate) unk_118dc: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_118e0_0: Array<0x9c, u8>,
+
+        #[ver(G >= G14X)]
+        pub(crate) unk_118e0_9c: Array<0x580, u8>,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_118e0_9c_x: Array<0x8, u8>,
+
+        pub(crate) cl_context_switch_timeout_ms: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) cl_kill_timeout_ms: u32,
+
+        pub(crate) cdm_context_store_latency_threshold: u32,
+        pub(crate) unk_118e8: u32,
+        pub(crate) unk_118ec: Array<0x400, u8>,
+        pub(crate) unk_11cec: Array<0x54, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_11d40: Array<0x19c, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_11edc: u32,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_11ee0: Array<0x1c, u8>,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_11efc: u32,
+
+        #[ver(V >= V13_3)]
+        pub(crate) unk_11f00: Array<0x280, u8>,
+    }
+    #[versions(AGX)]
+    default_zeroed!(Globals::ver);
+
+    #[derive(Debug, Default, Clone, Copy)]
+    #[repr(C, packed)]
+    pub(crate) struct UatLevelInfo {
+        pub(crate) unk_3: u8,
+        pub(crate) unk_1: u8,
+        pub(crate) unk_2: u8,
+        pub(crate) index_shift: u8,
+        pub(crate) num_entries: u16,
+        pub(crate) unk_4: u16,
+        pub(crate) unk_8: U64,
+        pub(crate) unk_10: U64,
+        pub(crate) index_mask: U64,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct InitData<'a> {
+        #[ver(V >= V13_0B4)]
+        pub(crate) ver_info: Array<0x4, u16>,
+
+        pub(crate) unk_buf: GpuPointer<'a, &'a [u8]>,
+        pub(crate) unk_8: u32,
+        pub(crate) unk_c: u32,
+        pub(crate) runtime_pointers: GpuPointer<'a, super::RuntimePointers::ver>,
+        pub(crate) globals: GpuPointer<'a, super::Globals::ver>,
+        pub(crate) fw_status: GpuPointer<'a, super::FwStatus>,
+        pub(crate) uat_page_size: u16,
+        pub(crate) uat_page_bits: u8,
+        pub(crate) uat_num_levels: u8,
+        pub(crate) uat_level_info: Array<0x3, UatLevelInfo>,
+        pub(crate) __pad0: Pad<0x14>,
+        pub(crate) host_mapped_fw_allocations: u32,
+        pub(crate) unk_ac: u32,
+        pub(crate) unk_b0: u32,
+        pub(crate) unk_b4: u32,
+        pub(crate) unk_b8: u32,
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct ChannelRing<T: GpuStruct + Debug + Default, U: Copy>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Debug,
+{
+    pub(crate) state: GpuObject<T>,
+    pub(crate) ring: GpuArray<U>,
+}
+
+impl<T: GpuStruct + Debug + Default, U: Copy> ChannelRing<T, U>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Debug,
+{
+    pub(crate) fn to_raw(&self) -> raw::ChannelRing<T, U> {
+        raw::ChannelRing {
+            state: Some(self.state.weak_pointer()),
+            ring: Some(self.ring.weak_pointer()),
+        }
+    }
+}
+
+trivial_gpustruct!(FwStatus);
+trivial_gpustruct!(GpuGlobalStatsVtx);
+#[versions(AGX)]
+trivial_gpustruct!(GpuGlobalStatsFrag::ver);
+trivial_gpustruct!(GpuStatsComp);
+
+#[versions(AGX)]
+trivial_gpustruct!(HwDataA::ver);
+
+#[versions(AGX)]
+trivial_gpustruct!(HwDataB::ver);
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct Stats {
+    pub(crate) vtx: GpuObject<GpuGlobalStatsVtx>,
+    pub(crate) frag: GpuObject<GpuGlobalStatsFrag::ver>,
+    pub(crate) comp: GpuObject<GpuStatsComp>,
+}
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct RuntimePointers {
+    pub(crate) stats: Stats::ver,
+
+    pub(crate) hwdata_a: GpuObject<HwDataA::ver>,
+    pub(crate) unkptr_190: GpuArray<u8>,
+    pub(crate) unkptr_198: GpuArray<u8>,
+    pub(crate) hwdata_b: GpuObject<HwDataB::ver>,
+
+    pub(crate) unkptr_1b8: GpuArray<u8>,
+    pub(crate) unkptr_1c0: GpuArray<u8>,
+    pub(crate) unkptr_1c8: GpuArray<u8>,
+
+    pub(crate) buffer_mgr_ctl: gem::ObjectRef,
+    pub(crate) buffer_mgr_ctl_low_mapping: Option<mmu::KernelMapping>,
+    pub(crate) buffer_mgr_ctl_high_mapping: Option<mmu::KernelMapping>,
+}
+
+#[versions(AGX)]
+impl GpuStruct for RuntimePointers::ver {
+    type Raw<'a> = raw::RuntimePointers::ver<'a>;
+}
+
+#[versions(AGX)]
+trivial_gpustruct!(Globals::ver);
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct InitData {
+    pub(crate) unk_buf: GpuArray<u8>,
+    pub(crate) runtime_pointers: GpuObject<RuntimePointers::ver>,
+    pub(crate) globals: GpuObject<Globals::ver>,
+    pub(crate) fw_status: GpuObject<FwStatus>,
+}
+
+#[versions(AGX)]
+impl GpuStruct for InitData::ver {
+    type Raw<'a> = raw::InitData::ver<'a>;
+}
diff --git a/drivers/gpu/drm/asahi/fw/job.rs b/drivers/gpu/drm/asahi/fw/job.rs
new file mode 100644
index 00000000000000..4082f4d9ff59d1
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/job.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Common GPU job firmware structures
+
+use super::types::*;
+use crate::{default_zeroed, mmu, trivial_gpustruct};
+use kernel::prelude::Result;
+use kernel::sync::Arc;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct JobMeta {
+        pub(crate) unk_0: u16,
+        pub(crate) unk_2: u8,
+        pub(crate) no_preemption: u8,
+        pub(crate) stamp: GpuWeakPointer<Stamp>,
+        pub(crate) fw_stamp: GpuWeakPointer<FwStamp>,
+        pub(crate) stamp_value: EventValue,
+        pub(crate) stamp_slot: u32,
+        pub(crate) evctl_index: u32,
+        pub(crate) flush_stamps: u32,
+        pub(crate) uuid: u32,
+        pub(crate) event_seq: u32,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct EncoderParams {
+        pub(crate) unk_8: u32,
+        pub(crate) sync_grow: u32,
+        pub(crate) unk_10: u32,
+        pub(crate) encoder_id: u32,
+        pub(crate) unk_18: u32,
+        pub(crate) unk_mask: u32,
+        pub(crate) sampler_array: U64,
+        pub(crate) sampler_count: u32,
+        pub(crate) sampler_max: u32,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobTimestamps {
+        pub(crate) start: AtomicU64,
+        pub(crate) end: AtomicU64,
+    }
+    default_zeroed!(JobTimestamps);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RenderTimestamps {
+        pub(crate) vtx: JobTimestamps,
+        pub(crate) frag: JobTimestamps,
+    }
+    default_zeroed!(RenderTimestamps);
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Register {
+        pub(crate) number: u32,
+        pub(crate) value: U64,
+    }
+    default_zeroed!(Register);
+
+    impl Register {
+        fn new(number: u32, value: u64) -> Register {
+            Register {
+                number,
+                value: U64(value),
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RegisterArray {
+        pub(crate) registers: Array<128, Register>,
+        pub(crate) pad: Array<0x100, u8>,
+
+        pub(crate) addr: GpuWeakPointer<Array<128, Register>>,
+        pub(crate) count: u16,
+        pub(crate) length: u16,
+        pub(crate) unk_pad: u32,
+    }
+
+    impl RegisterArray {
+        pub(crate) fn new(
+            self_ptr: GpuWeakPointer<Array<128, Register>>,
+            cb: impl FnOnce(&mut RegisterArray),
+        ) -> RegisterArray {
+            let mut array = RegisterArray {
+                registers: Default::default(),
+                pad: Default::default(),
+                addr: self_ptr,
+                count: 0,
+                length: 0,
+                unk_pad: 0,
+            };
+
+            cb(&mut array);
+
+            array
+        }
+
+        pub(crate) fn add(&mut self, number: u32, value: u64) {
+            self.registers[self.count as usize] = Register::new(number, value);
+            self.count += 1;
+            self.length += core::mem::size_of::<Register>() as u16;
+        }
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct TimestampPointers<'a> {
+        pub(crate) start_addr: Option<GpuPointer<'a, AtomicU64>>,
+        pub(crate) end_addr: Option<GpuPointer<'a, AtomicU64>>,
+    }
+}
+
+trivial_gpustruct!(JobTimestamps);
+trivial_gpustruct!(RenderTimestamps);
+
+#[derive(Debug)]
+pub(crate) struct UserTimestamp {
+    pub(crate) mapping: Arc<mmu::KernelMapping>,
+    pub(crate) offset: usize,
+}
+
+#[derive(Debug, Default)]
+pub(crate) struct UserTimestamps {
+    pub(crate) start: Option<UserTimestamp>,
+    pub(crate) end: Option<UserTimestamp>,
+}
+
+impl UserTimestamps {
+    pub(crate) fn any(&self) -> bool {
+        self.start.is_some() || self.end.is_some()
+    }
+
+    pub(crate) fn pointers(&self) -> Result<raw::TimestampPointers<'_>> {
+        Ok(raw::TimestampPointers {
+            start_addr: self
+                .start
+                .as_ref()
+                .map(|a| GpuPointer::from_mapping(&a.mapping, a.offset))
+                .transpose()?,
+            end_addr: self
+                .end
+                .as_ref()
+                .map(|a| GpuPointer::from_mapping(&a.mapping, a.offset))
+                .transpose()?,
+        })
+    }
+}
diff --git a/drivers/gpu/drm/asahi/fw/microseq.rs b/drivers/gpu/drm/asahi/fw/microseq.rs
new file mode 100644
index 00000000000000..554f6a308662a4
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/microseq.rs
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU firmware microsequence operations
+
+use super::types::*;
+use super::{buffer, compute, fragment, initdata, job, vertex, workqueue};
+use crate::default_zeroed;
+
+pub(crate) trait Operation {}
+
+#[derive(Debug, Copy, Clone)]
+#[repr(u32)]
+enum OpCode {
+    WaitForIdle = 0x01,
+    WaitForIdle2 = 0x02,
+    RetireStamp = 0x18,
+    #[allow(dead_code)]
+    Timestamp = 0x19,
+    StartVertex = 0x22,
+    FinalizeVertex = 0x23,
+    StartFragment = 0x24,
+    FinalizeFragment = 0x25,
+    StartCompute = 0x29,
+    FinalizeCompute = 0x2a,
+}
+
+#[derive(Debug, Copy, Clone)]
+#[repr(u32)]
+pub(crate) enum Pipe {
+    Vertex = 1 << 0,
+    Fragment = 1 << 8,
+    Compute = 1 << 15,
+}
+
+pub(crate) const MAX_ATTACHMENTS: usize = 16;
+
+#[derive(Debug, Clone, Copy)]
+#[repr(C)]
+pub(crate) struct Attachment {
+    pub(crate) address: U64,
+    pub(crate) size: u32,
+    pub(crate) unk_c: u16,
+    pub(crate) unk_e: u16,
+}
+default_zeroed!(Attachment);
+
+#[derive(Debug, Clone, Copy, Default)]
+#[repr(C)]
+pub(crate) struct Attachments {
+    pub(crate) list: Array<MAX_ATTACHMENTS, Attachment>,
+    pub(crate) count: u32,
+}
+
+#[derive(Debug, Copy, Clone)]
+#[repr(transparent)]
+pub(crate) struct OpHeader(u32);
+
+impl OpHeader {
+    const fn new(opcode: OpCode) -> OpHeader {
+        OpHeader(opcode as u32)
+    }
+    const fn with_args(opcode: OpCode, args: u32) -> OpHeader {
+        OpHeader(opcode as u32 | args)
+    }
+}
+
+macro_rules! simple_op {
+    ($name:ident) => {
+        #[allow(dead_code)]
+        #[derive(Debug, Copy, Clone)]
+        pub(crate) struct $name(OpHeader);
+
+        impl $name {
+            pub(crate) const HEADER: $name = $name(OpHeader::new(OpCode::$name));
+        }
+    };
+}
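+
+// For reference, `simple_op!(StartVertex)` below expands to roughly the
+// following: a typed unit struct wrapping the matching opcode header.
+//
+//     #[allow(dead_code)]
+//     #[derive(Debug, Copy, Clone)]
+//     pub(crate) struct StartVertex(OpHeader);
+//
+//     impl StartVertex {
+//         pub(crate) const HEADER: StartVertex =
+//             StartVertex(OpHeader::new(OpCode::StartVertex));
+//     }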
+
+pub(crate) mod op {
+    use super::*;
+
+    simple_op!(StartVertex);
+    simple_op!(FinalizeVertex);
+    simple_op!(StartFragment);
+    simple_op!(FinalizeFragment);
+    simple_op!(StartCompute);
+    simple_op!(FinalizeCompute);
+    simple_op!(WaitForIdle2);
+
+    #[allow(dead_code)]
+    #[derive(Debug, Copy, Clone)]
+    pub(crate) struct RetireStamp(OpHeader);
+    impl RetireStamp {
+        pub(crate) const HEADER: RetireStamp =
+            RetireStamp(OpHeader::with_args(OpCode::RetireStamp, 0x40000000));
+    }
+
+    #[allow(dead_code)]
+    #[derive(Debug, Copy, Clone)]
+    pub(crate) struct WaitForIdle(OpHeader);
+    impl WaitForIdle {
+        pub(crate) const fn new(pipe: Pipe) -> WaitForIdle {
+            WaitForIdle(OpHeader::with_args(OpCode::WaitForIdle, (pipe as u32) << 8))
+        }
+    }
+
+    #[allow(dead_code)]
+    #[derive(Debug, Copy, Clone)]
+    pub(crate) struct Timestamp(OpHeader);
+    impl Timestamp {
+        #[allow(dead_code)]
+        pub(crate) const fn new(flag: bool) -> Timestamp {
+            Timestamp(OpHeader::with_args(OpCode::Timestamp, (flag as u32) << 31))
+        }
+    }
+}
+
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct WaitForIdle {
+    pub(crate) header: op::WaitForIdle,
+}
+
+impl Operation for WaitForIdle {}
+
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct WaitForIdle2 {
+    pub(crate) header: op::WaitForIdle2,
+}
+
+impl Operation for WaitForIdle2 {}
+
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct RetireStamp {
+    pub(crate) header: op::RetireStamp,
+}
+
+impl Operation for RetireStamp {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct Timestamp<'a> {
+    pub(crate) header: op::Timestamp,
+    pub(crate) command_time: GpuWeakPointer<U64>,
+    pub(crate) ts_pointers: GpuWeakPointer<job::raw::TimestampPointers<'a>>,
+    // Unused?
+    pub(crate) update_ts: GpuWeakPointer<Option<GpuPointer<'a, AtomicU64>>>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) user_ts_pointers: GpuWeakPointer<job::raw::TimestampPointers<'a>>,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_ts: GpuWeakPointer<U64>,
+
+    pub(crate) uuid: u32,
+    pub(crate) unk_30_padding: u32,
+}
+
+#[versions(AGX)]
+impl<'a> Operation for Timestamp::ver<'a> {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct StartVertex<'a> {
+    pub(crate) header: op::StartVertex,
+    pub(crate) tiling_params: Option<GpuWeakPointer<vertex::raw::TilingParameters>>,
+    pub(crate) job_params1: Option<GpuWeakPointer<vertex::raw::JobParameters1::ver<'a>>>,
+    #[ver(G >= G14X)]
+    pub(crate) registers: GpuWeakPointer<job::raw::RegisterArray>,
+    pub(crate) buffer: GpuWeakPointer<buffer::Info::ver>,
+    pub(crate) scene: GpuWeakPointer<buffer::Scene::ver>,
+    pub(crate) stats: GpuWeakPointer<initdata::raw::GpuStatsVtx>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) vm_slot: u32,
+    pub(crate) unk_38: u32,
+    pub(crate) event_generation: u32,
+    pub(crate) buffer_slot: u32,
+    pub(crate) unk_44: u32,
+    pub(crate) event_seq: U64,
+    pub(crate) unk_50: u32,
+    pub(crate) unk_pointer: GpuWeakPointer<u32>,
+    pub(crate) unk_job_buf: GpuWeakPointer<U64>,
+    pub(crate) unk_64: u32,
+    pub(crate) unk_68: u32,
+    pub(crate) uuid: u32,
+    pub(crate) attachments: Attachments,
+    pub(crate) padding: u32,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) counter: U64,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) notifier_buf: GpuWeakPointer<Array<0x8, u8>>,
+
+    pub(crate) unk_178: u32,
+}
+
+#[versions(AGX)]
+impl<'a> Operation for StartVertex::ver<'a> {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct FinalizeVertex {
+    pub(crate) header: op::FinalizeVertex,
+    pub(crate) scene: GpuWeakPointer<buffer::Scene::ver>,
+    pub(crate) buffer: GpuWeakPointer<buffer::Info::ver>,
+    pub(crate) stats: GpuWeakPointer<initdata::raw::GpuStatsVtx>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) vm_slot: u32,
+    pub(crate) unk_28: u32,
+    pub(crate) unk_pointer: GpuWeakPointer<u32>,
+    pub(crate) unk_34: u32,
+    pub(crate) uuid: u32,
+    pub(crate) fw_stamp: GpuWeakPointer<FwStamp>,
+    pub(crate) stamp_value: EventValue,
+    pub(crate) unk_48: U64,
+    pub(crate) unk_50: u32,
+    pub(crate) unk_54: u32,
+    pub(crate) unk_58: U64,
+    pub(crate) unk_60: u32,
+    pub(crate) unk_64: u32,
+    pub(crate) unk_68: u32,
+
+    #[ver(G >= G14 && V < V13_0B4)]
+    pub(crate) unk_68_g14: U64,
+
+    pub(crate) restart_branch_offset: i32,
+    pub(crate) has_attachments: u32, // Check DCMP errors bits 2,3 1=ktrace 2=log 3=panic
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_74: Array<0x10, u8>,
+}
+
+#[versions(AGX)]
+impl Operation for FinalizeVertex::ver {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct StartFragment<'a> {
+    pub(crate) header: op::StartFragment,
+    pub(crate) job_params2: Option<GpuWeakPointer<fragment::raw::JobParameters2>>,
+    pub(crate) job_params1: Option<GpuWeakPointer<fragment::raw::JobParameters1::ver<'a>>>,
+    #[ver(G >= G14X)]
+    pub(crate) registers: GpuWeakPointer<job::raw::RegisterArray>,
+    pub(crate) scene: GpuPointer<'a, buffer::Scene::ver>,
+    pub(crate) stats: GpuWeakPointer<initdata::raw::GpuStatsFrag::ver>,
+    pub(crate) busy_flag: GpuWeakPointer<u32>,
+    pub(crate) tvb_overflow_count: GpuWeakPointer<u32>,
+    pub(crate) unk_pointer: GpuWeakPointer<u32>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) work_item: GpuWeakPointer<fragment::RunFragment::ver>,
+    pub(crate) vm_slot: u32,
+    pub(crate) unk_50: u32,
+    pub(crate) event_generation: u32,
+    pub(crate) buffer_slot: u32,
+    pub(crate) sync_grow: u32,
+    pub(crate) event_seq: U64,
+    pub(crate) unk_68: u32,
+    pub(crate) unk_758_flag: GpuWeakPointer<u32>,
+    pub(crate) unk_job_buf: GpuWeakPointer<U64>,
+    #[ver(V >= V13_3)]
+    pub(crate) unk_7c_0: U64,
+    pub(crate) unk_7c: u32,
+    pub(crate) unk_80: u32,
+    pub(crate) unk_84: u32,
+    pub(crate) uuid: u32,
+    pub(crate) attachments: Attachments,
+    pub(crate) padding: u32,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) counter: U64,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) notifier_buf: GpuWeakPointer<Array<0x8, u8>>,
+}
+
+#[versions(AGX)]
+impl<'a> Operation for StartFragment::ver<'a> {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct FinalizeFragment {
+    pub(crate) header: op::FinalizeFragment,
+    pub(crate) uuid: u32,
+    pub(crate) unk_8: u32,
+    pub(crate) fw_stamp: GpuWeakPointer<FwStamp>,
+    pub(crate) stamp_value: EventValue,
+    pub(crate) unk_18: u32,
+    pub(crate) scene: GpuWeakPointer<buffer::Scene::ver>,
+    pub(crate) buffer: GpuWeakPointer<buffer::Info::ver>,
+    pub(crate) unk_2c: U64,
+    pub(crate) stats: GpuWeakPointer<initdata::raw::GpuStatsFrag::ver>,
+    pub(crate) unk_pointer: GpuWeakPointer<u32>,
+    pub(crate) busy_flag: GpuWeakPointer<u32>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) work_item: GpuWeakPointer<fragment::RunFragment::ver>,
+    pub(crate) vm_slot: u32,
+    pub(crate) unk_60: u32,
+    pub(crate) unk_758_flag: GpuWeakPointer<u32>,
+    #[ver(V >= V13_3)]
+    pub(crate) unk_6c_0: U64,
+    pub(crate) unk_6c: U64,
+    pub(crate) unk_74: U64,
+    pub(crate) unk_7c: U64,
+    pub(crate) unk_84: U64,
+    pub(crate) unk_8c: U64,
+
+    #[ver(G == G14 && V < V13_0B4)]
+    pub(crate) unk_8c_g14: U64,
+
+    pub(crate) restart_branch_offset: i32,
+    pub(crate) has_attachments: u32, // Check DCMP errors bits 2,3 1=ktrace 2=log 3=panic
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_9c: Array<0x10, u8>,
+}
+
+#[versions(AGX)]
+impl Operation for FinalizeFragment::ver {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct StartCompute<'a> {
+    pub(crate) header: op::StartCompute,
+    pub(crate) unk_pointer: GpuWeakPointer<u32>,
+    pub(crate) job_params1: Option<GpuWeakPointer<compute::raw::JobParameters1<'a>>>,
+    #[ver(G >= G14X)]
+    pub(crate) registers: GpuWeakPointer<job::raw::RegisterArray>,
+    pub(crate) stats: GpuWeakPointer<initdata::GpuStatsComp>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) vm_slot: u32,
+    pub(crate) unk_28: u32,
+    pub(crate) event_generation: u32,
+    pub(crate) event_seq: U64,
+    pub(crate) unk_38: u32,
+    pub(crate) job_params2: GpuWeakPointer<compute::raw::JobParameters2::ver<'a>>,
+    pub(crate) unk_44: u32,
+    pub(crate) uuid: u32,
+    pub(crate) attachments: Attachments,
+    pub(crate) padding: u32,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_flag: GpuWeakPointer<U32>,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) counter: U64,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) notifier_buf: GpuWeakPointer<Array<0x8, u8>>,
+}
+
+#[versions(AGX)]
+impl<'a> Operation for StartCompute::ver<'a> {}
+
+#[versions(AGX)]
+#[derive(Debug)]
+#[repr(C)]
+pub(crate) struct FinalizeCompute<'a> {
+    pub(crate) header: op::FinalizeCompute,
+    pub(crate) stats: GpuWeakPointer<initdata::GpuStatsComp>,
+    pub(crate) work_queue: GpuWeakPointer<workqueue::QueueInfo::ver>,
+    pub(crate) vm_slot: u32,
+    #[ver(V < V13_0B4)]
+    pub(crate) unk_18: u32,
+    pub(crate) job_params2: GpuWeakPointer<compute::raw::JobParameters2::ver<'a>>,
+    pub(crate) unk_24: u32,
+    pub(crate) uuid: u32,
+    pub(crate) fw_stamp: GpuWeakPointer<FwStamp>,
+    pub(crate) stamp_value: EventValue,
+    pub(crate) unk_38: u32,
+    pub(crate) unk_3c: u32,
+    pub(crate) unk_40: u32,
+    pub(crate) unk_44: u32,
+    pub(crate) unk_48: u32,
+    pub(crate) unk_4c: u32,
+    pub(crate) unk_50: u32,
+    pub(crate) unk_54: u32,
+    pub(crate) unk_58: u32,
+
+    #[ver(G == G14 && V < V13_0B4)]
+    pub(crate) unk_5c_g14: U64,
+
+    pub(crate) restart_branch_offset: i32,
+    pub(crate) has_attachments: u32, // Check DCMP errors bits 2,3 1=ktrace 2=log 3=panic
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_64: Array<0xd, u8>,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_flag: GpuWeakPointer<U32>,
+
+    #[ver(V >= V13_0B4)]
+    pub(crate) unk_79: Array<0x7, u8>,
+}
+
+#[versions(AGX)]
+impl<'a> Operation for FinalizeCompute::ver<'a> {}
diff --git a/drivers/gpu/drm/asahi/fw/mod.rs b/drivers/gpu/drm/asahi/fw/mod.rs
new file mode 100644
index 00000000000000..a5649aa20d3a8e
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/mod.rs
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Firmware structures for Apple AGX GPUs
+
+pub(crate) mod buffer;
+pub(crate) mod channels;
+pub(crate) mod compute;
+pub(crate) mod event;
+pub(crate) mod fragment;
+pub(crate) mod initdata;
+pub(crate) mod job;
+pub(crate) mod microseq;
+pub(crate) mod types;
+pub(crate) mod vertex;
+pub(crate) mod workqueue;
diff --git a/drivers/gpu/drm/asahi/fw/types.rs b/drivers/gpu/drm/asahi/fw/types.rs
new file mode 100644
index 00000000000000..65995170accef1
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/types.rs
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Common types for firmware structure definitions
+
+use crate::{alloc, object};
+use core::fmt;
+use core::ops::{Deref, DerefMut, Index, IndexMut};
+
+pub(crate) use crate::event::EventValue;
+pub(crate) use crate::object::{GpuPointer, GpuStruct, GpuWeakPointer};
+pub(crate) use crate::{f32, float::F32};
+
+pub(crate) use core::fmt::Debug;
+pub(crate) use core::marker::PhantomData;
+pub(crate) use core::sync::atomic::{AtomicI32, AtomicU32, AtomicU64};
+pub(crate) use kernel::init::Zeroable;
+pub(crate) use kernel::macros::versions;
+
+// Make the trait visible
+pub(crate) use crate::alloc::Allocator as _Allocator;
+
+/// General allocator type used for the driver
+pub(crate) type Allocator = alloc::DefaultAllocator;
+
+/// General GpuObject type used for the driver
+pub(crate) type GpuObject<T> =
+    object::GpuObject<T, alloc::GenericAlloc<T, alloc::DefaultAllocation>>;
+
+/// General GpuArray type used for the driver
+pub(crate) type GpuArray<T> = object::GpuArray<T, alloc::GenericAlloc<T, alloc::DefaultAllocation>>;
+
+/// General GpuOnlyArray type used for the driver
+pub(crate) type GpuOnlyArray<T> =
+    object::GpuOnlyArray<T, alloc::GenericAlloc<T, alloc::DefaultAllocation>>;
+
+/// A stamp slot that is shared between firmware and the driver.
+#[derive(Debug, Default)]
+#[repr(transparent)]
+pub(crate) struct Stamp(pub(crate) AtomicU32);
+
+/// A stamp slot that is for private firmware use.
+///
+/// This is a separate type to guard against pointer type confusion.
+#[derive(Debug, Default)]
+#[repr(transparent)]
+pub(crate) struct FwStamp(pub(crate) AtomicU32);
+
+/// An unaligned u64 type.
+///
+/// This is useful to avoid having to pack firmware structures entirely, since that is incompatible
+/// with `#[derive(Debug)]` and atomics.
+#[derive(Copy, Clone, Default)]
+#[repr(C, packed(1))]
+pub(crate) struct U64(pub(crate) u64);
+
+// SAFETY: U64 is zeroable just like u64
+unsafe impl Zeroable for U64 {}
+
+impl fmt::Debug for U64 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let v = self.0;
+        f.write_fmt(format_args!("{:#x}", v))
+    }
+}
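+
+// Illustrative sketch, not driver code: with `U64`, a `#[repr(C)]` firmware
+// struct can carry a 64-bit field at an unaligned offset without marking the
+// whole struct packed. The struct and field names below are hypothetical.
+//
+//     #[derive(Debug, Default)]
+//     #[repr(C)]
+//     struct ExampleRecord {
+//         flags: u32, // offset 0
+//         value: U64, // offset 4; a plain u64 would be padded up to offset 8
+//     }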
+
+/// An unaligned u32 type.
+///
+/// This is useful to avoid having to pack firmware structures entirely, since that is incompatible
+/// with `#[derive(Debug)]` and atomics.
+#[derive(Copy, Clone, Default)]
+#[repr(C, packed(1))]
+pub(crate) struct U32(pub(crate) u32);
+
+// SAFETY: U32 is zeroable just like u32
+unsafe impl Zeroable for U32 {}
+
+impl fmt::Debug for U32 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let v = self.0;
+        f.write_fmt(format_args!("{:#x}", v))
+    }
+}
+
+/// Create a dummy `Debug` implementation, for types where we need one but a real implementation
+/// would be too painful to write by hand or not very useful.
+#[macro_export]
+macro_rules! no_debug {
+    ($type:ty) => {
+        impl ::core::fmt::Debug for $type {
+            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+                write!(f, "...")
+            }
+        }
+    };
+}
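+
+// Usage sketch: `crate::no_debug!(SomeHandle);` (hypothetical type) makes any
+// `{:?}` formatting of `SomeHandle` print `...`. An actual use is
+// `no_debug!(ObjectRef)` in gem.rs below.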
+
+/// Implement Zeroable for a given type (and Default along with it).
+///
+/// # Safety
+///
+/// This macro must only be used if a type only contains primitive types which can be
+/// zero-initialized, FFI structs intended to be zero-initialized, or other types which
+/// impl Zeroable.
+#[macro_export]
+macro_rules! default_zeroed {
+    (<$($lt:lifetime),*>, $type:ty) => {
+        impl<$($lt),*> Default for $type {
+            fn default() -> $type {
+                ::kernel::init::Zeroable::zeroed()
+            }
+        }
+        // SAFETY: The user is responsible for ensuring this is safe.
+        unsafe impl<$($lt),*> ::kernel::init::Zeroable for $type {}
+    };
+    ($type:ty) => {
+        impl Default for $type {
+            fn default() -> $type {
+                ::kernel::init::Zeroable::zeroed()
+            }
+        }
+        // SAFETY: The user is responsible for ensuring this is safe.
+        unsafe impl ::kernel::init::Zeroable for $type {}
+    };
+}
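+
+// Usage sketch with a hypothetical raw type (a real use is
+// `default_zeroed!(RingState)` in fw/workqueue.rs below):
+//
+//     #[repr(C)]
+//     struct ExampleRaw {
+//         counter: U64,
+//         __pad: Pad<0x10>,
+//     }
+//     default_zeroed!(ExampleRaw);
+//
+// After this, `ExampleRaw::default()` is all-zeroes and the type can be nested
+// inside other `Zeroable` structures.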
+
+/// A convenience type for a number of padding bytes. Hidden from Debug formatting.
+#[derive(Copy, Clone)]
+#[repr(C, packed)]
+pub(crate) struct Pad<const N: usize>([u8; N]);
+
+// SAFETY: Primitive type, safe to zero-init.
+unsafe impl<const N: usize> Zeroable for Pad<N> {}
+
+impl<const N: usize> Default for Pad<N> {
+    fn default() -> Self {
+        Zeroable::zeroed()
+    }
+}
+
+impl<const N: usize> fmt::Debug for Pad<N> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_fmt(format_args!("<pad>"))
+    }
+}
+
+/// A convenience type for a fixed-sized array with Default/Zeroable impls.
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub(crate) struct Array<const N: usize, T>([T; N]);
+
+impl<const N: usize, T> Array<N, T> {
+    pub(crate) fn new(data: [T; N]) -> Self {
+        Self(data)
+    }
+}
+
+// SAFETY: Arrays of Zeroable values can be safely Zeroable.
+unsafe impl<const N: usize, T: Zeroable> Zeroable for Array<N, T> {}
+
+impl<const N: usize, T: Zeroable> Default for Array<N, T> {
+    fn default() -> Self {
+        Zeroable::zeroed()
+    }
+}
+
+impl<const N: usize, T> Index<usize> for Array<N, T> {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.0[index]
+    }
+}
+
+impl<const N: usize, T> IndexMut<usize> for Array<N, T> {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.0[index]
+    }
+}
+
+impl<const N: usize, T> Deref for Array<N, T> {
+    type Target = [T; N];
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<const N: usize, T> DerefMut for Array<N, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl<const N: usize, T: Sized + fmt::Debug> fmt::Debug for Array<N, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
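+
+// Behavior sketch (illustrative, not driver code): `Array` wraps `[T; N]` so
+// that fixed-size firmware fields get `Default`/`Zeroable` even for large `N`,
+// while still indexing and dereferencing like a plain array.
+//
+//     let mut a: Array<4, u32> = Default::default(); // zero-initialized
+//     a[0] = 1;                                      // IndexMut
+//     assert_eq!(a.len(), 4);                        // Deref to [u32; 4]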
+
+/// Convenience macro to define an identically-named trivial GpuStruct with no inner fields for a
+/// given raw type name.
+#[macro_export]
+macro_rules! trivial_gpustruct {
+    ($type:ident) => {
+        #[derive(Debug)]
+        pub(crate) struct $type {}
+
+        impl GpuStruct for $type {
+            type Raw<'a> = raw::$type;
+        }
+        $crate::default_zeroed!($type);
+    };
+}
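+
+// For reference, `trivial_gpustruct!(Barrier)` (used in fw/workqueue.rs below)
+// expands to roughly:
+//
+//     #[derive(Debug)]
+//     pub(crate) struct Barrier {}
+//
+//     impl GpuStruct for Barrier {
+//         type Raw<'a> = raw::Barrier;
+//     }
+//     default_zeroed!(Barrier);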
diff --git a/drivers/gpu/drm/asahi/fw/vertex.rs b/drivers/gpu/drm/asahi/fw/vertex.rs
new file mode 100644
index 00000000000000..25915a36adf9a3
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/vertex.rs
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU vertex job firmware structures
+
+use super::types::*;
+use super::{event, job, workqueue};
+use crate::{buffer, fw, microseq, mmu};
+use kernel::sync::Arc;
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug, Default, Copy, Clone)]
+    #[repr(C)]
+    pub(crate) struct TilingParameters {
+        pub(crate) rgn_size: u32,
+        pub(crate) unk_4: u32,
+        pub(crate) ppp_ctrl: u32,
+        pub(crate) x_max: u16,
+        pub(crate) y_max: u16,
+        pub(crate) te_screen: u32,
+        pub(crate) te_mtile1: u32,
+        pub(crate) te_mtile2: u32,
+        pub(crate) tiles_per_mtile: u32,
+        pub(crate) tpc_stride: u32,
+        pub(crate) unk_24: u32,
+        pub(crate) unk_28: u32,
+        pub(crate) helper_cfg: u32,
+        pub(crate) __pad: Pad<0x70>,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters1<'a> {
+        pub(crate) unk_0: U64,
+        pub(crate) unk_8: F32,
+        pub(crate) unk_c: F32,
+        pub(crate) tvb_tilemap: GpuPointer<'a, &'a [u8]>,
+        #[ver(G < G14)]
+        pub(crate) tvb_cluster_tilemaps: Option<GpuPointer<'a, &'a [u8]>>,
+        pub(crate) tpc: GpuPointer<'a, &'a [u8]>,
+        pub(crate) tvb_heapmeta: GpuPointer<'a, &'a [u8]>,
+        pub(crate) iogpu_unk_54: U64,
+        pub(crate) iogpu_unk_56: U64,
+        #[ver(G < G14)]
+        pub(crate) tvb_cluster_meta1: Option<GpuPointer<'a, &'a [u8]>>,
+        pub(crate) utile_config: u32,
+        pub(crate) unk_4c: u32,
+        pub(crate) ppp_multisamplectl: U64,
+        pub(crate) tvb_layermeta: GpuPointer<'a, &'a [u8]>,
+        #[ver(G < G14)]
+        pub(crate) tvb_cluster_layermeta: Option<GpuPointer<'a, &'a [u8]>>,
+        #[ver(G < G14)]
+        pub(crate) core_mask: Array<2, u32>,
+        pub(crate) preempt_buf1: GpuPointer<'a, &'a [u8]>,
+        pub(crate) preempt_buf2: GpuPointer<'a, &'a [u8]>,
+        pub(crate) unk_80: U64,
+        pub(crate) preempt_buf3: GpuPointer<'a, &'a [u8]>,
+        pub(crate) vdm_ctrl_stream_base: U64,
+        #[ver(G < G14)]
+        pub(crate) tvb_cluster_meta2: Option<GpuPointer<'a, &'a [u8]>>,
+        #[ver(G < G14)]
+        pub(crate) tvb_cluster_meta3: Option<GpuPointer<'a, &'a [u8]>>,
+        #[ver(G < G14)]
+        pub(crate) tiling_control: u32,
+        #[ver(G < G14)]
+        pub(crate) unk_ac: u32,
+        pub(crate) unk_b0: Array<6, U64>,
+        pub(crate) usc_exec_base_ta: U64,
+        #[ver(G < G14)]
+        pub(crate) tvb_cluster_meta4: Option<GpuPointer<'a, &'a [u8]>>,
+        #[ver(G < G14)]
+        pub(crate) unk_f0: U64,
+        pub(crate) unk_f8: U64,
+        pub(crate) helper_program: u32,
+        pub(crate) unk_104: u32,
+        pub(crate) helper_arg: U64,
+        pub(crate) unk_110: U64,
+        pub(crate) unk_118: u32,
+        #[ver(G >= G14)]
+        pub(crate) __pad: Pad<{ 8 * 9 + 0x268 }>,
+        #[ver(G < G14)]
+        pub(crate) __pad: Pad<0x268>,
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct JobParameters2<'a> {
+        pub(crate) unk_480: Array<4, u32>,
+        pub(crate) unk_498: U64,
+        pub(crate) unk_4a0: u32,
+        pub(crate) preempt_buf1: GpuPointer<'a, &'a [u8]>,
+        pub(crate) unk_4ac: u32,
+        pub(crate) unk_4b0: U64,
+        pub(crate) unk_4b8: u32,
+        pub(crate) unk_4bc: U64,
+        pub(crate) unk_4c4_padding: Array<0x48, u8>,
+        pub(crate) unk_50c: u32,
+        pub(crate) unk_510: U64,
+        pub(crate) unk_518: U64,
+        pub(crate) unk_520: U64,
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RunVertex<'a> {
+        pub(crate) tag: workqueue::CommandType,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) counter: U64,
+
+        pub(crate) vm_slot: u32,
+        pub(crate) unk_8: u32,
+        pub(crate) notifier: GpuPointer<'a, event::Notifier::ver>,
+        pub(crate) buffer_slot: u32,
+        pub(crate) unk_1c: u32,
+        pub(crate) buffer: GpuPointer<'a, fw::buffer::Info::ver>,
+        pub(crate) scene: GpuPointer<'a, fw::buffer::Scene::ver>,
+        pub(crate) unk_buffer_buf: GpuWeakPointer<[u8]>,
+        pub(crate) unk_34: u32,
+
+        #[ver(G < G14X)]
+        pub(crate) job_params1: JobParameters1::ver<'a>,
+        #[ver(G < G14X)]
+        pub(crate) tiling_params: TilingParameters,
+        #[ver(G >= G14X)]
+        pub(crate) registers: job::raw::RegisterArray,
+
+        pub(crate) tpc: GpuPointer<'a, &'a [u8]>,
+        pub(crate) tpc_size: U64,
+        pub(crate) microsequence: GpuPointer<'a, &'a [u8]>,
+        pub(crate) microsequence_size: u32,
+        pub(crate) fragment_stamp_slot: u32,
+        pub(crate) fragment_stamp_value: EventValue,
+        pub(crate) unk_pointee: u32,
+        pub(crate) unk_pad: u32,
+        pub(crate) job_params2: JobParameters2<'a>,
+        pub(crate) encoder_params: job::raw::EncoderParams,
+        pub(crate) unk_55c: u32,
+        pub(crate) unk_560: u32,
+        pub(crate) sync_grow: u32,
+        pub(crate) unk_568: u32,
+        pub(crate) uses_scratch: u32,
+        pub(crate) meta: job::raw::JobMeta,
+        pub(crate) unk_after_meta: u32,
+        pub(crate) unk_buf_0: U64,
+        pub(crate) unk_buf_8: U64,
+        pub(crate) unk_buf_10: U64,
+        pub(crate) command_time: U64,
+        pub(crate) timestamp_pointers: job::raw::TimestampPointers<'a>,
+        pub(crate) user_timestamp_pointers: job::raw::TimestampPointers<'a>,
+        pub(crate) client_sequence: u8,
+        pub(crate) pad_5d5: Array<3, u8>,
+        pub(crate) unk_5d8: u32,
+        pub(crate) unk_5dc: u8,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_ts: U64,
+
+        #[ver(V >= V13_0B4)]
+        pub(crate) unk_5dd_8: Array<0x1b, u8>,
+    }
+}
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct RunVertex {
+    pub(crate) notifier: Arc<GpuObject<event::Notifier::ver>>,
+    pub(crate) scene: Arc<buffer::Scene::ver>,
+    pub(crate) micro_seq: microseq::MicroSequence,
+    pub(crate) vm_bind: mmu::VmBind,
+    pub(crate) timestamps: Arc<GpuObject<job::RenderTimestamps>>,
+    pub(crate) user_timestamps: job::UserTimestamps,
+}
+
+#[versions(AGX)]
+impl GpuStruct for RunVertex::ver {
+    type Raw<'a> = raw::RunVertex::ver<'a>;
+}
+
+#[versions(AGX)]
+impl workqueue::Command for RunVertex::ver {}
diff --git a/drivers/gpu/drm/asahi/fw/workqueue.rs b/drivers/gpu/drm/asahi/fw/workqueue.rs
new file mode 100644
index 00000000000000..a477c3f1abcd9e
--- /dev/null
+++ b/drivers/gpu/drm/asahi/fw/workqueue.rs
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU work queue firmware structures
+
+use super::event;
+use super::types::*;
+use crate::event::EventValue;
+use crate::{default_zeroed, trivial_gpustruct};
+use kernel::sync::Arc;
+
+#[derive(Debug)]
+#[repr(u32)]
+pub(crate) enum CommandType {
+    RunVertex = 0,
+    RunFragment = 1,
+    #[allow(dead_code)]
+    RunBlitter = 2,
+    RunCompute = 3,
+    Barrier = 4,
+    InitBuffer = 6,
+}
+
+pub(crate) trait Command: GpuStruct + Send + Sync {}
+
+pub(crate) mod raw {
+    use super::*;
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct Barrier {
+        pub(crate) tag: CommandType,
+        pub(crate) wait_stamp: GpuWeakPointer<FwStamp>,
+        pub(crate) wait_value: EventValue,
+        pub(crate) wait_slot: u32,
+        pub(crate) stamp_self: EventValue,
+        pub(crate) uuid: u32,
+        pub(crate) external_barrier: u32,
+        // G14X addition
+        pub(crate) internal_barrier_type: u32,
+        pub(crate) padding: Pad<0x1c>,
+    }
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct GpuContextData {
+        pub(crate) unk_0: u8,
+        pub(crate) unk_1: u8,
+        unk_2: Array<0x2, u8>,
+        pub(crate) unk_4: u8,
+        pub(crate) unk_5: u8,
+        unk_6: Array<0x18, u8>,
+        pub(crate) unk_1e: u8,
+        pub(crate) unk_1f: u8,
+        unk_20: Array<0x3, u8>,
+        pub(crate) unk_23: u8,
+        unk_24: Array<0x1c, u8>,
+    }
+
+    impl Default for GpuContextData {
+        fn default() -> Self {
+            Self {
+                unk_0: 0xff,
+                unk_1: 0xff,
+                unk_2: Default::default(),
+                unk_4: 0,
+                unk_5: 1,
+                unk_6: Default::default(),
+                unk_1e: 0xff,
+                unk_1f: 0,
+                unk_20: Default::default(),
+                unk_23: 2,
+                unk_24: Default::default(),
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct RingState {
+        pub(crate) gpu_doneptr: AtomicU32,
+        __pad0: Pad<0xc>,
+        pub(crate) unk_10: AtomicU32,
+        __pad1: Pad<0xc>,
+        pub(crate) unk_20: AtomicU32,
+        __pad2: Pad<0xc>,
+        pub(crate) gpu_rptr: AtomicU32,
+        __pad3: Pad<0xc>,
+        pub(crate) cpu_wptr: AtomicU32,
+        __pad4: Pad<0xc>,
+        pub(crate) rb_size: u32,
+        __pad5: Pad<0xc>,
+        // This isn't part of the firmware's view of the structure, but it's
+        // here as a debugging hack so we can inspect which ring position the
+        // driver considered complete and freeable.
+        pub(crate) cpu_freeptr: AtomicU32,
+        __pad6: Pad<0xc>,
+    }
+    default_zeroed!(RingState);
+
+    #[derive(Debug, Clone, Copy)]
+    #[repr(C)]
+    pub(crate) struct Priority(
+        pub(crate) u32,
+        pub(crate) u32,
+        pub(crate) U64,
+        pub(crate) u32,
+        pub(crate) u32,
+        pub(crate) u32,
+    );
+
+    pub(crate) const PRIORITY: [Priority; 4] = [
+        Priority(0, 0, U64(0xffff_ffff_ffff_0000), 1, 0, 1),
+        Priority(1, 1, U64(0xffff_ffff_0000_0000), 0, 0, 0),
+        Priority(2, 2, U64(0xffff_0000_0000_0000), 0, 0, 2),
+        Priority(3, 3, U64(0x0000_0000_0000_0000), 0, 0, 3),
+    ];
+
+    impl Default for Priority {
+        fn default() -> Priority {
+            PRIORITY[2]
+        }
+    }
+
+    #[versions(AGX)]
+    #[derive(Debug)]
+    #[repr(C)]
+    pub(crate) struct QueueInfo<'a> {
+        pub(crate) state: GpuPointer<'a, super::RingState>,
+        pub(crate) ring: GpuPointer<'a, &'a [u64]>,
+        pub(crate) notifier_list: GpuPointer<'a, event::NotifierList>,
+        pub(crate) gpu_buf: GpuPointer<'a, &'a [u8]>,
+        pub(crate) gpu_rptr1: AtomicU32,
+        pub(crate) gpu_rptr2: AtomicU32,
+        pub(crate) gpu_rptr3: AtomicU32,
+        pub(crate) event_id: AtomicI32,
+        pub(crate) priority: Priority,
+        pub(crate) unk_4c: i32,
+        pub(crate) uuid: u32,
+        pub(crate) unk_54: i32,
+        pub(crate) unk_58: U64,
+        pub(crate) busy: AtomicU32,
+        pub(crate) __pad: Pad<0x20>,
+        #[ver(V >= V13_2 && G < G14X)]
+        pub(crate) unk_84_0: u32,
+        pub(crate) unk_84_state: AtomicU32,
+        pub(crate) error_count: AtomicU32,
+        pub(crate) unk_8c: u32,
+        pub(crate) unk_90: u32,
+        pub(crate) unk_94: u32,
+        pub(crate) pending: AtomicU32,
+        pub(crate) unk_9c: u32,
+        pub(crate) gpu_context: GpuPointer<'a, super::GpuContextData>,
+        pub(crate) unk_a8: U64,
+        #[ver(V >= V13_2 && G < G14X)]
+        pub(crate) unk_b0: u32,
+    }
+}
+
+trivial_gpustruct!(Barrier);
+trivial_gpustruct!(RingState);
+
+impl Command for Barrier {}
+
+pub(crate) struct GpuContextData {
+    pub(crate) _buffer: Arc<dyn core::any::Any + Send + Sync>,
+}
+impl GpuStruct for GpuContextData {
+    type Raw<'a> = raw::GpuContextData;
+}
+
+#[versions(AGX)]
+#[derive(Debug)]
+pub(crate) struct QueueInfo {
+    pub(crate) state: GpuObject<RingState>,
+    pub(crate) ring: GpuArray<u64>,
+    pub(crate) gpu_buf: GpuArray<u8>,
+    pub(crate) notifier_list: Arc<GpuObject<event::NotifierList>>,
+    pub(crate) gpu_context: Arc<crate::workqueue::GpuContext>,
+}
+
+#[versions(AGX)]
+impl GpuStruct for QueueInfo::ver {
+    type Raw<'a> = raw::QueueInfo::ver<'a>;
+}
diff --git a/drivers/gpu/drm/asahi/gem.rs b/drivers/gpu/drm/asahi/gem.rs
new file mode 100644
index 00000000000000..c641f09829b72d
--- /dev/null
+++ b/drivers/gpu/drm/asahi/gem.rs
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Asahi driver GEM object implementation
+//!
+//! Basic wrappers and adaptations between generic GEM shmem objects and this driver's
+//! view of what a GPU buffer object is. It is in charge of keeping track of all mappings for
+//! each GEM object so we can remove them when a client (File) or a Vm is destroyed, as well as
+//! implementing RtKit buffers on top of GEM objects for firmware use.
+
+use kernel::{
+    drm::{gem, gem::shmem},
+    error::Result,
+    prelude::*,
+    uapi,
+};
+
+use kernel::drm::gem::BaseObject;
+
+use core::ops::Range;
+use core::sync::atomic::{AtomicU64, Ordering};
+
+use crate::{debug::*, driver::AsahiDevice, file, file::DrmFile, mmu, util::*};
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Gem;
+
+/// Represents the inner data of a GEM object for this driver.
+#[pin_data]
+pub(crate) struct DriverObject {
+    /// Whether this is a kernel-created object.
+    kernel: bool,
+    /// Object creation flags.
+    flags: u32,
+    /// ID for debugging.
+    id: u64,
+}
+
+/// Type alias for the shmem GEM object type for this driver.
+pub(crate) type Object = shmem::Object<DriverObject>;
+
+/// Type alias for the SGTable type for this driver.
+pub(crate) type SGTable = shmem::SGTable<DriverObject>;
+
+/// A shared reference to a GEM object for this driver.
+pub(crate) struct ObjectRef {
+    /// The underlying GEM object reference
+    pub(crate) gem: gem::ObjectRef<shmem::Object<DriverObject>>,
+    /// The kernel-side VMap of this object, if needed
+    vmap: Option<shmem::VMap<DriverObject>>,
+}
+
+crate::no_debug!(ObjectRef);
+
+static GEM_ID: AtomicU64 = AtomicU64::new(0);
+
+impl ObjectRef {
+    /// Create a new wrapper for a raw GEM object reference.
+    pub(crate) fn new(gem: gem::ObjectRef<shmem::Object<DriverObject>>) -> ObjectRef {
+        ObjectRef { gem, vmap: None }
+    }
+
+    /// Return the `VMap` for this object, creating it if necessary.
+    pub(crate) fn vmap(&mut self) -> Result<&mut shmem::VMap<DriverObject>> {
+        if self.vmap.is_none() {
+            self.vmap = Some(self.gem.vmap()?);
+        }
+        Ok(self.vmap.as_mut().unwrap())
+    }
+
+    /// Returns the size of the object in bytes.
+    pub(crate) fn size(&self) -> usize {
+        self.gem.size()
+    }
+
+    /// Maps an object into a given `Vm` at any free address within a given range.
+    pub(crate) fn map_into_range(
+        &mut self,
+        vm: &crate::mmu::Vm,
+        range: Range<u64>,
+        alignment: u64,
+        prot: mmu::Prot,
+        guard: bool,
+    ) -> Result<crate::mmu::KernelMapping> {
+        // Only used for kernel objects now
+        if !self.gem.kernel {
+            return Err(EINVAL);
+        }
+        vm.map_in_range(&self.gem, 0..self.gem.size(), alignment, range, prot, guard)
+    }
+
+    /// Maps a range within an object into a given `Vm` at any free address within a given range.
+    pub(crate) fn map_range_into_range(
+        &mut self,
+        vm: &crate::mmu::Vm,
+        obj_range: Range<usize>,
+        range: Range<u64>,
+        alignment: u64,
+        prot: mmu::Prot,
+        guard: bool,
+    ) -> Result<crate::mmu::KernelMapping> {
+        if obj_range.end > self.gem.size() {
+            return Err(EINVAL);
+        }
+        if self.gem.flags & uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_VM_PRIVATE != 0
+            && vm.is_extobj(&self.gem)
+        {
+            return Err(EINVAL);
+        }
+        vm.map_in_range(&self.gem, obj_range, alignment, range, prot, guard)
+    }
+
+    /// Maps an object into a given `Vm` at a specific address.
+    ///
+    /// Returns Err(ENOSPC) if the requested address is already busy.
+    pub(crate) fn map_at(
+        &mut self,
+        vm: &crate::mmu::Vm,
+        addr: u64,
+        prot: mmu::Prot,
+        guard: bool,
+    ) -> Result<crate::mmu::KernelMapping> {
+        if self.gem.flags & uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_VM_PRIVATE != 0
+            && vm.is_extobj(&self.gem)
+        {
+            return Err(EINVAL);
+        }
+
+        vm.map_at(addr, self.gem.size(), &self.gem, prot, guard)
+    }
+}
+
+/// Create a new kernel-owned GEM object.
+pub(crate) fn new_kernel_object(dev: &AsahiDevice, size: usize) -> Result<ObjectRef> {
+    let mut gem = shmem::Object::<DriverObject>::new(dev, align(size, mmu::UAT_PGSZ))?;
+    gem.kernel = true;
+    gem.flags = 0;
+
+    gem.set_exportable(false);
+
+    mod_pr_debug!("DriverObject new kernel object id={}\n", gem.id);
+    Ok(ObjectRef::new(gem.into_ref()))
+}
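+
+// Usage sketch for the helper above (`vm`, `va_range` and `size` are
+// placeholders); this mirrors the RtKit shmem_alloc path in gpu.rs:
+//
+//     let mut obj = gem::new_kernel_object(dev, size)?;
+//     obj.vmap()?;
+//     let mapping = obj.map_into_range(
+//         vm, va_range, mmu::UAT_PGSZ as u64, mmu::PROT_FW_SHARED_RW, true,
+//     )?;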
+
+/// Create a new user-owned GEM object with the given flags.
+pub(crate) fn new_object(
+    dev: &AsahiDevice,
+    size: usize,
+    flags: u32,
+    parent_object: Option<&gem::ObjectRef<Object>>,
+) -> Result<ObjectRef> {
+    if (flags & uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_VM_PRIVATE != 0) != parent_object.is_some()
+    {
+        return Err(EINVAL);
+    }
+
+    let mut gem = shmem::Object::<DriverObject>::new(dev, align(size, mmu::UAT_PGSZ))?;
+    gem.kernel = false;
+    gem.flags = flags;
+
+    gem.set_exportable(parent_object.is_none());
+    gem.set_wc(flags & uapi::drm_asahi_gem_flags_DRM_ASAHI_GEM_WRITEBACK == 0);
+    if let Some(parent) = parent_object {
+        gem.share_dma_resv(&**parent)?;
+    }
+
+    mod_pr_debug!("DriverObject new user object: id={}\n", gem.id);
+    Ok(ObjectRef::new(gem.into_ref()))
+}
+
+/// Look up a GEM object handle for a `File` and return an `ObjectRef` for it.
+pub(crate) fn lookup_handle(file: &DrmFile, handle: u32) -> Result<ObjectRef> {
+    Ok(ObjectRef::new(shmem::Object::lookup_handle(file, handle)?))
+}
+
+impl gem::BaseDriverObject<Object> for DriverObject {
+    /// Callback to create the inner data of a GEM object
+    fn new(_dev: &AsahiDevice, _size: usize) -> impl PinInit<Self, Error> {
+        let id = GEM_ID.fetch_add(1, Ordering::Relaxed);
+        mod_pr_debug!("DriverObject::new id={}\n", id);
+        try_pin_init!(DriverObject {
+            kernel: false,
+            flags: 0,
+            id,
+        })
+    }
+
+    /// Callback to drop all mappings for a GEM object owned by a given `File`
+    fn close(obj: &Object, file: &DrmFile) {
+        mod_pr_debug!("DriverObject::close id={}\n", obj.id);
+        if file::File::unbind_gem_object(file, obj).is_err() {
+            pr_err!("DriverObject::close: Failed to unbind GEM object\n");
+        }
+    }
+}
+
+impl shmem::DriverObject for DriverObject {
+    type Driver = crate::driver::AsahiDriver;
+}
diff --git a/drivers/gpu/drm/asahi/gpu.rs b/drivers/gpu/drm/asahi/gpu.rs
new file mode 100644
index 00000000000000..5b8f2d0217089f
--- /dev/null
+++ b/drivers/gpu/drm/asahi/gpu.rs
@@ -0,0 +1,1578 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Top-level GPU manager
+//!
+//! This module is the root of all GPU firmware management for a given driver instance. It is
+//! responsible for initialization, owning the top-level managers (events, UAT, etc.), and
+//! communicating with the raw RtKit endpoints to send and receive messages to/from the GPU
+//! firmware.
+//!
+//! It is also the point where diverging driver firmware/GPU variants (using the versions macro)
+//! are unified, so that the top level of the driver itself (in `driver`) does not have to concern
+//! itself with version dependence.
+
+use core::any::Any;
+use core::ops::Range;
+use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};
+use core::time::Duration;
+
+use kernel::{
+    c_str, devcoredump,
+    error::code::*,
+    macros::versions,
+    prelude::*,
+    soc::apple::rtkit,
+    sync::{
+        lock::{mutex::MutexBackend, Guard},
+        Arc, Mutex, UniqueArc,
+    },
+    time::{clock, msecs_to_jiffies, Now},
+    types::ForeignOwnable,
+};
+
+use crate::alloc::Allocator;
+use crate::debug::*;
+use crate::driver::{AsahiDevRef, AsahiDevice, AsahiDriver};
+use crate::fw::channels::{ChannelErrorType, PipeType};
+use crate::fw::types::{U32, U64};
+use crate::module_parameters;
+use crate::{
+    alloc, buffer, channel, event, fw, gem, hw, initdata, mem, mmu, queue, regs, workqueue,
+};
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Gpu;
+
+/// Firmware endpoint for init & incoming notifications.
+const EP_FIRMWARE: u8 = 0x20;
+
+/// Doorbell endpoint for work/message submissions.
+const EP_DOORBELL: u8 = 0x21;
+
+/// Initialize the GPU firmware.
+const MSG_INIT: u64 = 0x81 << 48;
+const INIT_DATA_MASK: u64 = (1 << 44) - 1;
+
+/// TX channel doorbell.
+const MSG_TX_DOORBELL: u64 = 0x83 << 48;
+/// Firmware control channel doorbell.
+const MSG_FWCTL: u64 = 0x84 << 48;
+// /// Halt the firmware (?).
+// const MSG_HALT: u64 = 0x85 << 48;
+
+/// Receive channel doorbell notification.
+const MSG_RX_DOORBELL: u64 = 0x42 << 48;
+
+/// Doorbell number for firmware kicks/wakeups.
+const DOORBELL_KICKFW: u64 = 0x10;
+/// Doorbell number for device control channel kicks.
+const DOORBELL_DEVCTRL: u64 = 0x11;
+
+// Upper kernel half VA address ranges.
+/// Private (cached) firmware structure VA range.
+const IOVA_KERN_PRIV_RANGE: Range<u64> = 0xffffffa000000000..0xffffffa600000000;
+/// Private (cached) GPU-RO firmware structure VA range.
+const IOVA_KERN_GPU_RO_RANGE: Range<u64> = 0xffffffa600000000..0xffffffa800000000;
+/// Shared (uncached) firmware structure VA range.
+const IOVA_KERN_SHARED_RANGE: Range<u64> = 0xffffffa800000000..0xffffffaa00000000;
+/// Shared (uncached) read-only firmware structure VA range.
+const IOVA_KERN_SHARED_RO_RANGE: Range<u64> = 0xffffffaa00000000..0xffffffac00000000;
+/// GPU/FW shared structure VA range.
+const IOVA_KERN_GPU_RANGE: Range<u64> = 0xffffffac00000000..0xffffffae00000000;
+/// RtKit shared buffer VA range.
+const IOVA_KERN_RTKIT_RANGE: Range<u64> = 0xffffffae00000000..0xffffffae10000000;
+/// Shared (uncached) timestamp region.
+pub(crate) const IOVA_KERN_TIMESTAMP_RANGE: Range<u64> = 0xffffffae10000000..0xffffffae14000000;
+/// FW MMIO VA range.
+const IOVA_KERN_MMIO_RANGE: Range<u64> = 0xffffffaf00000000..0xffffffb000000000;
+
+/// GPU/FW buffer manager control address (context 0 low)
+pub(crate) const IOVA_KERN_GPU_BUFMGR_LOW: u64 = 0x20_0000_0000;
+/// GPU/FW buffer manager control address (context 0 high)
+pub(crate) const IOVA_KERN_GPU_BUFMGR_HIGH: u64 = 0xffffffaeffff0000;
+
+/// Timeout for entering the halt state after a fault or request.
+const HALT_ENTER_TIMEOUT: Duration = Duration::from_millis(100);
+
+/// Maximum amount of firmware-private memory garbage allowed before collection.
+/// Collection flushes the FW cache and is expensive, so this needs to be
+/// reasonably high.
+const MAX_FW_ALLOC_GARBAGE_BYTES: usize = 16 * 1024 * 1024;
+/// Maximum count of firmware-private memory garbage objects allowed before collection.
+/// This works out to 16K of memory in the garbage list (8 bytes each), which keeps us
+/// within the safe range for kmalloc (on 16K page systems).
+const MAX_FW_ALLOC_GARBAGE_OBJECTS: usize = 2048;
+
+/// Global allocators used for kernel-half structures.
+pub(crate) struct KernelAllocators {
+    pub(crate) private: alloc::DefaultAllocator,
+    pub(crate) shared: alloc::DefaultAllocator,
+    pub(crate) shared_ro: alloc::DefaultAllocator,
+    #[allow(dead_code)]
+    pub(crate) gpu: alloc::DefaultAllocator,
+    pub(crate) gpu_ro: alloc::DefaultAllocator,
+}
+
+/// Receive (GPU->driver) ring buffer channels.
+#[versions(AGX)]
+#[pin_data]
+struct RxChannels {
+    event: channel::EventChannel::ver,
+    fw_log: channel::FwLogChannel,
+    ktrace: channel::KTraceChannel,
+    stats: channel::StatsChannel::ver,
+}
+
+/// GPU work submission pipe channels (driver->GPU).
+#[versions(AGX)]
+struct PipeChannels {
+    pub(crate) vtx: KVec<Pin<KBox<Mutex<channel::PipeChannel::ver>>>>,
+    pub(crate) frag: KVec<Pin<KBox<Mutex<channel::PipeChannel::ver>>>>,
+    pub(crate) comp: KVec<Pin<KBox<Mutex<channel::PipeChannel::ver>>>>,
+}
+
+/// Misc command transmit (driver->GPU) channels.
+#[versions(AGX)]
+#[pin_data]
+struct TxChannels {
+    pub(crate) device_control: channel::DeviceControlChannel::ver,
+}
+
+/// Number of work submission pipes per type, one for each priority level.
+const NUM_PIPES: usize = 4;
+
+/// A generic monotonically incrementing ID used to uniquely identify object instances within the
+/// driver.
+pub(crate) struct ID(AtomicU64);
+
+impl ID {
+    /// Create a new ID counter with a given value.
+    fn new(val: u64) -> ID {
+        ID(AtomicU64::new(val))
+    }
+
+    /// Fetch the next unique ID.
+    pub(crate) fn next(&self) -> u64 {
+        self.0.fetch_add(1, Ordering::Relaxed)
+    }
+}
+
+impl Default for ID {
+    /// IDs default to starting at 2, as 0/1 are considered reserved for the system.
+    fn default() -> Self {
+        Self::new(2)
+    }
+}
+
+/// A guard representing one active submission on the GPU. When dropped, decrements the active
+/// submission count.
+pub(crate) struct OpGuard(Arc<dyn GpuManagerPriv>);
+
+impl Drop for OpGuard {
+    fn drop(&mut self) {
+        self.0.end_op();
+    }
+}
+
+/// Set of global sequence IDs used in the driver.
+#[derive(Default)]
+pub(crate) struct SequenceIDs {
+    /// `File` instance ID.
+    pub(crate) file: ID,
+    /// `Vm` instance ID.
+    pub(crate) vm: ID,
+    /// Submission instance ID.
+    pub(crate) submission: ID,
+    /// `Queue` instance ID.
+    pub(crate) queue: ID,
+}
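+
+// Behavior sketch: each counter hands out monotonically increasing values
+// starting at 2 (0 and 1 are reserved, see `Default for ID` above).
+//
+//     let ids = SequenceIDs::default();
+//     assert_eq!(ids.vm.next(), 2);
+//     assert_eq!(ids.vm.next(), 3);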
+
+/// Top-level GPU manager that owns all the global state relevant to the driver instance.
+#[versions(AGX)]
+#[pin_data]
+pub(crate) struct GpuManager {
+    dev: AsahiDevRef,
+    cfg: &'static hw::HwConfig,
+    dyncfg: hw::DynConfig,
+    pub(crate) initdata: fw::types::GpuObject<fw::initdata::InitData::ver>,
+    uat: mmu::Uat,
+    crashed: AtomicBool,
+    #[pin]
+    alloc: Mutex<KernelAllocators>,
+    io_mappings: KVec<mmu::KernelMapping>,
+    next_mmio_iova: u64,
+    #[pin]
+    rtkit: Mutex<Option<rtkit::RtKit<GpuManager::ver>>>,
+    #[pin]
+    rx_channels: Mutex<RxChannels::ver>,
+    #[pin]
+    tx_channels: Mutex<TxChannels::ver>,
+    #[pin]
+    fwctl_channel: Mutex<channel::FwCtlChannel>,
+    pipes: PipeChannels::ver,
+    event_manager: Arc<event::EventManager>,
+    buffer_mgr: buffer::BufferManager::ver,
+    ids: SequenceIDs,
+    #[allow(clippy::vec_box)]
+    #[pin]
+    garbage_contexts: Mutex<KVec<KBox<fw::types::GpuObject<fw::workqueue::GpuContextData>>>>,
+}
+
+/// Trait used to abstract the firmware/GPU-dependent variants of the GpuManager.
+pub(crate) trait GpuManager: Send + Sync {
+    /// Cast as an Any type.
+    fn as_any(&self) -> &dyn Any;
+    /// Cast Arc<Self> as an Any type.
+    fn arc_as_any(self: Arc<Self>) -> Arc<dyn Any + Sync + Send>;
+    /// Initialize the GPU.
+    fn init(&self) -> Result;
+    /// Update the GPU globals from global info
+    ///
+    /// TODO: Unclear what can and cannot be updated like this.
+    fn update_globals(&self);
+    /// Get a reference to the KernelAllocators.
+    fn alloc(&self) -> Guard<'_, KernelAllocators, MutexBackend>;
+    /// Create a new `Vm` given a unique `File` ID.
+    fn new_vm(&self, kernel_range: Range<u64>) -> Result<mmu::Vm>;
+    /// Bind a `Vm` to an available slot and return the `VmBind`.
+    fn bind_vm(&self, vm: &mmu::Vm) -> Result<mmu::VmBind>;
+    /// Create a new user command queue.
+    fn new_queue(
+        &self,
+        vm: mmu::Vm,
+        ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+        ualloc_priv: Arc<Mutex<alloc::DefaultAllocator>>,
+        priority: u32,
+        usc_exec_base: u64,
+    ) -> Result<KBox<dyn queue::Queue>>;
+    /// Return a reference to the global `SequenceIDs` instance.
+    fn ids(&self) -> &SequenceIDs;
+    /// Kick the firmware (wake it up if asleep).
+    ///
+    /// This is useful to reduce work submission latency: we can ask the firmware to wake up
+    /// while we do the preparatory work for the submission.
+    fn kick_firmware(&self) -> Result;
+    /// Flush the entire firmware cache.
+    ///
+    /// TODO: Does this actually work?
+    fn flush_fw_cache(&self) -> Result;
+    /// Handle a GPU work timeout event.
+    fn handle_timeout(&self, counter: u32, event_slot: i32, unk: u32);
+    /// Handle a GPU fault event.
+    fn handle_fault(&self);
+    /// Handle a channel error event.
+    fn handle_channel_error(
+        &self,
+        error_type: ChannelErrorType,
+        pipe_type: u32,
+        event_slot: u32,
+        event_value: u32,
+    );
+    /// Acknowledge a Buffer grow op.
+    fn ack_grow(&self, buffer_slot: u32, vm_slot: u32, counter: u32);
+    /// Send a firmware control command (secure cache flush).
+    fn fwctl(&self, msg: fw::channels::FwCtlMsg) -> Result;
+    /// Get the static GPU configuration for this SoC.
+    fn get_cfg(&self) -> &'static hw::HwConfig;
+    /// Get the dynamic GPU configuration for this SoC.
+    fn get_dyncfg(&self) -> &hw::DynConfig;
+    /// Register an unused context as garbage
+    fn free_context(&self, data: KBox<fw::types::GpuObject<fw::workqueue::GpuContextData>>);
+    /// Check whether the GPU is crashed
+    fn is_crashed(&self) -> bool;
+    /// Map a BO as a timestamp buffer
+    fn map_timestamp_buffer(
+        &self,
+        bo: gem::ObjectRef,
+        range: Range<usize>,
+    ) -> Result<mmu::KernelMapping>;
+}
+
+/// Private generic trait for functions that don't need to escape this module.
+trait GpuManagerPriv {
+    /// Decrement the pending submission counter.
+    fn end_op(&self);
+}
+
+pub(crate) struct RtkitObject {
+    obj: gem::ObjectRef,
+    mapping: mmu::KernelMapping,
+}
+
+impl rtkit::Buffer for RtkitObject {
+    fn iova(&self) -> Result<usize> {
+        Ok(self.mapping.iova() as usize)
+    }
+    fn buf(&mut self) -> Result<&mut [u8]> {
+        let vmap = self.obj.vmap()?;
+        Ok(vmap.as_mut_slice())
+    }
+}
+
+#[versions(AGX)]
+#[vtable]
+impl rtkit::Operations for GpuManager::ver {
+    type Data = Arc<GpuManager::ver>;
+    type Buffer = RtkitObject;
+
+    fn recv_message(data: <Self::Data as ForeignOwnable>::Borrowed<'_>, ep: u8, msg: u64) {
+        let dev = &data.dev;
+        //dev_info!(dev.as_ref(), "RtKit message: {:#x}:{:#x}\n", ep, msg);
+
+        if ep != EP_FIRMWARE || msg != MSG_RX_DOORBELL {
+            dev_err!(dev.as_ref(), "Unknown message: {:#x}:{:#x}\n", ep, msg);
+            return;
+        }
+
+        let mut ch = data.rx_channels.lock();
+
+        ch.fw_log.poll();
+        ch.ktrace.poll();
+        ch.stats.poll();
+        ch.event.poll();
+    }
+
+    fn crashed(data: <Self::Data as ForeignOwnable>::Borrowed<'_>, crashlog: Option<&[u8]>) {
+        let dev = &data.dev;
+
+        data.crashed.store(true, Ordering::Relaxed);
+
+        #[cfg(CONFIG_DEV_COREDUMP)]
+        if let Err(e) = data.generate_crashdump(crashlog) {
+            dev_err!(dev.as_ref(), "Could not generate crashdump: {:?}\n", e);
+        }
+
+        if debug_enabled(DebugFlags::OopsOnGpuCrash) {
+            panic!("GPU firmware crashed");
+        } else {
+            dev_err!(dev.as_ref(), "GPU firmware crashed, failing all jobs\n");
+            data.event_manager.fail_all(workqueue::WorkError::NoDevice);
+        }
+    }
+
+    fn shmem_alloc(
+        data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+        size: usize,
+    ) -> Result<Self::Buffer> {
+        let dev = &data.dev;
+        mod_dev_dbg!(dev, "shmem_alloc() {:#x} bytes\n", size);
+
+        let mut obj = gem::new_kernel_object(dev, size)?;
+        obj.vmap()?;
+        let mapping = obj.map_into_range(
+            data.uat.kernel_vm(),
+            IOVA_KERN_RTKIT_RANGE,
+            mmu::UAT_PGSZ as u64,
+            mmu::PROT_FW_SHARED_RW,
+            true,
+        )?;
+        mod_dev_dbg!(dev, "shmem_alloc() -> VA {:#x}\n", mapping.iova());
+        Ok(RtkitObject { obj, mapping })
+    }
+}
+
+#[versions(AGX)]
+impl GpuManager::ver {
+    /// Create a new GpuManager of this version/GPU combination.
+    #[inline(never)]
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        res: &regs::Resources,
+        cfg: &'static hw::HwConfig,
+    ) -> Result<Arc<GpuManager::ver>> {
+        let uat = Self::make_uat(dev, cfg)?;
+        let dyncfg = Self::make_dyncfg(dev, res, cfg, &uat)?;
+
+        let mut alloc = KernelAllocators {
+            private: alloc::DefaultAllocator::new(
+                dev,
+                uat.kernel_vm(),
+                IOVA_KERN_PRIV_RANGE,
+                0x80,
+                mmu::PROT_FW_PRIV_RW,
+                1024 * 1024,
+                true,
+                fmt!("Kernel Private"),
+                true,
+            )?,
+            shared: alloc::DefaultAllocator::new(
+                dev,
+                uat.kernel_vm(),
+                IOVA_KERN_SHARED_RANGE,
+                0x80,
+                mmu::PROT_FW_SHARED_RW,
+                1024 * 1024,
+                true,
+                fmt!("Kernel Shared"),
+                false,
+            )?,
+            shared_ro: alloc::DefaultAllocator::new(
+                dev,
+                uat.kernel_vm(),
+                IOVA_KERN_SHARED_RO_RANGE,
+                0x80,
+                mmu::PROT_FW_SHARED_RO,
+                64 * 1024,
+                true,
+                fmt!("Kernel RO Shared"),
+                false,
+            )?,
+            gpu: alloc::DefaultAllocator::new(
+                dev,
+                uat.kernel_vm(),
+                IOVA_KERN_GPU_RANGE,
+                0x80,
+                mmu::PROT_GPU_FW_SHARED_RW,
+                64 * 1024,
+                true,
+                fmt!("Kernel GPU Shared"),
+                false,
+            )?,
+            gpu_ro: alloc::DefaultAllocator::new(
+                dev,
+                uat.kernel_vm(),
+                IOVA_KERN_GPU_RO_RANGE,
+                0x80,
+                mmu::PROT_GPU_RO_FW_PRIV_RW,
+                1024 * 1024,
+                true,
+                fmt!("Kernel GPU RO Shared"),
+                true,
+            )?,
+        };
+
+        let event_manager = Self::make_event_manager(&mut alloc)?;
+        let mut initdata = Self::make_initdata(dev, cfg, &dyncfg, &mut alloc)?;
+
+        initdata.runtime_pointers.buffer_mgr_ctl_low_mapping =
+            Some(initdata.runtime_pointers.buffer_mgr_ctl.map_at(
+                uat.kernel_lower_vm(),
+                IOVA_KERN_GPU_BUFMGR_LOW,
+                mmu::PROT_GPU_SHARED_RW,
+                false,
+            )?);
+        initdata.runtime_pointers.buffer_mgr_ctl_high_mapping =
+            Some(initdata.runtime_pointers.buffer_mgr_ctl.map_at(
+                uat.kernel_vm(),
+                IOVA_KERN_GPU_BUFMGR_HIGH,
+                mmu::PROT_FW_SHARED_RW,
+                false,
+            )?);
+
+        let mut mgr = Self::make_mgr(dev, cfg, dyncfg, uat, alloc, event_manager, initdata)?;
+
+        {
+            let fwctl = mgr.fwctl_channel.lock();
+            let p_fwctl = fwctl.to_raw();
+            core::mem::drop(fwctl);
+
+            mgr.as_mut()
+                .initdata_mut()
+                .fw_status
+                .with_mut(|raw, _inner| {
+                    raw.fwctl_channel = p_fwctl;
+                });
+        }
+
+        {
+            let txc = mgr.tx_channels.lock();
+            let p_device_control = txc.device_control.to_raw();
+            core::mem::drop(txc);
+
+            let rxc = mgr.rx_channels.lock();
+            let p_event = rxc.event.to_raw();
+            let p_fw_log = rxc.fw_log.to_raw();
+            let p_ktrace = rxc.ktrace.to_raw();
+            let p_stats = rxc.stats.to_raw();
+            let p_fwlog_buf = rxc.fw_log.get_buf();
+            core::mem::drop(rxc);
+
+            mgr.as_mut()
+                .initdata_mut()
+                .runtime_pointers
+                .with_mut(|raw, _inner| {
+                    raw.device_control = p_device_control;
+                    raw.event = p_event;
+                    raw.fw_log = p_fw_log;
+                    raw.ktrace = p_ktrace;
+                    raw.stats = p_stats;
+                    raw.fwlog_buf = Some(p_fwlog_buf);
+                });
+        }
+
+        let mut p_pipes: KVec<fw::initdata::raw::PipeChannels::ver> = KVec::new();
+
+        for ((v, f), c) in mgr
+            .pipes
+            .vtx
+            .iter()
+            .zip(&mgr.pipes.frag)
+            .zip(&mgr.pipes.comp)
+        {
+            p_pipes.push(
+                fw::initdata::raw::PipeChannels::ver {
+                    vtx: v.lock().to_raw(),
+                    frag: f.lock().to_raw(),
+                    comp: c.lock().to_raw(),
+                },
+                GFP_KERNEL,
+            )?;
+        }
+
+        mgr.as_mut()
+            .initdata_mut()
+            .runtime_pointers
+            .with_mut(|raw, _inner| {
+                for (i, p) in p_pipes.into_iter().enumerate() {
+                    raw.pipes[i].vtx = p.vtx;
+                    raw.pipes[i].frag = p.frag;
+                    raw.pipes[i].comp = p.comp;
+                }
+            });
+
+        for (i, map) in cfg.io_mappings.iter().enumerate() {
+            if let Some(map) = map.as_ref() {
+                Self::iomap(&mut mgr, cfg, i, map)?;
+            }
+        }
+
+        #[ver(V >= V13_0B4)]
+        if let Some(base) = cfg.sram_base {
+            let size = cfg.sram_size.unwrap();
+            let iova = mgr.as_mut().alloc_mmio_iova(size);
+
+            let mapping = mgr
+                .uat
+                .kernel_vm()
+                .map_io(iova, base, size, mmu::PROT_FW_SHARED_RW)?;
+
+            mgr.as_mut()
+                .initdata_mut()
+                .runtime_pointers
+                .hwdata_b
+                .with_mut(|raw, _| {
+                    raw.sgx_sram_ptr = U64(mapping.iova());
+                });
+
+            mgr.as_mut().io_mappings_mut().push(mapping, GFP_KERNEL)?;
+        }
+
+        let mgr = Arc::from(mgr);
+
+        let rtkit = rtkit::RtKit::<GpuManager::ver>::new(dev.as_ref(), None, 0, mgr.clone())?;
+
+        *mgr.rtkit.lock() = Some(rtkit);
+
+        {
+            let mut rxc = mgr.rx_channels.lock();
+            rxc.event.set_manager(mgr.clone());
+        }
+
+        Ok(mgr)
+    }
+
+    /// Return a mutable reference to the initdata member
+    fn initdata_mut(
+        self: Pin<&mut Self>,
+    ) -> &mut fw::types::GpuObject<fw::initdata::InitData::ver> {
+        // SAFETY: initdata does not require structural pinning.
+        unsafe { &mut self.get_unchecked_mut().initdata }
+    }
+
+    /// Return a mutable reference to the io_mappings member
+    fn io_mappings_mut(self: Pin<&mut Self>) -> &mut KVec<mmu::KernelMapping> {
+        // SAFETY: io_mappings does not require structural pinning.
+        unsafe { &mut self.get_unchecked_mut().io_mappings }
+    }
+
+    /// Allocate an MMIO iova range
+    fn alloc_mmio_iova(self: Pin<&mut Self>, size: usize) -> u64 {
+        // SAFETY: next_mmio_iova does not require structural pinning.
+        let next_ref = unsafe { &mut self.get_unchecked_mut().next_mmio_iova };
+
+        let addr = *next_ref;
+        let next = addr + (size + mmu::UAT_PGSZ) as u64;
+
+        assert!(next <= IOVA_KERN_MMIO_RANGE.end);
+
+        *next_ref = next;
+
+        addr
+    }
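+
+    // Illustrative arithmetic, assuming a 16 KiB UAT page (UAT_PGSZ = 0x4000):
+    // two consecutive calls with `size = 0x8000` return iovas 0xc000 apart, so
+    // each mapping is followed by one unmapped guard page before the next
+    // allocation begins.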
+
+    /// Build the entire GPU InitData structure tree and return it as a boxed GpuObject.
+    fn make_initdata(
+        dev: &AsahiDevice,
+        cfg: &'static hw::HwConfig,
+        dyncfg: &hw::DynConfig,
+        alloc: &mut KernelAllocators,
+    ) -> Result<KBox<fw::types::GpuObject<fw::initdata::InitData::ver>>> {
+        let mut builder = initdata::InitDataBuilder::ver::new(dev, alloc, cfg, dyncfg);
+        builder.build()
+    }
+
+    /// Create a fresh boxed Uat instance.
+    ///
+    /// Force disable inlining to avoid blowing up the stack.
+    #[inline(never)]
+    fn make_uat(dev: &AsahiDevice, cfg: &'static hw::HwConfig) -> Result<KBox<mmu::Uat>> {
+        // G14X has a new thing in the Scene structure that unfortunately requires
+        // write access from user contexts. Hopefully it's not security-sensitive.
+        #[ver(G >= G14X)]
+        let map_kernel_to_user = true;
+        #[ver(G < G14X)]
+        let map_kernel_to_user = false;
+
+        Ok(KBox::new(
+            mmu::Uat::new(dev, cfg, map_kernel_to_user)?,
+            GFP_KERNEL,
+        )?)
+    }
+
+    /// Actually create the final GpuManager instance, as a UniqueArc.
+    ///
+    /// Force disable inlining to avoid blowing up the stack.
+    #[inline(never)]
+    fn make_mgr(
+        dev: &AsahiDevice,
+        cfg: &'static hw::HwConfig,
+        dyncfg: KBox<hw::DynConfig>,
+        uat: KBox<mmu::Uat>,
+        mut alloc: KernelAllocators,
+        event_manager: Arc<event::EventManager>,
+        initdata: KBox<fw::types::GpuObject<fw::initdata::InitData::ver>>,
+    ) -> Result<Pin<UniqueArc<GpuManager::ver>>> {
+        let mut pipes = PipeChannels::ver {
+            vtx: KVec::new(),
+            frag: KVec::new(),
+            comp: KVec::new(),
+        };
+
+        for _i in 0..NUM_PIPES {
+            pipes.vtx.push(
+                KBox::pin_init(
+                    Mutex::new_named(
+                        channel::PipeChannel::ver::new(dev, &mut alloc)?,
+                        c_str!("pipe_vtx"),
+                    ),
+                    GFP_KERNEL,
+                )?,
+                GFP_KERNEL,
+            )?;
+            pipes.frag.push(
+                KBox::pin_init(
+                    Mutex::new_named(
+                        channel::PipeChannel::ver::new(dev, &mut alloc)?,
+                        c_str!("pipe_frag"),
+                    ),
+                    GFP_KERNEL,
+                )?,
+                GFP_KERNEL,
+            )?;
+            pipes.comp.push(
+                KBox::pin_init(
+                    Mutex::new_named(
+                        channel::PipeChannel::ver::new(dev, &mut alloc)?,
+                        c_str!("pipe_comp"),
+                    ),
+                    GFP_KERNEL,
+                )?,
+                GFP_KERNEL,
+            )?;
+        }
+
+        let fwctl_channel = channel::FwCtlChannel::new(dev, &mut alloc)?;
+
+        let buffer_mgr = buffer::BufferManager::ver::new()?;
+        let event_manager_clone = event_manager.clone();
+        let buffer_mgr_clone = buffer_mgr.clone();
+        let alloc_ref = &mut alloc;
+        let rx_channels = KBox::init(
+            try_init!(RxChannels::ver {
+                event: channel::EventChannel::ver::new(
+                    dev,
+                    alloc_ref,
+                    event_manager_clone,
+                    buffer_mgr_clone,
+                )?,
+                fw_log: channel::FwLogChannel::new(dev, alloc_ref)?,
+                ktrace: channel::KTraceChannel::new(dev, alloc_ref)?,
+                stats: channel::StatsChannel::ver::new(dev, alloc_ref)?,
+            }),
+            GFP_KERNEL,
+        )?;
+
+        let alloc_ref = &mut alloc;
+        let tx_channels = KBox::init(
+            try_init!(TxChannels::ver {
+                device_control: channel::DeviceControlChannel::ver::new(dev, alloc_ref)?,
+            }),
+            GFP_KERNEL,
+        )?;
+
+        let x = UniqueArc::pin_init(
+            try_pin_init!(GpuManager::ver {
+                dev: dev.into(),
+                cfg,
+                dyncfg: KBox::<hw::DynConfig>::into_inner(dyncfg),
+                initdata: KBox::<fw::types::GpuObject<fw::initdata::InitData::ver>>::into_inner(initdata),
+                uat: KBox::<mmu::Uat>::into_inner(uat),
+                io_mappings: KVec::new(),
+                next_mmio_iova: IOVA_KERN_MMIO_RANGE.start,
+                rtkit <- Mutex::new_named(None, c_str!("rtkit")),
+                crashed: AtomicBool::new(false),
+                event_manager,
+                alloc <- Mutex::new_named(alloc, c_str!("alloc")),
+                fwctl_channel <- Mutex::new_named(fwctl_channel, c_str!("fwctl_channel")),
+                rx_channels <- Mutex::new_named(KBox::<RxChannels::ver>::into_inner(rx_channels), c_str!("rx_channels")),
+                tx_channels <- Mutex::new_named(KBox::<TxChannels::ver>::into_inner(tx_channels), c_str!("tx_channels")),
+                pipes,
+                buffer_mgr,
+                ids: Default::default(),
+                garbage_contexts <- Mutex::new_named(KVec::new(), c_str!("garbage_contexts")),
+            }),
+            GFP_KERNEL,
+        )?;
+
+        Ok(x)
+    }
+
+    /// Fetch and validate the GPU dynamic configuration from the device tree and hardware.
+    ///
+    /// Force disable inlining to avoid blowing up the stack.
+    #[inline(never)]
+    fn make_dyncfg(
+        dev: &AsahiDevice,
+        res: &regs::Resources,
+        cfg: &'static hw::HwConfig,
+        uat: &mmu::Uat,
+    ) -> Result<KBox<hw::DynConfig>> {
+        let gpu_id = res.get_gpu_id()?;
+
+        dev_info!(dev.as_ref(), "GPU Information:\n");
+        dev_info!(
+            dev.as_ref(),
+            "  Type: {:?}{:?}\n",
+            gpu_id.gpu_gen,
+            gpu_id.gpu_variant
+        );
+        dev_info!(dev.as_ref(), "  Clusters: {}\n", gpu_id.num_clusters);
+        dev_info!(
+            dev.as_ref(),
+            "  Cores: {} ({})\n",
+            gpu_id.num_cores,
+            gpu_id.num_cores * gpu_id.num_clusters
+        );
+        dev_info!(
+            dev.as_ref(),
+            "  Frags: {} ({})\n",
+            gpu_id.num_frags,
+            gpu_id.num_frags * gpu_id.num_clusters
+        );
+        dev_info!(
+            dev.as_ref(),
+            "  GPs: {} ({})\n",
+            gpu_id.num_gps,
+            gpu_id.num_gps * gpu_id.num_clusters
+        );
+        dev_info!(dev.as_ref(), "  Core masks: {:#x?}\n", gpu_id.core_masks);
+        dev_info!(
+            dev.as_ref(),
+            "  Active cores: {}\n",
+            gpu_id.total_active_cores
+        );
+
+        dev_info!(dev.as_ref(), "Getting configuration from device tree...\n");
+        let pwr_cfg = hw::PwrConfig::load(dev, cfg)?;
+        dev_info!(dev.as_ref(), "Dynamic configuration fetched\n");
+
+        if gpu_id.gpu_gen != cfg.gpu_gen || gpu_id.gpu_variant != cfg.gpu_variant {
+            dev_err!(
+                dev.as_ref(),
+                "GPU type mismatch (expected {:?}{:?}, found {:?}{:?})\n",
+                cfg.gpu_gen,
+                cfg.gpu_variant,
+                gpu_id.gpu_gen,
+                gpu_id.gpu_variant
+            );
+            return Err(EIO);
+        }
+        if gpu_id.num_clusters > cfg.max_num_clusters {
+            dev_err!(
+                dev.as_ref(),
+                "Too many clusters ({} > {})\n",
+                gpu_id.num_clusters,
+                cfg.max_num_clusters
+            );
+            return Err(EIO);
+        }
+        if gpu_id.num_cores > cfg.max_num_cores {
+            dev_err!(
+                dev.as_ref(),
+                "Too many cores ({} > {})\n",
+                gpu_id.num_cores,
+                cfg.max_num_cores
+            );
+            return Err(EIO);
+        }
+        if gpu_id.num_frags > cfg.max_num_frags {
+            dev_err!(
+                dev.as_ref(),
+                "Too many frags ({} > {})\n",
+                gpu_id.num_frags,
+                cfg.max_num_frags
+            );
+            return Err(EIO);
+        }
+        if gpu_id.num_gps > cfg.max_num_gps {
+            dev_err!(
+                dev.as_ref(),
+                "Too many GPs ({} > {})\n",
+                gpu_id.num_gps,
+                cfg.max_num_gps
+            );
+            return Err(EIO);
+        }
+
+        let node = dev.as_ref().of_node().ok_or(EIO)?;
+
+        let hw_data_a: KVVec<u8>;
+        let hw_data_b: KVVec<u8>;
+        let hw_globals: KVVec<u8>;
+        if *module_parameters::starlight_debug.get() != 0
+            && dev.as_ref().property_present(c_str!("apple,hw-cal-a"))
+            && dev.as_ref().property_present(c_str!("apple,hw-cal-b"))
+            && dev
+                .as_ref()
+                .property_present(c_str!("apple,hw-cal-globals"))
+        {
+            hw_data_a = node.get_property(c_str!("apple,hw-cal-a"))?;
+            hw_data_b = node.get_property(c_str!("apple,hw-cal-b"))?;
+            hw_globals = node.get_property(c_str!("apple,hw-cal-globals"))?;
+        } else {
+            hw_data_a = KVVec::new();
+            hw_data_b = KVVec::new();
+            hw_globals = KVVec::new();
+        }
+
+        Ok(KBox::new(
+            hw::DynConfig {
+                pwr: pwr_cfg,
+                uat_ttb_base: uat.ttb_base(),
+                id: gpu_id,
+                firmware_version: node.get_property(c_str!("apple,firmware-version"))?,
+                hw_data_a,
+                hw_data_b,
+                hw_globals,
+            },
+            GFP_KERNEL,
+        )?)
+    }
+
+    /// Create the global GPU event manager, and return an `Arc<>` to it.
+    fn make_event_manager(alloc: &mut KernelAllocators) -> Result<Arc<event::EventManager>> {
+        Ok(Arc::new(event::EventManager::new(alloc)?, GFP_KERNEL)?)
+    }
+
+    /// Create a new MMIO mapping and add it to the mappings list in initdata at the specified
+    /// index.
+    fn iomap(
+        this: &mut Pin<UniqueArc<GpuManager::ver>>,
+        cfg: &'static hw::HwConfig,
+        index: usize,
+        map: &hw::IOMapping,
+    ) -> Result {
+        let dies = if map.per_die {
+            cfg.num_dies as usize
+        } else {
+            1
+        };
+
+        let off = map.base & mmu::UAT_PGMSK;
+        let base = map.base - off;
+        let end = (map.base + map.size + mmu::UAT_PGMSK) & !mmu::UAT_PGMSK;
+        let map_size = end - base;
+
+        // Array mappings must be aligned
+        assert!((off == 0 && map_size == map.size) || (map.count == 1 && !map.per_die));
+        assert!(map.count > 0);
+
+        let iova = this.as_mut().alloc_mmio_iova(map_size * map.count * dies);
+        let mut cur_iova = iova;
+
+        for die in 0..dies {
+            for i in 0..map.count {
+                let phys_off = die * 0x20_0000_0000 + i * map.stride;
+
+                let mapping = this.uat.kernel_vm().map_io(
+                    cur_iova,
+                    base + phys_off,
+                    map_size,
+                    if map.writable {
+                        mmu::PROT_FW_MMIO_RW
+                    } else {
+                        mmu::PROT_FW_MMIO_RO
+                    },
+                )?;
+
+                this.as_mut().io_mappings_mut().push(mapping, GFP_KERNEL)?;
+                cur_iova += map_size as u64;
+            }
+        }
+
+        this.as_mut()
+            .initdata_mut()
+            .runtime_pointers
+            .hwdata_b
+            .with_mut(|raw, _| {
+                raw.io_mappings[index] = fw::initdata::raw::IOMapping {
+                    phys_addr: U64(map.base as u64),
+                    virt_addr: U64(iova + off as u64),
+                    total_size: (map.size * map.count * dies) as u32,
+                    element_size: map.size as u32,
+                    readwrite: U64(map.writable as u64),
+                };
+            });
+
+        Ok(())
+    }
+
+    /// Mark work associated with currently in-progress event slots as failed, after a fault or
+    /// timeout.
+    fn mark_pending_events(&self, culprit_slot: Option<u32>, error: workqueue::WorkError) {
+        dev_err!(self.dev.as_ref(), "  Pending events:\n");
+
+        self.initdata.globals.with(|raw, _inner| {
+            for (index, i) in raw.pending_stamps.iter().enumerate() {
+                let info = i.info.load(Ordering::Relaxed);
+                let wait_value = i.wait_value.load(Ordering::Relaxed);
+
+                if info & 1 != 0 {
+                    #[ver(V >= V13_5)]
+                    let slot = (info >> 4) & 0x7f;
+                    #[ver(V < V13_5)]
+                    let slot = (info >> 3) & 0x7f;
+                    #[ver(V >= V13_5)]
+                    let flags = info & 0xf;
+                    #[ver(V < V13_5)]
+                    let flags = info & 0x7;
+                    dev_err!(
+                        self.dev.as_ref(),
+                        "    [{}:{}] flags={} value={:#x}\n",
+                        index,
+                        slot,
+                        flags,
+                        wait_value
+                    );
+                    let error = if culprit_slot.is_some() && culprit_slot != Some(slot) {
+                        workqueue::WorkError::Killed
+                    } else {
+                        error
+                    };
+                    self.event_manager.mark_error(slot, wait_value, error);
+                    i.info.store(0, Ordering::Relaxed);
+                    i.wait_value.store(0, Ordering::Relaxed);
+                }
+            }
+        });
+    }
+
+    /// Fetch the GPU MMU fault information from the hardware registers.
+    fn get_fault_info(&self) -> Option<regs::FaultInfo> {
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(self.dev.as_ref().get_drvdata()).data };
+
+        let res = &data.resources;
+
+        let info = res.get_fault_info(self.cfg);
+        if info.is_some() {
+            dev_err!(
+                self.dev.as_ref(),
+                "  Fault info: {:#x?}\n",
+                info.as_ref().unwrap()
+            );
+        }
+        info
+    }
+
+    /// Resume the GPU firmware after it halts (due to a timeout, fault, or request).
+    fn recover(&self) {
+        self.initdata.fw_status.with(|raw, _inner| {
+            let halt_count = raw.flags.halt_count.load(Ordering::Relaxed);
+            let mut halted = raw.flags.halted.load(Ordering::Relaxed);
+            dev_err!(self.dev.as_ref(), "  Halt count: {}\n", halt_count);
+            dev_err!(self.dev.as_ref(), "  Halted: {}\n", halted);
+
+            if halted == 0 {
+                let start = clock::KernelTime::now();
+                while start.elapsed() < HALT_ENTER_TIMEOUT {
+                    halted = raw.flags.halted.load(Ordering::Relaxed);
+                    if halted != 0 {
+                        break;
+                    }
+                    mem::sync();
+                }
+                halted = raw.flags.halted.load(Ordering::Relaxed);
+            }
+
+            if debug_enabled(DebugFlags::NoGpuRecovery) {
+                dev_crit!(
+                    self.dev.as_ref(),
+                    "  GPU recovery is disabled, wedging forever!\n"
+                );
+            } else if halted != 0 {
+                dev_err!(self.dev.as_ref(), "  Attempting recovery...\n");
+                raw.flags.halted.store(0, Ordering::SeqCst);
+                raw.flags.resume.store(1, Ordering::SeqCst);
+            } else {
+                dev_err!(self.dev.as_ref(), "  Cannot recover.\n");
+            }
+        });
+    }
+
+    /// Return the packed GPU enabled core masks.
+    // Only used for some versions
+    #[allow(dead_code)]
+    pub(crate) fn core_masks_packed(&self) -> &[u32] {
+        self.dyncfg.id.core_masks_packed.as_slice()
+    }
+
+    /// Kick a submission pipe for a submitted job to tell the firmware to start processing it.
+    pub(crate) fn run_job(&self, job: workqueue::JobSubmission::ver<'_>) -> Result {
+        mod_dev_dbg!(self.dev, "GPU: run_job\n");
+
+        let pipe_type = job.pipe_type();
+        mod_dev_dbg!(self.dev, "GPU: run_job: pipe_type={:?}\n", pipe_type);
+
+        let pipes = match pipe_type {
+            PipeType::Vertex => &self.pipes.vtx,
+            PipeType::Fragment => &self.pipes.frag,
+            PipeType::Compute => &self.pipes.comp,
+        };
+
+        let index: usize = job.priority() as usize;
+        let mut pipe = pipes.get(index).ok_or(EIO)?.lock();
+
+        mod_dev_dbg!(self.dev, "GPU: run_job: run()\n");
+        job.run(&mut pipe);
+        mod_dev_dbg!(self.dev, "GPU: run_job: ring doorbell\n");
+
+        let mut guard = self.rtkit.lock();
+        let rtk = guard.as_mut().unwrap();
+        rtk.send_message(
+            EP_DOORBELL,
+            MSG_TX_DOORBELL | pipe_type as u64 | ((index as u64) << 2),
+        )?;
+        mod_dev_dbg!(self.dev, "GPU: run_job: done\n");
+
+        Ok(())
+    }
+
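+    /// Begin a GPU operation, incrementing the pending submission count and kicking the
+    /// firmware. Returns an `OpGuard` tracking the in-flight operation.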
+    pub(crate) fn start_op(self: &Arc<GpuManager::ver>) -> Result<OpGuard> {
+        if self.is_crashed() {
+            return Err(ENODEV);
+        }
+
+        let val = self
+            .initdata
+            .globals
+            .with(|raw, _inner| raw.pending_submissions.fetch_add(1, Ordering::Acquire));
+
+        mod_dev_dbg!(self.dev, "OP start (pending: {})\n", val + 1);
+        self.kick_firmware()?;
+        Ok(OpGuard(self.clone()))
+    }
+
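+    /// Ask the firmware to destroy a GPU context via a `DestroyContext` command, then collect
+    /// any allocator garbage that was already pending (the command implies a cache flush).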
+    fn invalidate_context(
+        &self,
+        context: &fw::types::GpuObject<fw::workqueue::GpuContextData>,
+    ) -> Result {
+        mod_dev_dbg!(
+            self.dev,
+            "Invalidating GPU context @ {:?}\n",
+            context.weak_pointer()
+        );
+
+        if self.is_crashed() {
+            return Err(ENODEV);
+        }
+
+        let mut guard = self.alloc.lock();
+        let (garbage_count, _) = guard.private.garbage();
+        let (garbage_count_gpuro, _) = guard.gpu_ro.garbage();
+
+        let dc = context.with(
+            |raw, _inner| fw::channels::DeviceControlMsg::ver::DestroyContext {
+                unk_4: 0,
+                ctx_23: raw.unk_23,
+                #[ver(V < V13_3)]
+                __pad0: Default::default(),
+                unk_c: U32(0),
+                unk_10: U32(0),
+                ctx_0: raw.unk_0,
+                ctx_1: raw.unk_1,
+                ctx_4: raw.unk_4,
+                #[ver(V < V13_3)]
+                __pad1: Default::default(),
+                #[ver(V < V13_3)]
+                unk_18: 0,
+                gpu_context: Some(context.weak_pointer()),
+                __pad2: Default::default(),
+            },
+        );
+
+        mod_dev_dbg!(self.dev, "Context invalidation command: {:?}\n", &dc);
+
+        let mut txch = self.tx_channels.lock();
+
+        let token = txch.device_control.send(&dc);
+
+        {
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            rtk.send_message(EP_DOORBELL, MSG_TX_DOORBELL | DOORBELL_DEVCTRL)?;
+        }
+
+        txch.device_control.wait_for(token)?;
+
+        mod_dev_dbg!(
+            self.dev,
+            "GPU context invalidated: {:?}\n",
+            context.weak_pointer()
+        );
+
+        // The invalidation does a cache flush, so it is okay to collect garbage
+        guard.private.collect_garbage(garbage_count);
+        guard.gpu_ro.collect_garbage(garbage_count_gpuro);
+
+        Ok(())
+    }
+
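+    /// Generate a device coredump containing the kernel-mapped firmware pages, the static and
+    /// dynamic GPU configuration, and (when available) the firmware crashlog.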
+    #[cfg(CONFIG_DEV_COREDUMP)]
+    fn generate_crashdump(&self, crashlog: Option<&[u8]>) -> Result {
+        // Lock the allocators, to block kernel/FW memory mutations (mostly)
+        let kalloc = self.alloc();
+        let pages = self.uat.dump_kernel_pages()?;
+        core::mem::drop(kalloc);
+
+        let mut crashdump = crate::crashdump::CrashDumpBuilder::new(pages)?;
+        let initdata_addr = self.initdata.gpu_va().get();
+        crashdump.add_agx_info(self.cfg, &self.dyncfg, initdata_addr)?;
+        if let Some(crashlog) = crashlog {
+            crashdump.add_crashlog(crashlog)?;
+        }
+        let crashdump = KBox::new(crashdump.finalize()?, GFP_KERNEL)?;
+
+        devcoredump::dev_coredump(
+            self.dev.as_ref(),
+            &crate::THIS_MODULE,
+            crashdump,
+            GFP_KERNEL,
+            msecs_to_jiffies(60 * 60 * 1000),
+        );
+
+        Ok(())
+    }
+}
+
+#[versions(AGX)]
+impl GpuManager for GpuManager::ver {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn arc_as_any(self: Arc<Self>) -> Arc<dyn Any + Sync + Send> {
+        self as Arc<dyn Any + Sync + Send>
+    }
+
+    fn init(&self) -> Result {
+        self.tx_channels.lock().device_control.send(
+            &fw::channels::DeviceControlMsg::ver::Initialize(Default::default()),
+        );
+
+        let initdata = self.initdata.gpu_va().get();
+        let mut guard = self.rtkit.lock();
+        let rtk = guard.as_mut().unwrap();
+
+        rtk.boot()?;
+        rtk.start_endpoint(EP_FIRMWARE)?;
+        rtk.start_endpoint(EP_DOORBELL)?;
+        rtk.send_message(EP_FIRMWARE, MSG_INIT | (initdata & INIT_DATA_MASK))?;
+        rtk.send_message(EP_DOORBELL, MSG_TX_DOORBELL | DOORBELL_DEVCTRL)?;
+        core::mem::drop(guard);
+
+        self.kick_firmware()?;
+        Ok(())
+    }
+
+    fn update_globals(&self) {
+        let mut timeout: u32 = 2;
+        if debug_enabled(DebugFlags::WaitForPowerOff) {
+            timeout = 0;
+        } else if debug_enabled(DebugFlags::KeepGpuPowered) {
+            timeout = 5000;
+        }
+
+        self.initdata.globals.with(|raw, _inner| {
+            raw.idle_off_delay_ms.store(timeout, Ordering::Relaxed);
+        });
+    }
+
+    fn alloc(&self) -> Guard<'_, KernelAllocators, MutexBackend> {
+        /* Clean up idle contexts */
+        let mut garbage_ctx = KVec::new();
+        core::mem::swap(&mut *self.garbage_contexts.lock(), &mut garbage_ctx);
+
+        for ctx in garbage_ctx {
+            if self.invalidate_context(&ctx).is_err() {
+                dev_err!(
+                    self.dev.as_ref(),
+                    "GpuContext: Failed to invalidate GPU context!\n"
+                );
+                if debug_enabled(DebugFlags::OopsOnGpuCrash) {
+                    panic!("GPU firmware timed out");
+                }
+            }
+        }
+
+        let mut guard = self.alloc.lock();
+        let (garbage_count, garbage_bytes) = guard.private.garbage();
+        let (ro_garbage_count, ro_garbage_bytes) = guard.gpu_ro.garbage();
+
+        if garbage_bytes > MAX_FW_ALLOC_GARBAGE_BYTES
+            || ro_garbage_bytes > MAX_FW_ALLOC_GARBAGE_BYTES
+            || garbage_count > MAX_FW_ALLOC_GARBAGE_OBJECTS
+            || ro_garbage_count > MAX_FW_ALLOC_GARBAGE_OBJECTS
+        {
+            mod_dev_dbg!(
+                self.dev,
+                "Collecting kalloc garbage (private: {} objects, {} bytes, gpuro: {} objects, {} bytes)\n",
+                garbage_count,
+                garbage_bytes,
+                ro_garbage_count,
+                ro_garbage_bytes
+            );
+            if self.flush_fw_cache().is_err() {
+                dev_err!(self.dev.as_ref(), "Failed to flush FW cache\n");
+            } else {
+                guard.private.collect_garbage(garbage_count);
+                guard.gpu_ro.collect_garbage(ro_garbage_count);
+            }
+        }
+
+        guard
+    }
+
+    fn new_vm(&self, kernel_range: Range<u64>) -> Result<mmu::Vm> {
+        self.uat.new_vm(self.ids.vm.next(), kernel_range)
+    }
+
+    fn bind_vm(&self, vm: &mmu::Vm) -> Result<mmu::VmBind> {
+        self.uat.bind(vm)
+    }
+
+    fn new_queue(
+        &self,
+        vm: mmu::Vm,
+        ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+        ualloc_priv: Arc<Mutex<alloc::DefaultAllocator>>,
+        priority: u32,
+        usc_exec_base: u64,
+    ) -> Result<KBox<dyn queue::Queue>> {
+        let mut kalloc = self.alloc();
+        let id = self.ids.queue.next();
+        Ok(KBox::new(
+            queue::Queue::ver::new(
+                &self.dev,
+                vm,
+                &mut kalloc,
+                ualloc,
+                ualloc_priv,
+                self.event_manager.clone(),
+                &self.buffer_mgr,
+                id,
+                priority,
+                usc_exec_base,
+            )?,
+            GFP_KERNEL,
+        )?)
+    }
+
+    fn kick_firmware(&self) -> Result {
+        if self.is_crashed() {
+            return Err(ENODEV);
+        }
+
+        let mut guard = self.rtkit.lock();
+        let rtk = guard.as_mut().unwrap();
+        rtk.send_message(EP_DOORBELL, MSG_TX_DOORBELL | DOORBELL_KICKFW)?;
+
+        Ok(())
+    }
+
+    fn flush_fw_cache(&self) -> Result {
+        mod_dev_dbg!(self.dev, "Flushing coprocessor data cache\n");
+
+        if self.is_crashed() {
+            return Err(ENODEV);
+        }
+
+        // Setting ctx_0 == 0xff or ctx_1 == 0xff has no effect on any context,
+        // but this command also does a full cache flush, so abuse it
+        // for that.
+
+        let dc = fw::channels::DeviceControlMsg::ver::DestroyContext {
+            unk_4: 0,
+
+            ctx_23: 0,
+            #[ver(V < V13_3)]
+            __pad0: Default::default(),
+            unk_c: U32(0),
+            unk_10: U32(0),
+            ctx_0: 0xff,
+            ctx_1: 0xff,
+            ctx_4: 0,
+            #[ver(V < V13_3)]
+            __pad1: Default::default(),
+            #[ver(V < V13_3)]
+            unk_18: 0,
+            gpu_context: None,
+            __pad2: Default::default(),
+        };
+
+        let mut txch = self.tx_channels.lock();
+
+        let token = txch.device_control.send(&dc);
+        {
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            rtk.send_message(EP_DOORBELL, MSG_TX_DOORBELL | DOORBELL_DEVCTRL)?;
+        }
+
+        txch.device_control.wait_for(token)?;
+        Ok(())
+    }
+
+    fn ids(&self) -> &SequenceIDs {
+        &self.ids
+    }
+
+    fn handle_timeout(&self, counter: u32, event_slot: i32, unk: u32) {
+        dev_err!(self.dev.as_ref(), " (\\________/) \n");
+        dev_err!(self.dev.as_ref(), "  |        |  \n");
+        dev_err!(self.dev.as_ref(), "'.| \\  , / |.'\n");
+        dev_err!(self.dev.as_ref(), "--| / (( \\ |--\n");
+        dev_err!(self.dev.as_ref(), ".'|  _-_-  |'.\n");
+        dev_err!(self.dev.as_ref(), "  |________|  \n");
+        dev_err!(self.dev.as_ref(), "** GPU timeout nya~!!!!! **\n");
+        dev_err!(self.dev.as_ref(), "  Event slot: {}\n", event_slot);
+        dev_err!(self.dev.as_ref(), "  Timeout count: {}\n", counter);
+        dev_err!(self.dev.as_ref(), "  Unk: {}\n", unk);
+
+        // If we have fault info, consider it a fault.
+        let error = match self.get_fault_info() {
+            Some(info) => workqueue::WorkError::Fault(info),
+            None => workqueue::WorkError::Timeout,
+        };
+        self.mark_pending_events(event_slot.try_into().ok(), error);
+        self.recover();
+    }
+
+    fn handle_fault(&self) {
+        dev_err!(self.dev.as_ref(), " (\\________/) \n");
+        dev_err!(self.dev.as_ref(), "  |        |  \n");
+        dev_err!(self.dev.as_ref(), "'.| \\  , / |.'\n");
+        dev_err!(self.dev.as_ref(), "--| / (( \\ |--\n");
+        dev_err!(self.dev.as_ref(), ".'|  _-_-  |'.\n");
+        dev_err!(self.dev.as_ref(), "  |________|  \n");
+        dev_err!(self.dev.as_ref(), "GPU fault nya~!!!!!\n");
+        let error = match self.get_fault_info() {
+            Some(info) => workqueue::WorkError::Fault(info),
+            None => workqueue::WorkError::Unknown,
+        };
+        self.mark_pending_events(None, error);
+        self.recover();
+    }
+
+    fn handle_channel_error(
+        &self,
+        error_type: ChannelErrorType,
+        pipe_type: u32,
+        event_slot: u32,
+        event_value: u32,
+    ) {
+        dev_err!(self.dev.as_ref(), " (\\________/) \n");
+        dev_err!(self.dev.as_ref(), "  |        |  \n");
+        dev_err!(self.dev.as_ref(), "'.| \\  , / |.'\n");
+        dev_err!(self.dev.as_ref(), "--| / (( \\ |--\n");
+        dev_err!(self.dev.as_ref(), ".'|  _-_-  |'.\n");
+        dev_err!(self.dev.as_ref(), "  |________|  \n");
+        dev_err!(self.dev.as_ref(), "GPU channel error nya~!!!!!\n");
+        dev_err!(self.dev.as_ref(), "  Error type: {:?}\n", error_type);
+        dev_err!(self.dev.as_ref(), "  Pipe type: {}\n", pipe_type);
+        dev_err!(self.dev.as_ref(), "  Event slot: {}\n", event_slot);
+        dev_err!(self.dev.as_ref(), "  Event value: {:#x?}\n", event_value);
+
+        self.event_manager.mark_error(
+            event_slot,
+            event_value,
+            workqueue::WorkError::ChannelError(error_type),
+        );
+
+        let wq = match self.event_manager.get_owner(event_slot) {
+            Some(wq) => wq,
+            None => {
+                dev_err!(
+                    self.dev.as_ref(),
+                    "Workqueue not found for this event slot!\n"
+                );
+                return;
+            }
+        };
+
+        let wq = match wq.as_any().downcast_ref::<workqueue::WorkQueue::ver>() {
+            Some(wq) => wq,
+            None => {
+                dev_crit!(self.dev.as_ref(), "GpuManager mismatched with WorkQueue!\n");
+                return;
+            }
+        };
+
+        if debug_enabled(DebugFlags::VerboseFaults) {
+            wq.dump_info();
+        }
+
+        let dc = fw::channels::DeviceControlMsg::ver::RecoverChannel {
+            pipe_type,
+            work_queue: wq.info_pointer(),
+            event_value,
+            __pad: Default::default(),
+        };
+
+        mod_dev_dbg!(self.dev, "Recover Channel command: {:?}\n", &dc);
+        let mut txch = self.tx_channels.lock();
+
+        let token = txch.device_control.send(&dc);
+        {
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            if rtk
+                .send_message(EP_DOORBELL, MSG_TX_DOORBELL | DOORBELL_DEVCTRL)
+                .is_err()
+            {
+                dev_err!(
+                    self.dev.as_ref(),
+                    "Failed to send Recover Channel command\n"
+                );
+            }
+        }
+
+        if txch.device_control.wait_for(token).is_err() {
+            dev_err!(
+                self.dev.as_ref(),
+                "Timed out waiting for Recover Channel command\n"
+            );
+        }
+
+        if debug_enabled(DebugFlags::VerboseFaults) {
+            wq.dump_info();
+        }
+    }
+
+    fn ack_grow(&self, buffer_slot: u32, vm_slot: u32, counter: u32) {
+        let halt_count = self
+            .initdata
+            .fw_status
+            .with(|raw, _inner| raw.flags.halt_count.load(Ordering::Relaxed));
+
+        let dc = fw::channels::DeviceControlMsg::ver::GrowTVBAck {
+            unk_4: 1,
+            buffer_slot,
+            vm_slot,
+            counter,
+            subpipe: 0, // TODO
+            halt_count: U64(halt_count),
+            __pad: Default::default(),
+        };
+
+        mod_dev_dbg!(self.dev, "TVB Grow Ack command: {:?}\n", &dc);
+
+        let mut txch = self.tx_channels.lock();
+
+        txch.device_control.send(&dc);
+        {
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            if rtk
+                .send_message(EP_DOORBELL, MSG_TX_DOORBELL | DOORBELL_DEVCTRL)
+                .is_err()
+            {
+                dev_err!(self.dev.as_ref(), "Failed to send TVB Grow Ack command\n");
+            }
+        }
+    }
+
+    fn fwctl(&self, msg: fw::channels::FwCtlMsg) -> Result {
+        if self.is_crashed() {
+            return Err(ENODEV);
+        }
+
+        let mut fwctl = self.fwctl_channel.lock();
+        let token = fwctl.send(&msg);
+        {
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            rtk.send_message(EP_DOORBELL, MSG_FWCTL)?;
+        }
+        fwctl.wait_for(token)?;
+        Ok(())
+    }
+
+    fn get_cfg(&self) -> &'static hw::HwConfig {
+        self.cfg
+    }
+
+    fn get_dyncfg(&self) -> &hw::DynConfig {
+        &self.dyncfg
+    }
+
+    fn free_context(&self, ctx: KBox<fw::types::GpuObject<fw::workqueue::GpuContextData>>) {
+        let mut garbage = self.garbage_contexts.lock();
+
+        if garbage.push(ctx, GFP_KERNEL).is_err() {
+            dev_err!(
+                self.dev.as_ref(),
+                "Failed to reserve space for freed context, deadlock possible.\n"
+            );
+        }
+    }
+
+    fn is_crashed(&self) -> bool {
+        self.crashed.load(Ordering::Relaxed)
+    }
+
+    fn map_timestamp_buffer(
+        &self,
+        mut bo: gem::ObjectRef,
+        range: Range<usize>,
+    ) -> Result<mmu::KernelMapping> {
+        bo.map_range_into_range(
+            self.uat.kernel_vm(),
+            range,
+            IOVA_KERN_TIMESTAMP_RANGE,
+            mmu::UAT_PGSZ as u64,
+            mmu::PROT_FW_SHARED_RW,
+            false,
+        )
+    }
+}
+
+#[versions(AGX)]
+impl GpuManagerPriv for GpuManager::ver {
+    fn end_op(&self) {
+        let val = self
+            .initdata
+            .globals
+            .with(|raw, _inner| raw.pending_submissions.fetch_sub(1, Ordering::Release));
+
+        mod_dev_dbg!(self.dev, "OP end (pending: {})\n", val - 1);
+    }
+}
diff --git a/drivers/gpu/drm/asahi/hw/mod.rs b/drivers/gpu/drm/asahi/hw/mod.rs
new file mode 100644
index 00000000000000..107642a02fecd2
--- /dev/null
+++ b/drivers/gpu/drm/asahi/hw/mod.rs
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Per-SoC hardware configuration structures
+//!
+//! This module contains the definitions used to store per-GPU and per-SoC configuration data.
+
+use crate::driver::AsahiDevice;
+use crate::fw::types::*;
+use kernel::c_str;
+use kernel::prelude::*;
+
+const MAX_POWERZONES: usize = 5;
+
+pub(crate) mod t600x;
+pub(crate) mod t602x;
+pub(crate) mod t8103;
+pub(crate) mod t8112;
+
+/// GPU generation enumeration. Note: Part of the UABI.
+#[derive(Debug, PartialEq, Copy, Clone)]
+#[repr(u32)]
+pub(crate) enum GpuGen {
+    G13 = 13,
+    G14 = 14,
+}
+
+/// GPU variant enumeration. Note: Part of the UABI.
+#[derive(Debug, PartialEq, Copy, Clone)]
+#[repr(u32)]
+pub(crate) enum GpuVariant {
+    P = 'P' as u32,
+    G = 'G' as u32,
+    S = 'S' as u32,
+    C = 'C' as u32,
+    D = 'D' as u32,
+}
+
+/// GPU revision enumeration. Note: Part of the UABI.
+#[derive(Debug, PartialEq, Copy, Clone)]
+#[repr(u32)]
+pub(crate) enum GpuRevision {
+    A0 = 0x00,
+    A1 = 0x01,
+    B0 = 0x10,
+    B1 = 0x11,
+    C0 = 0x20,
+    C1 = 0x21,
+}
+
+/// GPU core type enumeration. Note: Part of the firmware ABI.
+#[derive(Debug, Copy, Clone)]
+#[repr(u32)]
+pub(crate) enum GpuCore {
+    // Unknown = 0,
+    // G5P = 1,
+    // G5G = 2,
+    // G9P = 3,
+    // G9G = 4,
+    // G10P = 5,
+    // G11P = 6,
+    // G11M = 7,
+    // G11G = 8,
+    // G12P = 9,
+    // G13P = 10,
+    G13G = 11,
+    G13S = 12,
+    G13C = 13,
+    // G14P = 14,
+    G14G = 15,
+    G14S = 16,
+    G14C = 17,
+    G14D = 18, // Split out, unlike G13D
+}
+
+/// GPU revision ID. Note: Part of the firmware ABI.
+#[derive(Debug, PartialEq, Copy, Clone)]
+#[repr(u32)]
+pub(crate) enum GpuRevisionID {
+    // Unknown = 0,
+    A0 = 1,
+    A1 = 2,
+    B0 = 3,
+    B1 = 4,
+    C0 = 5,
+    C1 = 6,
+}
+
+/// A single performance state of the GPU.
+#[derive(Debug)]
+pub(crate) struct PState {
+    /// Voltage in millivolts, per GPU cluster.
+    pub(crate) volt_mv: KVec<u32>,
+    /// Frequency in hertz.
+    pub(crate) freq_hz: u32,
+    /// Maximum power consumption of the GPU at this pstate, in milliwatts.
+    pub(crate) pwr_mw: u32,
+}
+
+impl PState {
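+    /// Return the highest per-cluster voltage of this pstate, in millivolts.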
+    pub(crate) fn max_volt_mv(&self) -> u32 {
+        *self.volt_mv.iter().max().expect("No voltages")
+    }
+}
+
+/// A power zone definition (we have no idea what these are, but Apple puts them in the DT).
+#[allow(missing_docs)]
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct PowerZone {
+    pub(crate) target: u32,
+    pub(crate) target_offset: u32,
+    pub(crate) filter_tc: u32,
+}
+
+/// An MMIO mapping used by the firmware.
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct IOMapping {
+    /// Base physical address of the mapping.
+    pub(crate) base: usize,
+    /// Whether this mapping should be replicated to all dies
+    pub(crate) per_die: bool,
+    /// Number of mappings.
+    pub(crate) count: usize,
+    /// Size of one mapping.
+    pub(crate) size: usize,
+    /// Stride between mappings.
+    pub(crate) stride: usize,
+    /// Whether the mapping should be writable.
+    pub(crate) writable: bool,
+}
+
+impl IOMapping {
+    /// Convenience constructor for a new IOMapping.
+    pub(crate) const fn new(
+        base: usize,
+        per_die: bool,
+        count: usize,
+        size: usize,
+        stride: usize,
+        writable: bool,
+    ) -> IOMapping {
+        IOMapping {
+            base,
+            per_die,
+            count,
+            size,
+            stride,
+            writable,
+        }
+    }
+}
+
+/// Unknown HwConfigA fields that vary from SoC to SoC.
+#[allow(missing_docs)]
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct HwConfigA {
+    pub(crate) unk_87c: i32,
+    pub(crate) unk_8cc: u32,
+    pub(crate) unk_e24: u32,
+}
+
+/// Unknown HwConfigB fields that vary from SoC to SoC.
+#[allow(missing_docs)]
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct HwConfigB {
+    pub(crate) unk_454: u32,
+    pub(crate) unk_4e0: u64,
+    pub(crate) unk_534: u32,
+    pub(crate) unk_ab8: u32,
+    pub(crate) unk_abc: u32,
+    pub(crate) unk_b30: u32,
+}
+
+/// Render command configs that vary from SoC to SoC.
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct HwRenderConfig {
+    /// Vertex/tiling-related configuration register (lsb: disable clustering)
+    pub(crate) tiling_control: u32,
+}
+
+#[derive(Debug)]
+pub(crate) struct HwConfigShared2Curves {
+    pub(crate) t1_coef: u32,
+    pub(crate) t2: &'static [i16],
+    pub(crate) t3_coefs: &'static [u32],
+    pub(crate) t3_scales: &'static [u32],
+}
+
+/// Static hardware clustering configuration for multi-cluster SoCs.
+#[derive(Debug)]
+pub(crate) struct HwClusteringConfig {
+    pub(crate) meta1_blocksize: usize,
+    pub(crate) meta2_size: usize,
+    pub(crate) meta3_size: usize,
+    pub(crate) meta4_size: usize,
+    pub(crate) max_splits: usize,
+}
+
+/// Static hardware configuration for a given SoC model.
+#[derive(Debug)]
+pub(crate) struct HwConfig {
+    /// Chip ID in hex format (e.g. 0x8103 for t8103).
+    pub(crate) chip_id: u32,
+    /// GPU generation.
+    pub(crate) gpu_gen: GpuGen,
+    /// GPU variant type.
+    pub(crate) gpu_variant: GpuVariant,
+    /// GPU core type ID (as known by the firmware).
+    pub(crate) gpu_core: GpuCore,
+
+    /// Base clock used for timekeeping.
+    pub(crate) base_clock_hz: u32,
+    /// Output address space for the UAT on this SoC.
+    pub(crate) uat_oas: usize,
+    /// Number of dies on this SoC.
+    pub(crate) num_dies: u32,
+    /// Maximum number of clusters on this SoC.
+    pub(crate) max_num_clusters: u32,
+    /// Maximum number of cores per cluster for this GPU.
+    pub(crate) max_num_cores: u32,
+    /// Maximum number of frags per cluster for this GPU.
+    pub(crate) max_num_frags: u32,
+    /// Maximum number of GPs per cluster for this GPU.
+    pub(crate) max_num_gps: u32,
+
+    /// Required size of the first preemption buffer.
+    pub(crate) preempt1_size: usize,
+    /// Required size of the second preemption buffer.
+    pub(crate) preempt2_size: usize,
+    /// Required size of the third preemption buffer.
+    pub(crate) preempt3_size: usize,
+
+    /// Required size of the compute preemption buffer.
+    pub(crate) compute_preempt1_size: usize,
+
+    pub(crate) clustering: Option<HwClusteringConfig>,
+
+    /// Rendering-relevant configuration.
+    pub(crate) render: HwRenderConfig,
+
+    /// Misc HWDataA field values.
+    pub(crate) da: HwConfigA,
+    /// Misc HWDataB field values.
+    pub(crate) db: HwConfigB,
+    /// HwDataShared1.table.
+    pub(crate) shared1_tab: &'static [i32],
+    /// HwDataShared1.unk_a4.
+    pub(crate) shared1_a4: u32,
+    /// HwDataShared2.table.
+    pub(crate) shared2_tab: &'static [i32],
+    /// HwDataShared2.unk_508.
+    pub(crate) shared2_unk_508: u32,
+    /// HwDataShared2 curve configuration.
+    pub(crate) shared2_curves: Option<HwConfigShared2Curves>,
+
+    /// HwDataShared3.unk_8.
+    pub(crate) shared3_unk: u32,
+    /// HwDataShared3.table.
+    pub(crate) shared3_tab: &'static [u32],
+
+    /// Globals.idle_off_standby_timer.
+    pub(crate) idle_off_standby_timer_default: u32,
+    /// Globals.unk_hws2_4.
+    pub(crate) unk_hws2_4: Option<[F32; 8]>,
+    /// Globals.unk_hws2_24.
+    pub(crate) unk_hws2_24: u32,
+    /// Globals.unk_54
+    pub(crate) global_unk_54: u16,
+
+    /// Constant related to SRAM voltages.
+    pub(crate) sram_k: F32,
+    /// Unknown per-cluster coefficients 1.
+    pub(crate) unk_coef_a: &'static [&'static [F32]],
+    /// Unknown per-cluster coefficients 2.
+    pub(crate) unk_coef_b: &'static [&'static [F32]],
+    /// Unknown table in Global struct.
+    pub(crate) global_tab: Option<&'static [u8]>,
+    /// Whether this GPU has CS/AFR performance states
+    pub(crate) has_csafr: bool,
+
+    /// Temperature sensor list (8 bits per sensor).
+    pub(crate) fast_sensor_mask: [u64; 2],
+    /// Temperature sensor list (alternate).
+    pub(crate) fast_sensor_mask_alt: [u64; 2],
+    /// Temperature sensor present bitmask.
+    pub(crate) fast_die0_sensor_present: u32,
+    /// Required MMIO mappings for this GPU/firmware.
+    pub(crate) io_mappings: &'static [Option<IOMapping>],
+    /// SRAM base
+    pub(crate) sram_base: Option<usize>,
+    /// SRAM size
+    pub(crate) sram_size: Option<usize>,
+}
+
+/// Dynamic (fetched from hardware/DT) configuration.
+#[derive(Debug)]
+pub(crate) struct DynConfig {
+    /// Base physical address of the UAT TTB (from DT reserved memory region).
+    pub(crate) uat_ttb_base: u64,
+    /// GPU ID configuration read from hardware.
+    pub(crate) id: GpuIdConfig,
+    /// Power calibration configuration for this specific chip/device.
+    pub(crate) pwr: PwrConfig,
+    /// Firmware version.
+    pub(crate) firmware_version: KVec<u32>,
+
+    pub(crate) hw_data_a: KVVec<u8>,
+    pub(crate) hw_data_b: KVVec<u8>,
+    pub(crate) hw_globals: KVVec<u8>,
+}
+
+/// Specific GPU ID configuration fetched from SGX MMIO registers.
+#[derive(Debug)]
+pub(crate) struct GpuIdConfig {
+    /// GPU generation (should match static config).
+    pub(crate) gpu_gen: GpuGen,
+    /// GPU variant type (should match static config).
+    pub(crate) gpu_variant: GpuVariant,
+    /// GPU silicon revision.
+    pub(crate) gpu_rev: GpuRevision,
+    /// GPU silicon revision ID (firmware enum).
+    pub(crate) gpu_rev_id: GpuRevisionID,
+    /// Total number of GPU clusters.
+    pub(crate) num_clusters: u32,
+    /// Maximum number of GPU cores per cluster.
+    pub(crate) num_cores: u32,
+    /// Number of frags per cluster.
+    pub(crate) num_frags: u32,
+    /// Number of GPs per cluster.
+    pub(crate) num_gps: u32,
+    /// Total number of active cores for the whole GPU.
+    pub(crate) total_active_cores: u32,
+    /// Mask of active cores per cluster.
+    pub(crate) core_masks: KVec<u32>,
+    /// Packed mask of all active cores.
+    pub(crate) core_masks_packed: KVec<u32>,
+}
+
+/// Configurable CS/AFR GPU power settings from the device tree.
+#[derive(Debug)]
+pub(crate) struct CsAfrPwrConfig {
+    /// GPU CS performance state list.
+    pub(crate) perf_states_cs: KVec<PState>,
+    /// GPU AFR performance state list.
+    pub(crate) perf_states_afr: KVec<PState>,
+
+    /// CS leakage coefficient per die.
+    pub(crate) leak_coef_cs: KVec<F32>,
+    /// AFR leakage coefficient per die.
+    pub(crate) leak_coef_afr: KVec<F32>,
+
+    /// Minimum voltage for the CS/AFR SRAM power domain in microvolts.
+    pub(crate) min_sram_microvolt: u32,
+}
+
+/// Configurable GPU power settings from the device tree.
+#[derive(Debug)]
+pub(crate) struct PwrConfig {
+    /// GPU performance state list.
+    pub(crate) perf_states: KVec<PState>,
+    /// GPU power zone list.
+    pub(crate) power_zones: KVec<PowerZone>,
+
+    /// Core leakage coefficient per cluster.
+    pub(crate) core_leak_coef: KVec<F32>,
+    /// SRAM leakage coefficient per cluster.
+    pub(crate) sram_leak_coef: KVec<F32>,
+
+    pub(crate) csafr: Option<CsAfrPwrConfig>,
+
+    /// Maximum total power of the GPU in milliwatts.
+    pub(crate) max_power_mw: u32,
+    /// Maximum frequency of the GPU in megahertz.
+    pub(crate) max_freq_mhz: u32,
+
+    /// Minimum performance state to start at.
+    pub(crate) perf_base_pstate: u32,
+    /// Maximum enabled performance state.
+    pub(crate) perf_max_pstate: u32,
+
+    /// Minimum voltage for the SRAM power domain in microvolts.
+    pub(crate) min_sram_microvolt: u32,
+
+    // Most of these fields are just named after Apple ADT property names and we don't fully
+    // understand them. They configure various power-related PID loops and filters.
+    /// Average power filter time constant in milliseconds.
+    pub(crate) avg_power_filter_tc_ms: u32,
+    /// Average power filter PID integral gain?
+    pub(crate) avg_power_ki_only: F32,
+    /// Average power filter PID proportional gain?
+    pub(crate) avg_power_kp: F32,
+    pub(crate) avg_power_min_duty_cycle: u32,
+    /// Average power target filter time constant in periods.
+    pub(crate) avg_power_target_filter_tc: u32,
+    /// "Fast die0" (temperature?) PID integral gain.
+    pub(crate) fast_die0_integral_gain: F32,
+    /// "Fast die0" (temperature?) PID proportional gain.
+    pub(crate) fast_die0_proportional_gain: F32,
+    pub(crate) fast_die0_prop_tgt_delta: u32,
+    pub(crate) fast_die0_release_temp: u32,
+    /// Delay from the fender (?) becoming idle to powerdown
+    pub(crate) fender_idle_off_delay_ms: u32,
+    /// Timeout from firmware early wake to sleep if no work was submitted (?)
+    pub(crate) fw_early_wake_timeout_ms: u32,
+    /// Delay from the GPU becoming idle to powerdown
+    pub(crate) idle_off_delay_ms: u32,
+    /// Related to the above?
+    pub(crate) idle_off_standby_timer: u32,
+    /// Percent?
+    pub(crate) perf_boost_ce_step: u32,
+    /// Minimum utilization before performance state is increased in %.
+    pub(crate) perf_boost_min_util: u32,
+    pub(crate) perf_filter_drop_threshold: u32,
+    /// Performance PID filter time constant? (periods?)
+    pub(crate) perf_filter_time_constant: u32,
+    /// Performance PID filter time constant 2? (periods?)
+    pub(crate) perf_filter_time_constant2: u32,
+    /// Performance PID integral gain.
+    pub(crate) perf_integral_gain: F32,
+    /// Performance PID integral gain 2 (?).
+    pub(crate) perf_integral_gain2: F32,
+    pub(crate) perf_integral_min_clamp: u32,
+    /// Performance PID proportional gain.
+    pub(crate) perf_proportional_gain: F32,
+    /// Performance PID proportional gain 2 (?).
+    pub(crate) perf_proportional_gain2: F32,
+    pub(crate) perf_reset_iters: u32,
+    /// Target GPU utilization for the performance controller in %.
+    pub(crate) perf_tgt_utilization: u32,
+    /// Power sampling period in milliseconds.
+    pub(crate) power_sample_period: u32,
+    /// PPM (?) filter time constant in milliseconds.
+    pub(crate) ppm_filter_time_constant_ms: u32,
+    /// PPM (?) filter PID integral gain.
+    pub(crate) ppm_ki: F32,
+    /// PPM (?) filter PID proportional gain.
+    pub(crate) ppm_kp: F32,
+    /// Power consumption filter time constant (periods?)
+    pub(crate) pwr_filter_time_constant: u32,
+    /// Power consumption filter PID integral gain.
+    pub(crate) pwr_integral_gain: F32,
+    pub(crate) pwr_integral_min_clamp: u32,
+    pub(crate) pwr_min_duty_cycle: u32,
+    pub(crate) pwr_proportional_gain: F32,
+    /// Power sample period in base clocks, used when not an integer number of ms
+    pub(crate) pwr_sample_period_aic_clks: u32,
+
+    pub(crate) se_engagement_criteria: i32,
+    pub(crate) se_filter_time_constant: u32,
+    pub(crate) se_filter_time_constant_1: u32,
+    pub(crate) se_inactive_threshold: u32,
+    pub(crate) se_ki: F32,
+    pub(crate) se_ki_1: F32,
+    pub(crate) se_kp: F32,
+    pub(crate) se_kp_1: F32,
+    pub(crate) se_reset_criteria: u32,
+}
+
+impl PwrConfig {
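+    /// Parse the operating point (OPP) table referenced by `name` in the device tree into a
+    /// list of performance states.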
+    fn load_opp(
+        dev: &AsahiDevice,
+        name: &CStr,
+        cfg: &HwConfig,
+        is_main: bool,
+    ) -> Result<KVec<PState>> {
+        let mut perf_states = KVec::new();
+
+        let node = dev.as_ref().of_node().ok_or(EIO)?;
+        let opps = node.parse_phandle(name, 0).ok_or(EIO)?;
+
+        for opp in opps.children() {
+            let freq_hz: u64 = opp.get_property(c_str!("opp-hz"))?;
+            let mut volt_uv: KVec<u32> = opp.get_property(c_str!("opp-microvolt"))?;
+            let pwr_uw: u32 = if is_main {
+                opp.get_property(c_str!("opp-microwatt"))?
+            } else {
+                0
+            };
+
+            let voltage_count = if is_main {
+                cfg.max_num_clusters
+            } else {
+                cfg.num_dies
+            };
+
+            if volt_uv.len() != voltage_count as usize {
+                dev_err!(
+                    dev.as_ref(),
+                    "Invalid opp-microvolt length (expected {}, got {})\n",
+                    voltage_count,
+                    volt_uv.len()
+                );
+                return Err(EINVAL);
+            }
+
+            volt_uv.iter_mut().for_each(|a| *a /= 1000);
+            let volt_mv = volt_uv;
+
+            let pwr_mw = pwr_uw / 1000;
+
+            perf_states.push(
+                PState {
+                    freq_hz: freq_hz.try_into()?,
+                    volt_mv,
+                    pwr_mw,
+                },
+                GFP_KERNEL,
+            )?;
+        }
+
+        if perf_states.is_empty() {
+            Err(EINVAL)
+        } else {
+            Ok(perf_states)
+        }
+    }
+
+    /// Load the GPU power configuration from the device tree.
+    pub(crate) fn load(dev: &AsahiDevice, cfg: &HwConfig) -> Result<PwrConfig> {
+        let perf_states = Self::load_opp(dev, c_str!("operating-points-v2"), cfg, true)?;
+        let node = dev.as_ref().of_node().ok_or(EIO)?;
+
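+        // Helper for reading device tree properties: the single-argument form requires the
+        // property to be present, while the two-argument form falls back to the given default
+        // if it is absent. Read errors are logged either way.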
+        macro_rules! prop {
+            ($prop:expr, $default:expr) => {{
+                node.get_opt_property(c_str!($prop))
+                    .map_err(|e| {
+                        dev_err!(dev.as_ref(), "Error reading property {}: {:?}\n", $prop, e);
+                        e
+                    })?
+                    .unwrap_or($default)
+            }};
+            ($prop:expr) => {{
+                node.get_property(c_str!($prop)).map_err(|e| {
+                    dev_err!(dev.as_ref(), "Error reading property {}: {:?}\n", $prop, e);
+                    e
+                })?
+            }};
+        }
+
+        let pz_data = prop!("apple,power-zones", KVec::new());
+
+        if pz_data.len() > 3 * MAX_POWERZONES || pz_data.len() % 3 != 0 {
+            dev_err!(dev.as_ref(), "Invalid apple,power-zones value\n");
+            return Err(EINVAL);
+        }
+
+        let pz_count = pz_data.len() / 3;
+        let mut power_zones = KVec::new();
+        for i in 0..pz_count {
+            power_zones.push(
+                PowerZone {
+                    target: pz_data[i * 3],
+                    target_offset: pz_data[i * 3 + 1],
+                    filter_tc: pz_data[i * 3 + 2],
+                },
+                GFP_KERNEL,
+            )?;
+        }
+
+        let core_leak_coef: KVec<F32> = prop!("apple,core-leak-coef");
+        let sram_leak_coef: KVec<F32> = prop!("apple,sram-leak-coef");
+
+        if core_leak_coef.len() != cfg.max_num_clusters as usize {
+            dev_err!(dev.as_ref(), "Invalid apple,core-leak-coef\n");
+            return Err(EINVAL);
+        }
+        if sram_leak_coef.len() != cfg.max_num_clusters as usize {
+            dev_err!(dev.as_ref(), "Invalid apple,sram-leak-coef\n");
+            return Err(EINVAL);
+        }
+
+        let csafr = if cfg.has_csafr {
+            Some(CsAfrPwrConfig {
+                perf_states_cs: Self::load_opp(dev, c_str!("apple,cs-opp"), cfg, false)?,
+                perf_states_afr: Self::load_opp(dev, c_str!("apple,afr-opp"), cfg, false)?,
+                leak_coef_cs: prop!("apple,cs-leak-coef"),
+                leak_coef_afr: prop!("apple,afr-leak-coef"),
+                min_sram_microvolt: prop!("apple,csafr-min-sram-microvolt"),
+            })
+        } else {
+            None
+        };
+
+        let power_sample_period: u32 = prop!("apple,power-sample-period");
+
+        Ok(PwrConfig {
+            core_leak_coef,
+            sram_leak_coef,
+
+            max_power_mw: perf_states.iter().map(|a| a.pwr_mw).max().unwrap(),
+            max_freq_mhz: perf_states.iter().map(|a| a.freq_hz).max().unwrap() / 1_000_000,
+
+            perf_base_pstate: prop!("apple,perf-base-pstate", 1),
+            perf_max_pstate: perf_states.len() as u32 - 1,
+            min_sram_microvolt: prop!("apple,min-sram-microvolt"),
+
+            avg_power_filter_tc_ms: prop!("apple,avg-power-filter-tc-ms"),
+            avg_power_ki_only: prop!("apple,avg-power-ki-only"),
+            avg_power_kp: prop!("apple,avg-power-kp"),
+            avg_power_min_duty_cycle: prop!("apple,avg-power-min-duty-cycle"),
+            avg_power_target_filter_tc: prop!("apple,avg-power-target-filter-tc"),
+            fast_die0_integral_gain: prop!("apple,fast-die0-integral-gain"),
+            fast_die0_proportional_gain: prop!("apple,fast-die0-proportional-gain"),
+            fast_die0_prop_tgt_delta: prop!("apple,fast-die0-prop-tgt-delta", 0),
+            fast_die0_release_temp: prop!("apple,fast-die0-release-temp", 80),
+            fender_idle_off_delay_ms: prop!("apple,fender-idle-off-delay-ms", 40),
+            fw_early_wake_timeout_ms: prop!("apple,fw-early-wake-timeout-ms", 5),
+            idle_off_delay_ms: prop!("apple,idle-off-delay-ms", 2),
+            idle_off_standby_timer: prop!(
+                "apple,idleoff-standby-timer",
+                cfg.idle_off_standby_timer_default
+            ),
+            perf_boost_ce_step: prop!("apple,perf-boost-ce-step", 25),
+            perf_boost_min_util: prop!("apple,perf-boost-min-util", 100),
+            perf_filter_drop_threshold: prop!("apple,perf-filter-drop-threshold"),
+            perf_filter_time_constant2: prop!("apple,perf-filter-time-constant2"),
+            perf_filter_time_constant: prop!("apple,perf-filter-time-constant"),
+            perf_integral_gain2: prop!("apple,perf-integral-gain2"),
+            perf_integral_gain: prop!("apple,perf-integral-gain", f32!(7.8956833)),
+            perf_integral_min_clamp: prop!("apple,perf-integral-min-clamp"),
+            perf_proportional_gain2: prop!("apple,perf-proportional-gain2"),
+            perf_proportional_gain: prop!("apple,perf-proportional-gain", f32!(14.707963)),
+            perf_reset_iters: prop!("apple,perf-reset-iters", 6),
+            perf_tgt_utilization: prop!("apple,perf-tgt-utilization"),
+            power_sample_period,
+            ppm_filter_time_constant_ms: prop!("apple,ppm-filter-time-constant-ms"),
+            ppm_ki: prop!("apple,ppm-ki"),
+            ppm_kp: prop!("apple,ppm-kp"),
+            pwr_filter_time_constant: prop!("apple,pwr-filter-time-constant", 313),
+            pwr_integral_gain: prop!("apple,pwr-integral-gain", f32!(0.0202129)),
+            pwr_integral_min_clamp: prop!("apple,pwr-integral-min-clamp", 0),
+            pwr_min_duty_cycle: prop!("apple,pwr-min-duty-cycle"),
+            pwr_proportional_gain: prop!("apple,pwr-proportional-gain", f32!(5.2831855)),
+            pwr_sample_period_aic_clks: prop!(
+                "apple,pwr-sample-period-aic-clks",
+                cfg.base_clock_hz / 1000 * power_sample_period
+            ),
+            se_engagement_criteria: prop!("apple,se-engagement-criteria", -1),
+            se_filter_time_constant: prop!("apple,se-filter-time-constant", 9),
+            se_filter_time_constant_1: prop!("apple,se-filter-time-constant-1", 3),
+            se_inactive_threshold: prop!("apple,se-inactive-threshold", 2500),
+            se_ki: prop!("apple,se-ki", f32!(-50.0)),
+            se_ki_1: prop!("apple,se-ki-1", f32!(-100.0)),
+            se_kp: prop!("apple,se-kp", f32!(-5.0)),
+            se_kp_1: prop!("apple,se-kp-1", f32!(-10.0)),
+            se_reset_criteria: prop!("apple,se-reset-criteria", 50),
+
+            perf_states,
+            power_zones,
+            csafr,
+        })
+    }
+
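+    /// Return the maximum enabled GPU frequency in kHz.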
+    pub(crate) fn max_frequency_khz(&self) -> u32 {
+        self.perf_states[self.perf_max_pstate as usize].freq_hz / 1000
+    }
+}
diff --git a/drivers/gpu/drm/asahi/hw/t600x.rs b/drivers/gpu/drm/asahi/hw/t600x.rs
new file mode 100644
index 00000000000000..58665f985ec38e
--- /dev/null
+++ b/drivers/gpu/drm/asahi/hw/t600x.rs
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Hardware configuration for t600x (M1 Pro/Max/Ultra) platforms.
+
+use crate::f32;
+
+use super::*;
+
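+// Build the firmware MMIO mapping table for t600x. Slot order is fixed by the firmware ABI;
+// the number of MCache register mappings (mcc_count) and the presence of a second die vary
+// per SoC, so callers pass those in.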
+const fn iomaps(mcc_count: usize, has_die1: bool) -> [Option<IOMapping>; 20] {
+    [
+        Some(IOMapping::new(0x404d00000, false, 1, 0x1c000, 0, true)), // Fender
+        Some(IOMapping::new(0x20e100000, false, 1, 0x4000, 0, false)), // AICTimer
+        Some(IOMapping::new(0x28e104000, false, 1, 0x4000, 0, true)),  // AICSWInt
+        Some(IOMapping::new(0x404000000, false, 1, 0x20000, 0, true)), // RGX
+        None,                                                          // UVD
+        None,                                                          // unused
+        None,                                                          // DisplayUnderrunWA
+        Some(IOMapping::new(0x28e494000, true, 1, 0x4000, 0, false)), // AnalogTempSensorControllerRegs
+        None,                                                         // PMPDoorbell
+        Some(IOMapping::new(0x404d80000, false, 1, 0x8000, 0, true)), // MetrologySensorRegs
+        Some(IOMapping::new(0x204d61000, false, 1, 0x1000, 0, true)), // GMGIFAFRegs
+        Some(IOMapping::new(
+            0x200000000,
+            true,
+            mcc_count,
+            0xd8000,
+            0x1000000,
+            true,
+        )), // MCache registers
+        None,                                                         // AICBankedRegisters
+        None,                                                         // PMGRScratch
+        Some(IOMapping::new(0x2643c4000, false, 1, 0x1000, 0, true)), // NIA Special agent idle register die 0
+        if has_die1 {
+            // NIA Special agent idle register die 1
+            Some(IOMapping::new(0x22643c4000, false, 1, 0x1000, 0, true))
+        } else {
+            None
+        },
+        None,                                                          // CRE registers
+        None,                                                          // Streaming codec registers
+        Some(IOMapping::new(0x28e3d0000, false, 1, 0x1000, 0, true)),  // ?
+        Some(IOMapping::new(0x28e3c0000, false, 1, 0x2000, 0, false)), // ?
+    ]
+}
+
+pub(crate) const HWCONFIG_T6002: super::HwConfig = HwConfig {
+    chip_id: 0x6002,
+    gpu_gen: GpuGen::G13,
+    gpu_variant: GpuVariant::D,
+    gpu_core: GpuCore::G13C,
+
+    base_clock_hz: 24_000_000,
+    uat_oas: 42,
+    num_dies: 2,
+    max_num_clusters: 8,
+    max_num_cores: 8,
+    max_num_frags: 8,
+    max_num_gps: 4,
+
+    preempt1_size: 0x540,
+    preempt2_size: 0x280,
+    preempt3_size: 0x20,
+    compute_preempt1_size: 0x3bd00,
+    clustering: Some(HwClusteringConfig {
+        meta1_blocksize: 0x44,
+        meta2_size: 0xc0 * 8,
+        meta3_size: 0x280 * 8,
+        meta4_size: 0x30 * 16,
+        max_splits: 16,
+    }),
+
+    render: HwRenderConfig {
+        tiling_control: 0xa540,
+    },
+
+    da: HwConfigA {
+        unk_87c: 900,
+        unk_8cc: 11000,
+        unk_e24: 125,
+    },
+    db: HwConfigB {
+        unk_454: 1,
+        unk_4e0: 4,
+        unk_534: 1,
+        unk_ab8: 0x2084,
+        unk_abc: 0x80,
+        unk_b30: 0,
+    },
+    shared1_tab: &[
+        0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+        0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    ],
+    shared1_a4: 0xffff,
+    shared2_tab: &[-1, -1, -1, -1, 0x2aa, 0xaaa, -1, -1, 0, 0],
+    shared2_unk_508: 0xcc00001,
+    shared2_curves: None,
+    shared3_unk: 0,
+    shared3_tab: &[],
+    idle_off_standby_timer_default: 0,
+    unk_hws2_4: None,
+    unk_hws2_24: 0,
+    global_unk_54: 0xffff,
+    sram_k: f32!(1.02),
+    unk_coef_a: &[
+        &f32!([9.838]),
+        &f32!([9.819]),
+        &f32!([9.826]),
+        &f32!([9.799]),
+        &f32!([9.799]),
+        &f32!([9.826]),
+        &f32!([9.819]),
+        &f32!([9.838]),
+    ],
+    unk_coef_b: &[
+        &f32!([13.0]),
+        &f32!([13.0]),
+        &f32!([13.0]),
+        &f32!([13.0]),
+        &f32!([13.0]),
+        &f32!([13.0]),
+        &f32!([13.0]),
+        &f32!([13.0]),
+    ],
+    global_tab: Some(&[
+        0, 1, 2, 1, 1, 90, 75, 1, 1, 1, 2, 90, 75, 1, 1, 1, 1, 90, 75, 1, 1,
+    ]),
+    has_csafr: false,
+    fast_sensor_mask: [0x8080808080808080, 0],
+    fast_sensor_mask_alt: [0x9090909090909090, 0],
+    fast_die0_sensor_present: 0xff,
+    io_mappings: &iomaps(8, true),
+    sram_base: None,
+    sram_size: None,
+};
+
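+// T6001 (M1 Max) and T6000 (M1 Pro) below override only the fields that differ from
+// T6002 (M1 Ultra); everything else is inherited via struct update syntax.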
+pub(crate) const HWCONFIG_T6001: super::HwConfig = HwConfig {
+    chip_id: 0x6001,
+    gpu_variant: GpuVariant::C,
+    gpu_core: GpuCore::G13C,
+
+    num_dies: 1,
+    max_num_clusters: 4,
+    fast_sensor_mask: [0x80808080, 0],
+    fast_sensor_mask_alt: [0x90909090, 0],
+    fast_die0_sensor_present: 0x0f,
+    io_mappings: &iomaps(8, false),
+    ..HWCONFIG_T6002
+};
+
+pub(crate) const HWCONFIG_T6000: super::HwConfig = HwConfig {
+    chip_id: 0x6000,
+    gpu_variant: GpuVariant::S,
+    gpu_core: GpuCore::G13S,
+
+    max_num_clusters: 2,
+    fast_sensor_mask: [0x8080, 0],
+    fast_sensor_mask_alt: [0x9090, 0],
+    fast_die0_sensor_present: 0x03,
+    io_mappings: &iomaps(4, false),
+    ..HWCONFIG_T6001
+};
diff --git a/drivers/gpu/drm/asahi/hw/t602x.rs b/drivers/gpu/drm/asahi/hw/t602x.rs
new file mode 100644
index 00000000000000..98a7ac2b76e571
--- /dev/null
+++ b/drivers/gpu/drm/asahi/hw/t602x.rs
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Hardware configuration for t602x (M2 Pro/Max/Ultra) platforms.
+
+use crate::f32;
+
+use super::*;
+
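+// Build the firmware MMIO mapping table for t602x. Slot order is fixed by the firmware ABI;
+// the temperature sensor controller base differs on t6020, and the number of MCache register
+// mappings varies per SoC.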
+const fn iomaps(chip_id: u32, mcc_count: usize) -> [Option<IOMapping>; 24] {
+    [
+        Some(IOMapping::new(0x404d00000, false, 1, 0x144000, 0, true)), // Fender
+        Some(IOMapping::new(0x20e100000, false, 1, 0x4000, 0, false)),  // AICTimer
+        Some(IOMapping::new(0x28e106000, false, 1, 0x4000, 0, true)),   // AICSWInt
+        Some(IOMapping::new(0x404000000, false, 1, 0x20000, 0, true)),  // RGX
+        None,                                                           // UVD
+        None,                                                           // unused
+        None,                                                           // DisplayUnderrunWA
+        Some(match chip_id {
+            0x6020 => IOMapping::new(0x28e460000, true, 1, 0x4000, 0, false),
+            _ => IOMapping::new(0x28e478000, true, 1, 0x4000, 0, false),
+        }), // AnalogTempSensorControllerRegs
+        None,                                                           // PMPDoorbell
+        Some(IOMapping::new(0x404e08000, false, 1, 0x8000, 0, true)),   // MetrologySensorRegs
+        None,                                                           // GMGIFAFRegs
+        Some(IOMapping::new(
+            0x200000000,
+            true,
+            mcc_count,
+            0xd8000,
+            0x1000000,
+            true,
+        )), // MCache registers
+        Some(IOMapping::new(0x28e118000, false, 1, 0x4000, 0, false)),  // AICBankedRegisters
+        None,                                                           // PMGRScratch
+        None, // NIA Special agent idle register die 0
+        None, // NIA Special agent idle register die 1
+        None, // CRE registers
+        None, // Streaming codec registers
+        Some(IOMapping::new(0x28e3d0000, false, 1, 0x4000, 0, true)), // ?
+        Some(IOMapping::new(0x28e3c0000, false, 1, 0x4000, 0, false)), // ?
+        Some(IOMapping::new(0x28e3d8000, false, 1, 0x4000, 0, true)), // ?
+        Some(IOMapping::new(0x404eac000, true, 1, 0x4000, 0, true)), // ?
+        None,
+        None,
+    ]
+}
+
+// TODO: Tentative
+pub(crate) const HWCONFIG_T6022: super::HwConfig = HwConfig {
+    chip_id: 0x6022,
+    gpu_gen: GpuGen::G14,
+    gpu_variant: GpuVariant::D,
+    gpu_core: GpuCore::G14D,
+
+    base_clock_hz: 24_000_000,
+    uat_oas: 42,
+    num_dies: 2,
+    max_num_clusters: 8,
+    max_num_cores: 10,
+    max_num_frags: 10,
+    max_num_gps: 4,
+
+    preempt1_size: 0x540,
+    preempt2_size: 0x280,
+    preempt3_size: 0x40,
+    compute_preempt1_size: 0x25980 * 2, // Conservative guess
+    clustering: Some(HwClusteringConfig {
+        meta1_blocksize: 0x44,
+        meta2_size: 0xc0 * 16,
+        meta3_size: 0x280 * 16,
+        meta4_size: 0x10 * 128,
+        max_splits: 64,
+    }),
+
+    render: HwRenderConfig {
+        tiling_control: 0x180340,
+    },
+
+    da: HwConfigA {
+        unk_87c: 500,
+        unk_8cc: 11000,
+        unk_e24: 125,
+    },
+    db: HwConfigB {
+        unk_454: 1,
+        unk_4e0: 4,
+        unk_534: 0,
+        unk_ab8: 0, // Unused
+        unk_abc: 0, // Unused
+        unk_b30: 0,
+    },
+    shared1_tab: &[
+        0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+        0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    ],
+    shared1_a4: 0,
+    shared2_tab: &[0x800, 0x1555, -1, -1, -1, -1, -1, -1, 0xaaaaa, 0],
+    shared2_unk_508: 0xc00007,
+    shared2_curves: Some(HwConfigShared2Curves {
+        t1_coef: 11000,
+        t2: &[
+            0xf07, 0x4c0, 0x680, 0x8c0, 0xa80, 0xc40, 0xd80, 0xec0, 0xf40,
+        ],
+        t3_coefs: &[0, 20, 27, 36, 43, 50, 55, 60, 62],
+        t3_scales: &[9, 3209, 10400],
+    }),
+    shared3_unk: 8,
+    shared3_tab: &[
+        125, 125, 125, 125, 125, 125, 125, 125, 7500, 125, 125, 125, 125, 125, 125, 125,
+    ],
+    idle_off_standby_timer_default: 700,
+    unk_hws2_4: Some(f32!([1.0, 0.8, 0.2, 0.9, 0.1, 0.25, 0.5, 0.9])),
+    unk_hws2_24: 6,
+    global_unk_54: 4000,
+    sram_k: f32!(1.02),
+    unk_coef_a: &[
+        &f32!([0.0, 8.2, 0.0, 6.9, 6.9]),
+        &f32!([0.0, 0.0, 0.0, 6.9, 6.9]),
+        &f32!([0.0, 8.2, 0.0, 6.9, 0.0]),
+        &f32!([0.0, 0.0, 0.0, 6.9, 0.0]),
+        &f32!([0.0, 0.0, 0.0, 6.9, 0.0]),
+        &f32!([0.0, 8.2, 0.0, 6.9, 0.0]),
+        &f32!([0.0, 0.0, 0.0, 6.9, 6.9]),
+        &f32!([0.0, 8.2, 0.0, 6.9, 6.9]),
+    ],
+    unk_coef_b: &[
+        &f32!([0.0, 9.0, 0.0, 8.0, 8.0]),
+        &f32!([0.0, 0.0, 0.0, 8.0, 8.0]),
+        &f32!([0.0, 9.0, 0.0, 8.0, 0.0]),
+        &f32!([0.0, 0.0, 0.0, 8.0, 0.0]),
+        &f32!([0.0, 0.0, 0.0, 8.0, 0.0]),
+        &f32!([0.0, 9.0, 0.0, 8.0, 0.0]),
+        &f32!([0.0, 0.0, 0.0, 8.0, 8.0]),
+        &f32!([0.0, 9.0, 0.0, 8.0, 8.0]),
+    ],
+    global_tab: Some(&[
+        0, 2, 2, 1, 1, 90, 75, 1, 1, 1, 2, 90, 75, 1, 1, 1, 2, 90, 75, 1, 1, 1, 1, 90, 75, 1, 1,
+    ]),
+    has_csafr: true,
+    fast_sensor_mask: [0x40005000c000d00, 0xd000c0005000400],
+    // Apple typo? Should probably be 0x140015001c001d00
+    fast_sensor_mask_alt: [0x140015001d001d00, 0x1d001c0015001400],
+    fast_die0_sensor_present: 0, // Unused
+    io_mappings: &iomaps(0x6022, 8),
+    sram_base: Some(0x404d60000),
+    sram_size: Some(0x20000),
+};
+
+pub(crate) const HWCONFIG_T6021: super::HwConfig = HwConfig {
+    chip_id: 0x6021,
+    gpu_variant: GpuVariant::C,
+    gpu_core: GpuCore::G14C,
+
+    num_dies: 1,
+    max_num_clusters: 4,
+    compute_preempt1_size: 0x25980,
+    unk_hws2_4: Some(f32!([1.0, 0.8, 0.2, 0.9, 0.1, 0.25, 0.7, 0.9])),
+    fast_sensor_mask: [0x40005000c000d00, 0],
+    fast_sensor_mask_alt: [0x140015001d001d00, 0],
+    io_mappings: &iomaps(0x6021, 8),
+    ..HWCONFIG_T6022
+};
+
+pub(crate) const HWCONFIG_T6020: super::HwConfig = HwConfig {
+    chip_id: 0x6020,
+    gpu_variant: GpuVariant::S,
+    gpu_core: GpuCore::G14S,
+
+    db: HwConfigB {
+        unk_454: 0,
+        ..HWCONFIG_T6021.db
+    },
+
+    max_num_clusters: 2,
+    fast_sensor_mask: [0xc000d00, 0],
+    fast_sensor_mask_alt: [0x1d001d00, 0],
+    io_mappings: &iomaps(0x6020, 4),
+    ..HWCONFIG_T6021
+};
diff --git a/drivers/gpu/drm/asahi/hw/t8103.rs b/drivers/gpu/drm/asahi/hw/t8103.rs
new file mode 100644
index 00000000000000..484bf6c3414f2f
--- /dev/null
+++ b/drivers/gpu/drm/asahi/hw/t8103.rs
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Hardware configuration for t8103 platforms (M1).
+
+use crate::f32;
+
+use super::*;
+
+pub(crate) const HWCONFIG: super::HwConfig = HwConfig {
+    chip_id: 0x8103,
+    gpu_gen: GpuGen::G13,
+    gpu_variant: GpuVariant::G,
+    gpu_core: GpuCore::G13G,
+
+    base_clock_hz: 24_000_000,
+    uat_oas: 40,
+    num_dies: 1,
+    max_num_clusters: 1,
+    max_num_cores: 8,
+    max_num_frags: 8,
+    max_num_gps: 4,
+
+    preempt1_size: 0x540,
+    preempt2_size: 0x280,
+    preempt3_size: 0x20,
+    compute_preempt1_size: 0x7f80,
+    clustering: None,
+
+    render: HwRenderConfig {
+        // bit 0: disable clustering (always)
+        tiling_control: 0xa041,
+    },
+
+    da: HwConfigA {
+        unk_87c: -220,
+        unk_8cc: 9880,
+        unk_e24: 112,
+    },
+    db: HwConfigB {
+        unk_454: 1,
+        unk_4e0: 0,
+        unk_534: 0,
+        unk_ab8: 0x48,
+        unk_abc: 0x8,
+        unk_b30: 0,
+    },
+    shared1_tab: &[
+        -1, 0x7282, 0x50ea, 0x370a, 0x25be, 0x1c1f, 0x16fb, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    ],
+    shared1_a4: 0xffff,
+    shared2_tab: &[0x800, 0x1555, -1, -1, -1, -1, -1, -1, 0, 0],
+    shared2_unk_508: 0xc00007,
+    shared2_curves: None,
+    shared3_unk: 0,
+    shared3_tab: &[],
+    idle_off_standby_timer_default: 0,
+    unk_hws2_4: None,
+    unk_hws2_24: 0,
+    global_unk_54: 0xffff,
+    sram_k: f32!(1.02),
+    unk_coef_a: &[],
+    unk_coef_b: &[],
+    global_tab: None,
+    has_csafr: false,
+    fast_sensor_mask: [0x12, 0],
+    fast_sensor_mask_alt: [0x12, 0],
+    fast_die0_sensor_present: 0x01,
+    io_mappings: &[
+        Some(IOMapping::new(0x204d00000, false, 1, 0x1c000, 0, true)), // Fender
+        Some(IOMapping::new(0x20e100000, false, 1, 0x4000, 0, false)), // AICTimer
+        Some(IOMapping::new(0x23b104000, false, 1, 0x4000, 0, true)),  // AICSWInt
+        Some(IOMapping::new(0x204000000, false, 1, 0x20000, 0, true)), // RGX
+        None,                                                          // UVD
+        None,                                                          // unused
+        None,                                                          // DisplayUnderrunWA
+        Some(IOMapping::new(0x23b2e8000, false, 1, 0x1000, 0, false)), // AnalogTempSensorControllerRegs
+        Some(IOMapping::new(0x23bc00000, false, 1, 0x1000, 0, true)),  // PMPDoorbell
+        Some(IOMapping::new(0x204d80000, false, 1, 0x5000, 0, true)),  // MetrologySensorRegs
+        Some(IOMapping::new(0x204d61000, false, 1, 0x1000, 0, true)),  // GMGIFAFRegs
+        Some(IOMapping::new(0x200000000, false, 1, 0xd6400, 0, true)), // MCache registers
+        None,                                                          // AICBankedRegisters
+        Some(IOMapping::new(0x23b738000, false, 1, 0x1000, 0, true)),  // PMGRScratch
+        None, // NIA Special agent idle register die 0
+        None, // NIA Special agent idle register die 1
+        None, // CRE registers
+        None, // Streaming codec registers
+        None, //
+        None, //
+    ],
+    sram_base: None,
+    sram_size: None,
+};
diff --git a/drivers/gpu/drm/asahi/hw/t8112.rs b/drivers/gpu/drm/asahi/hw/t8112.rs
new file mode 100644
index 00000000000000..3eba0457d76ac9
--- /dev/null
+++ b/drivers/gpu/drm/asahi/hw/t8112.rs
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Hardware configuration for t8112 platforms (M2).
+
+use crate::f32;
+
+use super::*;
+
+pub(crate) const HWCONFIG: super::HwConfig = HwConfig {
+    chip_id: 0x8112,
+    gpu_gen: GpuGen::G14,
+    gpu_variant: GpuVariant::G,
+    gpu_core: GpuCore::G14G,
+
+    base_clock_hz: 24_000_000,
+    uat_oas: 40,
+    num_dies: 1,
+    max_num_clusters: 1,
+    max_num_cores: 10,
+    max_num_frags: 10,
+    max_num_gps: 4,
+
+    preempt1_size: 0x540,
+    preempt2_size: 0x280,
+    preempt3_size: 0x20,
+    compute_preempt1_size: 0x10000, // TODO: Check
+    clustering: None,
+
+    render: HwRenderConfig {
+        // TODO: this is unused here, may be present in newer FW
+        tiling_control: 0xa041,
+    },
+
+    da: HwConfigA {
+        unk_87c: 900,
+        unk_8cc: 11000,
+        unk_e24: 125,
+    },
+    db: HwConfigB {
+        unk_454: 1,
+        unk_4e0: 4,
+        unk_534: 0,
+        unk_ab8: 0x2048,
+        unk_abc: 0x4000,
+        unk_b30: 1,
+    },
+    shared1_tab: &[
+        0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+        0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    ],
+    shared1_a4: 0,
+    shared2_tab: &[-1, -1, -1, -1, -1, -1, -1, -1, 0xaa5aa, 0],
+    shared2_unk_508: 0xc00000,
+    shared2_curves: Some(HwConfigShared2Curves {
+        t1_coef: 7200,
+        t2: &[
+            0xf07, 0x4c0, 0x6c0, 0x8c0, 0xac0, 0xc40, 0xdc0, 0xec0, 0xf80,
+        ],
+        t3_coefs: &[0, 20, 28, 36, 44, 50, 56, 60, 63],
+        t3_scales: &[9, 3209, 10400],
+    }),
+    shared3_unk: 5,
+    shared3_tab: &[
+        10700, 10700, 10700, 10700, 10700, 6000, 1000, 1000, 1000, 10700, 10700, 10700, 10700,
+        10700, 10700, 10700,
+    ],
+    idle_off_standby_timer_default: 0,
+    unk_hws2_4: None,
+    unk_hws2_24: 0,
+    global_unk_54: 0xffff,
+
+    sram_k: f32!(1.02),
+    // 13.2: last coef changed from 6.6 to 5.3, assuming that was a fix we can backport
+    unk_coef_a: &[&f32!([0.0, 0.0, 0.0, 0.0, 5.3, 0.0, 5.3, /*6.6*/ 5.3])],
+    unk_coef_b: &[&f32!([0.0, 0.0, 0.0, 0.0, 5.3, 0.0, 5.3, /*6.6*/ 5.3])],
+    global_tab: None,
+    has_csafr: false,
+    fast_sensor_mask: [0x6800, 0],
+    fast_sensor_mask_alt: [0x6800, 0],
+    fast_die0_sensor_present: 0x02,
+    io_mappings: &[
+        Some(IOMapping::new(0x204d00000, false, 1, 0x14000, 0, true)), // Fender
+        Some(IOMapping::new(0x20e100000, false, 1, 0x4000, 0, false)), // AICTimer
+        Some(IOMapping::new(0x23b0c4000, false, 1, 0x4000, 0, true)),  // AICSWInt
+        Some(IOMapping::new(0x204000000, false, 1, 0x20000, 0, true)), // RGX
+        None,                                                          // UVD
+        None,                                                          // unused
+        None,                                                          // DisplayUnderrunWA
+        Some(IOMapping::new(0x23b2c0000, false, 1, 0x1000, 0, false)), // AnalogTempSensorControllerRegs
+        None,                                                          // PMPDoorbell
+        Some(IOMapping::new(0x204d80000, false, 1, 0x8000, 0, true)),  // MetrologySensorRegs
+        Some(IOMapping::new(0x204d61000, false, 1, 0x1000, 0, true)),  // GMGIFAFRegs
+        Some(IOMapping::new(0x200000000, false, 1, 0xd6400, 0, true)), // MCache registers
+        None,                                                          // AICBankedRegisters
+        None,                                                          // PMGRScratch
+        None, // NIA Special agent idle register die 0
+        None, // NIA Special agent idle register die 1
+        Some(IOMapping::new(0x204e00000, false, 1, 0x10000, 0, true)), // CRE registers
+        Some(IOMapping::new(0x27d050000, false, 1, 0x4000, 0, true)), // Streaming codec registers
+        Some(IOMapping::new(0x23b3d0000, false, 1, 0x1000, 0, true)), //
+        Some(IOMapping::new(0x23b3c0000, false, 1, 0x1000, 0, false)), //
+    ],
+    sram_base: None,
+    sram_size: None,
+};
diff --git a/drivers/gpu/drm/asahi/initdata.rs b/drivers/gpu/drm/asahi/initdata.rs
new file mode 100644
index 00000000000000..cdd8bd415d284b
--- /dev/null
+++ b/drivers/gpu/drm/asahi/initdata.rs
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![allow(clippy::unusual_byte_groupings)]
+
+//! GPU initialization data builder.
+//!
+//! The root of all interaction between the GPU firmware and the host driver is a complex set of
+//! nested structures that we call InitData. This includes both GPU hardware/firmware configuration
+//! and the pointers to the ring buffers and global data fields that are used for communication at
+//! runtime.
+//!
+//! Many of these structures are poorly understood, so there are lots of hardcoded unknown values
+//! derived from observing the InitData structures that macOS generates.
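+//!
+//! Sketch of the intended flow (simplified; the real call site owns the device, allocators and
+//! configuration structs):
+//!
+//! ```ignore
+//! let mut builder = InitDataBuilder::ver::new(dev, &mut alloc, cfg, &dyncfg);
+//! let initdata = builder.build()?;
+//! ```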
+
+use crate::f32;
+use crate::fw::initdata::*;
+use crate::fw::types::*;
+use crate::module_parameters;
+use crate::{driver::AsahiDevice, gem, gpu, hw, mmu};
+use kernel::error::{Error, Result};
+use kernel::macros::versions;
+use kernel::prelude::*;
+use kernel::{init, init::Init, try_init};
+
+/// Builder helper for the global GPU InitData.
+#[versions(AGX)]
+pub(crate) struct InitDataBuilder<'a> {
+    dev: &'a AsahiDevice,
+    alloc: &'a mut gpu::KernelAllocators,
+    cfg: &'static hw::HwConfig,
+    dyncfg: &'a hw::DynConfig,
+}
+
+#[versions(AGX)]
+impl<'a> InitDataBuilder::ver<'a> {
+    /// Create a new InitData builder
+    pub(crate) fn new(
+        dev: &'a AsahiDevice,
+        alloc: &'a mut gpu::KernelAllocators,
+        cfg: &'static hw::HwConfig,
+        dyncfg: &'a hw::DynConfig,
+    ) -> InitDataBuilder::ver<'a> {
+        InitDataBuilder::ver {
+            dev,
+            alloc,
+            cfg,
+            dyncfg,
+        }
+    }
+
+    /// Create the HwDataShared1 structure, which is used in two places in InitData.
+    fn hw_shared1(cfg: &'static hw::HwConfig) -> impl Init<raw::HwDataShared1> {
+        init!(raw::HwDataShared1 {
+            unk_a4: cfg.shared1_a4,
+            ..Zeroable::zeroed()
+        })
+        .chain(|ret| {
+            for (i, val) in cfg.shared1_tab.iter().enumerate() {
+                ret.table[i] = *val;
+            }
+            Ok(())
+        })
+    }
+
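+    /// Fill one `HwDataShared2Curve` from the given tables: `t1`/`t2` are padded out with their
+    /// first element, and each `t3` row defaults to 0x3ffffff before the provided values are
+    /// copied in.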
+    fn init_curve(
+        curve: &mut raw::HwDataShared2Curve,
+        unk_0: u32,
+        unk_4: u32,
+        t1: &[u16],
+        t2: &[i16],
+        t3: &[KVec<i32>],
+    ) {
+        curve.unk_0 = unk_0;
+        curve.unk_4 = unk_4;
+        (*curve.t1)[..t1.len()].copy_from_slice(t1);
+        (*curve.t1)[t1.len()..].fill(t1[0]);
+        (*curve.t2)[..t2.len()].copy_from_slice(t2);
+        (*curve.t2)[t2.len()..].fill(t2[0]);
+        for (i, a) in curve.t3.iter_mut().enumerate() {
+            a.fill(0x3ffffff);
+            if i < t3.len() {
+                let b = &t3[i];
+                (**a)[..b.len()].copy_from_slice(b);
+            }
+        }
+    }
+
+    /// Create the HwDataShared2 structure, which is used in two places in InitData.
+    fn hw_shared2(
+        cfg: &'static hw::HwConfig,
+        dyncfg: &'a hw::DynConfig,
+    ) -> impl Init<raw::HwDataShared2, Error> + 'a {
+        try_init!(raw::HwDataShared2 {
+            unk_28: Array::new([0xff; 16]),
+            g14: Default::default(),
+            unk_508: cfg.shared2_unk_508,
+            ..Zeroable::zeroed()
+        })
+        .chain(|ret| {
+            for (i, val) in cfg.shared2_tab.iter().enumerate() {
+                ret.table[i] = *val;
+            }
+
+            let curve_cfg = match cfg.shared2_curves.as_ref() {
+                None => return Ok(()),
+                Some(a) => a,
+            };
+
+            let mut t1 = KVec::new();
+            let mut t3 = KVec::new();
+
+            for _ in 0..curve_cfg.t3_scales.len() {
+                t3.push(KVec::new(), GFP_KERNEL)?;
+            }
+
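+            // For each performance state, derive table entries that scale inversely with
+            // frequency * voltage. The constants come from observing macOS-generated InitData;
+            // their exact meaning is unknown.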
+            for (i, ps) in dyncfg.pwr.perf_states.iter().enumerate() {
+                let t3_coef = curve_cfg.t3_coefs[i];
+                if t3_coef == 0 {
+                    t1.push(0xffff, GFP_KERNEL)?;
+                    for j in t3.iter_mut() {
+                        j.push(0x3ffffff, GFP_KERNEL)?;
+                    }
+                    continue;
+                }
+
+                let f_khz = (ps.freq_hz / 1000) as u64;
+                let v_max = ps.max_volt_mv() as u64;
+
+                t1.push(
+                    (1000000000 * (curve_cfg.t1_coef as u64) / (f_khz * v_max))
+                        .try_into()
+                        .unwrap(),
+                    GFP_KERNEL,
+                )?;
+
+                for (j, scale) in curve_cfg.t3_scales.iter().enumerate() {
+                    t3[j].push(
+                        (t3_coef as u64 * 1000000100 * *scale as u64 / (f_khz * v_max * 6))
+                            .try_into()
+                            .unwrap(),
+                        GFP_KERNEL,
+                    )?;
+                }
+            }
+
+            ret.g14.unk_14 = 0x6000000;
+            Self::init_curve(
+                &mut ret.g14.curve1,
+                0,
+                0x20000000,
+                &[0xffff],
+                &[0x0f07],
+                &[],
+            );
+            Self::init_curve(&mut ret.g14.curve2, 7, 0x80000000, &t1, curve_cfg.t2, &t3);
+
+            Ok(())
+        })
+    }
+
+    /// Create the HwDataShared3 structure, which is used in two places in InitData.
+    fn hw_shared3(cfg: &'static hw::HwConfig) -> impl Init<raw::HwDataShared3> {
+        init::zeroed::<raw::HwDataShared3>().chain(|ret| {
+            if !cfg.shared3_tab.is_empty() {
+                ret.unk_0 = 1;
+                ret.unk_4 = 500;
+                ret.unk_8 = cfg.shared3_unk;
+                ret.table.copy_from_slice(cfg.shared3_tab);
+                ret.unk_4c = 1;
+            }
+            Ok(())
+        })
+    }
+
+    /// Create an unknown T81xx-specific data structure.
+    fn t81xx_data(
+        cfg: &'static hw::HwConfig,
+        dyncfg: &'a hw::DynConfig,
+    ) -> impl Init<raw::T81xxData> {
+        let _perf_max_pstate = dyncfg.pwr.perf_max_pstate;
+
+        init::zeroed::<raw::T81xxData>().chain(move |_ret| {
+            match cfg.chip_id {
+                0x8103 | 0x8112 => {
+                    #[ver(V < V13_3)]
+                    {
+                        _ret.unk_d8c = 0x80000000;
+                        _ret.unk_d90 = 4;
+                        _ret.unk_d9c = f32!(0.6);
+                        _ret.unk_da4 = f32!(0.4);
+                        _ret.unk_dac = f32!(0.38552);
+                        _ret.unk_db8 = f32!(65536.0);
+                        _ret.unk_dbc = f32!(13.56);
+                        _ret.max_pstate_scaled = 100 * _perf_max_pstate;
+                    }
+                }
+                _ => (),
+            }
+            Ok(())
+        })
+    }
+
+    /// Create the HwDataA structure. This mostly contains power-related configuration.
+    fn hwdata_a(&mut self) -> Result<GpuObject<HwDataA::ver>> {
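+        // The *_filter_a coefficients below are reciprocals of time constants expressed in
+        // sample periods, and the firmware also takes the complementary (1 - a) values, which
+        // looks like a first-order IIR low-pass filter setup (an educated guess from the names).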
+        let pwr = &self.dyncfg.pwr;
+        let period_ms = pwr.power_sample_period;
+        let period_s = F32::from(period_ms) / f32!(1000.0);
+        let ppm_filter_tc_periods = pwr.ppm_filter_time_constant_ms / period_ms;
+        #[ver(V >= V13_0B4)]
+        let ppm_filter_tc_ms_rounded = ppm_filter_tc_periods * period_ms;
+        let ppm_filter_a = f32!(1.0) / ppm_filter_tc_periods.into();
+        let perf_filter_a = f32!(1.0) / pwr.perf_filter_time_constant.into();
+        let perf_filter_a2 = f32!(1.0) / pwr.perf_filter_time_constant2.into();
+        let avg_power_target_filter_a = f32!(1.0) / pwr.avg_power_target_filter_tc.into();
+        let avg_power_filter_tc_periods = pwr.avg_power_filter_tc_ms / period_ms;
+        #[ver(V >= V13_0B4)]
+        let avg_power_filter_tc_ms_rounded = avg_power_filter_tc_periods * period_ms;
+        let avg_power_filter_a = f32!(1.0) / avg_power_filter_tc_periods.into();
+        let pwr_filter_a = f32!(1.0) / pwr.pwr_filter_time_constant.into();
+
+        let base_ps = pwr.perf_base_pstate;
+        let base_ps_scaled = 100 * base_ps;
+        let max_ps = pwr.perf_max_pstate;
+        let max_ps_scaled = 100 * max_ps;
+        let boost_ps_count = max_ps - base_ps;
+
+        #[allow(unused_variables)]
+        let base_clock_khz = self.cfg.base_clock_hz / 1000;
+        let clocks_per_period = pwr.pwr_sample_period_aic_clks;
+
+        #[allow(unused_variables)]
+        let clocks_per_period_coarse = self.cfg.base_clock_hz / 1000 * pwr.power_sample_period;
+
+        self.alloc.private.new_init(init::zeroed(), |_inner, _ptr| {
+            let cfg = &self.cfg;
+            let dyncfg = &self.dyncfg;
+            try_init!(raw::HwDataA::ver {
+                clocks_per_period: clocks_per_period,
+                #[ver(V >= V13_0B4)]
+                clocks_per_period_2: clocks_per_period,
+                pwr_status: AtomicU32::new(4),
+                unk_10: f32!(1.0),
+                actual_pstate: 1,
+                tgt_pstate: 1,
+                base_pstate_scaled: base_ps_scaled,
+                unk_40: 1,
+                max_pstate_scaled: max_ps_scaled,
+                min_pstate_scaled: 100,
+                unk_64c: 625,
+                pwr_filter_a_neg: f32!(1.0) - pwr_filter_a,
+                pwr_filter_a: pwr_filter_a,
+                pwr_integral_gain: pwr.pwr_integral_gain,
+                pwr_integral_min_clamp: pwr.pwr_integral_min_clamp.into(),
+                max_power_1: pwr.max_power_mw.into(),
+                pwr_proportional_gain: pwr.pwr_proportional_gain,
+                pwr_pstate_related_k: -F32::from(max_ps_scaled) / pwr.max_power_mw.into(),
+                pwr_pstate_max_dc_offset: pwr.pwr_min_duty_cycle as i32 - max_ps_scaled as i32,
+                max_pstate_scaled_2: max_ps_scaled,
+                max_power_2: pwr.max_power_mw,
+                max_pstate_scaled_3: max_ps_scaled,
+                ppm_filter_tc_periods_x4: ppm_filter_tc_periods * 4,
+                ppm_filter_a_neg: f32!(1.0) - ppm_filter_a,
+                ppm_filter_a: ppm_filter_a,
+                ppm_ki_dt: pwr.ppm_ki * period_s,
+                unk_6fc: f32!(65536.0),
+                ppm_kp: pwr.ppm_kp,
+                pwr_min_duty_cycle: pwr.pwr_min_duty_cycle,
+                max_pstate_scaled_4: max_ps_scaled,
+                unk_71c: f32!(0.0),
+                max_power_3: pwr.max_power_mw,
+                cur_power_mw_2: 0x0,
+                ppm_filter_tc_ms: pwr.ppm_filter_time_constant_ms,
+                #[ver(V >= V13_0B4)]
+                ppm_filter_tc_clks: ppm_filter_tc_ms_rounded * base_clock_khz,
+                perf_tgt_utilization: pwr.perf_tgt_utilization,
+                perf_boost_min_util: pwr.perf_boost_min_util,
+                perf_boost_ce_step: pwr.perf_boost_ce_step,
+                perf_reset_iters: pwr.perf_reset_iters,
+                unk_774: 6,
+                unk_778: 1,
+                perf_filter_drop_threshold: pwr.perf_filter_drop_threshold,
+                perf_filter_a_neg: f32!(1.0) - perf_filter_a,
+                perf_filter_a2_neg: f32!(1.0) - perf_filter_a2,
+                perf_filter_a: perf_filter_a,
+                perf_filter_a2: perf_filter_a2,
+                perf_ki: pwr.perf_integral_gain,
+                perf_ki2: pwr.perf_integral_gain2,
+                perf_integral_min_clamp: pwr.perf_integral_min_clamp.into(),
+                unk_79c: f32!(95.0),
+                perf_kp: pwr.perf_proportional_gain,
+                perf_kp2: pwr.perf_proportional_gain2,
+                boost_state_unk_k: F32::from(boost_ps_count) / f32!(0.95),
+                base_pstate_scaled_2: base_ps_scaled,
+                max_pstate_scaled_5: max_ps_scaled,
+                base_pstate_scaled_3: base_ps_scaled,
+                perf_tgt_utilization_2: pwr.perf_tgt_utilization,
+                base_pstate_scaled_4: base_ps_scaled,
+                unk_7fc: f32!(65536.0),
+                pwr_min_duty_cycle_2: pwr.pwr_min_duty_cycle.into(),
+                max_pstate_scaled_6: max_ps_scaled.into(),
+                max_freq_mhz: pwr.max_freq_mhz,
+                pwr_min_duty_cycle_3: pwr.pwr_min_duty_cycle,
+                min_pstate_scaled_4: f32!(100.0),
+                max_pstate_scaled_7: max_ps_scaled,
+                unk_alpha_neg: f32!(0.8),
+                unk_alpha: f32!(0.2),
+                fast_die0_sensor_mask: U64(cfg.fast_sensor_mask[0]),
+                #[ver(G >= G14X)]
+                fast_die1_sensor_mask: U64(cfg.fast_sensor_mask[1]),
+                fast_die0_release_temp_cc: 100 * pwr.fast_die0_release_temp,
+                unk_87c: cfg.da.unk_87c,
+                unk_880: 0x4,
+                unk_894: f32!(1.0),
+
+                fast_die0_ki_dt: pwr.fast_die0_integral_gain * period_s,
+                unk_8a8: f32!(65536.0),
+                fast_die0_kp: pwr.fast_die0_proportional_gain,
+                pwr_min_duty_cycle_4: pwr.pwr_min_duty_cycle,
+                max_pstate_scaled_8: max_ps_scaled,
+                max_pstate_scaled_9: max_ps_scaled,
+                fast_die0_prop_tgt_delta: 100 * pwr.fast_die0_prop_tgt_delta,
+                unk_8cc: cfg.da.unk_8cc,
+                max_pstate_scaled_10: max_ps_scaled,
+                max_pstate_scaled_11: max_ps_scaled,
+                unk_c2c: 1,
+                power_zone_count: pwr.power_zones.len() as u32,
+                max_power_4: pwr.max_power_mw,
+                max_power_5: pwr.max_power_mw,
+                max_power_6: pwr.max_power_mw,
+                avg_power_target_filter_a_neg: f32!(1.0) - avg_power_target_filter_a,
+                avg_power_target_filter_a: avg_power_target_filter_a,
+                avg_power_target_filter_tc_x4: 4 * pwr.avg_power_target_filter_tc,
+                avg_power_target_filter_tc_xperiod: period_ms * pwr.avg_power_target_filter_tc,
+                #[ver(V >= V13_0B4)]
+                avg_power_target_filter_tc_clks: period_ms
+                    * pwr.avg_power_target_filter_tc
+                    * base_clock_khz,
+                avg_power_filter_tc_periods_x4: 4 * avg_power_filter_tc_periods,
+                avg_power_filter_a_neg: f32!(1.0) - avg_power_filter_a,
+                avg_power_filter_a: avg_power_filter_a,
+                avg_power_ki_dt: pwr.avg_power_ki_only * period_s,
+                unk_d20: f32!(65536.0),
+                avg_power_kp: pwr.avg_power_kp,
+                avg_power_min_duty_cycle: pwr.avg_power_min_duty_cycle,
+                max_pstate_scaled_12: max_ps_scaled,
+                max_pstate_scaled_13: max_ps_scaled,
+                max_power_7: pwr.max_power_mw.into(),
+                max_power_8: pwr.max_power_mw,
+                avg_power_filter_tc_ms: pwr.avg_power_filter_tc_ms,
+                #[ver(V >= V13_0B4)]
+                avg_power_filter_tc_clks: avg_power_filter_tc_ms_rounded * base_clock_khz,
+                max_pstate_scaled_14: max_ps_scaled,
+                t81xx_data <- Self::t81xx_data(cfg, dyncfg),
+                #[ver(V >= V13_0B4)]
+                unk_e10_0 <- {
+                    let filter_a = f32!(1.0) / pwr.se_filter_time_constant.into();
+                    let filter_1_a = f32!(1.0) / pwr.se_filter_time_constant_1.into();
+                    try_init!(raw::HwDataA130Extra {
+                        unk_38: 4,
+                        unk_3c: 8000,
+                        gpu_se_inactive_threshold: pwr.se_inactive_threshold,
+                        gpu_se_engagement_criteria: pwr.se_engagement_criteria,
+                        gpu_se_reset_criteria: pwr.se_reset_criteria,
+                        unk_54: 50,
+                        unk_58: 0x1,
+                        gpu_se_filter_a_neg: f32!(1.0) - filter_a,
+                        gpu_se_filter_1_a_neg: f32!(1.0) - filter_1_a,
+                        gpu_se_filter_a: filter_a,
+                        gpu_se_filter_1_a: filter_1_a,
+                        gpu_se_ki_dt: pwr.se_ki * period_s,
+                        gpu_se_ki_1_dt: pwr.se_ki_1 * period_s,
+                        unk_7c: f32!(65536.0),
+                        gpu_se_kp: pwr.se_kp,
+                        gpu_se_kp_1: pwr.se_kp_1,
+
+                        #[ver(V >= V13_3)]
+                        unk_8c: 100,
+                        #[ver(V < V13_3)]
+                        unk_8c: 40,
+
+                        max_pstate_scaled_1: max_ps_scaled,
+                        unk_9c: f32!(8000.0),
+                        unk_a0: 1400,
+                        gpu_se_filter_time_constant_ms: pwr.se_filter_time_constant * period_ms,
+                        gpu_se_filter_time_constant_1_ms: pwr.se_filter_time_constant_1
+                            * period_ms,
+                        gpu_se_filter_time_constant_clks: U64((pwr.se_filter_time_constant
+                            * clocks_per_period_coarse)
+                            .into()),
+                        gpu_se_filter_time_constant_1_clks: U64((pwr
+                            .se_filter_time_constant_1
+                            * clocks_per_period_coarse)
+                            .into()),
+                        unk_c4: f32!(65536.0),
+                        unk_114: f32!(65536.0),
+                        unk_124: 40,
+                        max_pstate_scaled_2: max_ps_scaled,
+                        ..Zeroable::zeroed()
+                    })
+                },
+                fast_die0_sensor_mask_2: U64(cfg.fast_sensor_mask[0]),
+                #[ver(G >= G14X)]
+                fast_die1_sensor_mask_2: U64(cfg.fast_sensor_mask[1]),
+                unk_e24: cfg.da.unk_e24,
+                unk_e28: 1,
+                fast_die0_sensor_mask_alt: U64(cfg.fast_sensor_mask_alt[0]),
+                #[ver(G >= G14X)]
+                fast_die1_sensor_mask_alt: U64(cfg.fast_sensor_mask_alt[1]),
+                #[ver(V < V13_0B4)]
+                fast_die0_sensor_present: U64(cfg.fast_die0_sensor_present as u64),
+                unk_163c: 1,
+                unk_3644: 0,
+                hws1 <- Self::hw_shared1(cfg),
+                hws2 <- Self::hw_shared2(cfg, dyncfg),
+                hws3 <- Self::hw_shared3(cfg),
+                unk_3ce8: 1,
+                ..Zeroable::zeroed()
+            })
+            .chain(|raw| {
+                for i in 0..self.dyncfg.pwr.perf_states.len() {
+                    raw.sram_k[i] = self.cfg.sram_k;
+                }
+
+                for (i, coef) in pwr.core_leak_coef.iter().enumerate() {
+                    raw.core_leak_coef[i] = *coef;
+                }
+
+                for (i, coef) in pwr.sram_leak_coef.iter().enumerate() {
+                    raw.sram_leak_coef[i] = *coef;
+                }
+
+                #[ver(V >= V13_0B4)]
+                if let Some(csafr) = pwr.csafr.as_ref() {
+                    for (i, coef) in csafr.leak_coef_afr.iter().enumerate() {
+                        raw.aux_leak_coef.cs_1[i] = *coef;
+                        raw.aux_leak_coef.cs_2[i] = *coef;
+                    }
+
+                    for (i, coef) in csafr.leak_coef_cs.iter().enumerate() {
+                        raw.aux_leak_coef.afr_1[i] = *coef;
+                        raw.aux_leak_coef.afr_2[i] = *coef;
+                    }
+                }
+
+                for i in 0..self.dyncfg.id.num_clusters as usize {
+                    if let Some(coef_a) = self.cfg.unk_coef_a.get(i) {
+                        (*raw.unk_coef_a1[i])[..coef_a.len()].copy_from_slice(coef_a);
+                        (*raw.unk_coef_a2[i])[..coef_a.len()].copy_from_slice(coef_a);
+                    }
+                    if let Some(coef_b) = self.cfg.unk_coef_b.get(i) {
+                        (*raw.unk_coef_b1[i])[..coef_b.len()].copy_from_slice(coef_b);
+                        (*raw.unk_coef_b2[i])[..coef_b.len()].copy_from_slice(coef_b);
+                    }
+                }
+
+                for (i, pz) in pwr.power_zones.iter().enumerate() {
+                    raw.power_zones[i].target = pz.target;
+                    raw.power_zones[i].target_off = pz.target - pz.target_offset;
+                    raw.power_zones[i].filter_tc_x4 = 4 * pz.filter_tc;
+                    raw.power_zones[i].filter_tc_xperiod = period_ms * pz.filter_tc;
+                    let filter_a = f32!(1.0) / pz.filter_tc.into();
+                    raw.power_zones[i].filter_a = filter_a;
+                    raw.power_zones[i].filter_a_neg = f32!(1.0) - filter_a;
+                    #[ver(V >= V13_0B4)]
+                    raw.power_zones[i].unk_10 = 1320000000;
+                }
+
+                #[ver(V >= V13_0B4 && G >= G14X)]
+                for (i, j) in raw.hws2.g14.curve2.t1.iter().enumerate() {
+                    raw.unk_hws2[i] = if *j == 0xffff { 0 } else { j / 2 };
+                }
+
+                Ok(())
+            })
+        })
+    }
+
+    /// Create the HwDataB structure. This mostly contains GPU-related configuration.
+    fn hwdata_b(&mut self) -> Result<GpuObject<HwDataB::ver>> {
+        self.alloc.private.new_init(init::zeroed(), |_inner, _ptr| {
+            let cfg = &self.cfg;
+            let dyncfg = &self.dyncfg;
+            try_init!(raw::HwDataB::ver {
+                // Userspace VA map related
+                #[ver(V < V13_0B4)]
+                unk_0: U64(0x13_00000000),
+                unk_8: U64(0x14_00000000),
+                #[ver(V < V13_0B4)]
+                unk_10: U64(0x1_00000000),
+                unk_18: U64(0xffc00000),
+                // USC start
+                unk_20: U64(0), // U64(0x11_00000000),
+                unk_28: U64(0), // U64(0x11_00000000),
+                // Unknown page
+                //unk_30: U64(0x6f_ffff8000),
+                unk_30: U64(mmu::IOVA_UNK_PAGE),
+                timestamp_area_base: U64(gpu::IOVA_KERN_TIMESTAMP_RANGE.start),
+                // TODO: yuv matrices
+                chip_id: cfg.chip_id,
+                unk_454: cfg.db.unk_454,
+                unk_458: 0x1,
+                unk_460: 0x1,
+                unk_464: 0x1,
+                unk_468: 0x1,
+                unk_47c: 0x1,
+                unk_484: 0x1,
+                unk_48c: 0x1,
+                base_clock_khz: cfg.base_clock_hz / 1000,
+                power_sample_period: dyncfg.pwr.power_sample_period,
+                unk_49c: 0x1,
+                unk_4a0: 0x1,
+                unk_4a4: 0x1,
+                unk_4c0: 0x1f,
+                unk_4e0: U64(cfg.db.unk_4e0),
+                unk_4f0: 0x1,
+                unk_4f4: 0x1,
+                unk_504: 0x31,
+                unk_524: 0x1, // use_secure_cache_flush
+                unk_534: cfg.db.unk_534,
+                num_frags: dyncfg.id.num_frags * dyncfg.id.num_clusters,
+                unk_554: 0x1,
+                uat_ttb_base: U64(dyncfg.uat_ttb_base),
+                gpu_core_id: cfg.gpu_core as u32,
+                gpu_rev_id: dyncfg.id.gpu_rev_id as u32,
+                num_cores: dyncfg.id.num_cores * dyncfg.id.num_clusters,
+                max_pstate: dyncfg.pwr.perf_states.len() as u32 - 1,
+                #[ver(V < V13_0B4)]
+                num_pstates: dyncfg.pwr.perf_states.len() as u32,
+                #[ver(V < V13_0B4)]
+                min_sram_volt: dyncfg.pwr.min_sram_microvolt / 1000,
+                #[ver(V < V13_0B4)]
+                unk_ab8: cfg.db.unk_ab8,
+                #[ver(V < V13_0B4)]
+                unk_abc: cfg.db.unk_abc,
+                #[ver(V < V13_0B4)]
+                unk_ac0: 0x1020,
+
+                #[ver(V >= V13_0B4)]
+                unk_ae4: Array::new([0x0, 0x3, 0x7, 0x7]),
+                #[ver(V < V13_0B4)]
+                unk_ae4: Array::new([0x0, 0xf, 0x3f, 0x3f]),
+                unk_b10: 0x1,
+                timer_offset: U64(0),
+                unk_b24: 0x1,
+                unk_b28: 0x1,
+                unk_b2c: 0x1,
+                unk_b30: cfg.db.unk_b30,
+                #[ver(V >= V13_0B4)]
+                unk_b38_0: 1,
+                #[ver(V >= V13_0B4)]
+                unk_b38_4: 1,
+                unk_b38: Array::new([0xffffffff; 12]),
+                #[ver(V >= V13_0B4 && V < V13_3)]
+                unk_c3c: 0x19,
+                #[ver(V >= V13_3)]
+                unk_c3c: 0x1a,
+                ..Zeroable::zeroed()
+            })
+            .chain(|raw| {
+                #[ver(V >= V13_3)]
+                for i in 0..16 {
+                    raw.unk_arr_0[i] = i as u32;
+                }
+
+                let base_ps = self.dyncfg.pwr.perf_base_pstate as usize;
+                let max_ps = self.dyncfg.pwr.perf_max_pstate as usize;
+                let base_freq = self.dyncfg.pwr.perf_states[base_ps].freq_hz;
+                let max_freq = self.dyncfg.pwr.perf_states[max_ps].freq_hz;
+
+                for (i, ps) in self.dyncfg.pwr.perf_states.iter().enumerate() {
+                    raw.frequencies[i] = ps.freq_hz / 1000000;
+                    for (j, mv) in ps.volt_mv.iter().enumerate() {
+                        let sram_mv = (*mv).max(self.dyncfg.pwr.min_sram_microvolt / 1000);
+                        raw.voltages[i][j] = *mv;
+                        raw.voltages_sram[i][j] = sram_mv;
+                    }
+                    for j in ps.volt_mv.len()..raw.voltages[i].len() {
+                        raw.voltages[i][j] = raw.voltages[i][0];
+                        raw.voltages_sram[i][j] = raw.voltages_sram[i][0];
+                    }
+                    raw.sram_k[i] = self.cfg.sram_k;
+                    raw.rel_max_powers[i] = ps.pwr_mw * 100 / self.dyncfg.pwr.max_power_mw;
+                    raw.rel_boost_freqs[i] = if i > base_ps {
+                        (ps.freq_hz - base_freq) / ((max_freq - base_freq) / 100)
+                    } else {
+                        0
+                    };
+                }
+
+                #[ver(V >= V13_0B4)]
+                if let Some(csafr) = self.dyncfg.pwr.csafr.as_ref() {
+                    let aux = &mut raw.aux_ps;
+                    aux.cs_max_pstate = (csafr.perf_states_cs.len() - 1).try_into()?;
+                    aux.afr_max_pstate = (csafr.perf_states_afr.len() - 1).try_into()?;
+
+                    for (i, ps) in csafr.perf_states_cs.iter().enumerate() {
+                        aux.cs_frequencies[i] = ps.freq_hz / 1000000;
+                        for (j, mv) in ps.volt_mv.iter().enumerate() {
+                            let sram_mv = (*mv).max(csafr.min_sram_microvolt / 1000);
+                            aux.cs_voltages[i][j] = *mv;
+                            aux.cs_voltages_sram[i][j] = sram_mv;
+                        }
+                    }
+
+                    for (i, ps) in csafr.perf_states_afr.iter().enumerate() {
+                        aux.afr_frequencies[i] = ps.freq_hz / 1000000;
+                        for (j, mv) in ps.volt_mv.iter().enumerate() {
+                            let sram_mv = (*mv).max(csafr.min_sram_microvolt / 1000);
+                            aux.afr_voltages[i][j] = *mv;
+                            aux.afr_voltages_sram[i][j] = sram_mv;
+                        }
+                    }
+                }
+
+                // Special case override for T602x
+                #[ver(G == G14X)]
+                if dyncfg.id.gpu_rev_id == hw::GpuRevisionID::B1 {
+                    raw.gpu_rev_id = hw::GpuRevisionID::B0 as u32;
+                }
+
+                Ok(())
+            })
+        })
+    }
+
+    /// Create the Globals structure, which contains global firmware config including more power
+    /// configuration data and globals used to exchange state between the firmware and driver.
+    fn globals(&mut self) -> Result<GpuObject<Globals::ver>> {
+        self.alloc.private.new_init(init::zeroed(), |_inner, _ptr| {
+            let cfg = &self.cfg;
+            let dyncfg = &self.dyncfg;
+            let pwr = &dyncfg.pwr;
+            let period_ms = pwr.power_sample_period;
+            let period_s = F32::from(period_ms) / f32!(1000.0);
+            let avg_power_filter_tc_periods = pwr.avg_power_filter_tc_ms / period_ms;
+
+            let max_ps = pwr.perf_max_pstate;
+            let max_ps_scaled = 100 * max_ps;
+
+            try_init!(raw::Globals::ver {
+                //ktrace_enable: 0xffffffff,
+                ktrace_enable: 0,
+                #[ver(V >= V13_2)]
+                unk_24_0: 3000,
+                unk_24: 0,
+                #[ver(V >= V13_0B4)]
+                debug: 0,
+                unk_28: 1,
+                #[ver(G >= G14X)]
+                unk_2c_0: 1,
+                #[ver(V >= V13_0B4 && G < G14X)]
+                unk_2c_0: 0,
+                unk_2c: 1,
+                unk_30: 0,
+                unk_34: 120,
+                sub <- try_init!(raw::GlobalsSub::ver {
+                    unk_54: cfg.global_unk_54,
+                    unk_56: 40,
+                    unk_58: 0xffff,
+                    unk_5e: U32(1),
+                    unk_66: U32(1),
+                    ..Zeroable::zeroed()
+                }),
+                unk_8900: 1,
+                pending_submissions: AtomicU32::new(0),
+                max_power: pwr.max_power_mw,
+                max_pstate_scaled: max_ps_scaled,
+                max_pstate_scaled_2: max_ps_scaled,
+                max_pstate_scaled_3: max_ps_scaled,
+                power_zone_count: pwr.power_zones.len() as u32,
+                avg_power_filter_tc_periods: avg_power_filter_tc_periods,
+                avg_power_ki_dt: pwr.avg_power_ki_only * period_s,
+                avg_power_kp: pwr.avg_power_kp,
+                avg_power_min_duty_cycle: pwr.avg_power_min_duty_cycle,
+                avg_power_target_filter_tc: pwr.avg_power_target_filter_tc,
+                unk_89bc: cfg.da.unk_8cc,
+                fast_die0_release_temp: 100 * pwr.fast_die0_release_temp,
+                unk_89c4: cfg.da.unk_87c,
+                fast_die0_prop_tgt_delta: 100 * pwr.fast_die0_prop_tgt_delta,
+                fast_die0_kp: pwr.fast_die0_proportional_gain,
+                fast_die0_ki_dt: pwr.fast_die0_integral_gain * period_s,
+                unk_89e0: 1,
+                max_power_2: pwr.max_power_mw,
+                ppm_kp: pwr.ppm_kp,
+                ppm_ki_dt: pwr.ppm_ki * period_s,
+                #[ver(V >= V13_0B4)]
+                unk_89f4_8: 1,
+                unk_89f4: 0,
+                hws1 <- Self::hw_shared1(cfg),
+                hws2 <- Self::hw_shared2(cfg, dyncfg),
+                hws3 <- Self::hw_shared3(cfg),
+                #[ver(V >= V13_0B4)]
+                idle_off_standby_timer: pwr.idle_off_standby_timer,
+                #[ver(V >= V13_0B4)]
+                unk_hws2_4: cfg.unk_hws2_4.map(Array::new).unwrap_or_default(),
+                #[ver(V >= V13_0B4)]
+                unk_hws2_24: cfg.unk_hws2_24,
+                unk_900c: 1,
+                #[ver(V >= V13_0B4)]
+                unk_9010_0: 1,
+                #[ver(V >= V13_0B4)]
+                unk_903c: 1,
+                #[ver(V < V13_0B4)]
+                unk_903c: 0,
+                fault_control: *module_parameters::fault_control.get(),
+                do_init: 1,
+                progress_check_interval_3d: 40,
+                progress_check_interval_ta: 10,
+                progress_check_interval_cl: 250,
+                #[ver(V >= V13_0B4)]
+                unk_1102c_0: 1,
+                #[ver(V >= V13_0B4)]
+                unk_1102c_4: 1,
+                #[ver(V >= V13_0B4)]
+                unk_1102c_8: 100,
+                #[ver(V >= V13_0B4)]
+                unk_1102c_c: 1,
+                idle_off_delay_ms: AtomicU32::new(pwr.idle_off_delay_ms),
+                fender_idle_off_delay_ms: pwr.fender_idle_off_delay_ms,
+                fw_early_wake_timeout_ms: pwr.fw_early_wake_timeout_ms,
+                cl_context_switch_timeout_ms: 40,
+                #[ver(V >= V13_0B4)]
+                cl_kill_timeout_ms: 50,
+                #[ver(V >= V13_0B4)]
+                unk_11edc: 0,
+                #[ver(V >= V13_0B4)]
+                unk_11efc: 0,
+                ..Zeroable::zeroed()
+            })
+            .chain(|raw| {
+                for (i, pz) in self.dyncfg.pwr.power_zones.iter().enumerate() {
+                    raw.power_zones[i].target = pz.target;
+                    raw.power_zones[i].target_off = pz.target - pz.target_offset;
+                    raw.power_zones[i].filter_tc = pz.filter_tc;
+                }
+
+                if let Some(tab) = self.cfg.global_tab.as_ref() {
+                    for (i, x) in tab.iter().enumerate() {
+                        raw.unk_118ec[i] = *x;
+                    }
+                    raw.unk_118e8 = 1;
+                }
+
+                Ok(())
+            })
+        })
+    }
+
+    /// Create the RuntimePointers structure, which contains pointers to most of the other
+    /// structures including the ring buffer channels, statistics structures, and HwDataA/HwDataB.
+    fn runtime_pointers(&mut self) -> Result<GpuObject<RuntimePointers::ver>> {
+        let hwa = self.hwdata_a()?;
+        let hwb = self.hwdata_b()?;
+
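+        // If reference HwData blobs were provided in the dynamic config, compare the freshly
+        // built structures byte-by-byte against them and log the first mismatch (debug aid).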
+        if self.dyncfg.hw_data_a.len() > 0 {
+            let mut mismatch = false;
+            unsafe {
+                hwa.with(|raw, _inner| {
+                    let sla = core::slice::from_raw_parts(
+                        raw as *const raw::HwDataA::ver as *const u8,
+                        core::mem::size_of::<raw::HwDataA::ver>(),
+                    );
+
+                    dev_info!(
+                        self.dev.as_ref(),
+                        "Hwdata A len {} {}",
+                        sla.len(),
+                        self.dyncfg.hw_data_a.len()
+                    );
+                    for i in 0..core::cmp::min(sla.len(), self.dyncfg.hw_data_a.len()) {
+                        if sla[i] != self.dyncfg.hw_data_a[i] {
+                            mismatch = true;
+                            dev_err!(
+                                self.dev.as_ref(),
+                                "Hwdata A first mismatch at {i} {:#02x} != {:#02x}",
+                                sla[i],
+                                self.dyncfg.hw_data_a[i]
+                            );
+                            break;
+                        }
+                    }
+                });
+            }
+            if !mismatch {
+                dev_info!(self.dev.as_ref(), "Hwdata A match")
+            }
+        }
+
+        if self.dyncfg.hw_data_b.len() > 0 {
+            let mut mismatch = false;
+            unsafe {
+                hwb.with(|raw, _inner| {
+                    let sla = core::slice::from_raw_parts(
+                        raw as *const raw::HwDataB::ver as *const u8,
+                        core::mem::size_of::<raw::HwDataB::ver>(),
+                    );
+
+                    dev_info!(
+                        self.dev.as_ref(),
+                        "Hwdata B len {} {}",
+                        sla.len(),
+                        self.dyncfg.hw_data_b.len()
+                    );
+                    for i in 0..core::cmp::min(sla.len(), self.dyncfg.hw_data_b.len()) {
+                        if sla[i] != self.dyncfg.hw_data_b[i] {
+                            mismatch = true;
+                            dev_err!(
+                                self.dev.as_ref(),
+                                "Hwdata B first mismatch at {i} {:#02x} != {:#02x}",
+                                sla[i],
+                                self.dyncfg.hw_data_b[i]
+                            );
+                            break;
+                        }
+                    }
+                });
+            }
+            if !mismatch {
+                dev_info!(self.dev.as_ref(), "Hwdata B match")
+            }
+        }
+
+        let mut buffer_mgr_ctl = gem::new_kernel_object(self.dev, 0x4000)?;
+        buffer_mgr_ctl.vmap()?.as_mut_slice().fill(0);
+
+        GpuObject::new_init_prealloc(
+            self.alloc.private.alloc_object()?,
+            |_ptr| {
+                let alloc = &mut *self.alloc;
+                try_init!(RuntimePointers::ver {
+                    stats <- {
+                        let alloc = &mut *alloc;
+                        try_init!(Stats::ver {
+                            vtx: alloc.private.new_default::<GpuGlobalStatsVtx>()?,
+                            frag: alloc.private.new_init(
+                                init::zeroed::<GpuGlobalStatsFrag::ver>(),
+                                |_inner, _ptr| {
+                                    try_init!(raw::GpuGlobalStatsFrag::ver {
+                                        total_cmds: 0,
+                                        unk_4: 0,
+                                        stats: Default::default(),
+                                    })
+                                }
+                            )?,
+                            comp: alloc.private.new_default::<GpuStatsComp>()?,
+                        })
+                    },
+
+                    hwdata_a: hwa,
+                    unkptr_190: alloc.private.array_empty_tagged(0x80, b"I190")?,
+                    unkptr_198: alloc.private.array_empty_tagged(0xc0, b"I198")?,
+                    hwdata_b: hwb,
+
+                    unkptr_1b8: alloc.private.array_empty_tagged(0x1000, b"I1B8")?,
+                    unkptr_1c0: alloc.private.array_empty_tagged(0x300, b"I1C0")?,
+                    unkptr_1c8: alloc.private.array_empty_tagged(0x1000, b"I1C8")?,
+
+                    buffer_mgr_ctl,
+                    buffer_mgr_ctl_low_mapping: None,
+                    buffer_mgr_ctl_high_mapping: None,
+                })
+            },
+            |inner, _ptr| {
+                try_init!(raw::RuntimePointers::ver {
+                    pipes: Default::default(),
+                    device_control: Default::default(),
+                    event: Default::default(),
+                    fw_log: Default::default(),
+                    ktrace: Default::default(),
+                    stats: Default::default(),
+
+                    stats_vtx: inner.stats.vtx.gpu_pointer(),
+                    stats_frag: inner.stats.frag.gpu_pointer(),
+                    stats_comp: inner.stats.comp.gpu_pointer(),
+
+                    hwdata_a: inner.hwdata_a.gpu_pointer(),
+                    unkptr_190: inner.unkptr_190.gpu_pointer(),
+                    unkptr_198: inner.unkptr_198.gpu_pointer(),
+                    hwdata_b: inner.hwdata_b.gpu_pointer(),
+                    hwdata_b_2: inner.hwdata_b.gpu_pointer(),
+
+                    fwlog_buf: None,
+
+                    unkptr_1b8: inner.unkptr_1b8.gpu_pointer(),
+
+                    #[ver(G < G14X)]
+                    unkptr_1c0: inner.unkptr_1c0.gpu_pointer(),
+                    #[ver(G < G14X)]
+                    unkptr_1c8: inner.unkptr_1c8.gpu_pointer(),
+
+                    buffer_mgr_ctl_gpu_addr: U64(gpu::IOVA_KERN_GPU_BUFMGR_LOW),
+                    buffer_mgr_ctl_fw_addr: U64(gpu::IOVA_KERN_GPU_BUFMGR_HIGH),
+
+                    __pad0: Default::default(),
+                    unk_160: U64(0),
+                    unk_168: U64(0),
+                    unk_1d0: 0,
+                    unk_1d4: 0,
+                    unk_1d8: Default::default(),
+
+                    __pad1: Default::default(),
+                    gpu_scratch: raw::RuntimeScratch::ver {
+                        unk_6b38: 0xff,
+                        ..Default::default()
+                    },
+                })
+            },
+        )
+    }
+
+    /// Create the FwStatus structure, which is used to coordinate the firmware halt state between
+    /// the firmware and the driver.
+    fn fw_status(&mut self) -> Result<GpuObject<FwStatus>> {
+        self.alloc
+            .shared
+            .new_object(Default::default(), |_inner| Default::default())
+    }
+
+    /// Create one UatLevelInfo structure, which describes one level of translation for the UAT MMU.
+    fn uat_level_info(
+        cfg: &'static hw::HwConfig,
+        index_shift: usize,
+        num_entries: usize,
+    ) -> raw::UatLevelInfo {
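+        // The VA bits selecting an entry at this level are index_shift..index_shift +
+        // log2(num_entries). For example, with the values used by build() below, the mid level
+        // (index_shift = 25, num_entries = 2048) gets index_mask = 2047 << 25 = 0xf_fe00_0000,
+        // i.e. VA bits 25..=35.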
+        raw::UatLevelInfo {
+            index_shift: index_shift as _,
+            unk_1: 14,
+            unk_2: 14,
+            unk_3: 8,
+            unk_4: 0x4000,
+            num_entries: num_entries as _,
+            unk_8: U64(1),
+            unk_10: U64(((1u64 << cfg.uat_oas) - 1) & !(mmu::UAT_PGMSK as u64)),
+            index_mask: U64(((num_entries - 1) << index_shift) as u64),
+        }
+    }
+
+    /// Build the top-level InitData object.
+    #[inline(never)]
+    pub(crate) fn build(&mut self) -> Result<KBox<GpuObject<InitData::ver>>> {
+        let runtime_pointers = self.runtime_pointers()?;
+        let globals = self.globals()?;
+        let fw_status = self.fw_status()?;
+        let shared_ro = &mut self.alloc.shared_ro;
+
+        if self.dyncfg.hw_globals.len() > 0 {
+            let mut mismatch = false;
+            unsafe {
+                globals.with(|raw, _inner| {
+                    let sla = core::slice::from_raw_parts(
+                        raw as *const raw::Globals::ver as *const u8,
+                        core::mem::size_of::<raw::Globals::ver>(),
+                    );
+
+                    dev_info!(
+                        self.dev.as_ref(),
+                        "Globals len {} {}",
+                        sla.len(),
+                        self.dyncfg.hw_globals.len()
+                    );
+                    for i in 0..core::cmp::min(sla.len(), self.dyncfg.hw_globals.len()) {
+                        if sla[i] != self.dyncfg.hw_globals[i] {
+                            mismatch = true;
+                            dev_err!(
+                                self.dev.as_ref(),
+                                "Globals first mismatch at {i} {:#02x} != {:#02x}",
+                                sla[i],
+                                self.dyncfg.hw_globals[i]
+                            );
+                            break;
+                        }
+                    }
+                });
+            }
+            if !mismatch {
+                dev_info!(self.dev.as_ref(), "Globals match")
+            }
+        }
+        let obj = self.alloc.private.new_init(
+            try_init!(InitData::ver {
+                unk_buf: shared_ro.array_empty_tagged(0x4000, b"IDTA")?,
+                runtime_pointers,
+                globals,
+                fw_status,
+            }),
+            |inner, _ptr| {
+                let cfg = &self.cfg;
+                try_init!(raw::InitData::ver {
+                    #[ver(V == V13_5 && G != G14X)]
+                    ver_info: Array::new([0x6ba0, 0x1f28, 0x601, 0xb0]),
+                    #[ver(V == V13_5 && G == G14X)]
+                    ver_info: Array::new([0xb390, 0x70f8, 0x601, 0xb0]),
+                    unk_buf: inner.unk_buf.gpu_pointer(),
+                    unk_8: 0,
+                    unk_c: 0,
+                    runtime_pointers: inner.runtime_pointers.gpu_pointer(),
+                    globals: inner.globals.gpu_pointer(),
+                    fw_status: inner.fw_status.gpu_pointer(),
+                    uat_page_size: 0x4000,
+                    uat_page_bits: 14,
+                    uat_num_levels: 3,
+                    uat_level_info: Array::new([
+                        Self::uat_level_info(cfg, 36, 8),
+                        Self::uat_level_info(cfg, 25, 2048),
+                        Self::uat_level_info(cfg, 14, 2048),
+                    ]),
+                    __pad0: Default::default(),
+                    host_mapped_fw_allocations: 1,
+                    unk_ac: 0,
+                    unk_b0: 0,
+                    unk_b4: 0,
+                    unk_b8: 0,
+                })
+            },
+        )?;
+        Ok(KBox::new(obj, GFP_KERNEL)?)
+    }
+}
diff --git a/drivers/gpu/drm/asahi/mem.rs b/drivers/gpu/drm/asahi/mem.rs
new file mode 100644
index 00000000000000..60a64e23a161c5
--- /dev/null
+++ b/drivers/gpu/drm/asahi/mem.rs
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! ARM64 low level memory operations.
+//!
+//! This GPU uses CPU-side `tlbi` outer-shareable instructions to manage its TLBs.
+//! Yes, really. Even though the VA address spaces are unrelated.
+//!
+//! Right now we pick our own ASIDs and don't coordinate with the CPU. This might result
+//! in needless TLB shootdowns on the CPU side... TODO: fix this.
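+//!
+//! Expected usage from the MMU code (illustrative only): update the page tables first, then
+//! invalidate the affected range and order it with a barrier:
+//!
+//! ```ignore
+//! mem::tlbi_range(asid, iova, size);
+//! mem::sync();
+//! ```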
+
+use core::arch::asm;
+use core::cmp::min;
+
+use crate::debug::*;
+use crate::mmu;
+
+type Asid = u8;
+
+/// Invalidate the entire GPU TLB.
+#[inline(always)]
+pub(crate) fn tlbi_all() {
+    // SAFETY: tlbi is always safe by definition
+    unsafe {
+        asm!(".arch armv8.4-a", "tlbi vmalle1os",);
+    }
+}
+
+/// Invalidate all TLB entries for a given ASID.
+#[inline(always)]
+pub(crate) fn tlbi_asid(asid: Asid) {
+    if debug_enabled(DebugFlags::ConservativeTlbi) {
+        tlbi_all();
+        sync();
+        return;
+    }
+
+    // SAFETY: tlbi is always safe by definition
+    unsafe {
+        asm!(
+            ".arch armv8.4-a",
+            "tlbi aside1os, {x}",
+            x = in(reg) ((asid as u64) << 48)
+        );
+    }
+}
+
+/// Invalidate a single page for a given ASID.
+#[inline(always)]
+pub(crate) fn tlbi_page(asid: Asid, va: usize) {
+    if debug_enabled(DebugFlags::ConservativeTlbi) {
+        tlbi_all();
+        sync();
+        return;
+    }
+
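+    // Operand layout for `tlbi vae1os`: ASID in bits [63:48], VA[55:12] in bits [43:0]. The low
+    // two bits of the VA field are cleared to align to the 16K page size.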
+    let val: u64 = ((asid as u64) << 48) | ((va as u64 >> 12) & 0xffffffffffc);
+    // SAFETY: tlbi is always safe by definition
+    unsafe {
+        asm!(
+            ".arch armv8.4-a",
+            "tlbi vae1os, {x}",
+            x = in(reg) val
+        );
+    }
+}
+
+/// Invalidate a range of pages for a given ASID.
+#[inline(always)]
+pub(crate) fn tlbi_range(asid: Asid, va: usize, len: usize) {
+    if debug_enabled(DebugFlags::ConservativeTlbi) {
+        tlbi_all();
+        sync();
+        return;
+    }
+
+    if len == 0 {
+        return;
+    }
+
+    let start_pg = va >> mmu::UAT_PGBIT;
+    let end_pg = (va + len + mmu::UAT_PGMSK) >> mmu::UAT_PGBIT;
+
+    let mut val: u64 = ((asid as u64) << 48) | (2 << 46) | (start_pg as u64 & 0x1fffffffff);
+    let pages = end_pg - start_pg;
+
+    // Guess? It's possible that the page count is in terms of 4K pages
+    // when the CPU is in 4K mode...
+    #[cfg(CONFIG_ARM64_4K_PAGES)]
+    let pages = 4 * pages;
+
+    if pages == 1 {
+        tlbi_page(asid, va);
+        return;
+    }
+
+    // Page count is always in units of 2
+    let num = ((pages + 1) >> 1) as u64;
+    // base: 5 bits
+    // exp: 2 bits
+    // pages = (base + 1) << (5 * exp + 1)
+    // 0:00000 ->                     2 pages = 2 << 0
+    // 0:11111 ->                32 * 2 pages = 2 << 5
+    // 1:00000 ->            1 * 32 * 2 pages = 2 << 5
+    // 1:11111 ->           32 * 32 * 2 pages = 2 << 10
+    // 2:00000 ->       1 * 32 * 32 * 2 pages = 2 << 10
+    // 2:11111 ->      32 * 32 * 32 * 2 pages = 2 << 15
+    // 3:00000 ->  1 * 32 * 32 * 32 * 2 pages = 2 << 15
+    // 3:11111 -> 32 * 32 * 32 * 32 * 2 pages = 2 << 20
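+    //
+    // Worked example (illustrative): for pages = 100, num = (100 + 1) >> 1 = 50.
+    // 50 needs 6 significant bits, so exp = min(3, 6 / 5) = 1, bits = 5, and
+    // base = ceil(50 / 32) = 2. The field written below is base - 1 = 1, which
+    // decodes as (1 + 1) << (5 * 1 + 1) = 128 pages, i.e. the requested range is
+    // rounded up to the next representable size.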
+    let exp = min(3, (64 - num.leading_zeros()) / 5);
+    let bits = 5 * exp;
+    let mut base = (num + (1 << bits) - 1) >> bits;
+
+    val |= (exp as u64) << 44;
+
+    while base > 32 {
+        // SAFETY: tlbi is always safe by definition
+        unsafe {
+            asm!(
+                ".arch armv8.4-a",
+                "tlbi rvae1os, {x}",
+                x = in(reg) val | (31 << 39)
+            );
+        }
+        base -= 32;
+    }
+
+    // SAFETY: tlbi is always safe by definition
+    unsafe {
+        asm!(
+            ".arch armv8.4-a",
+            "tlbi rvae1os, {x}",
+            x = in(reg) val | ((base - 1) << 39)
+        );
+    }
+}
+
+/// Issue a memory barrier (`dsb sy`).
+#[inline(always)]
+pub(crate) fn sync() {
+    // SAFETY: Barriers are always safe
+    unsafe {
+        asm!("dsb sy");
+    }
+}
diff --git a/drivers/gpu/drm/asahi/microseq.rs b/drivers/gpu/drm/asahi/microseq.rs
new file mode 100644
index 00000000000000..cbdb5de62e9218
--- /dev/null
+++ b/drivers/gpu/drm/asahi/microseq.rs
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU Micro operation sequence builder
+//!
+//! As part of a single job submission to the GPU, the GPU firmware interprets a sequence of
+//! commands that we call a "microsequence". These are responsible for setting up the job execution,
+//! timestamping the process, waiting for completion, tearing down any resources, and signaling
+//! completion to the driver via the event stamp mechanism.
+//!
+//! Although the microsequences used by the macOS driver are usually quite uniform and simple, the
+//! firmware actually implements enough operations to make this interpreter Turing-complete (!).
+//! Most of those aren't implemented yet, since we don't need them, but they could come in handy in
+//! the future to do strange things or work around firmware bugs...
+//!
+//! This module simply implements a collection of microsequence operations that can be appended to
+//! and later concatenated into one buffer, ready for firmware execution.
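+//!
+//! A minimal usage sketch (the operation value here is a stand-in; the real operation
+//! structs live in [`crate::fw::microseq`] and are driver-internal):
+//!
+//! ```ignore
+//! let mut builder = Builder::new();
+//! let op_offset = builder.add(some_op)?; // `some_op` is any `microseq::Operation`
+//! let seq: MicroSequence = builder.build(&mut kernel_alloc)?;
+//! ```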
+
+use crate::fw::microseq;
+pub(crate) use crate::fw::microseq::*;
+use crate::fw::types::*;
+use kernel::prelude::*;
+
+/// MicroSequence object type, which is just an opaque byte array.
+pub(crate) type MicroSequence = GpuArray<u8>;
+
+/// MicroSequence builder.
+pub(crate) struct Builder {
+    ops: KVec<u8>,
+}
+
+impl Builder {
+    /// Create a new Builder object
+    pub(crate) fn new() -> Builder {
+        Builder { ops: KVec::new() }
+    }
+
+    /// Get the relative offset from the current end of the sequence to a given target offset.
+    ///
+    /// Used for relative jumps.
+    pub(crate) fn offset_to(&self, target: i32) -> i32 {
+        target - self.ops.len() as i32
+    }
+
+    /// Add an operation to the end of the sequence.
+    pub(crate) fn add<T: microseq::Operation>(&mut self, op: T) -> Result<i32> {
+        let off = self.ops.len();
+        let p: *const T = &op;
+        let p: *const u8 = p as *const u8;
+        // SAFETY: Microseq operations always have no padding bytes, so it is safe to
+        // access them as a byte slice.
+        let s: &[u8] = unsafe { core::slice::from_raw_parts(p, core::mem::size_of::<T>()) };
+        self.ops.extend_from_slice(s, GFP_KERNEL)?;
+        Ok(off as i32)
+    }
+
+    /// Collect all submitted operations into a finalized GPU object.
+    pub(crate) fn build(self, alloc: &mut Allocator) -> Result<MicroSequence> {
+        let mut array = alloc.array_empty::<u8>(self.ops.len())?;
+
+        array.as_mut_slice().clone_from_slice(self.ops.as_slice());
+        Ok(array)
+    }
+}
diff --git a/drivers/gpu/drm/asahi/mmu.rs b/drivers/gpu/drm/asahi/mmu.rs
new file mode 100644
index 00000000000000..c52d20113ddd91
--- /dev/null
+++ b/drivers/gpu/drm/asahi/mmu.rs
@@ -0,0 +1,1574 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU UAT (MMU) management
+//!
+//! AGX GPUs use an MMU called the UAT, which is largely compatible with the ARM64 page table
+//! format. This module manages the global MMU structures, including a shared handoff structure
+//! that is used to coordinate VM management operations with the firmware, the TTBAT which points
+//! to currently active GPU VM contexts, as well as the individual `Vm` operations to map and
+//! unmap buffer objects into a single user or kernel address space.
+//!
+//! The actual page table management is in the `pgtable` module.
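+//!
+//! A rough usage sketch of the pieces in this module (illustrative only; error handling
+//! and the surrounding driver context are omitted):
+//!
+//! ```ignore
+//! let vm = uat.new_vm(id, kernel_range)?;   // create a user VM
+//! let bind = uat.bind(&vm)?;                // reserve a TTBAT slot (ASID)
+//! let mapping = vm.map_in_range(
+//!     &gem_obj,                             // a gem::Object to map
+//!     0..size,                              // object byte range
+//!     UAT_PGSZ as u64,                      // alignment
+//!     IOVA_USER_RANGE,                      // VA window to allocate from
+//!     prot,                                 // a pgtable::Prot value
+//!     true,                                 // add a trailing guard page
+//! )?;
+//! // Dropping `mapping` unmaps and flushes; dropping `bind` releases the slot
+//! // once its user count reaches zero.
+//! ```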
+
+use core::fmt::Debug;
+use core::mem::size_of;
+use core::num::NonZeroUsize;
+use core::ops::Range;
+use core::sync::atomic::{fence, AtomicU32, AtomicU64, AtomicU8, Ordering};
+use core::time::Duration;
+
+use kernel::{
+    addr::PhysicalAddr,
+    c_str, delay, device, drm,
+    drm::{gem::BaseObject, gpuvm, mm},
+    error::Result,
+    io,
+    io::resource::Resource,
+    prelude::*,
+    static_lock_class,
+    sync::{
+        lock::{mutex::MutexBackend, Guard},
+        Arc, Mutex,
+    },
+    time::{clock, Now},
+    types::{ARef, ForeignOwnable},
+};
+
+use crate::debug::*;
+use crate::driver::AsahiDriver;
+use crate::module_parameters;
+use crate::no_debug;
+use crate::{driver, fw, gem, hw, mem, pgtable, slotalloc, util::RangeExt};
+
+// KernelMapping protection types
+pub(crate) use crate::pgtable::Prot;
+pub(crate) use pgtable::prot::*;
+pub(crate) use pgtable::{UatPageTable, UAT_PGBIT, UAT_PGMSK, UAT_PGSZ};
+
+use pgtable::UAT_IAS;
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Mmu;
+
+/// PPL magic number for the handoff region
+const PPL_MAGIC: u64 = 0x4b1d000000000002;
+
+/// Number of supported context entries in the TTBAT
+const UAT_NUM_CTX: usize = 64;
+/// First context available for users
+const UAT_USER_CTX_START: usize = 1;
+/// Number of available user contexts
+const UAT_USER_CTX: usize = UAT_NUM_CTX - UAT_USER_CTX_START;
+
+/// Lower/user base VA
+pub(crate) const IOVA_USER_BASE: u64 = UAT_PGSZ as u64;
+/// Lower/user top VA
+pub(crate) const IOVA_USER_TOP: u64 = 1 << (UAT_IAS as u64);
+/// Lower/user VA range
+pub(crate) const IOVA_USER_RANGE: Range<u64> = IOVA_USER_BASE..IOVA_USER_TOP;
+
+/// Upper/kernel base VA
+const IOVA_TTBR1_BASE: u64 = 0xffffff8000000000;
+/// Driver-managed kernel base VA
+const IOVA_KERN_BASE: u64 = 0xffffffa000000000;
+/// Driver-managed kernel top VA
+const IOVA_KERN_TOP: u64 = 0xffffffb000000000;
+/// Driver-managed kernel VA range
+const IOVA_KERN_RANGE: Range<u64> = IOVA_KERN_BASE..IOVA_KERN_TOP;
+/// Full kernel VA range
+const IOVA_KERN_FULL_RANGE: Range<u64> = IOVA_TTBR1_BASE..(!UAT_PGMSK as u64);
+
+const TTBR_VALID: u64 = 0x1; // BIT(0)
+const TTBR_ASID_SHIFT: usize = 48;
+
+/// Address of a special dummy page?
+//const IOVA_UNK_PAGE: u64 = 0x6f_ffff8000;
+pub(crate) const IOVA_UNK_PAGE: u64 = IOVA_USER_TOP - 2 * UAT_PGSZ as u64;
+/// User VA range excluding the unk page
+pub(crate) const IOVA_USER_USABLE_RANGE: Range<u64> = IOVA_USER_BASE..IOVA_UNK_PAGE;
+
+/// A pre-allocated memory region for UAT management
+struct UatRegion {
+    base: PhysicalAddr,
+    map: io::mem::Mem,
+}
+
+/// SAFETY: It's safe to send UAT region records across threads.
+unsafe impl Send for UatRegion {}
+/// SAFETY: It's safe to share UAT region records across threads.
+unsafe impl Sync for UatRegion {}
+
+/// Handoff region flush info structure
+#[repr(C)]
+struct FlushInfo {
+    state: AtomicU64,
+    addr: AtomicU64,
+    size: AtomicU64,
+}
+
+/// UAT Handoff region layout
+#[repr(C)]
+struct Handoff {
+    magic_ap: AtomicU64,
+    magic_fw: AtomicU64,
+
+    lock_ap: AtomicU8,
+    lock_fw: AtomicU8,
+    // Implicit padding: 2 bytes
+    turn: AtomicU32,
+    cur_slot: AtomicU32,
+    // Implicit padding: 4 bytes
+    flush: [FlushInfo; UAT_NUM_CTX + 1],
+
+    unk2: AtomicU8,
+    // Implicit padding: 7 bytes
+    unk3: AtomicU64,
+}
+
+const HANDOFF_SIZE: usize = size_of::<Handoff>();
+
+/// One VM slot in the TTBAT
+#[repr(C)]
+struct SlotTTBS {
+    ttb0: AtomicU64,
+    ttb1: AtomicU64,
+}
+
+const SLOTS_SIZE: usize = UAT_NUM_CTX * size_of::<SlotTTBS>();
+
+// We need at least page 0 (ttb0)
+const PAGETABLES_SIZE: usize = UAT_PGSZ;
+
+/// Inner data for a Vm instance. This is reference-counted by the outer Vm object.
+struct VmInner {
+    dev: driver::AsahiDevRef,
+    is_kernel: bool,
+    va_range: Range<u64>,
+    page_table: UatPageTable,
+    mm: mm::Allocator<(), KernelMappingInner>,
+    uat_inner: Arc<UatInner>,
+    binding: Arc<Mutex<VmBinding>>,
+    id: u64,
+}
+
+/// Slot binding-related inner data for a Vm instance.
+struct VmBinding {
+    active_users: usize,
+    binding: Option<slotalloc::Guard<SlotInner>>,
+    bind_token: Option<slotalloc::SlotToken>,
+    ttb: u64,
+}
+
+/// Data associated with a VM <=> BO pairing
+#[pin_data]
+struct VmBo {
+    #[pin]
+    sgt: Mutex<Option<gem::SGTable>>,
+}
+
+impl gpuvm::DriverGpuVmBo for VmBo {
+    fn new() -> impl PinInit<Self> {
+        pin_init!(VmBo {
+            sgt <- Mutex::new_named(None, c_str!("VmBinding")),
+        })
+    }
+}
+
+#[derive(Default)]
+struct StepContext {
+    new_va: Option<Pin<KBox<gpuvm::GpuVa<VmInner>>>>,
+    prev_va: Option<Pin<KBox<gpuvm::GpuVa<VmInner>>>>,
+    next_va: Option<Pin<KBox<gpuvm::GpuVa<VmInner>>>>,
+    vm_bo: Option<ARef<gpuvm::GpuVmBo<VmInner>>>,
+    prot: Prot,
+}
+
+impl gpuvm::DriverGpuVm for VmInner {
+    type Driver = driver::AsahiDriver;
+    type GpuVmBo = VmBo;
+    type StepContext = StepContext;
+
+    fn step_map(
+        self: &mut gpuvm::UpdatingGpuVm<'_, Self>,
+        op: &mut gpuvm::OpMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result {
+        let mut iova = op.addr();
+        let mut left = op.range() as usize;
+        let mut offset = op.offset() as usize;
+
+        let bo = ctx.vm_bo.as_ref().expect("step_map with no BO");
+
+        let one_page = op.flags().contains(gpuvm::GpuVaFlags::SINGLE_PAGE);
+
+        let guard = bo.inner().sgt.lock();
+        for range in guard.as_ref().expect("step_map with no SGT").iter() {
+            let mut addr = range.dma_address();
+            let mut len = range.dma_len();
+
+            if left == 0 {
+                break;
+            }
+
+            if offset > 0 {
+                let skip = len.min(offset);
+                addr += skip;
+                len -= skip;
+                offset -= skip;
+            }
+
+            if len == 0 {
+                continue;
+            }
+
+            assert!(offset == 0);
+
+            if one_page {
+                len = left;
+            } else {
+                len = len.min(left);
+            }
+
+            mod_dev_dbg!(
+                self.dev,
+                "MMU: map: {:#x}:{:#x} -> {:#x} [OP={}]\n",
+                addr,
+                len,
+                iova,
+                one_page
+            );
+
+            self.page_table.map_pages(
+                iova..(iova + len as u64),
+                addr as PhysicalAddr,
+                ctx.prot,
+                one_page,
+            )?;
+
+            left -= len;
+            iova += len as u64;
+        }
+
+        let gpuva = ctx.new_va.take().expect("Multiple step_map calls");
+
+        if op
+            .map_and_link_va(
+                self,
+                gpuva,
+                ctx.vm_bo.as_ref().expect("step_map with no BO"),
+            )
+            .is_err()
+        {
+            dev_err!(
+                self.dev.as_ref(),
+                "map_and_link_va failed: {:#x} [{:#x}] -> {:#x}\n",
+                op.offset(),
+                op.range(),
+                op.addr()
+            );
+            return Err(EINVAL);
+        }
+        Ok(())
+    }
+    fn step_unmap(
+        self: &mut gpuvm::UpdatingGpuVm<'_, Self>,
+        op: &mut gpuvm::OpUnMap<Self>,
+        _ctx: &mut Self::StepContext,
+    ) -> Result {
+        let va = op.va().expect("step_unmap: missing VA");
+
+        mod_dev_dbg!(self.dev, "MMU: unmap: {:#x}:{:#x}\n", va.addr(), va.range());
+
+        self.page_table
+            .unmap_pages(va.addr()..(va.addr() + va.range()))?;
+
+        if let Some(asid) = self.slot() {
+            fence(Ordering::SeqCst);
+            mem::tlbi_range(asid as u8, va.addr() as usize, va.range() as usize);
+            mod_dev_dbg!(
+                self.dev,
+                "MMU: flush range: asid={:#x} start={:#x} len={:#x}\n",
+                asid,
+                va.addr(),
+                va.range(),
+            );
+            mem::sync();
+        }
+
+        if op.unmap_and_unlink_va().is_none() {
+            dev_err!(self.dev.as_ref(), "step_unmap: could not unlink gpuva");
+        }
+        Ok(())
+    }
+    fn step_remap(
+        self: &mut gpuvm::UpdatingGpuVm<'_, Self>,
+        op: &mut gpuvm::OpReMap<Self>,
+        vm_bo: &gpuvm::GpuVmBo<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result {
+        let va = op.unmap().va().expect("No previous VA");
+        let orig_addr = va.addr();
+        let orig_range = va.range();
+
+        // Only unmap the hole between prev/next, if they exist
+        let unmap_start = if let Some(op) = op.prev_map() {
+            op.addr() + op.range()
+        } else {
+            orig_addr
+        };
+
+        let unmap_end = if let Some(op) = op.next_map() {
+            op.addr()
+        } else {
+            orig_addr + orig_range
+        };
+
+        mod_dev_dbg!(
+            self.dev,
+            "MMU: unmap for remap: {:#x}..{:#x} (from {:#x}:{:#x})\n",
+            unmap_start,
+            unmap_end,
+            orig_addr,
+            orig_range
+        );
+
+        let unmap_range = unmap_end - unmap_start;
+
+        self.page_table.unmap_pages(unmap_start..unmap_end)?;
+
+        if let Some(asid) = self.slot() {
+            fence(Ordering::SeqCst);
+            mem::tlbi_range(asid as u8, unmap_start as usize, unmap_range as usize);
+            mod_dev_dbg!(
+                self.dev,
+                "MMU: flush range: asid={:#x} start={:#x} len={:#x}\n",
+                asid,
+                unmap_start,
+                unmap_range,
+            );
+            mem::sync();
+        }
+
+        if op.unmap().unmap_and_unlink_va().is_none() {
+            dev_err!(self.dev.as_ref(), "step_remap: could not unlink gpuva");
+        }
+
+        if let Some(prev_op) = op.prev_map() {
+            let prev_gpuva = ctx
+                .prev_va
+                .take()
+                .expect("Multiple step_remap calls with prev_op");
+            if prev_op.map_and_link_va(self, prev_gpuva, vm_bo).is_err() {
+                dev_err!(self.dev.as_ref(), "step_remap: could not relink prev gpuva");
+                return Err(EINVAL);
+            }
+        }
+
+        if let Some(next_op) = op.next_map() {
+            let next_gpuva = ctx
+                .next_va
+                .take()
+                .expect("Multiple step_remap calls with next_op");
+            if next_op.map_and_link_va(self, next_gpuva, vm_bo).is_err() {
+                dev_err!(self.dev.as_ref(), "step_remap: could not relink next gpuva");
+                return Err(EINVAL);
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl VmInner {
+    /// Returns the slot index, if this VM is bound.
+    fn slot(&self) -> Option<u32> {
+        if self.is_kernel {
+            // The GFX ASC does not care about the ASID. Pick an arbitrary one.
+            // TODO: This needs to be a persistently reserved ASID once we integrate
+            // with the ARM64 kernel ASID machinery to avoid overlap.
+            Some(0)
+        } else {
+            // We don't check whether we lost the slot, which could cause unnecessary
+            // invalidations against another Vm. However, this situation should be very
+            // rare (e.g. a Vm lost its slot, which means 63 other Vms bound in the
+            // interim, and then it gets killed / drops its mappings without doing any
+            // final rendering). Anything doing active maps/unmaps is probably also
+            // rendering and therefore likely bound.
+            self.binding
+                .lock()
+                .bind_token
+                .as_ref()
+                .map(|token| (token.last_slot() + UAT_USER_CTX_START as u32))
+        }
+    }
+
+    /// Returns the translation table base for this Vm
+    fn ttb(&self) -> u64 {
+        self.page_table.ttb()
+    }
+
+    /// Map an `mm::Node` representing a mapping in VA space.
+    fn map_node(&mut self, node: &mm::Node<(), KernelMappingInner>, prot: Prot) -> Result {
+        let mut iova = node.start();
+        let guard = node.bo.as_ref().ok_or(EINVAL)?.inner().sgt.lock();
+        let sgt = guard.as_ref().ok_or(EINVAL)?;
+        let mut offset = node.offset;
+        let mut left = node.mapped_size;
+
+        for range in sgt.iter() {
+            if left == 0 {
+                break;
+            }
+
+            let mut addr = range.dma_address();
+            let mut len = range.dma_len();
+
+            if (offset | addr | len | iova as usize) & UAT_PGMSK != 0 {
+                dev_err!(
+                    self.dev.as_ref(),
+                    "MMU: KernelMapping {:#x}:{:#x} -> {:#x} is not page-aligned\n",
+                    addr,
+                    len,
+                    iova
+                );
+                return Err(EINVAL);
+            }
+
+            if offset > 0 {
+                let skip = len.min(offset);
+                addr += skip;
+                len -= skip;
+                offset -= skip;
+            }
+
+            len = len.min(left);
+
+            if len == 0 {
+                continue;
+            }
+
+            mod_dev_dbg!(
+                self.dev,
+                "MMU: map: {:#x}:{:#x} -> {:#x}\n",
+                addr,
+                len,
+                iova
+            );
+
+            self.page_table.map_pages(
+                iova..(iova + len as u64),
+                addr as PhysicalAddr,
+                prot,
+                false,
+            )?;
+
+            iova += len as u64;
+            left -= len;
+        }
+        Ok(())
+    }
+}
+
+/// Shared reference to a virtual memory address space ([`Vm`]).
+#[derive(Clone)]
+pub(crate) struct Vm {
+    id: u64,
+    inner: ARef<gpuvm::GpuVm<VmInner>>,
+    dummy_obj: drm::gem::ObjectRef<gem::Object>,
+    binding: Arc<Mutex<VmBinding>>,
+}
+no_debug!(Vm);
+
+/// Slot data for a [`Vm`] slot (nothing, we only care about the indices).
+pub(crate) struct SlotInner();
+
+impl slotalloc::SlotItem for SlotInner {
+    type Data = ();
+}
+
+/// Represents a single user of a binding of a [`Vm`] to a slot.
+///
+/// The number of users is counted, and the slot will be freed when it drops to 0.
+#[derive(Debug)]
+pub(crate) struct VmBind(Vm, u32);
+
+impl VmBind {
+    /// Returns the slot that this `Vm` is bound to.
+    pub(crate) fn slot(&self) -> u32 {
+        self.1
+    }
+}
+
+impl Drop for VmBind {
+    fn drop(&mut self) {
+        let mut binding = self.0.binding.lock();
+
+        assert_ne!(binding.active_users, 0);
+        binding.active_users -= 1;
+        mod_pr_debug!(
+            "MMU: slot {} active users {}\n",
+            self.1,
+            binding.active_users
+        );
+        if binding.active_users == 0 {
+            binding.binding = None;
+        }
+    }
+}
+
+impl Clone for VmBind {
+    fn clone(&self) -> VmBind {
+        let mut binding = self.0.binding.lock();
+
+        binding.active_users += 1;
+        mod_pr_debug!(
+            "MMU: slot {} active users {}\n",
+            self.1,
+            binding.active_users
+        );
+        VmBind(self.0.clone(), self.1)
+    }
+}
+
+/// Inner data required for an object mapping into a [`Vm`].
+pub(crate) struct KernelMappingInner {
+    // Drop order matters:
+    // - Drop the GpuVmBo first, which resv locks its BO and drops a GpuVm reference
+    // - Drop the GEM BO next, since BO free can take the resv lock itself
+    // - Drop the owner GpuVm last, since that again can take resv locks when the refcount drops to 0
+    bo: Option<ARef<gpuvm::GpuVmBo<VmInner>>>,
+    _gem: Option<drm::gem::ObjectRef<gem::Object>>,
+    owner: ARef<gpuvm::GpuVm<VmInner>>,
+    uat_inner: Arc<UatInner>,
+    prot: Prot,
+    offset: usize,
+    mapped_size: usize,
+}
+
+/// An object mapping into a [`Vm`], which reserves the address range from use by other mappings.
+pub(crate) struct KernelMapping(mm::Node<(), KernelMappingInner>);
+
+impl KernelMapping {
+    /// Returns the IOVA base of this mapping
+    pub(crate) fn iova(&self) -> u64 {
+        self.0.start()
+    }
+
+    /// Returns the size of this mapping in bytes
+    pub(crate) fn size(&self) -> usize {
+        self.0.mapped_size
+    }
+
+    /// Returns the IOVA range of this mapping
+    pub(crate) fn iova_range(&self) -> Range<u64> {
+        self.0.start()..(self.0.start() + self.0.mapped_size as u64)
+    }
+
+    /// Remap a cached mapping as uncached, then synchronously flush that range of VAs from the
+    /// coprocessor cache. This is required to safely unmap cached/private mappings.
+    fn remap_uncached_and_flush(&mut self) {
+        let mut owner = self
+            .0
+            .owner
+            .exec_lock(None, false)
+            .expect("Failed to exec_lock in remap_uncached_and_flush");
+
+        mod_dev_dbg!(
+            owner.dev,
+            "MMU: remap as uncached {:#x}:{:#x}\n",
+            self.iova(),
+            self.size()
+        );
+
+        // Remap in-place as uncached.
+        // Do not try to unmap the guard page (-1)
+        let prot = self.0.prot.as_uncached();
+        if owner
+            .page_table
+            .reprot_pages(self.iova_range(), prot)
+            .is_err()
+        {
+            dev_err!(
+                owner.dev.as_ref(),
+                "MMU: remap {:#x}:{:#x} failed\n",
+                self.iova(),
+                self.size()
+            );
+        }
+        fence(Ordering::SeqCst);
+
+        // If we don't have (and have never had) a VM slot, just return
+        let slot = match owner.slot() {
+            None => return,
+            Some(slot) => slot,
+        };
+
+        let flush_slot = if owner.is_kernel {
+            // If this is a kernel mapping, always flush on index 64
+            UAT_NUM_CTX as u32
+        } else {
+            // Otherwise, only flush if this slot is currently the active one
+            // and we actually own it; return early if not
+            let ttb = owner.ttb() | TTBR_VALID | (slot as u64) << TTBR_ASID_SHIFT;
+
+            let uat_inner = self.0.uat_inner.lock();
+            uat_inner.handoff().lock();
+            let cur_slot = uat_inner.handoff().current_slot();
+            let ttb_cur = uat_inner.ttbs()[slot as usize].ttb0.load(Ordering::Relaxed);
+            uat_inner.handoff().unlock();
+            if cur_slot == Some(slot) && ttb_cur == ttb {
+                slot
+            } else {
+                return;
+            }
+        };
+
+        // FIXME: There is a race here, though it'll probably never happen in practice.
+        // In theory, it's possible for the ASC to finish using our slot, whatever command
+        // it was processing to complete, the slot to be lost to another context, and the ASC
+        // to begin using it again with a different page table, thus faulting when it gets a
+        // flush request here. In practice, the chance of this happening is probably vanishingly
+        // small, as all 62 other slots would have to be recycled or in use before that slot can
+        // be reused, and the ASC using user contexts at all is very rare.
+
+        // Still, the locking around UAT/Handoff/TTBs should probably be redesigned to better
+        // model the interactions with the firmware and avoid these races.
+        // Possibly TTB changes should be tied to slot locks:
+
+        // Flush:
+        //  - Can early check handoff here (no need to lock).
+        //      If user slot and it doesn't match the active ASC slot,
+        //      we can elide the flush as the ASC guarantees it flushes
+        //      TLBs/caches when it switches context. We just need a
+        //      barrier to ensure ordering.
+        //  - Lock TTB slot
+        //      - If user ctx:
+        //          - Lock handoff AP-side
+        //              - Lock handoff dekker
+        //                  - Check TTB & handoff cur ctx
+        //      - Perform flush if necessary
+        //          - This implies taking the fwring lock
+        //
+        // TTB change:
+        //  - lock TTB slot
+        //      - lock handoff AP-side
+        //          - lock handoff dekker
+        //              change TTB
+
+        // Lock this flush slot, and write the range to it
+        let flush = self.0.uat_inner.lock_flush(flush_slot);
+        let pages = self.size() >> UAT_PGBIT;
+        flush.begin_flush(self.iova(), self.size() as u64);
+        if pages >= 0x10000 {
+            dev_err!(
+                owner.dev.as_ref(),
+                "MMU: Flush too big ({:#x} pages))\n",
+                pages
+            );
+        }
+
+        let cmd = fw::channels::FwCtlMsg {
+            addr: fw::types::U64(self.iova()),
+            unk_8: 0,
+            slot: flush_slot,
+            page_count: pages as u16,
+            unk_12: 2, // ?
+        };
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(owner.dev.as_ref().get_drvdata()).data };
+
+        // Tell the firmware to do a cache flush
+        if let Err(e) = data.gpu.fwctl(cmd) {
+            dev_err!(
+                owner.dev.as_ref(),
+                "MMU: ASC cache flush {:#x}:{:#x} failed (err: {:?})\n",
+                self.iova(),
+                self.size(),
+                e
+            );
+        }
+
+        // Finish the flush
+        flush.end_flush();
+
+        // Slot is unlocked here
+    }
+}
+no_debug!(KernelMapping);
+
+impl Drop for KernelMapping {
+    fn drop(&mut self) {
+        // This is the main unmap function for UAT mappings.
+        // The sequence of operations here is finicky, due to the interaction
+        // between cached GFX ASC mappings and the page tables. These mappings
+        // always have to be flushed from the cache before being unmapped.
+
+        // For uncached mappings, just unmapping and flushing the TLB is sufficient.
+
+        // For cached mappings, this is the required sequence:
+        // 1. Remap it as uncached
+        // 2. Flush the TLB range
+        // 3. If kernel VA mapping OR user VA mapping and handoff.current_slot() == slot:
+        //    a. Take a lock for this slot
+        //    b. Write the flush range to the right context slot in handoff area
+        //    c. Issue a cache invalidation request via FwCtl queue
+        //    d. Poll for completion via queue
+        //    e. Check for completion flag in the handoff area
+        //    f. Drop the lock
+        // 4. Unmap
+        // 5. Flush the TLB range again
+
+        if self.0.prot.is_cached_noncoherent() {
+            mod_pr_debug!(
+                "MMU: remap as uncached {:#x}:{:#x}\n",
+                self.iova(),
+                self.size()
+            );
+            self.remap_uncached_and_flush();
+        }
+
+        let mut owner = self
+            .0
+            .owner
+            .exec_lock(None, false)
+            .expect("exec_lock failed in KernelMapping::drop");
+        mod_dev_dbg!(
+            owner.dev,
+            "MMU: unmap {:#x}:{:#x}\n",
+            self.iova(),
+            self.size()
+        );
+
+        if owner.page_table.unmap_pages(self.iova_range()).is_err() {
+            dev_err!(
+                owner.dev.as_ref(),
+                "MMU: unmap {:#x}:{:#x} failed\n",
+                self.iova(),
+                self.size()
+            );
+        }
+
+        if let Some(asid) = owner.slot() {
+            fence(Ordering::SeqCst);
+            mem::tlbi_range(asid as u8, self.iova() as usize, self.size());
+            mod_dev_dbg!(
+                owner.dev,
+                "MMU: flush range: asid={:#x} start={:#x} len={:#x}\n",
+                asid,
+                self.iova(),
+                self.size()
+            );
+            mem::sync();
+        }
+    }
+}
+
+/// Shared UAT global data structures
+struct UatShared {
+    kernel_ttb1: u64,
+    map_kernel_to_user: bool,
+    handoff_rgn: UatRegion,
+    ttbs_rgn: UatRegion,
+}
+
+impl UatShared {
+    /// Returns the handoff region area
+    fn handoff(&self) -> &Handoff {
+        // SAFETY: pointer is non-null per the type invariant
+        unsafe { (self.handoff_rgn.map.ptr() as *mut Handoff).as_ref() }.unwrap()
+    }
+
+    /// Returns the TTBAT area
+    fn ttbs(&self) -> &[SlotTTBS; UAT_NUM_CTX] {
+        // SAFETY: pointer is non-null per the type invariant
+        unsafe { (self.ttbs_rgn.map.ptr() as *mut [SlotTTBS; UAT_NUM_CTX]).as_ref() }.unwrap()
+    }
+}
+
+// SAFETY: Nothing here is unsafe to send across threads.
+unsafe impl Send for UatShared {}
+
+/// Inner data for the top-level UAT instance.
+#[pin_data]
+struct UatInner {
+    #[pin]
+    shared: Mutex<UatShared>,
+    #[pin]
+    handoff_flush: [Mutex<HandoffFlush>; UAT_NUM_CTX + 1],
+}
+
+impl UatInner {
+    /// Take the lock on the shared data and return the guard.
+    fn lock(&self) -> Guard<'_, UatShared, MutexBackend> {
+        self.shared.lock()
+    }
+
+    /// Take a lock on a handoff flush slot and return the guard.
+    fn lock_flush(&self, slot: u32) -> Guard<'_, HandoffFlush, MutexBackend> {
+        self.handoff_flush[slot as usize].lock()
+    }
+}
+
+/// Top-level UAT manager object
+pub(crate) struct Uat {
+    dev: driver::AsahiDevRef,
+    cfg: &'static hw::HwConfig,
+
+    inner: Arc<UatInner>,
+    slots: slotalloc::SlotAllocator<SlotInner>,
+
+    kernel_vm: Vm,
+    kernel_lower_vm: Vm,
+}
+
+impl Handoff {
+    /// Lock the handoff region from firmware access
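+    ///
+    /// This is the AP side of a Dekker-style mutual exclusion handshake with the
+    /// firmware: we raise `lock_ap`, and while `lock_fw` is set we back off whenever
+    /// `turn` indicates it is the firmware's turn, then retry.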
+    fn lock(&self) {
+        self.lock_ap.store(1, Ordering::Relaxed);
+        fence(Ordering::SeqCst);
+
+        while self.lock_fw.load(Ordering::Relaxed) != 0 {
+            if self.turn.load(Ordering::Relaxed) != 0 {
+                self.lock_ap.store(0, Ordering::Relaxed);
+                while self.turn.load(Ordering::Relaxed) != 0 {}
+                self.lock_ap.store(1, Ordering::Relaxed);
+                fence(Ordering::SeqCst);
+            }
+        }
+        fence(Ordering::Acquire);
+    }
+
+    /// Unlock the handoff region, allowing firmware access
+    fn unlock(&self) {
+        self.turn.store(1, Ordering::Relaxed);
+        self.lock_ap.store(0, Ordering::Release);
+    }
+
+    /// Returns the current Vm slot mapped by the firmware for lower/unprivileged access, if any.
+    fn current_slot(&self) -> Option<u32> {
+        let slot = self.cur_slot.load(Ordering::Relaxed);
+        if slot == 0 || slot == u32::MAX {
+            None
+        } else {
+            Some(slot)
+        }
+    }
+
+    /// Initialize the handoff region
+    fn init(&self) -> Result {
+        self.magic_ap.store(PPL_MAGIC, Ordering::Relaxed);
+        self.cur_slot.store(0, Ordering::Relaxed);
+        self.unk3.store(0, Ordering::Relaxed);
+        fence(Ordering::SeqCst);
+
+        let start = clock::KernelTime::now();
+        const TIMEOUT: Duration = Duration::from_millis(1000);
+
+        self.lock();
+        while start.elapsed() < TIMEOUT {
+            if self.magic_fw.load(Ordering::Relaxed) == PPL_MAGIC {
+                break;
+            } else {
+                self.unlock();
+                delay::coarse_sleep(Duration::from_millis(10));
+                self.lock();
+            }
+        }
+
+        if self.magic_fw.load(Ordering::Relaxed) != PPL_MAGIC {
+            self.unlock();
+            pr_err!("Handoff: Failed to initialize (firmware not running?)\n");
+            return Err(EIO);
+        }
+
+        self.unlock();
+
+        for i in 0..=UAT_NUM_CTX {
+            self.flush[i].state.store(0, Ordering::Relaxed);
+            self.flush[i].addr.store(0, Ordering::Relaxed);
+            self.flush[i].size.store(0, Ordering::Relaxed);
+        }
+        fence(Ordering::SeqCst);
+        Ok(())
+    }
+}
+
+/// Represents a single flush info slot in the handoff region.
+///
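+/// Each slot carries a small state machine: the AP stores the address/size and sets
+/// `state` to 1 to request a flush, and expects it to read back as 2 once the firmware
+/// has processed the request, after which the AP resets it to 0 (see `begin_flush` /
+/// `end_flush`; the firmware-side semantics are inferred from observed behavior).
+///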
+/// # Invariants
+/// The pointer is valid and there is no aliasing HandoffFlush instance.
+struct HandoffFlush(*const FlushInfo);
+
+// SAFETY: These pointers are safe to send across threads.
+unsafe impl Send for HandoffFlush {}
+
+impl HandoffFlush {
+    /// Set up a flush operation for the coprocessor
+    fn begin_flush(&self, start: u64, size: u64) {
+        // SAFETY: Per the type invariant, this is safe
+        let flush = unsafe { self.0.as_ref().unwrap() };
+
+        let state = flush.state.load(Ordering::Relaxed);
+        if state != 0 {
+            pr_err!("Handoff: expected flush state 0, got {}\n", state);
+        }
+        flush.addr.store(start, Ordering::Relaxed);
+        flush.size.store(size, Ordering::Relaxed);
+        flush.state.store(1, Ordering::Relaxed);
+    }
+
+    /// Complete a flush operation for the coprocessor
+    fn end_flush(&self) {
+        // SAFETY: Per the type invariant, this is safe
+        let flush = unsafe { self.0.as_ref().unwrap() };
+        let state = flush.state.load(Ordering::Relaxed);
+        if state != 2 {
+            pr_err!("Handoff: expected flush state 2, got {}\n", state);
+        }
+        flush.state.store(0, Ordering::Relaxed);
+    }
+}
+
+impl Vm {
+    /// Create a new virtual memory address space
+    fn new(
+        dev: &driver::AsahiDevice,
+        uat_inner: Arc<UatInner>,
+        kernel_range: Range<u64>,
+        cfg: &'static hw::HwConfig,
+        ttb: Option<PhysicalAddr>,
+        id: u64,
+    ) -> Result<Vm> {
+        let dummy_obj = gem::new_kernel_object(dev, UAT_PGSZ)?;
+        let is_kernel = ttb.is_some();
+
+        let page_table = if let Some(ttb) = ttb {
+            UatPageTable::new_with_ttb(ttb, IOVA_KERN_RANGE, cfg.uat_oas)?
+        } else {
+            UatPageTable::new(cfg.uat_oas)?
+        };
+
+        let (va_range, gpuvm_range) = if is_kernel {
+            (IOVA_KERN_RANGE, kernel_range.clone())
+        } else {
+            (IOVA_USER_RANGE, IOVA_USER_USABLE_RANGE)
+        };
+
+        let mm = mm::Allocator::new(va_range.start, va_range.range(), ())?;
+
+        let binding = Arc::pin_init(
+            Mutex::new_named(
+                VmBinding {
+                    binding: None,
+                    bind_token: None,
+                    active_users: 0,
+                    ttb: page_table.ttb(),
+                },
+                c_str!("VmBinding"),
+            ),
+            GFP_KERNEL,
+        )?;
+
+        let binding_clone = binding.clone();
+        Ok(Vm {
+            id,
+            dummy_obj: dummy_obj.gem.clone(),
+            inner: gpuvm::GpuVm::new(
+                c_str!("Asahi::GpuVm"),
+                dev,
+                &*(dummy_obj.gem),
+                gpuvm_range,
+                kernel_range,
+                init!(VmInner {
+                    dev: dev.into(),
+                    va_range,
+                    is_kernel,
+                    page_table,
+                    mm,
+                    uat_inner,
+                    binding: binding_clone,
+                    id,
+                }),
+            )?,
+            binding,
+        })
+    }
+
+    /// Get the translation table base for this Vm
+    fn ttb(&self) -> u64 {
+        self.binding.lock().ttb
+    }
+
+    /// Map a GEM object (using its `SGTable`) into this Vm at a free address in a given range.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn map_in_range(
+        &self,
+        gem: &gem::Object,
+        object_range: Range<usize>,
+        alignment: u64,
+        range: Range<u64>,
+        prot: Prot,
+        guard: bool,
+    ) -> Result<KernelMapping> {
+        let size = object_range.range();
+        let sgt = gem.sg_table()?;
+        let mut inner = self.inner.exec_lock(Some(gem), false)?;
+        let vm_bo = inner.obtain_bo()?;
+
+        let mut vm_bo_guard = vm_bo.inner().sgt.lock();
+        if vm_bo_guard.is_none() {
+            vm_bo_guard.replace(sgt);
+        }
+        core::mem::drop(vm_bo_guard);
+
+        let uat_inner = inner.uat_inner.clone();
+        let node = inner.mm.insert_node_in_range(
+            KernelMappingInner {
+                owner: self.inner.clone(),
+                uat_inner,
+                prot,
+                bo: Some(vm_bo),
+                _gem: Some(gem.reference()),
+                offset: object_range.start,
+                mapped_size: size,
+            },
+            (size + if guard { UAT_PGSZ } else { 0 }) as u64, // Add guard page
+            alignment,
+            0,
+            range.start,
+            range.end,
+            mm::InsertMode::Best,
+        )?;
+
+        let ret = inner.map_node(&node, prot);
+        // Drop the exec_lock first, so that if map_node failed the
+        // KernelMappingInner destructor does not deadlock.
+        core::mem::drop(inner);
+        ret?;
+        Ok(KernelMapping(node))
+    }
+
+    /// Map a GEM object into this Vm at a specific address.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn map_at(
+        &self,
+        addr: u64,
+        size: usize,
+        gem: &gem::Object,
+        prot: Prot,
+        guard: bool,
+    ) -> Result<KernelMapping> {
+        let sgt = gem.sg_table()?;
+        let mut inner = self.inner.exec_lock(Some(gem), false)?;
+
+        let vm_bo = inner.obtain_bo()?;
+
+        let mut vm_bo_guard = vm_bo.inner().sgt.lock();
+        if vm_bo_guard.is_none() {
+            vm_bo_guard.replace(sgt);
+        }
+        core::mem::drop(vm_bo_guard);
+
+        let uat_inner = inner.uat_inner.clone();
+        let node = inner.mm.reserve_node(
+            KernelMappingInner {
+                owner: self.inner.clone(),
+                uat_inner,
+                prot,
+                bo: Some(vm_bo),
+                _gem: Some(gem.reference()),
+                offset: 0,
+                mapped_size: size,
+            },
+            addr,
+            (size + if guard { UAT_PGSZ } else { 0 }) as u64, // Add guard page
+            0,
+        )?;
+
+        let ret = inner.map_node(&node, prot);
+        // Drop the exec_lock first, so that if map_node failed the
+        // KernelMappingInner destructor does not deadlock.
+        core::mem::drop(inner);
+        ret?;
+        Ok(KernelMapping(node))
+    }
+
+    /// Map a range of a GEM object into this Vm using GPUVM.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn bind_object(
+        &self,
+        gem: &gem::Object,
+        addr: u64,
+        size: u64,
+        offset: u64,
+        prot: Prot,
+        single_page: bool,
+    ) -> Result {
+        // Mapping needs a complete context
+        let mut ctx = StepContext {
+            new_va: Some(gpuvm::GpuVa::<VmInner>::new(init::default())?),
+            prev_va: Some(gpuvm::GpuVa::<VmInner>::new(init::default())?),
+            next_va: Some(gpuvm::GpuVa::<VmInner>::new(init::default())?),
+            prot,
+            ..Default::default()
+        };
+
+        let sgt = gem.sg_table()?;
+        let mut inner = self.inner.exec_lock(Some(gem), true)?;
+
+        // Preallocate the page tables, to fail early if we ENOMEM
+        inner.page_table.alloc_pages(addr..(addr + size))?;
+
+        let vm_bo = inner.obtain_bo()?;
+
+        let mut vm_bo_guard = vm_bo.inner().sgt.lock();
+        if vm_bo_guard.is_none() {
+            vm_bo_guard.replace(sgt);
+        }
+        core::mem::drop(vm_bo_guard);
+
+        ctx.vm_bo = Some(vm_bo);
+
+        if (addr | size | offset) & (UAT_PGMSK as u64) != 0 {
+            dev_err!(
+                inner.dev.as_ref(),
+                "MMU: Map step {:#x} [{:#x}] -> {:#x} is not page-aligned\n",
+                offset,
+                size,
+                addr
+            );
+            return Err(EINVAL);
+        }
+
+        let flags = if single_page {
+            gpuvm::GpuVaFlags::SINGLE_PAGE
+        } else {
+            gpuvm::GpuVaFlags::NONE
+        };
+
+        mod_dev_dbg!(
+            inner.dev,
+            "MMU: sm_map: {:#x} [{:#x}] -> {:#x}\n",
+            offset,
+            size,
+            addr
+        );
+        inner.sm_map(&mut ctx, addr, size, offset, flags)
+    }
+
+    /// Add a direct MMIO mapping to this Vm at a free address.
+    pub(crate) fn map_io(
+        &self,
+        iova: u64,
+        phys: usize,
+        size: usize,
+        prot: Prot,
+    ) -> Result<KernelMapping> {
+        let mut inner = self.inner.exec_lock(None, false)?;
+
+        if (iova as usize | phys | size) & UAT_PGMSK != 0 {
+            dev_err!(
+                inner.dev.as_ref(),
+                "MMU: KernelMapping {:#x}:{:#x} -> {:#x} is not page-aligned\n",
+                phys,
+                size,
+                iova
+            );
+            return Err(EINVAL);
+        }
+
+        dev_info!(
+            inner.dev.as_ref(),
+            "MMU: IO map: {:#x}:{:#x} -> {:#x}\n",
+            phys,
+            size,
+            iova
+        );
+
+        let uat_inner = inner.uat_inner.clone();
+        let node = inner.mm.reserve_node(
+            KernelMappingInner {
+                owner: self.inner.clone(),
+                uat_inner,
+                prot,
+                bo: None,
+                _gem: None,
+                offset: 0,
+                mapped_size: size,
+            },
+            iova,
+            size as u64,
+            0,
+        )?;
+
+        let ret = inner.page_table.map_pages(
+            iova..(iova + size as u64),
+            phys as PhysicalAddr,
+            prot,
+            false,
+        );
+        // Drop the exec_lock first, so that if map_pages failed the
+        // KernelMappingInner destructor does not deadlock.
+        core::mem::drop(inner);
+        ret?;
+        Ok(KernelMapping(node))
+    }
+
+    /// Unmap everything in an address range.
+    pub(crate) fn unmap_range(&self, iova: u64, size: u64) -> Result {
+        // Unmapping a range can only do a single split, so just preallocate
+        // the prev and next GpuVas
+        let mut ctx = StepContext {
+            prev_va: Some(gpuvm::GpuVa::<VmInner>::new(init::default())?),
+            next_va: Some(gpuvm::GpuVa::<VmInner>::new(init::default())?),
+            ..Default::default()
+        };
+
+        let mut inner = self.inner.exec_lock(None, false)?;
+
+        mod_dev_dbg!(inner.dev, "MMU: sm_unmap: {:#x}:{:#x}\n", iova, size);
+        inner.sm_unmap(&mut ctx, iova, size)
+    }
+
+    /// Drop mappings for a given bo.
+    pub(crate) fn drop_mappings(&self, gem: &gem::Object) -> Result {
+        // Removing whole mappings only does unmaps, so no preallocated VAs
+        let mut ctx = Default::default();
+
+        let mut inner = self.inner.exec_lock(Some(gem), false)?;
+
+        if let Some(bo) = inner.find_bo() {
+            mod_dev_dbg!(inner.dev, "MMU: bo_unmap\n");
+            inner.bo_unmap(&mut ctx, &bo)?;
+            mod_dev_dbg!(inner.dev, "MMU: bo_unmap done\n");
+            // We need to drop the exec_lock first, then the GpuVmBo since that will take the lock itself.
+            core::mem::drop(inner);
+            core::mem::drop(bo);
+        }
+
+        Ok(())
+    }
+
+    /// Returns the dummy GEM object used to hold the shared DMA reservation locks
+    pub(crate) fn get_resv_obj(&self) -> drm::gem::ObjectRef<gem::Object> {
+        self.dummy_obj.clone()
+    }
+
+    /// Check whether an object is external to this GpuVm
+    pub(crate) fn is_extobj(&self, gem: &gem::Object) -> bool {
+        self.inner.is_extobj(gem)
+    }
+}
+
+impl Drop for VmInner {
+    fn drop(&mut self) {
+        let mut binding = self.binding.lock();
+        assert_eq!(binding.active_users, 0);
+
+        mod_pr_debug!(
+            "VmInner::Drop [{}]: bind_token={:?}\n",
+            self.id,
+            binding.bind_token
+        );
+
+        // Make sure this VM is not mapped to a TTB if it was
+        if let Some(token) = binding.bind_token.take() {
+            let idx = (token.last_slot() as usize) + UAT_USER_CTX_START;
+            let ttb = self.ttb() | TTBR_VALID | (idx as u64) << TTBR_ASID_SHIFT;
+
+            let uat_inner = self.uat_inner.lock();
+            uat_inner.handoff().lock();
+            let handoff_cur = uat_inner.handoff().current_slot();
+            let ttb_cur = uat_inner.ttbs()[idx].ttb0.load(Ordering::SeqCst);
+            let inval = ttb_cur == ttb;
+            if inval {
+                if handoff_cur == Some(idx as u32) {
+                    pr_err!(
+                        "VmInner::drop owning slot {}, but it is currently in use by the ASC?\n",
+                        idx
+                    );
+                }
+                uat_inner.ttbs()[idx].ttb0.store(0, Ordering::SeqCst);
+                uat_inner.ttbs()[idx].ttb1.store(0, Ordering::SeqCst);
+            }
+            uat_inner.handoff().unlock();
+            core::mem::drop(uat_inner);
+
+            // In principle we dropped all the KernelMappings already, but we might as
+            // well play it safe and invalidate the whole ASID.
+            if inval {
+                mod_pr_debug!(
+                    "VmInner::Drop [{}]: need inval for ASID {:#x}\n",
+                    self.id,
+                    idx
+                );
+                mem::tlbi_asid(idx as u8);
+                mem::sync();
+            }
+        }
+    }
+}
+
+impl Uat {
+    fn get_region(dev: &device::Device, name: &CStr) -> Result<Resource> {
+        let dev_node = dev.of_node().ok_or(EINVAL)?;
+
+        let node = dev_node.parse_phandle_by_name(
+            c_str!("memory-region"),
+            c_str!("memory-region-names"),
+            name,
+        );
+        let Some(node) = node else {
+            dev_err!(dev, "Missing {} region\n", name);
+            return Err(EINVAL);
+        };
+        let res = node.address_as_resource(0).inspect_err(|_| {
+            dev_err!(dev, "Failed to get {} region\n", name);
+        })?;
+
+        Ok(res)
+    }
+
+    /// Map a bootloader-preallocated memory region
+    fn map_region(
+        dev: &device::Device,
+        name: &CStr,
+        size: usize,
+        cached: bool,
+    ) -> Result<UatRegion> {
+        let res = Self::get_region(dev, name)?;
+        let base = res.start();
+        let res_size = res.size().try_into()?;
+
+        if size > res_size {
+            dev_err!(
+                dev,
+                "Region {} is too small (expected {}, got {})\n",
+                name,
+                size,
+                res_size
+            );
+            return Err(ENOMEM);
+        }
+
+        let flags = if cached {
+            io::mem::MemFlags::WB
+        } else {
+            io::mem::MemFlags::WC
+        };
+
+        // SAFETY: The safety of this operation hinges on the correctness of
+        // much of this file and also the `pgtable` module, so it is difficult
+        // to prove in a single safety comment. Such is life with raw GPU
+        // page table management...
+        let map = unsafe { io::mem::Mem::try_new(res, flags) }.inspect_err(|_| {
+            dev_err!(dev, "Failed to remap {} mem resource\n", name);
+        })?;
+
+        Ok(UatRegion { base, map })
+    }
+
+    /// Returns a reference to the global kernel (upper half) `Vm`
+    pub(crate) fn kernel_vm(&self) -> &Vm {
+        &self.kernel_vm
+    }
+
+    /// Returns a reference to the local kernel (lower half) `Vm`
+    pub(crate) fn kernel_lower_vm(&self) -> &Vm {
+        &self.kernel_lower_vm
+    }
+
+    pub(crate) fn dump_kernel_pages(&self) -> Result<KVVec<pgtable::DumpedPage>> {
+        let mut inner = self.kernel_vm.inner.exec_lock(None, false)?;
+        inner.page_table.dump_pages(IOVA_KERN_FULL_RANGE)
+    }
+
+    /// Returns the base physical address of the TTBAT region.
+    pub(crate) fn ttb_base(&self) -> u64 {
+        let inner = self.inner.lock();
+
+        inner.ttbs_rgn.base
+    }
+
+    /// Binds a `Vm` to a slot, preferring the last used one.
+    pub(crate) fn bind(&self, vm: &Vm) -> Result<VmBind> {
+        let mut binding = vm.binding.lock();
+
+        if binding.binding.is_none() {
+            assert_eq!(binding.active_users, 0);
+
+            let isolation = *module_parameters::robust_isolation.get() != 0;
+
+            self.slots.set_limit(if isolation {
+                NonZeroUsize::new(1)
+            } else {
+                None
+            });
+
+            let slot = self.slots.get(binding.bind_token)?;
+            if slot.changed() {
+                mod_pr_debug!("Vm Bind [{}]: bind_token={:?}\n", vm.id, slot.token(),);
+                let idx = (slot.slot() as usize) + UAT_USER_CTX_START;
+                let ttb = binding.ttb | TTBR_VALID | (idx as u64) << TTBR_ASID_SHIFT;
+
+                let uat_inner = self.inner.lock();
+
+                let ttb1 = if uat_inner.map_kernel_to_user {
+                    uat_inner.kernel_ttb1 | TTBR_VALID | (idx as u64) << TTBR_ASID_SHIFT
+                } else {
+                    0
+                };
+
+                let ttbs = uat_inner.ttbs();
+                uat_inner.handoff().lock();
+                if uat_inner.handoff().current_slot() == Some(idx as u32) {
+                    pr_err!(
+                        "Vm::bind to slot {}, but it is currently in use by the ASC?\n",
+                        idx
+                    );
+                }
+                ttbs[idx].ttb0.store(ttb, Ordering::Release);
+                ttbs[idx].ttb1.store(ttb1, Ordering::Release);
+                uat_inner.handoff().unlock();
+                core::mem::drop(uat_inner);
+
+                // Make sure all TLB entries from the previous owner of this ASID are gone
+                mem::tlbi_asid(idx as u8);
+                mem::sync();
+            }
+
+            binding.bind_token = Some(slot.token());
+            binding.binding = Some(slot);
+        }
+
+        binding.active_users += 1;
+
+        let slot = binding.binding.as_ref().unwrap().slot() + UAT_USER_CTX_START as u32;
+        mod_pr_debug!("MMU: slot {} active users {}\n", slot, binding.active_users);
+        Ok(VmBind(vm.clone(), slot))
+    }
+
+    /// Creates a new `Vm` linked to this UAT.
+    pub(crate) fn new_vm(&self, id: u64, kernel_range: Range<u64>) -> Result<Vm> {
+        Vm::new(
+            &self.dev,
+            self.inner.clone(),
+            kernel_range,
+            self.cfg,
+            None,
+            id,
+        )
+    }
+
+    /// Creates the reference-counted inner data for a new `Uat` instance.
+    #[inline(never)]
+    fn make_inner(dev: &driver::AsahiDevice) -> Result<Arc<UatInner>> {
+        let handoff_rgn = Self::map_region(dev.as_ref(), c_str!("handoff"), HANDOFF_SIZE, true)?;
+        let ttbs_rgn = Self::map_region(dev.as_ref(), c_str!("ttbs"), SLOTS_SIZE, true)?;
+
+        // SAFETY: The Handoff struct layout matches the firmware's view of memory at this address,
+        // and the region is at least large enough per the size specified above.
+        let handoff = unsafe { &(handoff_rgn.map.ptr() as *mut Handoff).as_ref().unwrap() };
+
+        dev_info!(dev.as_ref(), "MMU: Initializing kernel page table\n");
+
+        Arc::pin_init(
+            try_pin_init!(UatInner {
+                handoff_flush <- init::pin_init_array_from_fn(|i| {
+                    Mutex::new_named(HandoffFlush(&handoff.flush[i]), c_str!("handoff_flush"))
+                }),
+                shared <- Mutex::new_named(
+                    UatShared {
+                        kernel_ttb1: 0,
+                        map_kernel_to_user: false,
+                        handoff_rgn,
+                        ttbs_rgn,
+                    },
+                    c_str!("uat_shared")
+                ),
+            }),
+            GFP_KERNEL,
+        )
+    }
+
+    /// Creates a new `Uat` instance given the relevant hardware config.
+    #[inline(never)]
+    pub(crate) fn new(
+        dev: &driver::AsahiDevice,
+        cfg: &'static hw::HwConfig,
+        map_kernel_to_user: bool,
+    ) -> Result<Self> {
+        dev_info!(dev.as_ref(), "MMU: Initializing...\n");
+
+        let inner = Self::make_inner(dev)?;
+
+        let res = Self::get_region(dev.as_ref(), c_str!("pagetables"))?;
+        let ttb1 = res.start();
+        let ttb1size: usize = res.size().try_into()?;
+
+        if ttb1size < PAGETABLES_SIZE {
+            dev_err!(dev.as_ref(), "MMU: Pagetables region is too small\n");
+            return Err(ENOMEM);
+        }
+
+        dev_info!(dev.as_ref(), "MMU: Creating kernel page tables\n");
+        let kernel_lower_vm = Vm::new(dev, inner.clone(), IOVA_USER_RANGE, cfg, None, 1)?;
+        let kernel_vm = Vm::new(dev, inner.clone(), IOVA_KERN_RANGE, cfg, Some(ttb1), 0)?;
+
+        dev_info!(dev.as_ref(), "MMU: Kernel page tables created\n");
+
+        let ttb0 = kernel_lower_vm.ttb();
+
+        let uat = Self {
+            dev: dev.into(),
+            cfg,
+            kernel_vm,
+            kernel_lower_vm,
+            inner,
+            slots: slotalloc::SlotAllocator::new(
+                UAT_USER_CTX as u32,
+                (),
+                |_inner, _slot| Some(SlotInner()),
+                c_str!("Uat::SlotAllocator"),
+                static_lock_class!(),
+                static_lock_class!(),
+            )?,
+        };
+
+        let mut inner = uat.inner.lock();
+
+        inner.map_kernel_to_user = map_kernel_to_user;
+        inner.kernel_ttb1 = ttb1;
+
+        inner.handoff().init()?;
+
+        dev_info!(dev.as_ref(), "MMU: Initializing TTBs\n");
+
+        inner.handoff().lock();
+
+        let ttbs = inner.ttbs();
+
+        ttbs[0].ttb0.store(ttb0 | TTBR_VALID, Ordering::SeqCst);
+        ttbs[0].ttb1.store(ttb1 | TTBR_VALID, Ordering::SeqCst);
+
+        for ctx in &ttbs[1..] {
+            ctx.ttb0.store(0, Ordering::Relaxed);
+            ctx.ttb1.store(0, Ordering::Relaxed);
+        }
+
+        inner.handoff().unlock();
+
+        core::mem::drop(inner);
+
+        dev_info!(dev.as_ref(), "MMU: initialized\n");
+
+        Ok(uat)
+    }
+}
+
+impl Drop for Uat {
+    fn drop(&mut self) {
+        // Make sure we flush the TLBs
+        fence(Ordering::SeqCst);
+        mem::tlbi_all();
+        mem::sync();
+    }
+}
diff --git a/drivers/gpu/drm/asahi/object.rs b/drivers/gpu/drm/asahi/object.rs
new file mode 100644
index 00000000000000..5e20555a92841d
--- /dev/null
+++ b/drivers/gpu/drm/asahi/object.rs
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Asahi GPU object model
+//!
+//! The AGX GPU includes a coprocessor that uses a large number of shared memory structures to
+//! communicate with the driver. These structures contain GPU VA pointers to each other, which are
+//! directly dereferenced by the firmware and are expected to always be valid for the usage
+//! lifetime of the containing struct (which is an implicit contract, not explicitly managed).
+//! Any faults cause an unrecoverable firmware crash, requiring a full system reboot.
+//!
+//! In order to manage this complexity safely, we implement a GPU object model using Rust's type
+//! system to enforce GPU object lifetime relationships. GPU objects represent an allocated piece
+//! of memory of a given type, mapped to the GPU (and usually also the CPU). On the CPU side,
+//! these objects are associated with a pure Rust structure that contains the objects it depends
+//! on (or references to them). This allows us to map Rust lifetimes into the GPU object model
+//! system. Then, GPU VA pointers also inherit those lifetimes, which means the Rust borrow checker
+//! can ensure that all pointers are assigned an address that is guaranteed to outlive the GPU
+//! object it points to.
+//!
+//! Since the firmware object model does have self-referencing pointers (and there is of course no
+//! underlying revocability mechanism to make it safe), we must have an escape hatch. GPU pointers
+//! can be weak pointers, which do not enforce lifetimes. In those cases, it is the user's
+//! responsibility to ensure that lifetime requirements are met.
+//!
+//! In other words, the model is necessarily leaky and there is no way to fully map Rust safety to
+//! GPU firmware object safety. The goal of the model is to make it easy to model the lifetimes of
+//! GPU objects and have the compiler help in avoiding mistakes, rather than to guarantee safety
+//! 100% of the time as would be the case for CPU-side Rust code.
+
+// TODO: There is a fundamental soundness issue with sharing memory with the GPU (that even affects
+// C code too). Since the GPU is free to mutate that memory at any time, normal reference invariants
+// cannot be enforced on the CPU side. For example, the compiler could perform an optimization that
+// assumes that a given memory location does not change between two reads, and causes UB otherwise,
+// and then the GPU could mutate that memory out from under the CPU.
+//
+// For cases where we *expect* this to happen, we use atomic types, which avoid this issue. However,
+// doing so for every single field of every type is a non-starter. Right now, there seems to be no
+// good solution for this that does not come with significant performance or ergonomics downsides.
+//
+// In *practice* we are almost always only writing GPU memory, and only reading from atomics, so the
+// chances of this actually triggering UB (e.g. a security issue that can be triggered from the GPU
+// side) due to a compiler optimization are very slim.
+//
+// Further discussion: https://github.com/rust-lang/unsafe-code-guidelines/issues/152
+
+use kernel::{error::code::*, prelude::*, sync::Arc};
+
+use core::fmt;
+use core::fmt::Debug;
+use core::fmt::Formatter;
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+use core::num::NonZeroU64;
+use core::ops::{Deref, DerefMut, Index, IndexMut};
+use core::{mem, ptr, slice};
+
+use crate::alloc::Allocation;
+use crate::debug::*;
+use crate::fw::types::Zeroable;
+use crate::mmu;
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Object;
+
+/// A GPU-side strong pointer, which is a 64-bit non-zero VA with an associated lifetime.
+///
+/// In rare cases these pointers are not aligned, so this is `packed(1)`.
+#[repr(C, packed(1))]
+pub(crate) struct GpuPointer<'a, T: ?Sized>(NonZeroU64, PhantomData<&'a T>);
+
+impl<'a, T: ?Sized> GpuPointer<'a, T> {
+    /// Bitwise OR the pointer with an arbitrary `u64`. This is used when GPU struct fields contain
+    /// misc flag fields in the upper bits. The lifetime is retained. This is GPU-unsafe in
+    /// principle, but we assert that only non-implemented address bits are touched, which is safe
+    /// for pointers used by the GPU (not by firmware).
+    pub(crate) fn or(&self, other: u64) -> GpuPointer<'a, T> {
+        // This will fail for kernel-half pointers, which should not be ORed.
+        assert_eq!(self.0.get() & other, 0);
+        // Assert that we only touch the high bits.
+        assert_eq!(other & 0xffffffffff, 0);
+        GpuPointer(self.0 | other, PhantomData)
+    }
+
+    /// Add an arbitrary offset to the pointer. This is not safe (from the GPU perspective), and
+    /// should only be used via the `inner_ptr` macro to get pointers to inner fields, hence we mark
+    /// it `unsafe` to discourage direct use.
+    ///
+    /// # Safety
+    /// Do not use directly, only via `inner_ptr`.
+    // NOTE: The third argument is a type inference hack.
+    pub(crate) unsafe fn offset<U>(&self, off: usize, _: *const U) -> GpuPointer<'a, U> {
+        GpuPointer::<'a, U>(
+            NonZeroU64::new(self.0.get() + (off as u64)).unwrap(),
+            PhantomData,
+        )
+    }
+}
+
+impl<'a, T> GpuPointer<'a, T> {
+    /// Create a GPU pointer from a KernelMapping and an offset.
+    /// TODO: Change all GPU pointers to point to the raw types so size_of here is GPU-sound.
+    pub(crate) fn from_mapping(
+        mapping: &'a Arc<mmu::KernelMapping>,
+        offset: usize,
+    ) -> Result<GpuPointer<'a, T>> {
+        let addr = mapping.iova().checked_add(offset as u64).ok_or(EINVAL)?;
+        let end = offset
+            .checked_add(core::mem::size_of::<T>())
+            .ok_or(EINVAL)?;
+        if end > mapping.size() {
+            Err(ERANGE)
+        } else {
+            Ok(Self(addr.try_into().unwrap(), PhantomData))
+        }
+    }
+}
+
+impl<'a, T: ?Sized> Debug for GpuPointer<'a, T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        let val = self.0;
+        f.write_fmt(format_args!("{:#x} ({})", val, core::any::type_name::<T>()))
+    }
+}
+
+impl<'a, T: ?Sized> From<GpuPointer<'a, T>> for u64 {
+    fn from(value: GpuPointer<'a, T>) -> Self {
+        value.0.get()
+    }
+}
+
+/// Take a pointer to a sub-field within a structure pointed to by a GpuPointer, keeping the
+/// lifetime.
+#[macro_export]
+macro_rules! inner_ptr {
+    ($gpuva:expr, $($f:tt)*) => ({
+        // This mirrors kernel::offset_of(), except we use type inference to avoid having to know
+        // the type of the pointer explicitly.
+        fn uninit_from<T: GpuStruct>(_: GpuPointer<'_, T>) -> core::mem::MaybeUninit<T::Raw<'static>> {
+            core::mem::MaybeUninit::uninit()
+        }
+        let tmp = uninit_from($gpuva);
+        let outer = tmp.as_ptr();
+        // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of` ensures that
+        // we don't actually read from `outer` (which would be UB) nor create an intermediate
+        // reference.
+        let p: *const _ = unsafe { core::ptr::addr_of!((*outer).$($f)*) };
+        let inner = p as *const u8;
+        // SAFETY: The two pointers are within the same allocation block.
+        let off = unsafe { inner.offset_from(outer as *const u8) };
+        // SAFETY: The resulting pointer is guaranteed to point to valid memory within the outer
+        // object.
+        unsafe { $gpuva.offset(off.try_into().unwrap(), p) }
+    })
+}
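+
+// A hedged usage sketch (illustrative only; `SomeFwStruct` and its `flags` field are
+// hypothetical): given a strong pointer to a GPU object, `inner_ptr!` yields a pointer to one of
+// its raw fields while keeping the original lifetime:
+//
+//     let obj_ptr: GpuPointer<'_, SomeFwStruct> = obj.gpu_pointer();
+//     let flags_ptr = inner_ptr!(obj_ptr, flags); // GpuPointer<'_, FlagsType>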
+
+/// A GPU-side weak pointer, which is a 64-bit non-zero VA with no lifetime.
+///
+/// In rare cases these pointers are not aligned, so this is `packed(1)`.
+#[repr(C, packed(1))]
+pub(crate) struct GpuWeakPointer<T: ?Sized>(NonZeroU64, PhantomData<*const T>);
+
+/// SAFETY: GPU weak pointers are always safe to send between threads.
+unsafe impl<T: ?Sized> Send for GpuWeakPointer<T> {}
+/// SAFETY: GPU weak pointers are always safe to share between threads.
+unsafe impl<T: ?Sized> Sync for GpuWeakPointer<T> {}
+
+// Weak pointers can be copied/cloned regardless of their target type.
+impl<T: ?Sized> Copy for GpuWeakPointer<T> {}
+
+impl<T: ?Sized> Clone for GpuWeakPointer<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T: ?Sized> GpuWeakPointer<T> {
+    /// Add an arbitrary offset to the pointer. This is not safe (from the GPU perspective), and
+    /// should only be used via the `inner_weak_ptr` macro to get pointers to inner fields, hence we
+    /// mark it `unsafe` to discourage direct use.
+    ///
+    /// # Safety
+    /// Do not use directly, only via `inner_weak_ptr`.
+    // NOTE: The third argument is a type inference hack.
+    pub(crate) unsafe fn offset<U>(&self, off: usize, _: *const U) -> GpuWeakPointer<U> {
+        GpuWeakPointer::<U>(
+            NonZeroU64::new(self.0.get() + (off as u64)).unwrap(),
+            PhantomData,
+        )
+    }
+
+    /// Upgrade a weak pointer into a strong pointer. This is not considered safe from the GPU
+    /// perspective.
+    ///
+    /// # Safety
+    /// The caller must ensure that the data pointed to lives on the GPU for at least as long as the
+    /// returned lifetime.
+    pub(crate) unsafe fn upgrade<'a>(&self) -> GpuPointer<'a, T> {
+        GpuPointer(self.0, PhantomData)
+    }
+}
+
+impl<T: ?Sized> From<GpuWeakPointer<T>> for u64 {
+    fn from(value: GpuWeakPointer<T>) -> Self {
+        value.0.get()
+    }
+}
+
+impl<T: ?Sized> Debug for GpuWeakPointer<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        let val = self.0;
+        f.write_fmt(format_args!("{:#x} ({})", val, core::any::type_name::<T>()))
+    }
+}
+
+/// Take a pointer to a sub-field within a structure pointed to by a GpuWeakPointer.
+#[macro_export]
+macro_rules! inner_weak_ptr {
+    ($gpuva:expr, $($f:tt)*) => ({
+        // See inner_ptr()
+        fn uninit_from<T: GpuStruct>(_: GpuWeakPointer<T>) -> core::mem::MaybeUninit<T::Raw<'static>> {
+            core::mem::MaybeUninit::uninit()
+        }
+        let tmp = uninit_from($gpuva);
+        let outer = tmp.as_ptr();
+        // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of` ensures that
+        // we don't actually read from `outer` (which would be UB) nor create an intermediate
+        // reference.
+        let p: *const _ = unsafe { core::ptr::addr_of!((*outer).$($f)*) };
+        let inner = p as *const u8;
+        // SAFETY: The two pointers are within the same allocation block.
+        let off = unsafe { inner.offset_from(outer as *const u8) };
+        // SAFETY: The resulting pointer is guaranteed to point to valid memory within the outer
+        // object.
+        unsafe { $gpuva.offset(off.try_into().unwrap(), p) }
+    })
+}
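+
+// Usage mirrors the `inner_ptr!` sketch above, but the result is a `GpuWeakPointer` with no
+// lifetime attached, e.g. (hypothetical field): `inner_weak_ptr!(weak, flags)`.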
+
+/// Types that implement this trait represent a GPU structure from the CPU side.
+///
+/// The `Raw` type represents the actual raw structure definition on the GPU side.
+///
+/// Types implementing [`GpuStruct`] must have fields owning any objects (or strong references
+/// to them) that GPU pointers in the `Raw` structure point to. This mechanism is used to enforce
+/// lifetimes.
+pub(crate) trait GpuStruct: 'static {
+    /// The type of the GPU-side structure definition representing the firmware struct layout.
+    type Raw<'a>;
+}
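+
+// A hedged sketch of the intended pattern (the types below are hypothetical, not part of the
+// driver): the CPU-side struct owns the objects its `Raw` layout points to, so the borrow checker
+// ties the GPU VA stored in `Raw<'a>` to the lifetime of the owned child:
+//
+//     struct RawNode<'a> {
+//         child: GpuPointer<'a, Child>,
+//     }
+//
+//     struct Node {
+//         child: GpuObject<Child, SomeAllocation>, // owned, so it outlives any `RawNode<'a>`
+//     }
+//
+//     impl GpuStruct for Node {
+//         type Raw<'a> = RawNode<'a>;
+//     }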
+
+/// An instance of a GPU object in memory.
+///
+/// # Invariants
+/// `raw` must point to a valid mapping of the `T::Raw` type associated with the `alloc` allocation.
+/// `gpu_ptr` must be the GPU address of the same object.
+pub(crate) struct GpuObject<T: GpuStruct, U: Allocation<T>> {
+    raw: *mut T::Raw<'static>,
+    alloc: U,
+    gpu_ptr: GpuWeakPointer<T>,
+    inner: KBox<T>,
+}
+
+impl<T: GpuStruct, U: Allocation<T>> GpuObject<T, U> {
+    /// Create a new GpuObject given an allocator and the inner data (a type implementing
+    /// GpuStruct).
+    ///
+    /// The caller passes a closure that constructs the `T::Raw` type given a reference to the
+    /// `GpuStruct`. This is the mechanism used to enforce lifetimes.
+    pub(crate) fn new(
+        alloc: U,
+        inner: T,
+        callback: impl for<'a> FnOnce(&'a T) -> T::Raw<'a>,
+    ) -> Result<Self> {
+        let size = mem::size_of::<T::Raw<'static>>();
+        if size > 0x1000 {
+            dev_crit!(
+                alloc.device().as_ref(),
+                "Allocating {} of size {:#x}, with new, please use new_boxed!\n",
+                core::any::type_name::<T>(),
+                size
+            );
+        }
+        if alloc.size() < size {
+            return Err(ENOMEM);
+        }
+        let gpu_ptr =
+            GpuWeakPointer::<T>(NonZeroU64::new(alloc.gpu_ptr()).ok_or(EINVAL)?, PhantomData);
+        mod_dev_dbg!(
+            alloc.device(),
+            "Allocating {} @ {:#x}\n",
+            core::any::type_name::<T>(),
+            alloc.gpu_ptr()
+        );
+        let p = alloc.ptr().ok_or(EINVAL)?.as_ptr() as *mut T::Raw<'static>;
+        let mut raw = callback(&inner);
+        // SAFETY: `p` is guaranteed to be valid per the Allocation invariant, and the type is
+        // identical to the type of `raw` other than the lifetime.
+        unsafe { p.copy_from(&mut raw as *mut _ as *mut u8 as *mut _, 1) };
+        mem::forget(raw);
+        Ok(Self {
+            raw: p,
+            gpu_ptr,
+            alloc,
+            inner: KBox::new(inner, GFP_KERNEL)?,
+        })
+    }
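+
+    // A hedged usage sketch (names are hypothetical): the closure receives `&inner` with a
+    // constrained lifetime, so GPU pointers written into the raw struct cannot outlive the data
+    // they borrow from:
+    //
+    //     let obj = GpuObject::new(alloc, Node { child }, |inner| RawNode {
+    //         child: inner.child.gpu_pointer(),
+    //     })?;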
+
+    /// Create a new GpuObject given an allocator and the boxed inner data (a type implementing
+    /// GpuStruct).
+    ///
+    /// The caller passes a closure that initializes the `T::Raw` type given a reference to the
+    /// `GpuStruct` and a `MaybeUninit<T::Raw>`. This is intended to be used with the place!()
+    /// macro to avoid constructing the whole `T::Raw` object on the stack.
+    pub(crate) fn new_boxed(
+        alloc: U,
+        inner: KBox<T>,
+        callback: impl for<'a> FnOnce(
+            &'a T,
+            &'a mut MaybeUninit<T::Raw<'a>>,
+        ) -> Result<&'a mut T::Raw<'a>>,
+    ) -> Result<Self> {
+        if alloc.size() < mem::size_of::<T::Raw<'static>>() {
+            return Err(ENOMEM);
+        }
+        let gpu_ptr =
+            GpuWeakPointer::<T>(NonZeroU64::new(alloc.gpu_ptr()).ok_or(EINVAL)?, PhantomData);
+        mod_dev_dbg!(
+            alloc.device(),
+            "Allocating {} @ {:#x}\n",
+            core::any::type_name::<T>(),
+            alloc.gpu_ptr()
+        );
+        let p = alloc.ptr().ok_or(EINVAL)?.as_ptr() as *mut MaybeUninit<T::Raw<'_>>;
+        // SAFETY: `p` is guaranteed to be valid per the Allocation invariant.
+        let raw = callback(&inner, unsafe { &mut *p })?;
+        if p as *mut T::Raw<'_> != raw as *mut _ {
+            dev_err!(
+                alloc.device().as_ref(),
+                "Allocation callback returned a mismatched reference ({})\n",
+                core::any::type_name::<T>(),
+            );
+            return Err(EINVAL);
+        }
+        Ok(Self {
+            raw: p as *mut u8 as *mut T::Raw<'static>,
+            gpu_ptr,
+            alloc,
+            inner,
+        })
+    }
+
+    /// Create a new GpuObject given an allocator and the inner data (a type implementing
+    /// GpuStruct).
+    ///
+    /// The caller passes a closure that initializes the `T::Raw` type given a reference to the
+    /// `GpuStruct` and a `MaybeUninit<T::Raw>`. This is intended to be used with the place!()
+    /// macro to avoid constructing the whole `T::Raw` object on the stack.
+    pub(crate) fn new_inplace(
+        alloc: U,
+        inner: T,
+        callback: impl for<'a> FnOnce(
+            &'a T,
+            &'a mut MaybeUninit<T::Raw<'a>>,
+        ) -> Result<&'a mut T::Raw<'a>>,
+    ) -> Result<Self> {
+        GpuObject::<T, U>::new_boxed(alloc, KBox::new(inner, GFP_KERNEL)?, callback)
+    }
+
+    /// Create a new GpuObject given an allocator, using pin-init style initializers for both the
+    /// inner data and the raw GPU structure.
+    ///
+    /// The caller passes one closure that returns an initializer for the inner `GpuStruct` data,
+    /// given the object's weak GPU pointer, and another that returns a pin-initializer for
+    /// `T::Raw` given the inner data, which is used to initialize the raw structure in place in
+    /// the GPU-shared allocation.
+    pub(crate) fn new_init_prealloc<'a, I: Init<T, E>, R: PinInit<T::Raw<'a>, F>, E, F>(
+        alloc: U,
+        inner_init: impl FnOnce(GpuWeakPointer<T>) -> I,
+        raw_init: impl FnOnce(&'a T, GpuWeakPointer<T>) -> R,
+    ) -> Result<Self>
+    where
+        kernel::error::Error: core::convert::From<E>,
+        kernel::error::Error: core::convert::From<F>,
+    {
+        if alloc.size() < mem::size_of::<T::Raw<'static>>() {
+            return Err(ENOMEM);
+        }
+        let gpu_ptr =
+            GpuWeakPointer::<T>(NonZeroU64::new(alloc.gpu_ptr()).ok_or(EINVAL)?, PhantomData);
+        mod_dev_dbg!(
+            alloc.device(),
+            "Allocating {} @ {:#x}\n",
+            core::any::type_name::<T>(),
+            alloc.gpu_ptr()
+        );
+        let inner = inner_init(gpu_ptr);
+        let p = alloc.ptr().ok_or(EINVAL)?.as_ptr() as *mut T::Raw<'_>;
+        let ret = Self {
+            raw: p as *mut u8 as *mut T::Raw<'static>,
+            gpu_ptr,
+            alloc,
+            inner: KBox::init(inner, GFP_KERNEL)?,
+        };
+        let q = &*ret.inner as *const T;
+        // SAFETY: `p` is guaranteed to be valid per the Allocation invariant.
+        unsafe { raw_init(&*q, gpu_ptr).__pinned_init(p) }?;
+        Ok(ret)
+    }
+
+    /// Returns the GPU VA of this object (as a raw [`NonZeroU64`])
+    pub(crate) fn gpu_va(&self) -> NonZeroU64 {
+        self.gpu_ptr.0
+    }
+
+    /// Returns a strong GPU pointer to this object, with a lifetime.
+    pub(crate) fn gpu_pointer(&self) -> GpuPointer<'_, T> {
+        GpuPointer(self.gpu_ptr.0, PhantomData)
+    }
+
+    /// Returns a weak GPU pointer to this object, with no lifetime.
+    pub(crate) fn weak_pointer(&self) -> GpuWeakPointer<T> {
+        GpuWeakPointer(self.gpu_ptr.0, PhantomData)
+    }
+
+    /// Perform a mutation to the inner `Raw` data given a user-supplied callback.
+    ///
+    /// The callback gets a mutable reference to the `GpuStruct` type.
+    pub(crate) fn with_mut<RetVal>(
+        &mut self,
+        callback: impl for<'a> FnOnce(&'a mut <T as GpuStruct>::Raw<'a>, &'a mut T) -> RetVal,
+    ) -> RetVal {
+        // SAFETY: `self.raw` is valid per the type invariant, and the second half is just
+        // converting lifetimes.
+        unsafe { callback(&mut *self.raw, &mut *(&mut *self.inner as *mut _)) }
+    }
+
+    /// Access the inner `Raw` data given a user-supplied callback.
+    ///
+    /// The callback gets a reference to the `GpuStruct` type.
+    pub(crate) fn with<RetVal>(
+        &self,
+        callback: impl for<'a> FnOnce(&'a <T as GpuStruct>::Raw<'a>, &'a T) -> RetVal,
+    ) -> RetVal {
+        // SAFETY: `self.raw` is valid per the type invariant, and the second half is just
+        // converting lifetimes.
+        unsafe { callback(&*self.raw, &*(&*self.inner as *const _)) }
+    }
+}
+
+impl<T: GpuStruct, U: Allocation<T>> Deref for GpuObject<T, U> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<T: GpuStruct, U: Allocation<T>> DerefMut for GpuObject<T, U> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl<T: GpuStruct + Debug, U: Allocation<T>> Debug for GpuObject<T, U>
+where
+    <T as GpuStruct>::Raw<'static>: Debug,
+{
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct(core::any::type_name::<T>())
+            // SAFETY: `self.raw` is valid per the type invariant.
+            .field("raw", &format_args!("{:#X?}", unsafe { &*self.raw }))
+            .field("inner", &format_args!("{:#X?}", &self.inner))
+            .field("alloc", &format_args!("{:?}", &self.alloc))
+            .finish()
+    }
+}
+
+impl<T: GpuStruct + Default, U: Allocation<T>> GpuObject<T, U>
+where
+    for<'a> <T as GpuStruct>::Raw<'a>: Default + Zeroable,
+{
+    /// Create a new GpuObject with default data. `T` must implement `Default` and `T::Raw` must
+    /// implement `Zeroable`, since the GPU-side memory is initialized by zeroing.
+    pub(crate) fn new_default(alloc: U) -> Result<Self> {
+        GpuObject::<T, U>::new_inplace(alloc, Default::default(), |_inner, raw| {
+            // SAFETY: `raw` is valid here, and `T::Raw` implements `Zeroable`.
+            Ok(unsafe {
+                ptr::write_bytes(raw, 0, 1);
+                (*raw).assume_init_mut()
+            })
+        })
+    }
+}
+
+impl<T: GpuStruct, U: Allocation<T>> Drop for GpuObject<T, U> {
+    fn drop(&mut self) {
+        mod_dev_dbg!(
+            self.alloc.device(),
+            "Dropping {} @ {:?}\n",
+            core::any::type_name::<T>(),
+            self.gpu_pointer()
+        );
+    }
+}
+
+// SAFETY: GpuObjects are Send as long as the GpuStruct itself is Send
+unsafe impl<T: GpuStruct + Send, U: Allocation<T>> Send for GpuObject<T, U> {}
+// SAFETY: GpuObjects are Sync as long as the GpuStruct itself is Sync
+unsafe impl<T: GpuStruct + Sync, U: Allocation<T>> Sync for GpuObject<T, U> {}
+
+/// Trait used to erase the type of a GpuObject, used when we need to keep a list of heterogeneous
+/// objects around.
+pub(crate) trait OpaqueGpuObject: Send + Sync {
+    fn gpu_va(&self) -> NonZeroU64;
+}
+
+impl<T: GpuStruct + Sync + Send, U: Allocation<T>> OpaqueGpuObject for GpuObject<T, U> {
+    fn gpu_va(&self) -> NonZeroU64 {
+        Self::gpu_va(self)
+    }
+}
+
+/// An array of raw GPU objects that is only accessible to the GPU (no CPU-side mapping required).
+///
+/// This must necessarily be uninitialized as far as the GPU is concerned, so it cannot be used
+/// when initialization is required.
+///
+/// # Invariants
+///
+/// `alloc` is valid and at least as large as `len` times the size of one `T`.
+/// `gpu_ptr` is valid and points to the allocation start.
+pub(crate) struct GpuOnlyArray<T, U: Allocation<T>> {
+    len: usize,
+    alloc: U,
+    gpu_ptr: NonZeroU64,
+    _p: PhantomData<T>,
+}
+
+impl<T, U: Allocation<T>> GpuOnlyArray<T, U> {
+    /// Allocate a new GPU-only array with the given length.
+    pub(crate) fn new(alloc: U, count: usize) -> Result<GpuOnlyArray<T, U>> {
+        let bytes = count * mem::size_of::<T>();
+        let gpu_ptr = NonZeroU64::new(alloc.gpu_ptr()).ok_or(EINVAL)?;
+        if alloc.size() < bytes {
+            return Err(ENOMEM);
+        }
+        Ok(Self {
+            len: count,
+            alloc,
+            gpu_ptr,
+            _p: PhantomData,
+        })
+    }
+
+    /// Returns the GPU VA of this array (as a raw [`NonZeroU64`])
+    pub(crate) fn gpu_va(&self) -> NonZeroU64 {
+        self.gpu_ptr
+    }
+
+    /// Returns a strong GPU pointer to this array, with a lifetime.
+    pub(crate) fn gpu_pointer(&self) -> GpuPointer<'_, &'_ [T]> {
+        GpuPointer(self.gpu_ptr, PhantomData)
+    }
+
+    /// Returns a weak GPU pointer to this array, with no lifetime.
+    pub(crate) fn weak_pointer(&self) -> GpuWeakPointer<[T]> {
+        GpuWeakPointer(self.gpu_ptr, PhantomData)
+    }
+
+    /// Returns a pointer to an offset within the array (as a subslice).
+    pub(crate) fn gpu_offset_pointer(&self, offset: usize) -> GpuPointer<'_, &'_ [T]> {
+        if offset > self.len {
+            panic!("Index {} out of bounds (len: {})", offset, self.len);
+        }
+        GpuPointer(
+            NonZeroU64::new(self.gpu_ptr.get() + (offset * mem::size_of::<T>()) as u64).unwrap(),
+            PhantomData,
+        )
+    }
+
+    /* Not used yet
+    /// Returns a weak pointer to an offset within the array (as a subslice).
+    pub(crate) fn weak_offset_pointer(&self, offset: usize) -> GpuWeakPointer<[T]> {
+        if offset > self.len {
+            panic!("Index {} out of bounds (len: {})", offset, self.len);
+        }
+        GpuWeakPointer(
+            NonZeroU64::new(self.gpu_ptr.get() + (offset * mem::size_of::<T>()) as u64).unwrap(),
+            PhantomData,
+        )
+    }
+
+    /// Returns a pointer to an element within the array.
+    pub(crate) fn gpu_item_pointer(&self, index: usize) -> GpuPointer<'_, &'_ T> {
+        if index >= self.len {
+            panic!("Index {} out of bounds (len: {})", index, self.len);
+        }
+        GpuPointer(
+            NonZeroU64::new(self.gpu_ptr.get() + (index * mem::size_of::<T>()) as u64).unwrap(),
+            PhantomData,
+        )
+    }
+    */
+
+    /// Returns a weak pointer to an element within the array.
+    pub(crate) fn weak_item_pointer(&self, index: usize) -> GpuWeakPointer<T> {
+        if index >= self.len {
+            panic!("Index {} out of bounds (len: {})", index, self.len);
+        }
+        GpuWeakPointer(
+            NonZeroU64::new(self.gpu_ptr.get() + (index * mem::size_of::<T>()) as u64).unwrap(),
+            PhantomData,
+        )
+    }
+
+    /// Returns the length of the array.
+    pub(crate) fn len(&self) -> usize {
+        self.len
+    }
+}
+
+impl<T: Debug, U: Allocation<T>> Debug for GpuOnlyArray<T, U> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct(core::any::type_name::<T>())
+            .field("len", &format_args!("{:#X?}", self.len()))
+            .finish()
+    }
+}
+
+impl<T, U: Allocation<T>> Drop for GpuOnlyArray<T, U> {
+    fn drop(&mut self) {
+        mod_dev_dbg!(
+            self.alloc.device(),
+            "Dropping {} @ {:?}\n",
+            core::any::type_name::<T>(),
+            self.gpu_pointer()
+        );
+    }
+}
+
+/// An array of raw GPU objects that is also CPU-accessible.
+///
+/// # Invariants
+///
+/// `raw` is valid and points to the CPU-side view of the array (which must have one).
+pub(crate) struct GpuArray<T, U: Allocation<T>> {
+    raw: *mut T,
+    array: GpuOnlyArray<T, U>,
+}
+
+impl<T: Default, U: Allocation<T>> GpuArray<T, U> {
+    /// Allocate a new GPU array, initializing each element to its default.
+    pub(crate) fn empty(alloc: U, count: usize) -> Result<GpuArray<T, U>> {
+        let p = alloc.ptr().ok_or(EINVAL)?.as_ptr();
+        let inner = GpuOnlyArray::new(alloc, count)?;
+        let mut pi = p;
+        for _i in 0..count {
+            // SAFETY: `pi` is valid per the Allocation type invariant, and `GpuOnlyArray::new()`
+            // has already verified that the allocation holds at least `count` elements, so this
+            // loop never writes past the end of the buffer.
+            unsafe {
+                pi.write(Default::default());
+                pi = pi.add(1);
+            }
+        }
+        Ok(Self {
+            raw: p,
+            array: inner,
+        })
+    }
+}
+
+impl<T, U: Allocation<T>> GpuArray<T, U> {
+    /// Get a slice view of the array contents.
+    pub(crate) fn as_slice(&self) -> &[T] {
+        // SAFETY: self.raw / self.len are valid per the type invariant
+        unsafe { slice::from_raw_parts(self.raw, self.len) }
+    }
+
+    /// Get a mutable slice view of the array contents.
+    pub(crate) fn as_mut_slice(&mut self) -> &mut [T] {
+        // SAFETY: self.raw / self.len are valid per the type invariant
+        unsafe { slice::from_raw_parts_mut(self.raw, self.len) }
+    }
+}
+
+impl<T, U: Allocation<T>> Deref for GpuArray<T, U> {
+    type Target = GpuOnlyArray<T, U>;
+
+    fn deref(&self) -> &GpuOnlyArray<T, U> {
+        &self.array
+    }
+}
+
+impl<T, U: Allocation<T>> Index<usize> for GpuArray<T, U> {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &T {
+        if index >= self.len {
+            panic!("Index {} out of bounds (len: {})", index, self.len);
+        }
+        // SAFETY: This is bounds checked above
+        unsafe { &*(self.raw.add(index)) }
+    }
+}
+
+impl<T, U: Allocation<T>> IndexMut<usize> for GpuArray<T, U> {
+    fn index_mut(&mut self, index: usize) -> &mut T {
+        if index >= self.len {
+            panic!("Index {} out of bounds (len: {})", index, self.len);
+        }
+        // SAFETY: This is bounds checked above
+        unsafe { &mut *(self.raw.add(index)) }
+    }
+}
+
+// SAFETY: GpuArray is Send as long as the contained type itself is Send
+unsafe impl<T: Send, U: Allocation<T>> Send for GpuArray<T, U> {}
+// SAFETY: GpuArray is Sync as long as the contained type itself is Sync
+unsafe impl<T: Sync, U: Allocation<T>> Sync for GpuArray<T, U> {}
+
+impl<T: Debug, U: Allocation<T>> Debug for GpuArray<T, U> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct(core::any::type_name::<T>())
+            .field("array", &format_args!("{:#X?}", self.as_slice()))
+            .finish()
+    }
+}
diff --git a/drivers/gpu/drm/asahi/pgtable.rs b/drivers/gpu/drm/asahi/pgtable.rs
new file mode 100644
index 00000000000000..d2f74f95128b86
--- /dev/null
+++ b/drivers/gpu/drm/asahi/pgtable.rs
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! UAT Page Table management
+//!
+//! AGX GPUs use an MMU called the UAT, which is largely compatible with the ARM64 page table
+//! format. This module manages the actual page tables by allocating raw memory pages from
+//! the kernel page allocator.
+
+use core::fmt::Debug;
+use core::mem::size_of;
+use core::ops::Range;
+use core::sync::atomic::{AtomicU64, Ordering};
+
+use kernel::uapi::{PF_R, PF_W, PF_X};
+use kernel::{addr::PhysicalAddr, error::Result, page::Page, prelude::*, types::Owned};
+
+use crate::debug::*;
+use crate::util::align;
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::PgTable;
+
+/// Number of bits in a page offset.
+pub(crate) const UAT_PGBIT: usize = 14;
+/// UAT page size.
+pub(crate) const UAT_PGSZ: usize = 1 << UAT_PGBIT;
+/// UAT page offset mask.
+pub(crate) const UAT_PGMSK: usize = UAT_PGSZ - 1;
+
+type Pte = AtomicU64;
+
+const PTE_BIT: usize = 3; // log2(sizeof(Pte))
+const PTE_SIZE: usize = 1 << PTE_BIT;
+
+/// Number of PTEs per page.
+const UAT_NPTE: usize = UAT_PGSZ / size_of::<Pte>();
+
+/// Number of address bits to address a level
+const UAT_LVBIT: usize = UAT_PGBIT - PTE_BIT;
+/// Number of entries per level
+const UAT_LVSZ: usize = UAT_NPTE;
+/// Mask of level bits
+const UAT_LVMSK: u64 = (UAT_LVSZ - 1) as u64;
+
+const UAT_LEVELS: usize = 3;
+
+/// UAT input address space
+pub(crate) const UAT_IAS: usize = 39;
+const UAT_IASMSK: u64 = (1u64 << UAT_IAS) - 1;
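+
+// A worked breakdown derived from the constants above, for reference: with 16 KiB pages
+// (UAT_PGBIT = 14) and 8-byte PTEs, each table holds 2048 entries, i.e. 11 index bits per level
+// (UAT_LVBIT). A 39-bit input address therefore decomposes as:
+//
+//     bits [13:0]  -> page offset
+//     bits [24:14] -> level-0 (leaf) table index
+//     bits [35:25] -> level-1 table index
+//     bits [38:36] -> level-2 (TTB) table index, so only 8 of its 2048 slots are used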
+
+const PTE_TYPE_BITS: u64 = 3;
+const PTE_TYPE_LEAF_TABLE: u64 = 3;
+
+const UAT_NON_GLOBAL: u64 = 1 << 11;
+const UAT_AP_SHIFT: u32 = 6;
+const UAT_AP_BITS: u64 = 3 << UAT_AP_SHIFT;
+const UAT_HIGH_BITS_SHIFT: u32 = 52;
+const UAT_HIGH_BITS: u64 = 0xfff << UAT_HIGH_BITS_SHIFT;
+const UAT_MEMATTR_SHIFT: u32 = 2;
+const UAT_MEMATTR_BITS: u64 = 7 << UAT_MEMATTR_SHIFT;
+
+const UAT_PROT_BITS: u64 = UAT_AP_BITS | UAT_MEMATTR_BITS | UAT_HIGH_BITS;
+
+const UAT_AF: u64 = 1 << 10;
+
+const MEMATTR_CACHED: u8 = 0;
+const MEMATTR_DEV: u8 = 1;
+const MEMATTR_UNCACHED: u8 = 2;
+
+const AP_FW_GPU: u8 = 0;
+const AP_FW: u8 = 1;
+const AP_GPU: u8 = 2;
+
+const HIGH_BITS_PXN: u16 = 1 << 1;
+const HIGH_BITS_UXN: u16 = 1 << 2;
+const HIGH_BITS_GPU_ACCESS: u16 = 1 << 3;
+
+pub(crate) const PTE_ADDR_BITS: u64 = (!UAT_PGMSK as u64) & (!UAT_HIGH_BITS);
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct Prot {
+    memattr: u8,
+    ap: u8,
+    high_bits: u16,
+}
+
+// Firmware + GPU access
+const PROT_FW_GPU_NA: Prot = Prot::from_bits(AP_FW_GPU, 0, 0);
+const _PROT_FW_GPU_RO: Prot = Prot::from_bits(AP_FW_GPU, 0, 1);
+const _PROT_FW_GPU_WO: Prot = Prot::from_bits(AP_FW_GPU, 1, 0);
+const PROT_FW_GPU_RW: Prot = Prot::from_bits(AP_FW_GPU, 1, 1);
+
+// Firmware only access
+const PROT_FW_RO: Prot = Prot::from_bits(AP_FW, 0, 0);
+const _PROT_FW_NA: Prot = Prot::from_bits(AP_FW, 0, 1);
+const PROT_FW_RW: Prot = Prot::from_bits(AP_FW, 1, 0);
+const PROT_FW_RW_GPU_RO: Prot = Prot::from_bits(AP_FW, 1, 1);
+
+// GPU only access
+const PROT_GPU_RO: Prot = Prot::from_bits(AP_GPU, 0, 0);
+const PROT_GPU_WO: Prot = Prot::from_bits(AP_GPU, 0, 1);
+const PROT_GPU_RW: Prot = Prot::from_bits(AP_GPU, 1, 0);
+const _PROT_GPU_NA: Prot = Prot::from_bits(AP_GPU, 1, 1);
+
+const PF_RW: u32 = PF_R | PF_W;
+const PF_RX: u32 = PF_R | PF_X;
+
+// For crash dumps
+const PROT_TO_PERMS_FW: [[u32; 4]; 4] = [
+    [0, 0, 0, PF_RW],
+    [0, PF_RW, 0, PF_RW],
+    [PF_RX, PF_RX, 0, PF_R],
+    [PF_RX, PF_RW, 0, PF_R],
+];
+const PROT_TO_PERMS_OS: [[u32; 4]; 4] = [
+    [0, PF_R, PF_W, PF_RW],
+    [PF_R, 0, PF_RW, PF_RW],
+    [0, 0, 0, 0],
+    [0, 0, 0, 0],
+];
+
+pub(crate) mod prot {
+    pub(crate) use super::Prot;
+    use super::*;
+
+    /// Firmware MMIO R/W
+    pub(crate) const PROT_FW_MMIO_RW: Prot = PROT_FW_RW.memattr(MEMATTR_DEV);
+    /// Firmware MMIO R/O
+    pub(crate) const PROT_FW_MMIO_RO: Prot = PROT_FW_RO.memattr(MEMATTR_DEV);
+    /// Firmware shared (uncached) RW
+    pub(crate) const PROT_FW_SHARED_RW: Prot = PROT_FW_RW.memattr(MEMATTR_UNCACHED);
+    /// Firmware shared (uncached) RO
+    pub(crate) const PROT_FW_SHARED_RO: Prot = PROT_FW_RO.memattr(MEMATTR_UNCACHED);
+    /// Firmware private (cached) RW
+    pub(crate) const PROT_FW_PRIV_RW: Prot = PROT_FW_RW.memattr(MEMATTR_CACHED);
+    /// Firmware/GPU shared (uncached) RW
+    pub(crate) const PROT_GPU_FW_SHARED_RW: Prot = PROT_FW_GPU_RW.memattr(MEMATTR_UNCACHED);
+    /// Firmware/GPU shared (private) RW
+    pub(crate) const PROT_GPU_FW_PRIV_RW: Prot = PROT_FW_GPU_RW.memattr(MEMATTR_CACHED);
+    /// Firmware-RW/GPU-RO shared (private) RW
+    pub(crate) const PROT_GPU_RO_FW_PRIV_RW: Prot = PROT_FW_RW_GPU_RO.memattr(MEMATTR_CACHED);
+    /// GPU shared/coherent RW
+    pub(crate) const PROT_GPU_SHARED_RW: Prot = PROT_GPU_RW.memattr(MEMATTR_UNCACHED);
+    /// GPU shared/coherent RO
+    pub(crate) const PROT_GPU_SHARED_RO: Prot = PROT_GPU_RO.memattr(MEMATTR_UNCACHED);
+    /// GPU shared/coherent WO
+    pub(crate) const PROT_GPU_SHARED_WO: Prot = PROT_GPU_WO.memattr(MEMATTR_UNCACHED);
+}
+
+impl Prot {
+    const fn from_bits(ap: u8, uxn: u16, pxn: u16) -> Self {
+        assert!(uxn <= 1);
+        assert!(pxn <= 1);
+        assert!(ap <= 3);
+
+        Prot {
+            high_bits: HIGH_BITS_GPU_ACCESS | (pxn * HIGH_BITS_PXN) | (uxn * HIGH_BITS_UXN),
+            memattr: 0,
+            ap,
+        }
+    }
+
+    pub(crate) const fn from_pte(pte: u64) -> Self {
+        Prot {
+            high_bits: (pte >> UAT_HIGH_BITS_SHIFT) as u16,
+            ap: ((pte & UAT_AP_BITS) >> UAT_AP_SHIFT) as u8,
+            memattr: ((pte & UAT_MEMATTR_BITS) >> UAT_MEMATTR_SHIFT) as u8,
+        }
+    }
+
+    pub(crate) const fn elf_flags(&self) -> u32 {
+        let ap = (self.ap & 3) as usize;
+        let uxn = if self.high_bits & HIGH_BITS_UXN != 0 {
+            1
+        } else {
+            0
+        };
+        let pxn = if self.high_bits & HIGH_BITS_PXN != 0 {
+            1
+        } else {
+            0
+        };
+        let gpu = self.high_bits & HIGH_BITS_GPU_ACCESS != 0;
+
+        // Format:
+        // [12 top bits of PTE] [12 bottom bits of PTE] [5 bits pad] [ELF RWX]
+        let mut perms = if gpu {
+            PROT_TO_PERMS_OS[ap][(uxn << 1) | pxn]
+        } else {
+            PROT_TO_PERMS_FW[ap][(uxn << 1) | pxn]
+        };
+
+        perms |= ((self.as_pte() >> 52) << 20) as u32;
+        perms |= ((self.as_pte() & 0xfff) << 8) as u32;
+
+        perms
+    }
+
+    const fn memattr(&self, memattr: u8) -> Self {
+        Self { memattr, ..*self }
+    }
+
+    const fn as_pte(&self) -> u64 {
+        (self.ap as u64) << UAT_AP_SHIFT
+            | (self.high_bits as u64) << UAT_HIGH_BITS_SHIFT
+            | (self.memattr as u64) << UAT_MEMATTR_SHIFT
+            | UAT_AF
+    }
+
+    pub(crate) const fn is_cached_noncoherent(&self) -> bool {
+        self.ap != AP_GPU && self.memattr == MEMATTR_CACHED
+    }
+
+    pub(crate) const fn as_uncached(&self) -> Self {
+        self.memattr(MEMATTR_UNCACHED)
+    }
+}
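+
+// A hedged worked example, computed from the constant definitions above: for
+// prot::PROT_FW_SHARED_RW, i.e. Prot::from_bits(AP_FW, 1, 0) with MEMATTR_UNCACHED, as_pte()
+// composes:
+//
+//     AP      = AP_FW (1)            -> 1 << 6    = 0x40
+//     memattr = MEMATTR_UNCACHED (2) -> 2 << 2    = 0x8
+//     AF                             -> 1 << 10   = 0x400
+//     high    = GPU_ACCESS | UXN     -> 0xc << 52 = 0xc0000000000000
+//
+// i.e. protection bits of 0x00c0_0000_0000_0448, which map_pages() ORs together with the
+// physical address and the PTE type bits from pte_bits().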
+
+impl Default for Prot {
+    fn default() -> Self {
+        PROT_FW_GPU_NA
+    }
+}
+
+pub(crate) struct DumpedPage {
+    pub(crate) iova: u64,
+    pub(crate) pte: u64,
+    pub(crate) data: Option<Owned<Page>>,
+}
+
+pub(crate) struct UatPageTable {
+    ttb: PhysicalAddr,
+    ttb_owned: bool,
+    va_range: Range<u64>,
+    oas_mask: u64,
+}
+
+impl UatPageTable {
+    pub(crate) fn new(oas: usize) -> Result<Self> {
+        mod_pr_debug!("UATPageTable::new: oas={}\n", oas);
+        let ttb_page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+        let ttb = Page::into_phys(ttb_page);
+        Ok(UatPageTable {
+            ttb,
+            ttb_owned: true,
+            va_range: 0..(1u64 << UAT_IAS),
+            oas_mask: (1u64 << oas) - 1,
+        })
+    }
+
+    pub(crate) fn new_with_ttb(
+        ttb: PhysicalAddr,
+        va_range: Range<u64>,
+        oas: usize,
+    ) -> Result<Self> {
+        mod_pr_debug!(
+            "UATPageTable::new_with_ttb: ttb={:#x} range={:#x?} oas={}\n",
+            ttb,
+            va_range,
+            oas
+        );
+        if ttb & (UAT_PGMSK as PhysicalAddr) != 0 {
+            return Err(EINVAL);
+        }
+        if (va_range.start | va_range.end) & (UAT_PGMSK as u64) != 0 {
+            return Err(EINVAL);
+        }
+        // SAFETY: The TTB should remain valid (if properly mapped), as it is bootloader-managed.
+        if unsafe { Page::borrow_phys(&ttb) }.is_none() {
+            pr_err!(
+                "UATPageTable::new_with_ttb: ttb at {:#x} is not mapped (DT using no-map?)\n",
+                ttb
+            );
+            return Err(EIO);
+        }
+
+        Ok(UatPageTable {
+            ttb,
+            ttb_owned: false,
+            va_range,
+            oas_mask: (1 << oas) - 1,
+        })
+    }
+
+    pub(crate) fn ttb(&self) -> PhysicalAddr {
+        self.ttb
+    }
+
+    fn with_pages<F>(
+        &mut self,
+        iova_range: Range<u64>,
+        alloc: bool,
+        free: bool,
+        mut cb: F,
+    ) -> Result
+    where
+        F: FnMut(u64, &[Pte]) -> Result,
+    {
+        mod_pr_debug!(
+            "UATPageTable::with_pages: {:#x?} alloc={} free={}\n",
+            iova_range,
+            alloc,
+            free
+        );
+        if (iova_range.start | iova_range.end) & (UAT_PGMSK as u64) != 0 {
+            pr_err!(
+                "UATPageTable::with_pages: iova range not aligned: {:#x?}\n",
+                iova_range
+            );
+            return Err(EINVAL);
+        }
+
+        if iova_range.is_empty() {
+            return Ok(());
+        }
+
+        let mut iova = iova_range.start & UAT_IASMSK;
+        let mut last_iova = iova;
+        // Handle the case where iova_range.end is just at the top boundary of the IAS
+        let end = ((iova_range.end - 1) & UAT_IASMSK) + 1;
+
+        let mut pt_addr: [Option<PhysicalAddr>; UAT_LEVELS] = Default::default();
+        pt_addr[UAT_LEVELS - 1] = Some(self.ttb);
+
+        'outer: while iova < end {
+            mod_pr_debug!("UATPageTable::with_pages: iova={:#x}\n", iova);
+            let addr_diff = last_iova ^ iova;
+            for level in (0..UAT_LEVELS - 1).rev() {
+                // If the iova has changed at this level or above, invalidate the physaddr
+                if addr_diff & !((1 << (UAT_PGBIT + (level + 1) * UAT_LVBIT)) - 1) != 0 {
+                    if let Some(phys) = pt_addr[level].take() {
+                        if free {
+                            mod_pr_debug!(
+                                "UATPageTable::with_pages: free level {} {:#x?}\n",
+                                level,
+                                phys
+                            );
+                            // SAFETY: Page tables for our VA ranges always come from Page::into_phys().
+                            unsafe { Page::from_phys(phys) };
+                        }
+                        mod_pr_debug!("UATPageTable::with_pages: invalidate level {}\n", level);
+                    }
+                }
+            }
+            last_iova = iova;
+            for level in (0..UAT_LEVELS - 1).rev() {
+                // Fetch the page table base address for this level
+                if pt_addr[level].is_none() {
+                    let phys = pt_addr[level + 1].unwrap();
+                    mod_pr_debug!(
+                        "UATPageTable::with_pages: need level {}, parent phys {:#x}\n",
+                        level,
+                        phys
+                    );
+                    let upidx = ((iova >> (UAT_PGBIT + (level + 1) * UAT_LVBIT) as u64) & UAT_LVMSK)
+                        as usize;
+                    // SAFETY: Page table addresses are either allocated by us, or
+                    // firmware-managed and safe to borrow a struct page from.
+                    let upt = unsafe { Page::borrow_phys_unchecked(&phys) };
+                    mod_pr_debug!("UATPageTable::with_pages: borrowed phys {:#x}\n", phys);
+                    pt_addr[level] =
+                        upt.with_pointer_into_page(upidx * PTE_SIZE, PTE_SIZE, |p| {
+                            let uptep = p as *const _ as *const Pte;
+                            // SAFETY: with_pointer_into_page() ensures the pointer is valid,
+                            // and our index is aligned so it is safe to deref as an AtomicU64.
+                            let upte = unsafe { &*uptep };
+                            let mut upte_val = upte.load(Ordering::Relaxed);
+                            // Allocate if requested
+                            if upte_val == 0 && alloc {
+                                let pt_page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
+                                mod_pr_debug!("UATPageTable::with_pages: alloc PT at {:#x}\n", pt_page.phys());
+                                let pt_paddr = Page::into_phys(pt_page);
+                                upte_val = pt_paddr | PTE_TYPE_LEAF_TABLE;
+                                upte.store(upte_val, Ordering::Relaxed);
+                            }
+                            if upte_val & PTE_TYPE_BITS == PTE_TYPE_LEAF_TABLE {
+                                Ok(Some(upte_val & self.oas_mask & (!UAT_PGMSK as u64)))
+                            } else if upte_val == 0 || (!alloc && !free) {
+                                mod_pr_debug!("UATPageTable::with_pages: no level {}\n", level);
+                                Ok(None)
+                            } else {
+                                pr_err!("UATPageTable::with_pages: Unexpected Table PTE value {:#x} at iova {:#x} index {} phys {:#x}\n", upte_val,
+                                        iova, level + 1, phys + ((upidx * PTE_SIZE) as PhysicalAddr));
+                                Ok(None)
+                            }
+                        })?;
+                    mod_pr_debug!(
+                        "UATPageTable::with_pages: level {} PT {:#x?}\n",
+                        level,
+                        pt_addr[level]
+                    );
+                }
+                // If we don't have a page table, skip this entire level
+                if pt_addr[level].is_none() {
+                    let block = 1 << (UAT_PGBIT + UAT_LVBIT * (level + 1));
+                    let old = iova;
+                    iova = align(iova + 1, block);
+                    mod_pr_debug!(
+                        "UATPageTable::with_pages: skip {:#x} {:#x} -> {:#x}\n",
+                        block,
+                        old,
+                        iova
+                    );
+                    continue 'outer;
+                }
+            }
+
+            let idx = ((iova >> UAT_PGBIT as u64) & UAT_LVMSK) as usize;
+            let max_count = UAT_NPTE - idx;
+            let count = (((end - iova) >> UAT_PGBIT) as usize).min(max_count);
+            let phys = pt_addr[0].unwrap();
+            mod_pr_debug!(
+                "UATPageTable::with_pages: leaf PT at {:#x} idx {:#x} count {:#x} iova {:#x}\n",
+                phys,
+                idx,
+                count,
+                iova
+            );
+            // SAFETY: Page table addresses are either allocated by us, or
+            // firmware-managed and safe to borrow a struct page from.
+            let pt = unsafe { Page::borrow_phys_unchecked(&phys) };
+            pt.with_pointer_into_page(idx * PTE_SIZE, count * PTE_SIZE, |p| {
+                let ptep = p as *const _ as *const Pte;
+                // SAFETY: We know this is a valid pointer to PTEs and the range is valid and
+                // checked by with_pointer_into_page().
+                let ptes = unsafe { core::slice::from_raw_parts(ptep, count) };
+                cb(iova, ptes)?;
+                Ok(())
+            })?;
+
+            let block = 1 << (UAT_PGBIT + UAT_LVBIT);
+            iova = align(iova + 1, block);
+        }
+
+        if free {
+            for level in (0..UAT_LEVELS - 1).rev() {
+                if let Some(phys) = pt_addr[level] {
+                    mod_pr_debug!(
+                        "UATPageTable::with_pages: free level {} {:#x?}\n",
+                        level,
+                        phys
+                    );
+                    // SAFETY: Page tables for our VA ranges always come from Page::into_phys().
+                    unsafe { Page::from_phys(phys) };
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn alloc_pages(&mut self, iova_range: Range<u64>) -> Result {
+        mod_pr_debug!("UATPageTable::alloc_pages: {:#x?}\n", iova_range);
+        self.with_pages(iova_range, true, false, |_, _| Ok(()))
+    }
+
+    fn pte_bits(&self) -> u64 {
+        if self.ttb_owned {
+            // Owned page tables are userspace, so non-global
+            PTE_TYPE_LEAF_TABLE | UAT_NON_GLOBAL
+        } else {
+            // The sole non-owned page table is kernelspace, so global
+            PTE_TYPE_LEAF_TABLE
+        }
+    }
+
+    pub(crate) fn map_pages(
+        &mut self,
+        iova_range: Range<u64>,
+        mut phys: PhysicalAddr,
+        prot: Prot,
+        one_page: bool,
+    ) -> Result {
+        mod_pr_debug!(
+            "UATPageTable::map_pages: {:#x?} {:#x?} {:?}\n",
+            iova_range,
+            phys,
+            prot
+        );
+        if phys & (UAT_PGMSK as PhysicalAddr) != 0 {
+            pr_err!("UATPageTable::map_pages: phys not aligned: {:#x?}\n", phys);
+            return Err(EINVAL);
+        }
+
+        let pte_bits = self.pte_bits();
+
+        self.with_pages(iova_range, true, false, |iova, ptes| {
+            for (idx, pte) in ptes.iter().enumerate() {
+                let ptev = pte.load(Ordering::Relaxed);
+                if ptev != 0 {
+                    pr_err!(
+                        "UATPageTable::map_pages: Page at IOVA {:#x} is mapped (PTE: {:#x})\n",
+                        iova + (idx * UAT_PGSZ) as u64,
+                        ptev
+                    );
+                }
+                pte.store(phys | prot.as_pte() | pte_bits, Ordering::Relaxed);
+                if !one_page {
+                    phys += UAT_PGSZ as PhysicalAddr;
+                }
+            }
+            Ok(())
+        })
+    }
+
+    pub(crate) fn reprot_pages(&mut self, iova_range: Range<u64>, prot: Prot) -> Result {
+        mod_pr_debug!(
+            "UATPageTable::reprot_pages: {:#x?} {:?}\n",
+            iova_range,
+            prot
+        );
+        self.with_pages(iova_range, true, false, |iova, ptes| {
+            for (idx, pte) in ptes.iter().enumerate() {
+                let ptev = pte.load(Ordering::Relaxed);
+                if ptev & PTE_TYPE_BITS != PTE_TYPE_LEAF_TABLE {
+                    pr_err!(
+                        "UATPageTable::reprot_pages: Page at IOVA {:#x} is unmapped (PTE: {:#x})\n",
+                        iova + (idx * UAT_PGSZ) as u64,
+                        ptev
+                    );
+                    continue;
+                }
+                pte.store((ptev & !UAT_PROT_BITS) | prot.as_pte(), Ordering::Relaxed);
+            }
+            Ok(())
+        })
+    }
+
+    pub(crate) fn unmap_pages(&mut self, iova_range: Range<u64>) -> Result {
+        mod_pr_debug!("UATPageTable::unmap_pages: {:#x?}\n", iova_range);
+        self.with_pages(iova_range, false, false, |iova, ptes| {
+            for (idx, pte) in ptes.iter().enumerate() {
+                if pte.load(Ordering::Relaxed) & PTE_TYPE_LEAF_TABLE == 0 {
+                    pr_err!(
+                        "UATPageTable::unmap_pages: Page at IOVA {:#x} already unmapped\n",
+                        iova + (idx * UAT_PGSZ) as u64
+                    );
+                }
+                pte.store(0, Ordering::Relaxed);
+            }
+            Ok(())
+        })
+    }
+
+    pub(crate) fn dump_pages(&mut self, iova_range: Range<u64>) -> Result<KVVec<DumpedPage>> {
+        let mut pages = KVVec::new();
+        let oas_mask = self.oas_mask;
+        let iova_base = self.va_range.start & !UAT_IASMSK;
+        self.with_pages(iova_range, false, false, |iova, ptes| {
+            let iova = iova | iova_base;
+            for (idx, ppte) in ptes.iter().enumerate() {
+                let pte = ppte.load(Ordering::Relaxed);
+                if (pte & PTE_TYPE_LEAF_TABLE) != PTE_TYPE_LEAF_TABLE {
+                    continue;
+                }
+                let memattr = ((pte & UAT_MEMATTR_BITS) >> UAT_MEMATTR_SHIFT) as u8;
+
+                if !(memattr == MEMATTR_CACHED || memattr == MEMATTR_UNCACHED) {
+                    pages.push(
+                        DumpedPage {
+                            iova: iova + (idx * UAT_PGSZ) as u64,
+                            pte,
+                            data: None,
+                        },
+                        GFP_KERNEL,
+                    )?;
+                    continue;
+                }
+                let phys = pte & oas_mask & (!UAT_PGMSK as u64);
+                // SAFETY: GPU pages are either firmware/preallocated pages
+                // (which the kernel isn't concerned with and are either in
+                // the page map or not, and if they aren't, borrow_phys()
+                // will fail), or GPU page table pages (which we own),
+                // or GEM buffer pages (which are locked while they are
+                // mapped in the page table), so they should be safe to
+                // borrow.
+                //
+                // This does trust the firmware not to have any weird
+                // mappings in its own internal page tables, but since
+                // those are managed by the uPPL which is privileged anyway,
+                // this trust does not actually extend any trust boundary.
+                let src_page = match unsafe { Page::borrow_phys(&phys) } {
+                    Some(page) => page,
+                    None => {
+                        pages.push(
+                            DumpedPage {
+                                iova: iova + (idx * UAT_PGSZ) as u64,
+                                pte,
+                                data: None,
+                            },
+                            GFP_KERNEL,
+                        )?;
+                        continue;
+                    }
+                };
+                let dst_page = Page::alloc_page(GFP_KERNEL)?;
+                src_page.with_page_mapped(|psrc| -> Result {
+                    // SAFETY: This could technically still have a data race with the firmware
+                    // or other driver code (or even userspace with timestamp buffers), but while
+                    // the Rust language technically says this is UB, in the real world, using
+                    // atomic reads for this is guaranteed to never cause any harmful effects
+                    // other than possibly reading torn/unreliable data. At least on ARM64 anyway.
+                    //
+                    // (Yes, I checked with Rust people about this. ~~ Lina)
+                    //
+                    let src_items = unsafe {
+                        core::slice::from_raw_parts(
+                            psrc as *const AtomicU64,
+                            UAT_PGSZ / core::mem::size_of::<AtomicU64>(),
+                        )
+                    };
+                    dst_page.with_page_mapped(|pdst| -> Result {
+                        // SAFETY: We own the destination page, so it is safe to view its contents
+                        // as a u64 slice.
+                        let dst_items = unsafe {
+                            core::slice::from_raw_parts_mut(
+                                pdst as *mut u64,
+                                UAT_PGSZ / core::mem::size_of::<u64>(),
+                            )
+                        };
+                        for (si, di) in src_items.iter().zip(dst_items.iter_mut()) {
+                            *di = si.load(Ordering::Relaxed);
+                        }
+                        Ok(())
+                    })?;
+                    Ok(())
+                })?;
+                pages.push(
+                    DumpedPage {
+                        iova: iova + (idx * UAT_PGSZ) as u64,
+                        pte,
+                        data: Some(dst_page),
+                    },
+                    GFP_KERNEL,
+                )?;
+            }
+            Ok(())
+        })?;
+        Ok(pages)
+    }
+}
+
+impl Drop for UatPageTable {
+    fn drop(&mut self) {
+        mod_pr_debug!("UATPageTable::drop range: {:#x?}\n", &self.va_range);
+        if self
+            .with_pages(self.va_range.clone(), false, true, |iova, ptes| {
+                for (idx, pte) in ptes.iter().enumerate() {
+                    if pte.load(Ordering::Relaxed) != 0 {
+                        pr_err!(
+                            "UATPageTable::drop: Leaked page at IOVA {:#x}\n",
+                            iova + (idx * UAT_PGSZ) as u64
+                        );
+                    }
+                }
+                Ok(())
+            })
+            .is_err()
+        {
+            pr_err!("UATPageTable::drop failed to free page tables\n",);
+        }
+        if self.ttb_owned {
+            mod_pr_debug!("UATPageTable::drop: Free TTB {:#x}\n", self.ttb);
+            // SAFETY: If we own the ttb, it was allocated with Page::into_phys().
+            unsafe {
+                Page::from_phys(self.ttb);
+            }
+        }
+    }
+}
diff --git a/drivers/gpu/drm/asahi/queue/common.rs b/drivers/gpu/drm/asahi/queue/common.rs
new file mode 100644
index 00000000000000..a68e4fa619ca06
--- /dev/null
+++ b/drivers/gpu/drm/asahi/queue/common.rs
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Common queue functionality.
+//!
+//! Shared helpers used by the submission logic for multiple command types.
+
+use crate::file;
+use crate::fw::job::UserTimestamp;
+
+use kernel::prelude::*;
+use kernel::uapi;
+use kernel::xarray;
+
+pub(super) fn get_timestamp_object(
+    objects: Pin<&xarray::XArray<KBox<file::Object>>>,
+    timestamp: uapi::drm_asahi_timestamp,
+) -> Result<Option<UserTimestamp>> {
+    if timestamp.handle == 0 {
+        return Ok(None);
+    }
+
+    let object = objects.get(timestamp.handle.try_into()?).ok_or(ENOENT)?;
+
+    #[allow(irrefutable_let_patterns)]
+    if let file::Object::TimestampBuffer(mapping) = object.borrow() {
+        let offset = timestamp.offset;
+        if (offset.checked_add(8).ok_or(EINVAL)?) as usize > mapping.size() {
+            return Err(ERANGE);
+        }
+        Ok(Some(UserTimestamp {
+            mapping: mapping.clone(),
+            offset: offset as usize,
+        }))
+    } else {
+        Err(EINVAL)
+    }
+}
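+
+// A hedged usage sketch, mirroring how the per-queue submission paths call this helper: both the
+// start and end timestamps of a command are resolved up front, before the firmware structures
+// are built.
+//
+//     let mut user_timestamps: fw::job::UserTimestamps = Default::default();
+//     user_timestamps.start = common::get_timestamp_object(objects, cmdbuf.ts.start)?;
+//     user_timestamps.end = common::get_timestamp_object(objects, cmdbuf.ts.end)?;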
diff --git a/drivers/gpu/drm/asahi/queue/compute.rs b/drivers/gpu/drm/asahi/queue/compute.rs
new file mode 100644
index 00000000000000..dcd36bcf6ffaeb
--- /dev/null
+++ b/drivers/gpu/drm/asahi/queue/compute.rs
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![allow(clippy::unusual_byte_groupings)]
+
+//! Compute work queue.
+//!
+//! A compute queue consists of one underlying WorkQueue.
+//! This module is in charge of creating all of the firmware structures required to submit compute
+//! work to the GPU, based on the userspace command buffer.
+
+use super::common;
+use crate::alloc::Allocator;
+use crate::debug::*;
+use crate::driver::AsahiDriver;
+use crate::fw::types::*;
+use crate::gpu::GpuManager;
+use crate::{file, fw, gpu, microseq};
+use crate::{inner_ptr, inner_weak_ptr};
+use core::sync::atomic::Ordering;
+use kernel::dma_fence::RawDmaFence;
+use kernel::drm::sched::Job;
+use kernel::prelude::*;
+use kernel::sync::Arc;
+use kernel::types::ForeignOwnable;
+use kernel::uapi;
+use kernel::xarray;
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Compute;
+
+#[versions(AGX)]
+impl super::QueueInner::ver {
+    /// Submit work to a compute queue.
+    pub(super) fn submit_compute(
+        &self,
+        job: &mut Job<super::QueueJob::ver>,
+        cmdbuf: &uapi::drm_asahi_cmd_compute,
+        attachments: &microseq::Attachments,
+        objects: Pin<&xarray::XArray<KBox<file::Object>>>,
+        id: u64,
+        flush_stamps: bool,
+    ) -> Result {
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(self.dev.as_ref().get_drvdata()).data };
+        let gpu = match data.gpu.as_any().downcast_ref::<gpu::GpuManager::ver>() {
+            Some(gpu) => gpu,
+            None => {
+                dev_crit!(self.dev.as_ref(), "GpuManager mismatched with Queue!\n");
+                return Err(EIO);
+            }
+        };
+
+        let mut alloc = gpu.alloc();
+        let kalloc = &mut *alloc;
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Compute!\n", id);
+
+        if cmdbuf.flags != 0 {
+            return Err(EINVAL);
+        }
+
+        let mut user_timestamps: fw::job::UserTimestamps = Default::default();
+        user_timestamps.start = common::get_timestamp_object(objects, cmdbuf.ts.start)?;
+        user_timestamps.end = common::get_timestamp_object(objects, cmdbuf.ts.end)?;
+
+        // This sequence number increases per new client/VM? assigned to some slot,
+        // but it's unclear *which* slot...
+        let slot_client_seq: u8 = (self.id & 0xff) as u8;
+
+        let vm_bind = job.vm_bind.clone();
+
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] VM slot = {}\n",
+            id,
+            vm_bind.slot()
+        );
+
+        let notifier = self.notifier.clone();
+
+        let fence = job.fence.clone();
+        let comp_job = job.get_comp()?;
+        let ev_comp = comp_job.event_info();
+
+        let preempt2_off = gpu.get_cfg().compute_preempt1_size;
+        let preempt3_off = preempt2_off + 8;
+        let preempt4_off = preempt3_off + 8;
+        let preempt5_off = preempt4_off + 8;
+        let preempt_size = preempt5_off + 8;
+
+        let preempt_buf = self
+            .ualloc
+            .lock()
+            .array_empty_tagged(preempt_size, b"CPMT")?;
+
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] Event #{} {:#x?} -> {:#x?}\n",
+            id,
+            ev_comp.slot,
+            ev_comp.value,
+            ev_comp.value.next(),
+        );
+
+        let timestamps = Arc::new(
+            kalloc.shared.new_default::<fw::job::JobTimestamps>()?,
+            GFP_KERNEL,
+        )?;
+
+        let uuid = 0;
+        mod_dev_dbg!(self.dev, "[Submission {}] UUID = {:#x?}\n", id, uuid);
+
+        // TODO: check
+        #[ver(V >= V13_0B4)]
+        let count = self.counter.fetch_add(1, Ordering::Relaxed);
+
+        let comp = GpuObject::new_init_prealloc(
+            kalloc.gpu_ro.alloc_object()?,
+            |ptr: GpuWeakPointer<fw::compute::RunCompute::ver>| {
+                let notifier = notifier.clone();
+                let vm_bind = vm_bind.clone();
+                try_init!(fw::compute::RunCompute::ver {
+                    preempt_buf: preempt_buf,
+                    micro_seq: {
+                        let mut builder = microseq::Builder::new();
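+                        // A compute microsequence is built as: StartCompute,
+                        // an optional start Timestamp, WaitForIdle (or
+                        // WaitForIdle2 on G14X+), an optional end Timestamp,
+                        // FinalizeCompute, and finally RetireStamp.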
+
+                        let stats = gpu.initdata.runtime_pointers.stats.comp.weak_pointer();
+
+                        let start_comp = builder.add(microseq::StartCompute::ver {
+                            header: microseq::op::StartCompute::HEADER,
+                            unk_pointer: inner_weak_ptr!(ptr, unk_pointee),
+                            #[ver(G < G14X)]
+                            job_params1: Some(inner_weak_ptr!(ptr, job_params1)),
+                            #[ver(G >= G14X)]
+                            job_params1: None,
+                            #[ver(G >= G14X)]
+                            registers: inner_weak_ptr!(ptr, registers),
+                            stats,
+                            work_queue: ev_comp.info_ptr,
+                            vm_slot: vm_bind.slot(),
+                            unk_28: 0x1,
+                            event_generation: self.id as u32,
+                            event_seq: U64(ev_comp.event_seq),
+                            unk_38: 0x0,
+                            job_params2: inner_weak_ptr!(ptr, job_params2),
+                            unk_44: 0x0,
+                            uuid,
+                            attachments: *attachments,
+                            padding: Default::default(),
+                            #[ver(V >= V13_0B4)]
+                            unk_flag: inner_weak_ptr!(ptr, unk_flag),
+                            #[ver(V >= V13_0B4)]
+                            counter: U64(count),
+                            #[ver(V >= V13_0B4)]
+                            notifier_buf: inner_weak_ptr!(notifier.weak_pointer(), state.unk_buf),
+                        })?;
+
+                        if user_timestamps.any() {
+                            builder.add(microseq::Timestamp::ver {
+                                header: microseq::op::Timestamp::new(true),
+                                command_time: inner_weak_ptr!(ptr, command_time),
+                                ts_pointers: inner_weak_ptr!(ptr, timestamp_pointers),
+                                update_ts: inner_weak_ptr!(ptr, timestamp_pointers.start_addr),
+                                work_queue: ev_comp.info_ptr,
+                                user_ts_pointers: inner_weak_ptr!(ptr, user_timestamp_pointers),
+                                #[ver(V >= V13_0B4)]
+                                unk_ts: inner_weak_ptr!(ptr, context_store_req),
+                                uuid,
+                                unk_30_padding: 0,
+                            })?;
+                        }
+
+                        #[ver(G < G14X)]
+                        builder.add(microseq::WaitForIdle {
+                            header: microseq::op::WaitForIdle::new(microseq::Pipe::Compute),
+                        })?;
+                        #[ver(G >= G14X)]
+                        builder.add(microseq::WaitForIdle2 {
+                            header: microseq::op::WaitForIdle2::HEADER,
+                        })?;
+
+                        if user_timestamps.any() {
+                            builder.add(microseq::Timestamp::ver {
+                                header: microseq::op::Timestamp::new(false),
+                                command_time: inner_weak_ptr!(ptr, command_time),
+                                ts_pointers: inner_weak_ptr!(ptr, timestamp_pointers),
+                                update_ts: inner_weak_ptr!(ptr, timestamp_pointers.end_addr),
+                                work_queue: ev_comp.info_ptr,
+                                user_ts_pointers: inner_weak_ptr!(ptr, user_timestamp_pointers),
+                                #[ver(V >= V13_0B4)]
+                                unk_ts: inner_weak_ptr!(ptr, context_store_req),
+                                uuid,
+                                unk_30_padding: 0,
+                            })?;
+                        }
+
+                        let off = builder.offset_to(start_comp);
+                        builder.add(microseq::FinalizeCompute::ver {
+                            header: microseq::op::FinalizeCompute::HEADER,
+                            stats,
+                            work_queue: ev_comp.info_ptr,
+                            vm_slot: vm_bind.slot(),
+                            #[ver(V < V13_0B4)]
+                            unk_18: 0,
+                            job_params2: inner_weak_ptr!(ptr, job_params2),
+                            unk_24: 0,
+                            uuid,
+                            fw_stamp: ev_comp.fw_stamp_pointer,
+                            stamp_value: ev_comp.value.next(),
+                            unk_38: 0,
+                            unk_3c: 0,
+                            unk_40: 0,
+                            unk_44: 0,
+                            unk_48: 0,
+                            unk_4c: 0,
+                            unk_50: 0,
+                            unk_54: 0,
+                            unk_58: 0,
+                            #[ver(G == G14 && V < V13_0B4)]
+                            unk_5c_g14: U64(0),
+                            restart_branch_offset: off,
+                            has_attachments: (attachments.count > 0) as u32,
+                            #[ver(V >= V13_0B4)]
+                            unk_64: Default::default(),
+                            #[ver(V >= V13_0B4)]
+                            unk_flag: inner_weak_ptr!(ptr, unk_flag),
+                            #[ver(V >= V13_0B4)]
+                            unk_79: Default::default(),
+                        })?;
+
+                        builder.add(microseq::RetireStamp {
+                            header: microseq::op::RetireStamp::HEADER,
+                        })?;
+                        builder.build(&mut kalloc.private)?
+                    },
+                    notifier,
+                    vm_bind,
+                    timestamps,
+                    user_timestamps,
+                })
+            },
+            |inner, _ptr| {
+                let vm_slot = vm_bind.slot();
+                try_init!(fw::compute::raw::RunCompute::ver {
+                    tag: fw::workqueue::CommandType::RunCompute,
+                    #[ver(V >= V13_0B4)]
+                    counter: U64(count),
+                    unk_4: 0,
+                    vm_slot,
+                    notifier: inner.notifier.gpu_pointer(),
+                    unk_pointee: Default::default(),
+                    #[ver(G < G14X)]
+                    __pad0: Default::default(),
+                    #[ver(G < G14X)]
+                    job_params1 <- try_init!(fw::compute::raw::JobParameters1 {
+                        preempt_buf1: inner.preempt_buf.gpu_pointer(),
+                        cdm_ctrl_stream_base: U64(cmdbuf.cdm_ctrl_stream_base),
+                        // buf2-5 Only if internal program is used
+                        preempt_buf2: inner.preempt_buf.gpu_offset_pointer(preempt2_off),
+                        preempt_buf3: inner.preempt_buf.gpu_offset_pointer(preempt3_off),
+                        preempt_buf4: inner.preempt_buf.gpu_offset_pointer(preempt4_off),
+                        preempt_buf5: inner.preempt_buf.gpu_offset_pointer(preempt5_off),
+                        usc_exec_base_cp: U64(self.usc_exec_base),
+                        unk_38: U64(0x8c60),
+                        helper_program: cmdbuf.helper.binary, // Internal program addr | 1
+                        unk_44: 0,
+                        helper_arg: U64(cmdbuf.helper.data), // Only if internal program used
+                        helper_cfg: cmdbuf.helper.cfg, // 0x40 if internal program used
+                        unk_54: 0,
+                        unk_58: 1,
+                        unk_5c: 0,
+                        iogpu_unk_40: 0, // 0x1c if internal program used
+                        __pad: Default::default(),
+                    }),
+                    #[ver(G >= G14X)]
+                    registers: fw::job::raw::RegisterArray::new(
+                        inner_weak_ptr!(_ptr, registers.registers),
+                        |r| {
+                            r.add(0x1a510, inner.preempt_buf.gpu_pointer().into());
+                            r.add(0x1a420, cmdbuf.cdm_ctrl_stream_base);
+                            // buf2-5 Only if internal program is used
+                            r.add(0x1a4d0, inner.preempt_buf.gpu_offset_pointer(preempt2_off).into());
+                            r.add(0x1a4d8, inner.preempt_buf.gpu_offset_pointer(preempt3_off).into());
+                            r.add(0x1a4e0, inner.preempt_buf.gpu_offset_pointer(preempt4_off).into());
+                            r.add(0x1a4e8, inner.preempt_buf.gpu_offset_pointer(preempt5_off).into());
+                            r.add(0x10071, self.usc_exec_base); // USC_EXEC_BASE_CP
+                            r.add(0x11841, cmdbuf.helper.binary.into());
+                            r.add(0x11849, cmdbuf.helper.data);
+                            r.add(0x11f81, cmdbuf.helper.cfg.into());
+                            r.add(0x1a440, 0x24201);
+                            r.add(0x12091, 0 /* iogpu_unk_40 */);
+                            /*
+                            r.add(0x10201, 0x100); // Some kind of counter?? Does this matter?
+                            r.add(0x10428, 0x100); // Some kind of counter?? Does this matter?
+                            */
+                        }
+                    ),
+                    __pad1: Default::default(),
+                    microsequence: inner.micro_seq.gpu_pointer(),
+                    microsequence_size: inner.micro_seq.len() as u32,
+                    job_params2 <- try_init!(fw::compute::raw::JobParameters2::ver {
+                        #[ver(V >= V13_0B4)]
+                        unk_0_0: 0,
+                        unk_0: Default::default(),
+                        preempt_buf1: inner.preempt_buf.gpu_pointer(),
+                        cdm_ctrl_stream_end: U64(cmdbuf.cdm_ctrl_stream_end),
+                        unk_34: Default::default(),
+                        #[ver(G < G14X)]
+                        unk_g14x: 0,
+                        #[ver(G >= G14X)]
+                        unk_g14x: 0x24201,
+                        unk_58: 0,
+                        #[ver(V < V13_0B4)]
+                        unk_5c: 0,
+                    }),
+                    encoder_params <- try_init!(fw::job::raw::EncoderParams {
+                        unk_8: 0x0,     // fixed
+                        sync_grow: 0x0, // check!
+                        unk_10: 0x0,    // fixed
+                        encoder_id: 0,
+                        unk_18: 0x0, // fixed
+                        unk_mask: 0xffffffff,
+                        sampler_array: U64(cmdbuf.sampler_heap),
+                        sampler_count: cmdbuf.sampler_count as u32,
+                        sampler_max: (cmdbuf.sampler_count as u32) + 1,
+                    }),
+                    meta <- try_init!(fw::job::raw::JobMeta {
+                        unk_0: 0,
+                        unk_2: 0,
+                        no_preemption: 0,
+                        stamp: ev_comp.stamp_pointer,
+                        fw_stamp: ev_comp.fw_stamp_pointer,
+                        stamp_value: ev_comp.value.next(),
+                        stamp_slot: ev_comp.slot,
+                        evctl_index: 0, // fixed
+                        flush_stamps: flush_stamps as u32,
+                        uuid,
+                        event_seq: ev_comp.event_seq as u32,
+                    }),
+                    command_time: U64(0),
+                    timestamp_pointers <- try_init!(fw::job::raw::TimestampPointers {
+                        start_addr: Some(inner_ptr!(inner.timestamps.gpu_pointer(), start)),
+                        end_addr: Some(inner_ptr!(inner.timestamps.gpu_pointer(), end)),
+                    }),
+                    user_timestamp_pointers: inner.user_timestamps.pointers()?,
+                    client_sequence: slot_client_seq,
+                    pad_2d1: Default::default(),
+                    unk_2d4: 0,
+                    unk_2d8: 0,
+                    #[ver(V >= V13_0B4)]
+                    context_store_req: U64(0),
+                    #[ver(V >= V13_0B4)]
+                    context_store_compl: U64(0),
+                    #[ver(V >= V13_0B4)]
+                    unk_2e9: Default::default(),
+                    #[ver(V >= V13_0B4)]
+                    unk_flag: U32(0),
+                    #[ver(V >= V13_0B4)]
+                    unk_pad: Default::default(),
+                })
+            },
+        )?;
+
+        core::mem::drop(alloc);
+
+        fence.add_command();
+        comp_job.add_cb(comp, vm_bind.slot(), move |error| {
+            if let Some(err) = error {
+                fence.set_error(err.into())
+            }
+
+            fence.command_complete();
+        })?;
+
+        comp_job.next_seq();
+
+        Ok(())
+    }
+}
diff --git a/drivers/gpu/drm/asahi/queue/mod.rs b/drivers/gpu/drm/asahi/queue/mod.rs
new file mode 100644
index 00000000000000..39c4a68589c7cc
--- /dev/null
+++ b/drivers/gpu/drm/asahi/queue/mod.rs
@@ -0,0 +1,913 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Submission queue management.
+//!
+//! This module implements the userspace view of submission queues and the logic to map userspace
+//! submissions to firmware queues.
+
+use kernel::dma_fence::*;
+use kernel::prelude::*;
+use kernel::{
+    c_str, dma_fence,
+    drm::sched,
+    macros::versions,
+    sync::{Arc, Mutex},
+    types::ForeignOwnable,
+    uapi, xarray,
+};
+
+use crate::alloc::Allocator;
+use crate::debug::*;
+use crate::driver::{AsahiDevRef, AsahiDevice, AsahiDriver};
+use crate::file::MAX_COMMANDS_PER_SUBMISSION;
+use crate::fw::types::*;
+use crate::gpu::GpuManager;
+use crate::inner_weak_ptr;
+use crate::microseq;
+use crate::module_parameters;
+use crate::util::{AnyBitPattern, Reader};
+use crate::{alloc, buffer, channel, event, file, fw, gpu, mmu, workqueue};
+
+use core::sync::atomic::{AtomicU64, Ordering};
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Queue;
+
+const WQ_SIZE: u32 = 0x500;
+
+mod common;
+mod compute;
+mod render;
+
+/// Trait implemented by all versioned queues.
+pub(crate) trait Queue: Send + Sync {
+    fn submit(
+        &mut self,
+        id: u64,
+        syncs: KVec<file::SyncItem>,
+        in_sync_count: usize,
+        cmdbuf_raw: &[u8],
+        objects: Pin<&xarray::XArray<KBox<file::Object>>>,
+    ) -> Result;
+}
+
+#[versions(AGX)]
+struct SubQueue {
+    wq: Arc<workqueue::WorkQueue::ver>,
+}
+
+#[versions(AGX)]
+impl SubQueue::ver {
+    fn new_job(&mut self, fence: dma_fence::Fence) -> SubQueueJob::ver {
+        SubQueueJob::ver {
+            wq: self.wq.clone(),
+            fence: Some(fence),
+            job: None,
+        }
+    }
+}
+
+#[versions(AGX)]
+struct SubQueueJob {
+    wq: Arc<workqueue::WorkQueue::ver>,
+    job: Option<workqueue::Job::ver>,
+    fence: Option<dma_fence::Fence>,
+}
+
+#[versions(AGX)]
+impl SubQueueJob::ver {
+    fn get(&mut self) -> Result<&mut workqueue::Job::ver> {
+        if self.job.is_none() {
+            mod_pr_debug!("SubQueueJob: Creating {:?} job\n", self.wq.pipe_type());
+            self.job
+                .replace(self.wq.new_job(self.fence.take().unwrap())?);
+        }
+        Ok(self.job.as_mut().expect("expected a Job"))
+    }
+
+    fn commit(&mut self) -> Result {
+        match self.job.as_mut() {
+            Some(job) => job.commit(),
+            None => Ok(()),
+        }
+    }
+
+    fn can_submit(&self) -> Option<Fence> {
+        self.job.as_ref().and_then(|job| job.can_submit())
+    }
+}
+
+#[versions(AGX)]
+pub(crate) struct Queue {
+    dev: AsahiDevRef,
+    _sched: sched::Scheduler<QueueJob::ver>,
+    entity: sched::Entity<QueueJob::ver>,
+    vm: mmu::Vm,
+    q_vtx: Option<SubQueue::ver>,
+    q_frag: Option<SubQueue::ver>,
+    q_comp: Option<SubQueue::ver>,
+    fence_ctx: FenceContexts,
+    inner: QueueInner::ver,
+}
+
+#[versions(AGX)]
+pub(crate) struct QueueInner {
+    dev: AsahiDevRef,
+    ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+    buffer: buffer::Buffer::ver,
+    gpu_context: Arc<workqueue::GpuContext>,
+    notifier_list: Arc<GpuObject<fw::event::NotifierList>>,
+    notifier: Arc<GpuObject<fw::event::Notifier::ver>>,
+    usc_exec_base: u64,
+    id: u64,
+    #[ver(V >= V13_0B4)]
+    counter: AtomicU64,
+}
+
+#[versions(AGX)]
+#[derive(Default)]
+pub(crate) struct JobFence {
+    id: u64,
+    pending: AtomicU64,
+}
+
+#[versions(AGX)]
+impl JobFence::ver {
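+    /// Account for one more command gated on this fence; the fence only
+    /// signals once `command_complete()` has been called a matching number
+    /// of times.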
+    fn add_command(self: &FenceObject<Self>) {
+        self.pending.fetch_add(1, Ordering::Relaxed);
+    }
+
+    fn command_complete(self: &FenceObject<Self>) {
+        let remain = self.pending.fetch_sub(1, Ordering::Relaxed) - 1;
+        mod_pr_debug!(
+            "JobFence[{}]: Command complete (remain: {})\n",
+            self.id,
+            remain
+        );
+        if remain == 0 {
+            mod_pr_debug!("JobFence[{}]: Signaling\n", self.id);
+            if self.signal().is_err() {
+                pr_err!("JobFence[{}]: Fence signal failed\n", self.id);
+            }
+        }
+    }
+}
+
+#[versions(AGX)]
+#[vtable]
+impl dma_fence::FenceOps for JobFence::ver {
+    const USE_64BIT_SEQNO: bool = true;
+
+    fn get_driver_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr {
+        c_str!("asahi")
+    }
+    fn get_timeline_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr {
+        c_str!("queue")
+    }
+}
+
+#[versions(AGX)]
+pub(crate) struct QueueJob {
+    dev: AsahiDevRef,
+    vm_bind: mmu::VmBind,
+    op_guard: Option<gpu::OpGuard>,
+    sj_vtx: Option<SubQueueJob::ver>,
+    sj_frag: Option<SubQueueJob::ver>,
+    sj_comp: Option<SubQueueJob::ver>,
+    fence: UserFence<JobFence::ver>,
+    notifier: Arc<GpuObject<fw::event::Notifier::ver>>,
+    notification_count: u32,
+    did_run: bool,
+    id: u64,
+}
+
+#[versions(AGX)]
+impl QueueJob::ver {
+    fn get_vtx(&mut self) -> Result<&mut workqueue::Job::ver> {
+        self.sj_vtx
+            .as_mut()
+            .ok_or_else(|| {
+                cls_pr_debug!(Errors, "No vertex queue\n");
+                EINVAL
+            })?
+            .get()
+    }
+    fn get_frag(&mut self) -> Result<&mut workqueue::Job::ver> {
+        self.sj_frag
+            .as_mut()
+            .ok_or_else(|| {
+                cls_pr_debug!(Errors, "No fragment queue\n");
+                EINVAL
+            })?
+            .get()
+    }
+    fn get_comp(&mut self) -> Result<&mut workqueue::Job::ver> {
+        self.sj_comp
+            .as_mut()
+            .ok_or_else(|| {
+                cls_pr_debug!(Errors, "No compute queue\n");
+                EINVAL
+            })?
+            .get()
+    }
+
+    fn commit(&mut self) -> Result {
+        mod_dev_dbg!(self.dev, "QueueJob {}: Committing\n", self.id);
+
+        self.sj_vtx.as_mut().map(|a| a.commit()).unwrap_or(Ok(()))?;
+        self.sj_frag
+            .as_mut()
+            .map(|a| a.commit())
+            .unwrap_or(Ok(()))?;
+        self.sj_comp.as_mut().map(|a| a.commit()).unwrap_or(Ok(()))
+    }
+}
+
+#[versions(AGX)]
+impl sched::JobImpl for QueueJob::ver {
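+    /// If any of the underlying firmware work queues is too full to accept
+    /// this job, return a fence for the scheduler to wait on before retrying.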
+    fn prepare(job: &mut sched::Job<Self>) -> Option<Fence> {
+        mod_dev_dbg!(job.dev, "QueueJob {}: Checking runnability\n", job.id);
+
+        if let Some(sj) = job.sj_vtx.as_ref() {
+            if let Some(fence) = sj.can_submit() {
+                mod_dev_dbg!(
+                    job.dev,
+                    "QueueJob {}: Blocking due to vertex queue full\n",
+                    job.id
+                );
+                return Some(fence);
+            }
+        }
+        if let Some(sj) = job.sj_frag.as_ref() {
+            if let Some(fence) = sj.can_submit() {
+                mod_dev_dbg!(
+                    job.dev,
+                    "QueueJob {}: Blocking due to fragment queue full\n",
+                    job.id
+                );
+                return Some(fence);
+            }
+        }
+        if let Some(sj) = job.sj_comp.as_ref() {
+            if let Some(fence) = sj.can_submit() {
+                mod_dev_dbg!(
+                    job.dev,
+                    "QueueJob {}: Blocking due to compute queue full\n",
+                    job.id
+                );
+                return Some(fence);
+            }
+        }
+        None
+    }
+
+    #[allow(unused_assignments)]
+    fn run(job: &mut sched::Job<Self>) -> Result<Option<dma_fence::Fence>> {
+        mod_dev_dbg!(job.dev, "QueueJob {}: Running Job\n", job.id);
+
+        // We can only increase the notifier threshold here, now that we are
+        // actually running the job. We cannot increase it while queueing the
+        // job without introducing subtle race conditions. Suppose we did, as
+        // early versions of drm/asahi did:
+        //
+        // 1. When processing the ioctl submit, a job is queued to drm_sched.
+        //    Incorrectly, the notifier threshold is increased, gating firmware
+        //    events.
+        // 2. When DRM schedules an event, the hardware is kicked.
+        // 3. When the number of processed jobs equals the threshold, the
+        //    firmware signals the complete event to the kernel
+        // 4. When the kernel gets a complete event, we signal the out-syncs.
+        //
+        // Does that work? There are a few scenarios.
+        //
+        // 1. There is nothing else ioctl submitted before the job completes.
+        //    The job is scheduled, completes, and signals immediately.
+        //    Everything works.
+        // 2. There is nontrivial sync across different queues. Since each queue
+        //    has its own notifier threshold, submitting one does not
+        //    block scheduling of the other. Everything works the way you'd
+        //    expect. drm/sched handles the wait/signal ordering.
+        // 3. Two ioctls are submitted back-to-back. The first signals a fence
+        //    that the second waits on. Due to the notifier threshold increment,
+        //    the first job's completion event is deferred. But in good
+        //    conditions, drm/sched will schedule the second submit anyway
+        //    because it kills the pointless intra-queue sync. Then both
+        //    commands execute and are signalled together.
+        // 4. Two ioctls are submitted back-to-back as above, but conditions are
+        //    bad. Reporting completion of the first job is still masked by the
+        //    notifier threshold, but the intra-queue fences are not optimized
+        //    out in drm/sched... drm/sched doesn't schedule the second job
+        //    until the first is signalled, but the first isn't signalled until
+        //    the second is completed, but the second can't complete until it's
+        //    scheduled. We hang!
+        //
+        // In good conditions, everything works properly and/or we win the race
+        // to mask the issue. So the issue here is challenging to hit.
+        // Nevertheless, we do need to get it right.
+        //
+        // The intention with drm/sched is that jobs that are not yet scheduled
+        // are "invisible" to the firmware. Incrementing the notifier threshold
+        // earlier than this violates that assumption, which leads to cycles
+        // like the one above. Deferring the increment until the job runs
+        // solves the race.
+        job.notifier.threshold.with(|raw, _inner| {
+            raw.increase(job.notification_count);
+        });
+
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(job.dev.as_ref().get_drvdata()).data };
+        let gpu = match data
+            .gpu
+            .clone()
+            .arc_as_any()
+            .downcast::<gpu::GpuManager::ver>()
+        {
+            Ok(gpu) => gpu,
+            Err(_) => {
+                dev_crit!(job.dev.as_ref(), "GpuManager mismatched with QueueJob!\n");
+                return Err(EIO);
+            }
+        };
+
+        if job.op_guard.is_none() {
+            job.op_guard = Some(gpu.start_op()?);
+        }
+
+        // First submit all the commands for each queue. This can fail.
+
+        let mut frag_job = None;
+        let mut frag_sub = None;
+        if let Some(sj) = job.sj_frag.as_mut() {
+            frag_job = sj.job.take();
+            if let Some(wqjob) = frag_job.as_mut() {
+                mod_dev_dbg!(job.dev, "QueueJob {}: Submit fragment\n", job.id);
+                frag_sub = Some(wqjob.submit()?);
+            }
+        }
+
+        let mut vtx_job = None;
+        let mut vtx_sub = None;
+        if let Some(sj) = job.sj_vtx.as_mut() {
+            vtx_job = sj.job.take();
+            if let Some(wqjob) = vtx_job.as_mut() {
+                mod_dev_dbg!(job.dev, "QueueJob {}: Submit vertex\n", job.id);
+                vtx_sub = Some(wqjob.submit()?);
+            }
+        }
+
+        let mut comp_job = None;
+        let mut comp_sub = None;
+        if let Some(sj) = job.sj_comp.as_mut() {
+            comp_job = sj.job.take();
+            if let Some(wqjob) = comp_job.as_mut() {
+                mod_dev_dbg!(job.dev, "QueueJob {}: Submit compute\n", job.id);
+                comp_sub = Some(wqjob.submit()?);
+            }
+        }
+
+        // Now we fully commit to running the job
+        mod_dev_dbg!(job.dev, "QueueJob {}: Run fragment\n", job.id);
+        frag_sub.map(|a| gpu.run_job(a)).transpose()?;
+
+        mod_dev_dbg!(job.dev, "QueueJob {}: Run vertex\n", job.id);
+        vtx_sub.map(|a| gpu.run_job(a)).transpose()?;
+
+        mod_dev_dbg!(job.dev, "QueueJob {}: Run compute\n", job.id);
+        comp_sub.map(|a| gpu.run_job(a)).transpose()?;
+
+        mod_dev_dbg!(job.dev, "QueueJob {}: Drop compute job\n", job.id);
+        core::mem::drop(comp_job);
+        mod_dev_dbg!(job.dev, "QueueJob {}: Drop vertex job\n", job.id);
+        core::mem::drop(vtx_job);
+        mod_dev_dbg!(job.dev, "QueueJob {}: Drop fragment job\n", job.id);
+        core::mem::drop(frag_job);
+
+        job.did_run = true;
+
+        Ok(Some(Fence::from_fence(&job.fence)))
+    }
+
+    fn timed_out(job: &mut sched::Job<Self>) -> sched::Status {
+        // FIXME: Handle timeouts properly
+        dev_err!(
+            job.dev.as_ref(),
+            "QueueJob {}: Job timed out on the DRM scheduler, things will probably break (ran: {})\n",
+            job.id, job.did_run
+        );
+        sched::Status::NoDevice
+    }
+}
+
+#[versions(AGX)]
+impl Drop for QueueJob::ver {
+    fn drop(&mut self) {
+        mod_dev_dbg!(self.dev, "QueueJob {}: Dropping\n", self.id);
+    }
+}
+
+static QUEUE_NAME: &CStr = c_str!("asahi_fence");
+static QUEUE_CLASS_KEY: kernel::sync::LockClassKey = kernel::static_lock_class!();
+
+#[versions(AGX)]
+impl Queue::ver {
+    /// Create a new user queue.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        dev: &AsahiDevice,
+        vm: mmu::Vm,
+        alloc: &mut gpu::KernelAllocators,
+        ualloc: Arc<Mutex<alloc::DefaultAllocator>>,
+        ualloc_priv: Arc<Mutex<alloc::DefaultAllocator>>,
+        event_manager: Arc<event::EventManager>,
+        mgr: &buffer::BufferManager::ver,
+        id: u64,
+        priority: u32,
+        usc_exec_base: u64,
+    ) -> Result<Queue::ver> {
+        mod_dev_dbg!(dev, "[Queue {}] Creating queue\n", id);
+
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(dev.as_ref().get_drvdata()).data };
+
+        // Must be shared, no cache management on this one!
+        let mut notifier_list = alloc.shared.new_default::<fw::event::NotifierList>()?;
+
+        let self_ptr = notifier_list.weak_pointer();
+        notifier_list.with_mut(|raw, _inner| {
+            raw.list_head.next = Some(inner_weak_ptr!(self_ptr, list_head));
+        });
+
+        let threshold = alloc.shared.new_default::<fw::event::Threshold>()?;
+
+        let notifier: Arc<GpuObject<fw::event::Notifier::ver>> = Arc::new(
+            alloc.private.new_init(
+                /*try_*/ init!(fw::event::Notifier::ver { threshold }),
+                |inner, _p| {
+                    try_init!(fw::event::raw::Notifier::ver {
+                        threshold: inner.threshold.gpu_pointer(),
+                        generation: AtomicU32::new(id as u32),
+                        cur_count: AtomicU32::new(0),
+                        unk_10: AtomicU32::new(0x50),
+                        state: Default::default()
+                    })
+                },
+            )?,
+            GFP_KERNEL,
+        )?;
+
+        // Priorities are handled by the AGX scheduler, so they have no meaning within a
+        // per-queue scheduler. Use a single run queue with Kernel priority.
+        let sched =
+            sched::Scheduler::new(dev.as_ref(), 1, WQ_SIZE, 0, 100000, c_str!("asahi_sched"))?;
+        let entity = sched::Entity::new(&sched, sched::Priority::Kernel)?;
+
+        let buffer = buffer::Buffer::ver::new(&*data.gpu, alloc, ualloc.clone(), ualloc_priv, mgr)?;
+
+        let mut ret = Queue::ver {
+            dev: dev.into(),
+            _sched: sched,
+            entity,
+            vm,
+            q_vtx: None,
+            q_frag: None,
+            q_comp: None,
+            fence_ctx: FenceContexts::new(1, QUEUE_NAME, QUEUE_CLASS_KEY)?,
+            inner: QueueInner::ver {
+                dev: dev.into(),
+                ualloc,
+                gpu_context: Arc::new(
+                    workqueue::GpuContext::new(dev, alloc, buffer.any_ref())?,
+                    GFP_KERNEL,
+                )?,
+
+                buffer,
+                notifier_list: Arc::new(notifier_list, GFP_KERNEL)?,
+                notifier,
+                usc_exec_base,
+                id,
+                #[ver(V >= V13_0B4)]
+                counter: AtomicU64::new(0),
+            },
+        };
+
+        // Rendering structures
+        let tvb_blocks = *module_parameters::initial_tvb_size.get();
+
+        ret.inner.buffer.ensure_blocks(tvb_blocks)?;
+
+        ret.q_vtx = Some(SubQueue::ver {
+            wq: workqueue::WorkQueue::ver::new(
+                dev,
+                alloc,
+                event_manager.clone(),
+                ret.inner.gpu_context.clone(),
+                ret.inner.notifier_list.clone(),
+                channel::PipeType::Vertex,
+                id,
+                priority,
+                WQ_SIZE,
+            )?,
+        });
+
+        ret.q_frag = Some(SubQueue::ver {
+            wq: workqueue::WorkQueue::ver::new(
+                dev,
+                alloc,
+                event_manager.clone(),
+                ret.inner.gpu_context.clone(),
+                ret.inner.notifier_list.clone(),
+                channel::PipeType::Fragment,
+                id,
+                priority,
+                WQ_SIZE,
+            )?,
+        });
+
+        // Compute structures
+        ret.q_comp = Some(SubQueue::ver {
+            wq: workqueue::WorkQueue::ver::new(
+                dev,
+                alloc,
+                event_manager,
+                ret.inner.gpu_context.clone(),
+                ret.inner.notifier_list.clone(),
+                channel::PipeType::Compute,
+                id,
+                priority,
+                WQ_SIZE,
+            )?,
+        });
+
+        mod_dev_dbg!(dev, "[Queue {}] Queue created\n", id);
+        Ok(ret)
+    }
+}
+
+const SQ_RENDER: usize = 0;
+const SQ_COMPUTE: usize = 1;
+const SQ_COUNT: usize = 2;
+
+// SAFETY: All bit patterns are valid by construction.
+unsafe impl AnyBitPattern for uapi::drm_asahi_cmd_header {}
+unsafe impl AnyBitPattern for uapi::drm_asahi_cmd_render {}
+unsafe impl AnyBitPattern for uapi::drm_asahi_cmd_compute {}
+unsafe impl AnyBitPattern for uapi::drm_asahi_attachment {}
+
+fn build_attachments(reader: &mut Reader<'_>, size: usize) -> Result<microseq::Attachments> {
+    const STRIDE: usize = core::mem::size_of::<uapi::drm_asahi_attachment>();
+    let count = size / STRIDE;
+
+    if count > microseq::MAX_ATTACHMENTS {
+        return Err(EINVAL);
+    }
+
+    let mut attachments: microseq::Attachments = Default::default();
+    attachments.count = count as u32;
+
+    for i in 0..count {
+        let att: uapi::drm_asahi_attachment = reader.read()?;
+
+        if att.flags != 0 || att.pad != 0 {
+            return Err(EINVAL);
+        }
+
+        // Some kind of power-of-2 exponent related to attachment size, in
+        // bounds [1, 6]? We don't know what this is exactly yet.
+        let unk_e = 1;
+
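+        // The attachment size is encoded in units of 128-byte cache lines,
+        // rounded up.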
+        let cache_lines = (att.size + 127) >> 7;
+        attachments.list[i as usize] = microseq::Attachment {
+            address: U64(att.pointer),
+            size: cache_lines.try_into()?,
+            unk_c: 0x17,
+            unk_e: unk_e as u16,
+        };
+    }
+
+    Ok(attachments)
+}
+
+#[versions(AGX)]
+impl Queue for Queue::ver {
+    fn submit(
+        &mut self,
+        id: u64,
+        mut syncs: KVec<file::SyncItem>,
+        in_sync_count: usize,
+        cmdbuf_raw: &[u8],
+        objects: Pin<&xarray::XArray<KBox<file::Object>>>,
+    ) -> Result {
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(self.dev.as_ref().get_drvdata()).data };
+        let gpu = match data
+            .gpu
+            .clone()
+            .arc_as_any()
+            .downcast::<gpu::GpuManager::ver>()
+        {
+            Ok(gpu) => gpu,
+            Err(_) => {
+                dev_crit!(self.dev.as_ref(), "GpuManager mismatched with JobImpl!\n");
+                return Err(EIO);
+            }
+        };
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Submit job\n", id);
+
+        if gpu.is_crashed() {
+            dev_err!(
+                self.dev.as_ref(),
+                "[Submission {}] GPU is crashed, cannot submit\n",
+                id
+            );
+            return Err(ENODEV);
+        }
+
+        let op_guard = if in_sync_count > 0 {
+            Some(gpu.start_op()?)
+        } else {
+            None
+        };
+
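+        // Per-pipe event history for this submission. Index 0 holds each
+        // queue's current event state before the submission; every
+        // render/compute command appends its own event info afterwards, so
+        // the vdm/cdm barrier indices in later command headers can refer
+        // back to earlier commands (or to the pre-existing state at index 0).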
+        let mut events: [KVec<Option<workqueue::QueueEventInfo::ver>>; SQ_COUNT] =
+            Default::default();
+
+        events[SQ_RENDER].push(
+            self.q_frag.as_ref().and_then(|a| a.wq.event_info()),
+            GFP_KERNEL,
+        )?;
+        events[SQ_COMPUTE].push(
+            self.q_comp.as_ref().and_then(|a| a.wq.event_info()),
+            GFP_KERNEL,
+        )?;
+
+        let vm_bind = gpu.bind_vm(&self.vm)?;
+        let vm_slot = vm_bind.slot();
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Creating job\n", id);
+
+        // FIXME: I think this can violate the fence seqno ordering contract.
+        // If we have e.g. a render submission with no barriers and then a compute submission
+        // with no barriers, it's possible for the compute submission to complete first, and
+        // therefore its fence. Maybe we should have separate fence contexts for render
+        // and compute, and then do a ? (Vert+frag should be fine since there is no vert
+        // without frag, and frag always serializes.)
+        let fence: UserFence<JobFence::ver> = self
+            .fence_ctx
+            .new_fence::<JobFence::ver>(
+                0,
+                JobFence::ver {
+                    id,
+                    pending: Default::default(),
+                },
+            )?
+            .into();
+
+        let mut cmdbuf = Reader::new(cmdbuf_raw);
+
+        // First, parse the headers to determine the number of compute/render
+        // commands. This will be used to determine when to flush stamps.
+        //
+        // We also use it to determine how many notifications the job will
+        // generate. We could calculate that in the second pass since we don't
+        // need it until much later, but it's convenient to gather everything at
+        // the same time.
+        let mut nr_commands = 0;
+        let mut last_compute = 0;
+        let mut last_render = 0;
+        let mut nr_render = 0;
+        let mut nr_compute = 0;
+
+        while !cmdbuf.is_empty() {
+            let header: uapi::drm_asahi_cmd_header = cmdbuf.read()?;
+            cmdbuf.skip(header.size as usize);
+            nr_commands += 1;
+
+            match header.cmd_type as u32 {
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_CMD_RENDER => {
+                    last_render = nr_commands;
+                    nr_render += 1;
+                }
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_CMD_COMPUTE => {
+                    last_compute = nr_commands;
+                    nr_compute += 1;
+                }
+                _ => {}
+            }
+        }
+
+        let mut job = self.entity.new_job(
+            1,
+            QueueJob::ver {
+                dev: self.dev.clone(),
+                vm_bind,
+                op_guard,
+                sj_vtx: self
+                    .q_vtx
+                    .as_mut()
+                    .map(|a| a.new_job(Fence::from_fence(&fence))),
+                sj_frag: self
+                    .q_frag
+                    .as_mut()
+                    .map(|a| a.new_job(Fence::from_fence(&fence))),
+                sj_comp: self
+                    .q_comp
+                    .as_mut()
+                    .map(|a| a.new_job(Fence::from_fence(&fence))),
+                fence,
+                notifier: self.inner.notifier.clone(),
+
+                // Each render command generates 2 notifications: 1 for the
+                // vertex part, 1 for the fragment part. Each compute command
+                // generates 1 notification. Sum up to calculate the total
+                // notification count for the job.
+                notification_count: (2 * nr_render) + nr_compute,
+
+                did_run: false,
+                id,
+            },
+        )?;
+
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] Adding {} in_syncs\n",
+            id,
+            in_sync_count
+        );
+        for sync in syncs.drain(0..in_sync_count) {
+            if let Some(fence) = sync.fence {
+                job.add_dependency(fence)?;
+            }
+        }
+
+        // Validate the number of hardware commands, ignoring software commands
+        let nr_hw_commands = nr_render + nr_compute;
+        if nr_hw_commands == 0 || nr_hw_commands > MAX_COMMANDS_PER_SUBMISSION {
+            cls_pr_debug!(
+                Errors,
+                "submit: Command count {} out of valid range [1, {}]\n",
+                nr_hw_commands,
+                MAX_COMMANDS_PER_SUBMISSION
+            );
+            return Err(EINVAL);
+        }
+
+        cmdbuf.rewind();
+
+        let mut command_index = 0;
+        let mut vertex_attachments: microseq::Attachments = Default::default();
+        let mut fragment_attachments: microseq::Attachments = Default::default();
+        let mut compute_attachments: microseq::Attachments = Default::default();
+
+        // Parse the full command buffer submitting as we go
+        while !cmdbuf.is_empty() {
+            let header: uapi::drm_asahi_cmd_header = cmdbuf.read()?;
+            let header_size = header.size as usize;
+
+            // Pre-increment command index to match last_compute/last_render
+            command_index += 1;
+
+            for (queue_idx, index) in [header.vdm_barrier, header.cdm_barrier].iter().enumerate() {
+                if *index == uapi::DRM_ASAHI_BARRIER_NONE as u16 {
+                    continue;
+                }
+                if let Some(event) = events[queue_idx].get(*index as usize).ok_or_else(|| {
+                    cls_pr_debug!(Errors, "Invalid barrier #{}: {}\n", queue_idx, index);
+                    EINVAL
+                })? {
+                    let mut alloc = gpu.alloc();
+                    let queue_job = match header.cmd_type as u32 {
+                        uapi::drm_asahi_cmd_type_DRM_ASAHI_CMD_RENDER => job.get_vtx()?,
+                        uapi::drm_asahi_cmd_type_DRM_ASAHI_CMD_COMPUTE => job.get_comp()?,
+                        _ => return Err(EINVAL),
+                    };
+                    mod_dev_dbg!(self.dev, "[Submission {}] Create Explicit Barrier\n", id);
+                    let barrier = alloc.private.new_init(
+                        kernel::init::zeroed::<fw::workqueue::Barrier>(),
+                        |_inner, _p| {
+                            let queue_job = &queue_job;
+                            try_init!(fw::workqueue::raw::Barrier {
+                                tag: fw::workqueue::CommandType::Barrier,
+                                wait_stamp: event.fw_stamp_pointer,
+                                wait_value: event.value,
+                                wait_slot: event.slot,
+                                stamp_self: queue_job.event_info().value.next(),
+                                uuid: 0xffffbbbb,
+                                external_barrier: 0,
+                                internal_barrier_type: 1,
+                                padding: Default::default(),
+                            })
+                        },
+                    )?;
+                    mod_dev_dbg!(self.dev, "[Submission {}] Add Explicit Barrier\n", id);
+                    queue_job.add(barrier, vm_slot)?;
+                } else {
+                    assert!(*index == 0);
+                }
+            }
+
+            match header.cmd_type as u32 {
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_CMD_RENDER => {
+                    let render: uapi::drm_asahi_cmd_render = cmdbuf.read_up_to(header_size)?;
+
+                    self.inner.submit_render(
+                        &mut job,
+                        &render,
+                        &vertex_attachments,
+                        &fragment_attachments,
+                        objects,
+                        id,
+                        command_index == last_render,
+                    )?;
+                    events[SQ_RENDER].push(
+                        Some(
+                            job.sj_frag
+                                .as_ref()
+                                .expect("No frag queue?")
+                                .job
+                                .as_ref()
+                                .expect("No frag job?")
+                                .event_info(),
+                        ),
+                        GFP_KERNEL,
+                    )?;
+                }
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_CMD_COMPUTE => {
+                    let compute: uapi::drm_asahi_cmd_compute = cmdbuf.read_up_to(header_size)?;
+
+                    self.inner.submit_compute(
+                        &mut job,
+                        &compute,
+                        &compute_attachments,
+                        objects,
+                        id,
+                        command_index == last_compute,
+                    )?;
+                    events[SQ_COMPUTE].push(
+                        Some(
+                            job.sj_comp
+                                .as_ref()
+                                .expect("No comp queue?")
+                                .job
+                                .as_ref()
+                                .expect("No comp job?")
+                                .event_info(),
+                        ),
+                        GFP_KERNEL,
+                    )?;
+                }
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_SET_VERTEX_ATTACHMENTS => {
+                    vertex_attachments = build_attachments(&mut cmdbuf, header_size)?;
+                }
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS => {
+                    fragment_attachments = build_attachments(&mut cmdbuf, header_size)?;
+                }
+                uapi::drm_asahi_cmd_type_DRM_ASAHI_SET_COMPUTE_ATTACHMENTS => {
+                    compute_attachments = build_attachments(&mut cmdbuf, header_size)?;
+                }
+                _ => {
+                    cls_pr_debug!(Errors, "Unknown command type {}\n", header.cmd_type);
+                    return Err(EINVAL);
+                }
+            }
+        }
+
+        mod_dev_dbg!(
+            self.dev,
+            "Queue {}: Committing job {}\n",
+            self.inner.id,
+            job.id
+        );
+        job.commit()?;
+
+        mod_dev_dbg!(self.dev, "Queue {}: Arming job {}\n", self.inner.id, job.id);
+        let mut job = job.arm();
+        let out_fence = job.fences().finished();
+        mod_dev_dbg!(
+            self.dev,
+            "Queue {}: Pushing job {}\n",
+            self.inner.id,
+            job.id
+        );
+        job.push();
+
+        mod_dev_dbg!(
+            self.dev,
+            "Queue {}: Adding {} out_syncs\n",
+            self.inner.id,
+            syncs.len()
+        );
+        for mut sync in syncs {
+            if let Some(chain) = sync.chain_fence.take() {
+                sync.syncobj
+                    .add_point(chain, &out_fence, sync.timeline_value);
+            } else {
+                sync.syncobj.replace_fence(Some(&out_fence));
+            }
+        }
+
+        Ok(())
+    }
+}
+
+#[versions(AGX)]
+impl Drop for Queue::ver {
+    fn drop(&mut self) {
+        mod_dev_dbg!(self.dev, "[Queue {}] Dropping queue\n", self.inner.id);
+    }
+}
diff --git a/drivers/gpu/drm/asahi/queue/render.rs b/drivers/gpu/drm/asahi/queue/render.rs
new file mode 100644
index 00000000000000..e2acd4f5aa0fbd
--- /dev/null
+++ b/drivers/gpu/drm/asahi/queue/render.rs
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![allow(clippy::unusual_byte_groupings)]
+
+//! Render work queue.
+//!
+//! A render queue consists of two underlying WorkQueues, one for vertex and one for fragment work.
+//! This module is in charge of creating all of the firmware structures required to submit 3D
+//! rendering work to the GPU, based on the userspace command buffer.
+
+use super::common;
+use crate::alloc::Allocator;
+use crate::debug::*;
+use crate::driver::AsahiDriver;
+use crate::fw::types::*;
+use crate::gpu::GpuManager;
+use crate::util::*;
+use crate::{buffer, file, fw, gpu, microseq};
+use crate::{inner_ptr, inner_weak_ptr};
+use core::sync::atomic::Ordering;
+use kernel::dma_fence::RawDmaFence;
+use kernel::drm::sched::Job;
+use kernel::prelude::*;
+use kernel::sync::Arc;
+use kernel::types::ForeignOwnable;
+use kernel::uapi;
+use kernel::xarray;
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::Render;
+
+/// Tiling/Vertex control bit to disable using more than one GPU cluster. This results in decreased
+/// throughput but also less latency, which is probably desirable for light vertex loads where the
+/// overhead of clustering/merging would exceed the time it takes to just run the job on one
+/// cluster.
+const TILECTL_DISABLE_CLUSTERING: u32 = 1u32 << 0;
+
+#[versions(AGX)]
+impl super::QueueInner::ver {
+    /// Get the appropriate tiling parameters for a given userspace command buffer.
+    fn get_tiling_params(
+        cmdbuf: &uapi::drm_asahi_cmd_render,
+        num_clusters: u32,
+    ) -> Result<buffer::TileInfo> {
+        let width: u32 = cmdbuf.width_px as u32;
+        let height: u32 = cmdbuf.height_px as u32;
+        let layers: u32 = cmdbuf.layers as u32;
+
+        if layers == 0 || layers > 2048 {
+            cls_pr_debug!(Errors, "Layer count invalid ({})\n", layers);
+            return Err(EINVAL);
+        }
+
+        // This is overflow safe: all these calculations are done in u32.
+        // At 64Kx64K max dimensions above, this is 2**32 pixels max.
+        // In terms of tiles that are always larger than one pixel,
+        // this can never overflow. Note that the actual dimensions
+        // are limited to 16K x 16K below anyway.
+        //
+        // Once we multiply by the layer count, then we need to check
+        // for overflow or use u64.
+
+        let tile_width = 32u32;
+        let tile_height = 32u32;
+
+        let utile_width = cmdbuf.utile_width_px as u32;
+        let utile_height = cmdbuf.utile_height_px as u32;
+
+        match (utile_width, utile_height) {
+            (32, 32) | (32, 16) | (16, 16) => (),
+            _ => {
+                cls_pr_debug!(
+                    Errors,
+                    "uTile size invalid ({} x {})\n",
+                    utile_width,
+                    utile_height
+                );
+                return Err(EINVAL);
+            }
+        };
+
+        let utiles_per_tile_x = tile_width / utile_width;
+        let utiles_per_tile_y = tile_height / utile_height;
+
+        let utiles_per_tile = utiles_per_tile_x * utiles_per_tile_y;
+
+        let tiles_x = width.div_ceil(tile_width);
+        let tiles_y = height.div_ceil(tile_height);
+        let tiles = tiles_x * tiles_y;
+
+        let mtiles_x = 4u32;
+        let mtiles_y = 4u32;
+        let mtiles = mtiles_x * mtiles_y;
+
+        let tiles_per_mtile_x = align(tiles_x.div_ceil(mtiles_x), 4);
+        let tiles_per_mtile_y = align(tiles_y.div_ceil(mtiles_y), 4);
+        let tiles_per_mtile = tiles_per_mtile_x * tiles_per_mtile_y;
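+        // Worked example (hypothetical 1920x1080 render, 32x32 uTiles,
+        // align() rounding up to a multiple):
+        //   tiles_x = ceil(1920/32) = 60, tiles_y = ceil(1080/32) = 34,
+        //   tiles_per_mtile_x = align(ceil(60/4), 4) = 16,
+        //   tiles_per_mtile_y = align(ceil(34/4), 4) = 12,
+        // so each of the 4x4 macrotiles covers 16x12 = 192 tile slots.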
+
+        let mtile_x1 = tiles_per_mtile_x;
+        let mtile_x2 = 2 * tiles_per_mtile_x;
+        let mtile_x3 = 3 * tiles_per_mtile_x;
+
+        let mtile_y1 = tiles_per_mtile_y;
+        let mtile_y2 = 2 * tiles_per_mtile_y;
+        let mtile_y3 = 3 * tiles_per_mtile_y;
+
+        let rgn_entry_size = 5;
+        // Macrotile stride in 32-bit words
+        let rgn_size = align(rgn_entry_size * tiles_per_mtile * utiles_per_tile, 4) / 4;
+        let tilemap_size = (4 * rgn_size * mtiles) as usize * layers as usize;
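+        // tilemap_size is in bytes: rgn_size is the per-macrotile stride in
+        // 32-bit words, multiplied by 4, by the 4x4 macrotile grid, and by
+        // the layer count.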
+
+        let tpc_entry_size = 8;
+        // TPC stride in 32-bit words
+        let tpc_mtile_stride = tpc_entry_size * utiles_per_tile * tiles_per_mtile / 4;
+        let tpc_size =
+            (4 * tpc_mtile_stride * mtiles) as usize * layers as usize * num_clusters as usize;
+
+        // No idea where this comes from, but it fits what macOS does...
+        // GUESS: Number of 32K heap blocks to fit a 5-byte region header/pointer per tile?
+        // That would make a ton of sense...
+        let meta1_layer_stride = if num_clusters > 1 {
+            (align(tiles_x, 2) * align(tiles_y, 4) * utiles_per_tile).div_ceil(0x1980)
+        } else {
+            0
+        };
+
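+        // Heuristic: roughly one TVB block per 128 tiles, aligned up to a
+        // multiple of 8; clustered configurations additionally get a small
+        // per-layer minimum below.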
+        let mut min_tvb_blocks = align((tiles_x * tiles_y).div_ceil(128), 8);
+
+        if num_clusters > 1 {
+            min_tvb_blocks = min_tvb_blocks.max(7 + 2 * layers);
+        }
+
+        Ok(buffer::TileInfo {
+            tiles_x,
+            tiles_y,
+            tiles,
+            utile_width,
+            utile_height,
+            //mtiles_x,
+            //mtiles_y,
+            tiles_per_mtile_x,
+            tiles_per_mtile_y,
+            //tiles_per_mtile,
+            utiles_per_mtile_x: tiles_per_mtile_x * utiles_per_tile_x,
+            utiles_per_mtile_y: tiles_per_mtile_y * utiles_per_tile_y,
+            //utiles_per_mtile: tiles_per_mtile * utiles_per_tile,
+            tilemap_size,
+            tpc_size,
+            meta1_layer_stride,
+            #[ver(G < G14X)]
+            meta1_blocks: meta1_layer_stride * (cmdbuf.layers as u32),
+            #[ver(G >= G14X)]
+            meta1_blocks: meta1_layer_stride,
+            layermeta_size: if layers > 1 { 0x100 } else { 0 },
+            min_tvb_blocks: min_tvb_blocks as usize,
+            params: fw::vertex::raw::TilingParameters {
+                rgn_size,
+                unk_4: 0x88,
+                ppp_ctrl: cmdbuf.ppp_ctrl,
+                x_max: (width - 1) as u16,
+                y_max: (height - 1) as u16,
+                te_screen: ((tiles_y - 1) << 12) | (tiles_x - 1),
+                te_mtile1: mtile_x3 | (mtile_x2 << 9) | (mtile_x1 << 18),
+                te_mtile2: mtile_y3 | (mtile_y2 << 9) | (mtile_y1 << 18),
+                tiles_per_mtile,
+                tpc_stride: tpc_mtile_stride,
+                unk_24: 0x100,
+                unk_28: if layers > 1 {
+                    0xe000 | (layers - 1)
+                } else {
+                    0x8000
+                },
+                helper_cfg: cmdbuf.vertex_helper.cfg,
+                __pad: Default::default(),
+            },
+        })
+    }
+
+    /// Submit work to a render queue.
+    pub(super) fn submit_render(
+        &self,
+        job: &mut Job<super::QueueJob::ver>,
+        cmdbuf: &uapi::drm_asahi_cmd_render,
+        vertex_attachments: &microseq::Attachments,
+        fragment_attachments: &microseq::Attachments,
+        objects: Pin<&xarray::XArray<KBox<file::Object>>>,
+        id: u64,
+        flush_stamps: bool,
+    ) -> Result {
+        mod_dev_dbg!(self.dev, "[Submission {}] Render!\n", id);
+
+        if cmdbuf.flags
+            & !(uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_VERTEX_SCRATCH
+                | uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES
+                | uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING
+                | uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_DBIAS_IS_INT) as u32
+            != 0
+        {
+            cls_pr_debug!(Errors, "Invalid flags ({:#x})\n", cmdbuf.flags);
+            return Err(EINVAL);
+        }
+
+        if cmdbuf.width_px == 0
+            || cmdbuf.height_px == 0
+            || cmdbuf.width_px > 16384
+            || cmdbuf.height_px > 16384
+        {
+            cls_pr_debug!(
+                Errors,
+                "Invalid dimensions ({}x{})\n",
+                cmdbuf.width_px,
+                cmdbuf.height_px
+            );
+            return Err(EINVAL);
+        }
+
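+        // Resolve the optional user timestamp objects for the vertex and
+        // fragment passes.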
+        let mut vtx_user_timestamps: fw::job::UserTimestamps = Default::default();
+        let mut frg_user_timestamps: fw::job::UserTimestamps = Default::default();
+
+        vtx_user_timestamps.start = common::get_timestamp_object(objects, cmdbuf.ts_vtx.start)?;
+        vtx_user_timestamps.end = common::get_timestamp_object(objects, cmdbuf.ts_vtx.end)?;
+        frg_user_timestamps.start = common::get_timestamp_object(objects, cmdbuf.ts_frag.start)?;
+        frg_user_timestamps.end = common::get_timestamp_object(objects, cmdbuf.ts_frag.end)?;
+
+        let data = unsafe { &<KBox<AsahiDriver>>::borrow(self.dev.as_ref().get_drvdata()).data };
+        let gpu = match data.gpu.as_any().downcast_ref::<gpu::GpuManager::ver>() {
+            Some(gpu) => gpu,
+            None => {
+                dev_crit!(self.dev.as_ref(), "GpuManager mismatched with Queue!\n");
+                return Err(EIO);
+            }
+        };
+
+        let nclusters = gpu.get_dyncfg().id.num_clusters;
+
+        // Can be set to false to disable clustering (for simpler jobs), but then the
+        // core masks below should be adjusted to cover a single rolling cluster.
+        let mut clustering = nclusters > 1;
+
+        if debug_enabled(debug::DebugFlags::DisableClustering)
+            || cmdbuf.flags
+                & uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING as u32
+                != 0
+        {
+            clustering = false;
+        }
+
+        #[ver(G != G14)]
+        let tiling_control = {
+            let render_cfg = gpu.get_cfg().render;
+            let mut tiling_control = render_cfg.tiling_control;
+
+            if !clustering {
+                tiling_control |= TILECTL_DISABLE_CLUSTERING;
+            }
+            tiling_control
+        };
+
+        let mut alloc = gpu.alloc();
+        let kalloc = &mut *alloc;
+
+        // This sequence number seems to increase each time a new client/VM is
+        // assigned to some slot, but it's unclear *which* slot...
+        let slot_client_seq: u8 = (self.id & 0xff) as u8;
+
+        let tile_info = Self::get_tiling_params(&cmdbuf, if clustering { nclusters } else { 1 })?;
+
+        let buffer = &self.buffer;
+        let notifier = self.notifier.clone();
+
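+        // Grow the TVB if previous submissions overflowed it, then make sure it
+        // has at least the minimum block count for these dimensions.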
+        let tvb_autogrown = buffer.auto_grow()?;
+        if tvb_autogrown {
+            let new_size = buffer.block_count() as usize;
+            cls_dev_dbg!(
+                TVBStats,
+                &self.dev,
+                "[Submission {}] TVB grew to {} bytes ({} blocks) due to overflows\n",
+                id,
+                new_size * buffer::BLOCK_SIZE,
+                new_size,
+            );
+        }
+
+        let tvb_grown = buffer.ensure_blocks(tile_info.min_tvb_blocks)?;
+        if tvb_grown {
+            cls_dev_dbg!(
+                TVBStats,
+                &self.dev,
+                "[Submission {}] TVB grew to {} bytes ({} blocks) due to dimensions ({}x{})\n",
+                id,
+                tile_info.min_tvb_blocks * buffer::BLOCK_SIZE,
+                tile_info.min_tvb_blocks,
+                cmdbuf.width_px,
+                cmdbuf.height_px
+            );
+        }
+
+        let scene = Arc::new(buffer.new_scene(kalloc, &tile_info)?, GFP_KERNEL)?;
+
+        let vm_bind = job.vm_bind.clone();
+
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] VM slot = {}\n",
+            id,
+            vm_bind.slot()
+        );
+
+        let ev_vtx = job.get_vtx()?.event_info();
+        let ev_frag = job.get_frag()?.event_info();
+
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] Vert event #{} -> {:#x?}\n",
+            id,
+            ev_vtx.slot,
+            ev_vtx.value.next(),
+        );
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] Frag event #{} -> {:#x?}\n",
+            id,
+            ev_frag.slot,
+            ev_frag.value.next(),
+        );
+
+        let uuid_3d = 0;
+        let uuid_ta = 0;
+
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] Vert UUID = {:#x?}\n",
+            id,
+            uuid_ta
+        );
+        mod_dev_dbg!(
+            self.dev,
+            "[Submission {}] Frag UUID = {:#x?}\n",
+            id,
+            uuid_3d
+        );
+
+        let fence = job.fence.clone();
+        let frag_job = job.get_frag()?;
+
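+        // Barrier on the fragment queue: wait for the vertex stamp to reach its
+        // next value before the fragment work item can run.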
+        mod_dev_dbg!(self.dev, "[Submission {}] Create Barrier\n", id);
+        let barrier = kalloc.private.new_init(
+            kernel::init::zeroed::<fw::workqueue::Barrier>(),
+            |_inner, _p| {
+                try_init!(fw::workqueue::raw::Barrier {
+                    tag: fw::workqueue::CommandType::Barrier,
+                    wait_stamp: ev_vtx.fw_stamp_pointer,
+                    wait_value: ev_vtx.value.next(),
+                    wait_slot: ev_vtx.slot,
+                    stamp_self: ev_frag.value.next(),
+                    uuid: uuid_3d,
+                    external_barrier: 0,
+                    internal_barrier_type: 0,
+                    padding: Default::default(),
+                })
+            },
+        )?;
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Add Barrier\n", id);
+        frag_job.add(barrier, vm_bind.slot())?;
+
+        let timestamps = Arc::new(
+            kalloc.shared.new_default::<fw::job::RenderTimestamps>()?,
+            GFP_KERNEL,
+        )?;
+
+        let unk1 = false;
+
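+        // Tile config bits: 0x280 unless unk1 is set, bit 0 for multi-layer
+        // rendering, bit 16 to process empty tiles.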
+        let mut tile_config: u64 = 0;
+        if !unk1 {
+            tile_config |= 0x280;
+        }
+        if cmdbuf.layers > 1 {
+            tile_config |= 1;
+        }
+        if cmdbuf.flags & uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES as u32
+            != 0
+        {
+            tile_config |= 0x10000;
+        }
+
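+        // Only 1x, 2x and 4x MSAA are supported; store the sample count as log2.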
+        let samples_log2 = match cmdbuf.samples {
+            1 => 0,
+            2 => 1,
+            4 => 2,
+            _ => {
+                cls_pr_debug!(Errors, "Invalid sample count {}\n", cmdbuf.samples);
+                return Err(EINVAL);
+            }
+        };
+
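+        // Pack the utile width/height (in 16-pixel units) and the MSAA mode
+        // into the utile config word.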
+        let utile_config = ((tile_info.utile_width / 16) << 12)
+            | ((tile_info.utile_height / 16) << 14)
+            | samples_log2;
+
+        // Calculate the number of 2KiB blocks to allocate per utile. This is
+        // just a bit of dimensional analysis.
+        let pixels_per_utile: u32 =
+            (cmdbuf.utile_width_px as u32) * (cmdbuf.utile_height_px as u32);
+        let samples_per_utile: u32 = pixels_per_utile << samples_log2;
+        let utile_size_bytes: u32 = (cmdbuf.sample_size_B as u32) * samples_per_utile;
+        let block_size_bytes: u32 = 2048;
+        let blocks_per_utile: u32 = utile_size_bytes.div_ceil(block_size_bytes);
+
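+        // Fragment tiling config value for G14X+ (written to register 0x16068
+        // below): tile counts minus one at bits 44/53, plus flags derived from
+        // unk1, the layer count and the utile config.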
+        #[ver(G >= G14X)]
+        let frg_tilecfg = 0x00000000_00036011
+            | (((tile_info.tiles_x - 1) as u64) << 44)
+            | (((tile_info.tiles_y - 1) as u64) << 53)
+            | (if unk1 { 0 } else { 0x20_00000000 })
+            | (if cmdbuf.layers > 1 { 0x1_00000000 } else { 0 })
+            | ((utile_config as u64 & 0xf000) << 28);
+
+        // TODO: check
+        #[ver(V >= V13_0B4)]
+        let count_frag = self.counter.fetch_add(2, Ordering::Relaxed);
+        #[ver(V >= V13_0B4)]
+        let count_vtx = count_frag + 1;
+
+        // Unknowns handling
+
+        #[ver(G >= G14)]
+        let g14_unk = 0x4040404;
+        #[ver(G < G14)]
+        let g14_unk = 0;
+        #[ver(G < G14X)]
+        let frg_unk_140 = 0x8c60;
+        let frg_unk_158 = 0x1c;
+        #[ver(G >= G14)]
+        let load_bgobjvals = cmdbuf.isp_bgobjvals as u64;
+        #[ver(G < G14)]
+        let load_bgobjvals = cmdbuf.isp_bgobjvals as u64 | 0x400;
+        let reload_zlsctrl = cmdbuf.zls_ctrl;
+        let iogpu_unk54 = 0x3a0012006b0003;
+        let iogpu_unk56 = 1;
+        #[ver(G < G14)]
+        let tiling_control_2 = 0;
+        #[ver(G >= G14X)]
+        let tiling_control_2 = 4;
+        #[ver(G >= G14X)]
+        let vtx_unk_f0 = 0x1c;
+        #[ver(G < G14)]
+        let vtx_unk_f0 = 0x1c + (align(tile_info.meta1_blocks, 4) as u64);
+        let vtx_unk_118 = 0x1c;
+
+        // DRM_ASAHI_RENDER_DBIAS_IS_INT chosen to match hardware bit.
+        let isp_ctl = 0xc000u32
+            | (cmdbuf.flags & uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_DBIAS_IS_INT as u32);
+
+        // Always allow preemption at the UAPI level
+        let no_preemption = false;
+
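+        // Build the fragment work item. Its microsequence is StartFragment,
+        // optional start timestamp, WaitForIdle, optional end timestamp,
+        // FinalizeFragment, RetireStamp.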
+        mod_dev_dbg!(self.dev, "[Submission {}] Create Frag\n", id);
+        let frag = GpuObject::new_init_prealloc(
+            kalloc.gpu_ro.alloc_object()?,
+            |ptr: GpuWeakPointer<fw::fragment::RunFragment::ver>| {
+                let scene = scene.clone();
+                let notifier = notifier.clone();
+                let vm_bind = vm_bind.clone();
+                let timestamps = timestamps.clone();
+                let private = &mut kalloc.private;
+                try_init!(fw::fragment::RunFragment::ver {
+                    micro_seq: {
+                        let mut builder = microseq::Builder::new();
+
+                        let stats = inner_weak_ptr!(
+                            gpu.initdata.runtime_pointers.stats.frag.weak_pointer(),
+                            stats
+                        );
+
+                        let start_frag = builder.add(microseq::StartFragment::ver {
+                            header: microseq::op::StartFragment::HEADER,
+                            #[ver(G < G14X)]
+                            job_params2: Some(inner_weak_ptr!(ptr, job_params2)),
+                            #[ver(G < G14X)]
+                            job_params1: Some(inner_weak_ptr!(ptr, job_params1)),
+                            #[ver(G >= G14X)]
+                            job_params1: None,
+                            #[ver(G >= G14X)]
+                            job_params2: None,
+                            #[ver(G >= G14X)]
+                            registers: inner_weak_ptr!(ptr, registers),
+                            scene: scene.gpu_pointer(),
+                            stats,
+                            busy_flag: inner_weak_ptr!(ptr, busy_flag),
+                            tvb_overflow_count: inner_weak_ptr!(ptr, tvb_overflow_count),
+                            unk_pointer: inner_weak_ptr!(ptr, unk_pointee),
+                            work_queue: ev_frag.info_ptr,
+                            work_item: ptr,
+                            vm_slot: vm_bind.slot(),
+                            unk_50: 0x1, // fixed
+                            event_generation: self.id as u32,
+                            buffer_slot: scene.slot(),
+                            sync_grow: 0,
+                            event_seq: U64(ev_frag.event_seq),
+                            unk_68: 0,
+                            unk_758_flag: inner_weak_ptr!(ptr, unk_758_flag),
+                            unk_job_buf: inner_weak_ptr!(ptr, unk_buf_0),
+                            #[ver(V >= V13_3)]
+                            unk_7c_0: U64(0),
+                            unk_7c: 0,
+                            unk_80: 0,
+                            unk_84: unk1.into(),
+                            uuid: uuid_3d,
+                            attachments: *fragment_attachments,
+                            padding: 0,
+                            #[ver(V >= V13_0B4)]
+                            counter: U64(count_frag),
+                            #[ver(V >= V13_0B4)]
+                            notifier_buf: inner_weak_ptr!(notifier.weak_pointer(), state.unk_buf),
+                        })?;
+
+                        if frg_user_timestamps.any() {
+                            builder.add(microseq::Timestamp::ver {
+                                header: microseq::op::Timestamp::new(true),
+                                command_time: inner_weak_ptr!(ptr, command_time),
+                                ts_pointers: inner_weak_ptr!(ptr, timestamp_pointers),
+                                update_ts: inner_weak_ptr!(ptr, timestamp_pointers.start_addr),
+                                work_queue: ev_frag.info_ptr,
+                                user_ts_pointers: inner_weak_ptr!(ptr, user_timestamp_pointers),
+                                #[ver(V >= V13_0B4)]
+                                unk_ts: inner_weak_ptr!(ptr, unk_ts),
+                                uuid: uuid_3d,
+                                unk_30_padding: 0,
+                            })?;
+                        }
+
+                        #[ver(G < G14X)]
+                        builder.add(microseq::WaitForIdle {
+                            header: microseq::op::WaitForIdle::new(microseq::Pipe::Fragment),
+                        })?;
+                        #[ver(G >= G14X)]
+                        builder.add(microseq::WaitForIdle2 {
+                            header: microseq::op::WaitForIdle2::HEADER,
+                        })?;
+
+                        if frg_user_timestamps.any() {
+                            builder.add(microseq::Timestamp::ver {
+                                header: microseq::op::Timestamp::new(false),
+                                command_time: inner_weak_ptr!(ptr, command_time),
+                                ts_pointers: inner_weak_ptr!(ptr, timestamp_pointers),
+                                update_ts: inner_weak_ptr!(ptr, timestamp_pointers.end_addr),
+                                work_queue: ev_frag.info_ptr,
+                                user_ts_pointers: inner_weak_ptr!(ptr, user_timestamp_pointers),
+                                #[ver(V >= V13_0B4)]
+                                unk_ts: inner_weak_ptr!(ptr, unk_ts),
+                                uuid: uuid_3d,
+                                unk_30_padding: 0,
+                            })?;
+                        }
+
+                        let off = builder.offset_to(start_frag);
+                        builder.add(microseq::FinalizeFragment::ver {
+                            header: microseq::op::FinalizeFragment::HEADER,
+                            uuid: uuid_3d,
+                            unk_8: 0,
+                            fw_stamp: ev_frag.fw_stamp_pointer,
+                            stamp_value: ev_frag.value.next(),
+                            unk_18: 0,
+                            scene: scene.weak_pointer(),
+                            buffer: scene.weak_buffer_pointer(),
+                            unk_2c: U64(1),
+                            stats,
+                            unk_pointer: inner_weak_ptr!(ptr, unk_pointee),
+                            busy_flag: inner_weak_ptr!(ptr, busy_flag),
+                            work_queue: ev_frag.info_ptr,
+                            work_item: ptr,
+                            vm_slot: vm_bind.slot(),
+                            unk_60: 0,
+                            unk_758_flag: inner_weak_ptr!(ptr, unk_758_flag),
+                            #[ver(V >= V13_3)]
+                            unk_6c_0: U64(0),
+                            unk_6c: U64(0),
+                            unk_74: U64(0),
+                            unk_7c: U64(0),
+                            unk_84: U64(0),
+                            unk_8c: U64(0),
+                            #[ver(G == G14 && V < V13_0B4)]
+                            unk_8c_g14: U64(0),
+                            restart_branch_offset: off,
+                            has_attachments: (fragment_attachments.count > 0) as u32,
+                            #[ver(V >= V13_0B4)]
+                            unk_9c: Default::default(),
+                        })?;
+
+                        builder.add(microseq::RetireStamp {
+                            header: microseq::op::RetireStamp::HEADER,
+                        })?;
+
+                        builder.build(private)?
+                    },
+                    notifier,
+                    scene,
+                    vm_bind,
+                    aux_fb: self.ualloc.lock().array_empty_tagged(0x8000, b"AXFB")?,
+                    timestamps,
+                    user_timestamps: frg_user_timestamps,
+                })
+            },
+            |inner, _ptr| {
+                let vm_slot = vm_bind.slot();
+                let aux_fb_info = fw::fragment::raw::AuxFBInfo::ver {
+                    isp_ctl,
+                    unk2: 0,
+                    width: cmdbuf.width_px as u32,
+                    height: cmdbuf.height_px as u32,
+                    #[ver(V >= V13_0B4)]
+                    unk3: U64(0x100000),
+                };
+
+                try_init!(fw::fragment::raw::RunFragment::ver {
+                    tag: fw::workqueue::CommandType::RunFragment,
+                    #[ver(V >= V13_0B4)]
+                    counter: U64(count_frag),
+                    vm_slot,
+                    unk_8: 0,
+                    microsequence: inner.micro_seq.gpu_pointer(),
+                    microsequence_size: inner.micro_seq.len() as u32,
+                    notifier: inner.notifier.gpu_pointer(),
+                    buffer: inner.scene.buffer_pointer(),
+                    scene: inner.scene.gpu_pointer(),
+                    unk_buffer_buf: inner.scene.kernel_buffer_pointer(),
+                    tvb_tilemap: inner.scene.tvb_tilemap_pointer(),
+                    ppp_multisamplectl: U64(cmdbuf.ppp_multisamplectl),
+                    samples: cmdbuf.samples as u32,
+                    tiles_per_mtile_y: tile_info.tiles_per_mtile_y as u16,
+                    tiles_per_mtile_x: tile_info.tiles_per_mtile_x as u16,
+                    unk_50: U64(0),
+                    unk_58: U64(0),
+                    isp_merge_upper_x: F32::from_bits(cmdbuf.isp_merge_upper_x),
+                    isp_merge_upper_y: F32::from_bits(cmdbuf.isp_merge_upper_y),
+                    unk_68: U64(0),
+                    tile_count: U64(tile_info.tiles as u64),
+                    #[ver(G < G14X)]
+                    job_params1 <- try_init!(fw::fragment::raw::JobParameters1::ver {
+                        utile_config,
+                        unk_4: 0,
+                        bg: fw::fragment::raw::BackgroundProgram {
+                            rsrc_spec: U64(cmdbuf.bg.rsrc_spec as u64),
+                            address: U64(cmdbuf.bg.usc as u64),
+                        },
+                        ppp_multisamplectl: U64(cmdbuf.ppp_multisamplectl),
+                        isp_scissor_base: U64(cmdbuf.isp_scissor_base),
+                        isp_dbias_base: U64(cmdbuf.isp_dbias_base),
+                        isp_oclqry_base: U64(cmdbuf.isp_oclqry_base),
+                        aux_fb_info,
+                        isp_zls_pixels: U64(cmdbuf.isp_zls_pixels as u64),
+                        zls_ctrl: U64(cmdbuf.zls_ctrl),
+                        #[ver(G >= G14)]
+                        unk_58_g14_0: U64(g14_unk),
+                        #[ver(G >= G14)]
+                        unk_58_g14_8: U64(0),
+                        z_load: U64(cmdbuf.depth.base),
+                        z_store: U64(cmdbuf.depth.base),
+                        s_load: U64(cmdbuf.stencil.base),
+                        s_store: U64(cmdbuf.stencil.base),
+                        #[ver(G >= G14)]
+                        unk_68_g14_0: Default::default(),
+                        z_load_stride: U64(cmdbuf.depth.stride as u64),
+                        z_store_stride: U64(cmdbuf.depth.stride as u64),
+                        s_load_stride: U64(cmdbuf.stencil.stride as u64),
+                        s_store_stride: U64(cmdbuf.stencil.stride as u64),
+                        z_load_comp: U64(cmdbuf.depth.comp_base),
+                        z_load_comp_stride: U64(cmdbuf.depth.comp_stride as u64),
+                        z_store_comp: U64(cmdbuf.depth.comp_base),
+                        z_store_comp_stride: U64(cmdbuf.depth.comp_stride as u64),
+                        s_load_comp: U64(cmdbuf.stencil.comp_base),
+                        s_load_comp_stride: U64(cmdbuf.stencil.comp_stride as u64),
+                        s_store_comp: U64(cmdbuf.stencil.comp_base),
+                        s_store_comp_stride: U64(cmdbuf.stencil.comp_stride as u64),
+                        tvb_tilemap: inner.scene.tvb_tilemap_pointer(),
+                        tvb_layermeta: inner.scene.tvb_layermeta_pointer(),
+                        mtile_stride_dwords: U64((4 * tile_info.params.rgn_size as u64) << 24),
+                        tvb_heapmeta: inner.scene.tvb_heapmeta_pointer(),
+                        tile_config: U64(tile_config),
+                        aux_fb: inner.aux_fb.gpu_pointer(),
+                        unk_108: Default::default(),
+                        usc_exec_base_isp: U64(self.usc_exec_base),
+                        unk_140: U64(frg_unk_140),
+                        helper_program: cmdbuf.fragment_helper.binary,
+                        unk_14c: 0,
+                        helper_arg: U64(cmdbuf.fragment_helper.data),
+                        unk_158: U64(frg_unk_158),
+                        unk_160: U64(0),
+                        __pad: Default::default(),
+                        #[ver(V < V13_0B4)]
+                        __pad1: Default::default(),
+                    }),
+                    #[ver(G < G14X)]
+                    job_params2 <- try_init!(fw::fragment::raw::JobParameters2 {
+                        eot_rsrc_spec: cmdbuf.eot.rsrc_spec,
+                        eot_usc: cmdbuf.eot.usc,
+                        unk_8: 0x0,
+                        unk_c: 0x0,
+                        isp_merge_upper_x: F32::from_bits(cmdbuf.isp_merge_upper_x),
+                        isp_merge_upper_y: F32::from_bits(cmdbuf.isp_merge_upper_y),
+                        unk_18: U64(0x0),
+                        utiles_per_mtile_y: tile_info.utiles_per_mtile_y as u16,
+                        utiles_per_mtile_x: tile_info.utiles_per_mtile_x as u16,
+                        unk_24: 0x0,
+                        tile_counts: ((tile_info.tiles_y - 1) << 12) | (tile_info.tiles_x - 1),
+                        tib_blocks: blocks_per_utile,
+                        isp_bgobjdepth: cmdbuf.isp_bgobjdepth,
+                        // TODO: does this flag need to be exposed to userspace?
+                        isp_bgobjvals: load_bgobjvals as u32,
+                        unk_38: 0x0,
+                        unk_3c: 0x1,
+                        helper_cfg: cmdbuf.fragment_helper.cfg,
+                        __pad: Default::default(),
+                    }),
+                    #[ver(G >= G14X)]
+                    registers: fw::job::raw::RegisterArray::new(
+                        inner_weak_ptr!(_ptr, registers.registers),
+                        |r| {
+                            r.add(0x1739, 1);
+                            r.add(0x10009, utile_config.into());
+                            r.add(0x15379, cmdbuf.eot.rsrc_spec.into());
+                            r.add(0x15381, cmdbuf.eot.usc.into());
+                            r.add(0x15369, cmdbuf.bg.rsrc_spec.into());
+                            r.add(0x15371, cmdbuf.bg.usc.into());
+                            r.add(0x15131, cmdbuf.isp_merge_upper_x.into());
+                            r.add(0x15139, cmdbuf.isp_merge_upper_y.into());
+                            r.add(0x100a1, 0);
+                            r.add(0x15069, 0);
+                            r.add(0x15071, 0); // pointer
+                            r.add(0x16058, 0);
+                            r.add(0x10019, cmdbuf.ppp_multisamplectl);
+                            let isp_mtile_size = (tile_info.utiles_per_mtile_y
+                                | (tile_info.utiles_per_mtile_x << 16))
+                                .into();
+                            r.add(0x100b1, isp_mtile_size); // ISP_MTILE_SIZE
+                            r.add(0x16030, isp_mtile_size); // ISP_MTILE_SIZE
+                            r.add(
+                                0x100d9,
+                                (((tile_info.tiles_y - 1) << 12) | (tile_info.tiles_x - 1)).into(),
+                            ); // TE_SCREEN
+                            r.add(0x16098, inner.scene.tvb_heapmeta_pointer().into());
+                            r.add(0x15109, cmdbuf.isp_scissor_base); // ISP_SCISSOR_BASE
+                            r.add(0x15101, cmdbuf.isp_dbias_base); // ISP_DBIAS_BASE
+                            r.add(0x15021, isp_ctl.into()); // aux_fb_info.unk_1
+                            r.add(
+                                0x15211,
+                                ((cmdbuf.height_px as u64) << 32) | cmdbuf.width_px as u64,
+                            ); // aux_fb_info.{width, height}
+                            r.add(0x15049, 0x100000); // s2.aux_fb_info.unk3
+                            r.add(0x10051, blocks_per_utile.into()); // s1.unk_2c
+                            r.add(0x15321, cmdbuf.isp_zls_pixels.into()); // ISP_ZLS_PIXELS
+                            r.add(0x15301, cmdbuf.isp_bgobjdepth.into()); // ISP_BGOBJDEPTH
+                            r.add(0x15309, load_bgobjvals); // ISP_BGOBJVALS
+                            r.add(0x15311, cmdbuf.isp_oclqry_base); // ISP_OCLQRY_BASE
+                            r.add(0x15319, cmdbuf.zls_ctrl); // ISP_ZLSCTL
+                            r.add(0x15349, g14_unk); // s2.unk_58_g14_0
+                            r.add(0x15351, 0); // s2.unk_58_g14_8
+                            r.add(0x15329, cmdbuf.depth.base); // ISP_ZLOAD_BASE
+                            r.add(0x15331, cmdbuf.depth.base); // ISP_ZSTORE_BASE
+                            r.add(0x15339, cmdbuf.stencil.base); // ISP_STENCIL_LOAD_BASE
+                            r.add(0x15341, cmdbuf.stencil.base); // ISP_STENCIL_STORE_BASE
+                            r.add(0x15231, 0);
+                            r.add(0x15221, 0);
+                            r.add(0x15239, 0);
+                            r.add(0x15229, 0);
+                            r.add(0x15401, cmdbuf.depth.stride as u64); // load
+                            r.add(0x15421, cmdbuf.depth.stride as u64); // store
+                            r.add(0x15409, cmdbuf.stencil.stride as u64); // load
+                            r.add(0x15429, cmdbuf.stencil.stride as u64);
+                            r.add(0x153c1, cmdbuf.depth.comp_base); // load
+                            r.add(0x15411, cmdbuf.depth.comp_stride as u64); // load
+                            r.add(0x153c9, cmdbuf.depth.comp_base); // store
+                            r.add(0x15431, cmdbuf.depth.comp_stride as u64); // store
+                            r.add(0x153d1, cmdbuf.stencil.comp_base); // load
+                            r.add(0x15419, cmdbuf.stencil.comp_stride as u64); // load
+                            r.add(0x153d9, cmdbuf.stencil.comp_base); // store
+                            r.add(0x15439, cmdbuf.stencil.comp_stride as u64); // store
+                            r.add(0x16429, inner.scene.tvb_tilemap_pointer().into());
+                            r.add(0x16060, inner.scene.tvb_layermeta_pointer().into());
+                            r.add(0x16431, (4 * tile_info.params.rgn_size as u64) << 24); // ISP_RGN?
+                            r.add(0x10039, tile_config); // tile_config ISP_CTL?
+                            r.add(0x16451, 0x0); // ISP_RENDER_ORIGIN
+                            r.add(0x11821, cmdbuf.fragment_helper.binary.into());
+                            r.add(0x11829, cmdbuf.fragment_helper.data);
+                            r.add(0x11f79, cmdbuf.fragment_helper.cfg.into());
+                            r.add(0x15359, 0);
+                            r.add(0x10069, self.usc_exec_base); // frag; USC_EXEC_BASE_ISP
+                            r.add(0x16020, 0);
+                            r.add(0x16461, inner.aux_fb.gpu_pointer().into());
+                            r.add(0x16090, inner.aux_fb.gpu_pointer().into());
+                            r.add(0x120a1, frg_unk_158);
+                            r.add(0x160a8, 0);
+                            r.add(0x16068, frg_tilecfg);
+                            r.add(0x160b8, 0x0);
+                            /*
+                            r.add(0x10201, 0x100); // Some kind of counter?? Does this matter?
+                            r.add(0x10428, 0x100); // Some kind of counter?? Does this matter?
+                            r.add(0x1c838, 1);  // ?
+                            r.add(0x1ca28, 0x1502960f00); // ??
+                            r.add(0x1731, 0x1); // ??
+                            */
+                        }
+                    ),
+                    job_params3 <- try_init!(fw::fragment::raw::JobParameters3::ver {
+                        isp_dbias_base: fw::fragment::raw::ArrayAddr {
+                            ptr: U64(cmdbuf.isp_dbias_base),
+                            unk_padding: U64(0),
+                        },
+                        isp_scissor_base: fw::fragment::raw::ArrayAddr {
+                            ptr: U64(cmdbuf.isp_scissor_base),
+                            unk_padding: U64(0),
+                        },
+                        isp_oclqry_base: U64(cmdbuf.isp_oclqry_base),
+                        unk_118: U64(0x0),
+                        unk_120: Default::default(),
+                        unk_partial_bg: fw::fragment::raw::BackgroundProgram {
+                            rsrc_spec: U64(cmdbuf.partial_bg.rsrc_spec as u64),
+                            address: U64(cmdbuf.partial_bg.usc as u64),
+                        },
+                        unk_258: U64(0),
+                        unk_260: U64(0),
+                        unk_268: U64(0),
+                        unk_270: U64(0),
+                        partial_bg: fw::fragment::raw::BackgroundProgram {
+                            rsrc_spec: U64(cmdbuf.partial_bg.rsrc_spec as u64),
+                            address: U64(cmdbuf.partial_bg.usc as u64),
+                        },
+                        zls_ctrl: U64(reload_zlsctrl),
+                        unk_290: U64(g14_unk),
+                        z_load: U64(cmdbuf.depth.base),
+                        z_partial_stride: U64(cmdbuf.depth.stride as u64),
+                        z_partial_comp_stride: U64(cmdbuf.depth.comp_stride as u64),
+                        z_store: U64(cmdbuf.depth.base),
+                        z_partial: U64(cmdbuf.depth.base),
+                        z_partial_comp: U64(cmdbuf.depth.comp_base),
+                        s_load: U64(cmdbuf.stencil.base),
+                        s_partial_stride: U64(cmdbuf.stencil.stride as u64),
+                        s_partial_comp_stride: U64(cmdbuf.stencil.comp_stride as u64),
+                        s_store: U64(cmdbuf.stencil.base),
+                        s_partial: U64(cmdbuf.stencil.base),
+                        s_partial_comp: U64(cmdbuf.stencil.comp_base),
+                        unk_2f8: Default::default(),
+                        tib_blocks: blocks_per_utile,
+                        unk_30c: 0x0,
+                        aux_fb_info,
+                        tile_config: U64(tile_config),
+                        unk_328_padding: Default::default(),
+                        unk_partial_eot: fw::fragment::raw::EotProgram::new(
+                            cmdbuf.partial_eot.rsrc_spec,
+                            cmdbuf.partial_eot.usc
+                        ),
+                        partial_eot: fw::fragment::raw::EotProgram::new(
+                            cmdbuf.partial_eot.rsrc_spec,
+                            cmdbuf.partial_eot.usc
+                        ),
+                        isp_bgobjdepth: cmdbuf.isp_bgobjdepth,
+                        isp_bgobjvals: cmdbuf.isp_bgobjvals,
+                        sample_size: cmdbuf.sample_size_B as u32,
+                        unk_37c: 0x0,
+                        unk_380: U64(0x0),
+                        unk_388: U64(0x0),
+                        #[ver(V >= V13_0B4)]
+                        unk_390_0: U64(0x0),
+                        isp_zls_pixels: U64(cmdbuf.isp_zls_pixels as u64),
+                    }),
+                    unk_758_flag: 0,
+                    unk_75c_flag: 0,
+                    unk_buf: Default::default(),
+                    busy_flag: 0,
+                    tvb_overflow_count: 0,
+                    unk_878: 0,
+                    encoder_params <- try_init!(fw::job::raw::EncoderParams {
+                        // Maybe set when reloading z/s?
+                        unk_8: 0,
+                        sync_grow: 0,
+                        unk_10: 0x0, // fixed
+                        encoder_id: 0,
+                        unk_18: 0x0, // fixed
+                        unk_mask: 0xffffffffu32,
+                        sampler_array: U64(cmdbuf.sampler_heap),
+                        sampler_count: cmdbuf.sampler_count as u32,
+                        sampler_max: (cmdbuf.sampler_count as u32) + 1,
+                    }),
+                    process_empty_tiles: (cmdbuf.flags
+                        & uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES as u32
+                        != 0) as u32,
+                    // TODO: needs to be investigated
+                    no_clear_pipeline_textures: 1,
+                    // TODO: needs to be investigated
+                    msaa_zs: 0,
+                    unk_pointee: 0,
+                    #[ver(V >= V13_3)]
+                    unk_v13_3: 0,
+                    meta <- try_init!(fw::job::raw::JobMeta {
+                        unk_0: 0,
+                        unk_2: 0,
+                        no_preemption: no_preemption as u8,
+                        stamp: ev_frag.stamp_pointer,
+                        fw_stamp: ev_frag.fw_stamp_pointer,
+                        stamp_value: ev_frag.value.next(),
+                        stamp_slot: ev_frag.slot,
+                        evctl_index: 0, // fixed
+                        flush_stamps: flush_stamps as u32,
+                        uuid: uuid_3d,
+                        event_seq: ev_frag.event_seq as u32,
+                    }),
+                    unk_after_meta: unk1.into(),
+                    unk_buf_0: U64(0),
+                    unk_buf_8: U64(0),
+                    #[ver(G < G14X)]
+                    unk_buf_10: U64(1),
+                    #[ver(G >= G14X)]
+                    unk_buf_10: U64(0),
+                    command_time: U64(0),
+                    timestamp_pointers <- try_init!(fw::job::raw::TimestampPointers {
+                        start_addr: Some(inner_ptr!(inner.timestamps.gpu_pointer(), frag.start)),
+                        end_addr: Some(inner_ptr!(inner.timestamps.gpu_pointer(), frag.end)),
+                    }),
+                    user_timestamp_pointers: inner.user_timestamps.pointers()?,
+                    client_sequence: slot_client_seq,
+                    pad_925: Default::default(),
+                    unk_928: 0,
+                    unk_92c: 0,
+                    #[ver(V >= V13_0B4)]
+                    unk_ts: U64(0),
+                    #[ver(V >= V13_0B4)]
+                    unk_92d_8: Default::default(),
+                })
+            },
+        )?;
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Add Frag\n", id);
+        fence.add_command();
+
+        frag_job.add_cb(frag, vm_bind.slot(), move |error| {
+            if let Some(err) = error {
+                fence.set_error(err.into());
+            }
+
+            fence.command_complete();
+        })?;
+
+        let fence = job.fence.clone();
+        let vtx_job = job.get_vtx()?;
+
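+        // If the scene was rebound or the TVB grew, queue an InitBuffer command
+        // ahead of the vertex work, carrying the updated block count.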
+        if scene.rebind() || tvb_grown || tvb_autogrown {
+            mod_dev_dbg!(self.dev, "[Submission {}] Create Bind Buffer\n", id);
+            let bind_buffer = kalloc.private.new_init(
+                {
+                    let scene = scene.clone();
+                    try_init!(fw::buffer::InitBuffer::ver { scene })
+                },
+                |inner, _ptr| {
+                    let vm_slot = vm_bind.slot();
+                    try_init!(fw::buffer::raw::InitBuffer::ver {
+                        tag: fw::workqueue::CommandType::InitBuffer,
+                        vm_slot,
+                        buffer_slot: inner.scene.slot(),
+                        unk_c: 0,
+                        block_count: buffer.block_count(),
+                        buffer: inner.scene.buffer_pointer(),
+                        stamp_value: ev_vtx.value.next(),
+                    })
+                },
+            )?;
+
+            mod_dev_dbg!(self.dev, "[Submission {}] Add Bind Buffer\n", id);
+            vtx_job.add(bind_buffer, vm_bind.slot())?;
+        }
+
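+        // Build the vertex (tiling) work item; its microsequence mirrors the
+        // fragment one: StartVertex, optional timestamps around WaitForIdle,
+        // FinalizeVertex, RetireStamp.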
+        mod_dev_dbg!(self.dev, "[Submission {}] Create Vertex\n", id);
+        let vtx = GpuObject::new_init_prealloc(
+            kalloc.gpu_ro.alloc_object()?,
+            |ptr: GpuWeakPointer<fw::vertex::RunVertex::ver>| {
+                let scene = scene.clone();
+                let vm_bind = vm_bind.clone();
+                let timestamps = timestamps.clone();
+                let private = &mut kalloc.private;
+                try_init!(fw::vertex::RunVertex::ver {
+                    micro_seq: {
+                        let mut builder = microseq::Builder::new();
+
+                        let stats = inner_weak_ptr!(
+                            gpu.initdata.runtime_pointers.stats.vtx.weak_pointer(),
+                            stats
+                        );
+
+                        let start_vtx = builder.add(microseq::StartVertex::ver {
+                            header: microseq::op::StartVertex::HEADER,
+                            #[ver(G < G14X)]
+                            tiling_params: Some(inner_weak_ptr!(ptr, tiling_params)),
+                            #[ver(G < G14X)]
+                            job_params1: Some(inner_weak_ptr!(ptr, job_params1)),
+                            #[ver(G >= G14X)]
+                            tiling_params: None,
+                            #[ver(G >= G14X)]
+                            job_params1: None,
+                            #[ver(G >= G14X)]
+                            registers: inner_weak_ptr!(ptr, registers),
+                            buffer: scene.weak_buffer_pointer(),
+                            scene: scene.weak_pointer(),
+                            stats,
+                            work_queue: ev_vtx.info_ptr,
+                            vm_slot: vm_bind.slot(),
+                            unk_38: 1, // fixed
+                            event_generation: self.id as u32,
+                            buffer_slot: scene.slot(),
+                            unk_44: 0,
+                            event_seq: U64(ev_vtx.event_seq),
+                            unk_50: 0,
+                            unk_pointer: inner_weak_ptr!(ptr, unk_pointee),
+                            unk_job_buf: inner_weak_ptr!(ptr, unk_buf_0),
+                            unk_64: 0x0, // fixed
+                            unk_68: unk1.into(),
+                            uuid: uuid_ta,
+                            attachments: *vertex_attachments,
+                            padding: 0,
+                            #[ver(V >= V13_0B4)]
+                            counter: U64(count_vtx),
+                            #[ver(V >= V13_0B4)]
+                            notifier_buf: inner_weak_ptr!(notifier.weak_pointer(), state.unk_buf),
+                            #[ver(V < V13_0B4)]
+                            unk_178: 0x0, // padding?
+                            #[ver(V >= V13_0B4)]
+                            unk_178: (!clustering) as u32,
+                        })?;
+
+                        if vtx_user_timestamps.any() {
+                            builder.add(microseq::Timestamp::ver {
+                                header: microseq::op::Timestamp::new(true),
+                                command_time: inner_weak_ptr!(ptr, command_time),
+                                ts_pointers: inner_weak_ptr!(ptr, timestamp_pointers),
+                                update_ts: inner_weak_ptr!(ptr, timestamp_pointers.start_addr),
+                                work_queue: ev_vtx.info_ptr,
+                                user_ts_pointers: inner_weak_ptr!(ptr, user_timestamp_pointers),
+                                #[ver(V >= V13_0B4)]
+                                unk_ts: inner_weak_ptr!(ptr, unk_ts),
+                                uuid: uuid_ta,
+                                unk_30_padding: 0,
+                            })?;
+                        }
+
+                        #[ver(G < G14X)]
+                        builder.add(microseq::WaitForIdle {
+                            header: microseq::op::WaitForIdle::new(microseq::Pipe::Vertex),
+                        })?;
+                        #[ver(G >= G14X)]
+                        builder.add(microseq::WaitForIdle2 {
+                            header: microseq::op::WaitForIdle2::HEADER,
+                        })?;
+
+                        if vtx_user_timestamps.any() {
+                            builder.add(microseq::Timestamp::ver {
+                                header: microseq::op::Timestamp::new(false),
+                                command_time: inner_weak_ptr!(ptr, command_time),
+                                ts_pointers: inner_weak_ptr!(ptr, timestamp_pointers),
+                                update_ts: inner_weak_ptr!(ptr, timestamp_pointers.end_addr),
+                                work_queue: ev_vtx.info_ptr,
+                                user_ts_pointers: inner_weak_ptr!(ptr, user_timestamp_pointers),
+                                #[ver(V >= V13_0B4)]
+                                unk_ts: inner_weak_ptr!(ptr, unk_ts),
+                                uuid: uuid_ta,
+                                unk_30_padding: 0,
+                            })?;
+                        }
+
+                        let off = builder.offset_to(start_vtx);
+                        builder.add(microseq::FinalizeVertex::ver {
+                            header: microseq::op::FinalizeVertex::HEADER,
+                            scene: scene.weak_pointer(),
+                            buffer: scene.weak_buffer_pointer(),
+                            stats,
+                            work_queue: ev_vtx.info_ptr,
+                            vm_slot: vm_bind.slot(),
+                            unk_28: 0x0, // fixed
+                            unk_pointer: inner_weak_ptr!(ptr, unk_pointee),
+                            unk_34: 0x0, // fixed
+                            uuid: uuid_ta,
+                            fw_stamp: ev_vtx.fw_stamp_pointer,
+                            stamp_value: ev_vtx.value.next(),
+                            unk_48: U64(0x0), // fixed
+                            unk_50: 0x0,      // fixed
+                            unk_54: 0x0,      // fixed
+                            unk_58: U64(0x0), // fixed
+                            unk_60: 0x0,      // fixed
+                            unk_64: 0x0,      // fixed
+                            unk_68: 0x0,      // fixed
+                            #[ver(G >= G14 && V < V13_0B4)]
+                            unk_68_g14: U64(0),
+                            restart_branch_offset: off,
+                            has_attachments: (vertex_attachments.count > 0) as u32,
+                            #[ver(V >= V13_0B4)]
+                            unk_74: Default::default(), // Ventura
+                        })?;
+
+                        builder.add(microseq::RetireStamp {
+                            header: microseq::op::RetireStamp::HEADER,
+                        })?;
+                        builder.build(private)?
+                    },
+                    notifier,
+                    scene,
+                    vm_bind,
+                    timestamps,
+                    user_timestamps: vtx_user_timestamps,
+                })
+            },
+            |inner, _ptr| {
+                let vm_slot = vm_bind.slot();
+                #[ver(G < G14)]
+                let core_masks = gpu.core_masks_packed();
+
+                try_init!(fw::vertex::raw::RunVertex::ver {
+                    tag: fw::workqueue::CommandType::RunVertex,
+                    #[ver(V >= V13_0B4)]
+                    counter: U64(count_vtx),
+                    vm_slot,
+                    unk_8: 0,
+                    notifier: inner.notifier.gpu_pointer(),
+                    buffer_slot: inner.scene.slot(),
+                    unk_1c: 0,
+                    buffer: inner.scene.buffer_pointer(),
+                    scene: inner.scene.gpu_pointer(),
+                    unk_buffer_buf: inner.scene.kernel_buffer_pointer(),
+                    unk_34: 0,
+                    #[ver(G < G14X)]
+                    job_params1 <- try_init!(fw::vertex::raw::JobParameters1::ver {
+                        unk_0: U64(if unk1 { 0 } else { 0x200 }), // sometimes 0
+                        unk_8: f32!(1e-20),                       // fixed
+                        unk_c: f32!(1e-20),                       // fixed
+                        tvb_tilemap: inner.scene.tvb_tilemap_pointer(),
+                        #[ver(G < G14)]
+                        tvb_cluster_tilemaps: inner.scene.cluster_tilemaps_pointer(),
+                        tpc: inner.scene.tpc_pointer(),
+                        tvb_heapmeta: inner.scene.tvb_heapmeta_pointer().or(0x8000_0000_0000_0000),
+                        iogpu_unk_54: U64(iogpu_unk54), // fixed
+                        iogpu_unk_56: U64(iogpu_unk56), // fixed
+                        #[ver(G < G14)]
+                        tvb_cluster_meta1: inner
+                            .scene
+                            .meta_1_pointer()
+                            .map(|x| x.or((tile_info.meta1_layer_stride as u64) << 50)),
+                        utile_config,
+                        unk_4c: 0,
+                        ppp_multisamplectl: U64(cmdbuf.ppp_multisamplectl), // fixed
+                        tvb_layermeta: inner.scene.tvb_layermeta_pointer(),
+                        #[ver(G < G14)]
+                        tvb_cluster_layermeta: inner.scene.tvb_cluster_layermeta_pointer(),
+                        #[ver(G < G14)]
+                        core_mask: Array::new([
+                            *core_masks.first().unwrap_or(&0),
+                            *core_masks.get(1).unwrap_or(&0),
+                        ]),
+                        preempt_buf1: inner.scene.preempt_buf_1_pointer(),
+                        preempt_buf2: inner.scene.preempt_buf_2_pointer(),
+                        unk_80: U64(0x1), // fixed
+                        preempt_buf3: inner.scene.preempt_buf_3_pointer().or(0x4_0000_0000_0000), // check
+                        vdm_ctrl_stream_base: U64(cmdbuf.vdm_ctrl_stream_base),
+                        #[ver(G < G14)]
+                        tvb_cluster_meta2: inner.scene.meta_2_pointer(),
+                        #[ver(G < G14)]
+                        tvb_cluster_meta3: inner.scene.meta_3_pointer(),
+                        #[ver(G < G14)]
+                        tiling_control,
+                        #[ver(G < G14)]
+                        unk_ac: tiling_control_2 as u32, // fixed
+                        unk_b0: Default::default(), // fixed
+                        usc_exec_base_ta: U64(self.usc_exec_base),
+                        #[ver(G < G14)]
+                        tvb_cluster_meta4: inner
+                            .scene
+                            .meta_4_pointer()
+                            .map(|x| x.or(0x3000_0000_0000_0000)),
+                        #[ver(G < G14)]
+                        unk_f0: U64(vtx_unk_f0),
+                        unk_f8: U64(0x8c60),     // fixed
+                        helper_program: cmdbuf.vertex_helper.binary,
+                        unk_104: 0,
+                        helper_arg: U64(cmdbuf.vertex_helper.data),
+                        unk_110: Default::default(),      // fixed
+                        unk_118: vtx_unk_118 as u32, // fixed
+                        __pad: Default::default(),
+                    }),
+                    #[ver(G < G14X)]
+                    tiling_params: tile_info.params,
+                    #[ver(G >= G14X)]
+                    registers: fw::job::raw::RegisterArray::new(
+                        inner_weak_ptr!(_ptr, registers.registers),
+                        |r| {
+                            r.add(0x10141, if unk1 { 0 } else { 0x200 }); // s2.unk_0
+                            r.add(0x1c039, inner.scene.tvb_tilemap_pointer().into());
+                            r.add(0x1c9c8, inner.scene.tvb_tilemap_pointer().into());
+
+                            let cl_tilemaps_ptr = inner
+                                .scene
+                                .cluster_tilemaps_pointer()
+                                .map_or(0, |a| a.into());
+                            r.add(0x1c041, cl_tilemaps_ptr);
+                            r.add(0x1c9d0, cl_tilemaps_ptr);
+                            r.add(0x1c0a1, inner.scene.tpc_pointer().into()); // TE_TPC_ADDR
+
+                            let tvb_heapmeta_ptr = inner
+                                .scene
+                                .tvb_heapmeta_pointer()
+                                .or(0x8000_0000_0000_0000)
+                                .into();
+                            r.add(0x1c031, tvb_heapmeta_ptr);
+                            r.add(0x1c9c0, tvb_heapmeta_ptr);
+                            r.add(0x1c051, iogpu_unk54); // iogpu_unk_54/55
+                            r.add(0x1c061, iogpu_unk56); // iogpu_unk_56
+                            r.add(0x10149, utile_config.into()); // s2.unk_48 utile_config
+                            r.add(0x10139, cmdbuf.ppp_multisamplectl); // PPP_MULTISAMPLECTL
+                            r.add(0x10111, inner.scene.preempt_buf_1_pointer().into());
+                            r.add(0x1c9b0, inner.scene.preempt_buf_1_pointer().into());
+                            r.add(0x10119, inner.scene.preempt_buf_2_pointer().into());
+                            r.add(0x1c9b8, inner.scene.preempt_buf_2_pointer().into());
+                            r.add(0x1c958, 1); // s2.unk_80
+                            r.add(
+                                0x1c950,
+                                inner
+                                    .scene
+                                    .preempt_buf_3_pointer()
+                                    .or(0x4_0000_0000_0000)
+                                    .into(),
+                            );
+                            r.add(0x1c930, 0); // VCE related addr, lsb to enable
+                            r.add(0x1c880, cmdbuf.vdm_ctrl_stream_base); // VDM_CTRL_STREAM_BASE
+                            r.add(0x1c898, 0x0); // if lsb set, faults in UL1C0, possibly missing addr.
+                            r.add(
+                                0x1c948,
+                                inner.scene.meta_2_pointer().map_or(0, |a| a.into()),
+                            ); // tvb_cluster_meta2
+                            r.add(
+                                0x1c888,
+                                inner.scene.meta_3_pointer().map_or(0, |a| a.into()),
+                            ); // tvb_cluster_meta3
+                            r.add(0x1c890, tiling_control.into()); // tvb_tiling_control
+                            r.add(0x1c918, tiling_control_2);
+                            r.add(0x1c079, inner.scene.tvb_layermeta_pointer().into());
+                            r.add(0x1c9d8, inner.scene.tvb_layermeta_pointer().into());
+                            let cl_layermeta_pointer =
+                                inner.scene.tvb_cluster_layermeta_pointer().map_or(0, |a| a.into());
+                            r.add(0x1c089, cl_layermeta_pointer);
+                            r.add(0x1c9e0, cl_layermeta_pointer);
+                            let cl_meta_4_pointer =
+                                inner.scene.meta_4_pointer().map_or(0, |a| a.into());
+                            r.add(0x16c41, cl_meta_4_pointer); // tvb_cluster_meta4
+                            r.add(0x1ca40, cl_meta_4_pointer); // tvb_cluster_meta4
+                            r.add(0x1c9a8, vtx_unk_f0); // + meta1_blocks? min_free_tvb_pages?
+                            r.add(
+                                0x1c920,
+                                inner.scene.meta_1_pointer().map_or(0, |a| a.into()),
+                            ); // ??? | meta1_blocks?
+                            r.add(0x10151, 0);
+                            r.add(0x1c199, 0);
+                            r.add(0x1c1a1, 0);
+                            r.add(0x1c1a9, 0); // 0x10151 bit 1 enables
+                            r.add(0x1c1b1, 0);
+                            r.add(0x1c1b9, 0);
+                            r.add(0x10061, self.usc_exec_base); // USC_EXEC_BASE_TA
+                            r.add(0x11801, cmdbuf.vertex_helper.binary.into());
+                            r.add(0x11809, cmdbuf.vertex_helper.data);
+                            r.add(0x11f71, cmdbuf.vertex_helper.cfg.into());
+                            r.add(0x1c0b1, tile_info.params.rgn_size.into()); // TE_PSG
+                            r.add(0x1c850, tile_info.params.rgn_size.into());
+                            r.add(0x10131, tile_info.params.unk_4.into());
+                            r.add(0x10121, tile_info.params.ppp_ctrl.into()); // PPP_CTRL
+                            r.add(
+                                0x10129,
+                                tile_info.params.x_max as u64
+                                    | ((tile_info.params.y_max as u64) << 16),
+                            ); // PPP_SCREEN
+                            r.add(0x101b9, tile_info.params.te_screen.into()); // TE_SCREEN
+                            r.add(0x1c069, tile_info.params.te_mtile1.into()); // TE_MTILE1
+                            r.add(0x1c071, tile_info.params.te_mtile2.into()); // TE_MTILE2
+                            r.add(0x1c081, tile_info.params.tiles_per_mtile.into()); // TE_MTILE
+                            r.add(0x1c0a9, tile_info.params.tpc_stride.into()); // TE_TPC
+                            r.add(0x10171, tile_info.params.unk_24.into());
+                            r.add(0x10169, tile_info.params.unk_28.into()); // TA_RENDER_TARGET_MAX
+                            r.add(0x12099, vtx_unk_118);
+                            r.add(0x1c9e8, (tile_info.params.unk_28 & 0x4fff).into());
+                            /*
+                            r.add(0x10209, 0x100); // Some kind of counter?? Does this matter?
+                            r.add(0x1c9f0, 0x100); // Some kind of counter?? Does this matter?
+                            r.add(0x1c830, 1); // ?
+                            r.add(0x1ca30, 0x1502960e60); // ?
+                            r.add(0x16c39, 0x1502960e60); // ?
+                            r.add(0x1c910, 0xa0000b011d); // ?
+                            r.add(0x1c8e0, 0xff); // cluster mask
+                            r.add(0x1c8e8, 0); // ?
+                            */
+                        }
+                    ),
+                    tpc: inner.scene.tpc_pointer(),
+                    tpc_size: U64(tile_info.tpc_size as u64),
+                    microsequence: inner.micro_seq.gpu_pointer(),
+                    microsequence_size: inner.micro_seq.len() as u32,
+                    fragment_stamp_slot: ev_frag.slot,
+                    fragment_stamp_value: ev_frag.value.next(),
+                    unk_pointee: 0,
+                    unk_pad: 0,
+                    job_params2 <- try_init!(fw::vertex::raw::JobParameters2 {
+                        unk_480: Default::default(), // fixed
+                        unk_498: U64(0x0),           // fixed
+                        unk_4a0: 0x0,                // fixed
+                        preempt_buf1: inner.scene.preempt_buf_1_pointer(),
+                        unk_4ac: 0x0,      // fixed
+                        unk_4b0: U64(0x0), // fixed
+                        unk_4b8: 0x0,      // fixed
+                        unk_4bc: U64(0x0), // fixed
+                        unk_4c4_padding: Default::default(),
+                        unk_50c: 0x0,      // fixed
+                        unk_510: U64(0x0), // fixed
+                        unk_518: U64(0x0), // fixed
+                        unk_520: U64(0x0), // fixed
+                    }),
+                    encoder_params <- try_init!(fw::job::raw::EncoderParams {
+                        unk_8: 0x0,     // fixed
+                        sync_grow: 0x0, // fixed
+                        unk_10: 0x0,    // fixed
+                        encoder_id: 0,
+                        unk_18: 0x0, // fixed
+                        unk_mask: 0xffffffffu32,
+                        sampler_array: U64(cmdbuf.sampler_heap),
+                        sampler_count: cmdbuf.sampler_count as u32,
+                        sampler_max: (cmdbuf.sampler_count as u32) + 1,
+                    }),
+                    unk_55c: 0,
+                    unk_560: 0,
+                    sync_grow: 0,
+                    unk_568: 0,
+                    uses_scratch: (cmdbuf.flags
+                        & uapi::drm_asahi_render_flags_DRM_ASAHI_RENDER_VERTEX_SCRATCH as u32
+                        != 0) as u32,
+                    meta <- try_init!(fw::job::raw::JobMeta {
+                        unk_0: 0,
+                        unk_2: 0,
+                        no_preemption: no_preemption as u8,
+                        stamp: ev_vtx.stamp_pointer,
+                        fw_stamp: ev_vtx.fw_stamp_pointer,
+                        stamp_value: ev_vtx.value.next(),
+                        stamp_slot: ev_vtx.slot,
+                        evctl_index: 0, // fixed
+                        flush_stamps: flush_stamps as u32,
+                        uuid: uuid_ta,
+                        event_seq: ev_vtx.event_seq as u32,
+                    }),
+                    unk_after_meta: unk1.into(),
+                    unk_buf_0: U64(0),
+                    unk_buf_8: U64(0),
+                    unk_buf_10: U64(0),
+                    command_time: U64(0),
+                    timestamp_pointers <- try_init!(fw::job::raw::TimestampPointers {
+                        start_addr: Some(inner_ptr!(inner.timestamps.gpu_pointer(), vtx.start)),
+                        end_addr: Some(inner_ptr!(inner.timestamps.gpu_pointer(), vtx.end)),
+                    }),
+                    user_timestamp_pointers: inner.user_timestamps.pointers()?,
+                    client_sequence: slot_client_seq,
+                    pad_5d5: Default::default(),
+                    unk_5d8: 0,
+                    unk_5dc: 0,
+                    #[ver(V >= V13_0B4)]
+                    unk_ts: U64(0),
+                    #[ver(V >= V13_0B4)]
+                    unk_5dd_8: Default::default(),
+                })
+            },
+        )?;
+
+        core::mem::drop(alloc);
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Add Vertex\n", id);
+        fence.add_command();
+        vtx_job.add_cb(vtx, vm_bind.slot(), move |error| {
+            if let Some(err) = error {
+                fence.set_error(err.into())
+            }
+
+            fence.command_complete();
+        })?;
+
+        mod_dev_dbg!(self.dev, "[Submission {}] Increment counters\n", id);
+
+        // TODO: handle rollbacks, move to job submit?
+        buffer.increment();
+
+        job.get_vtx()?.next_seq();
+        job.get_frag()?.next_seq();
+
+        Ok(())
+    }
+}
diff --git a/drivers/gpu/drm/asahi/regs.rs b/drivers/gpu/drm/asahi/regs.rs
new file mode 100644
index 00000000000000..4395682c45339a
--- /dev/null
+++ b/drivers/gpu/drm/asahi/regs.rs
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU MMIO register abstraction
+//!
+//! Since the vast majority of the interactions with the GPU are brokered through the firmware,
+//! there is very little need to interact directly with GPU MMIO registers. This module abstracts
+//! the few operations that require that, mainly reading the MMU fault status, reading GPU ID
+//! information, and starting the GPU firmware coprocessor.
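+//!
+//! A rough usage sketch (the `pdev` and `cfg` values are assumed to come from the driver's probe
+//! path and static hardware configuration, respectively):
+//!
+//! ```ignore
+//! let res = regs::Resources::new(&mut pdev)?;
+//! res.init_mmio()?;
+//! let gpu_id = res.get_gpu_id()?; // decode the ID/core-mask registers into a GpuIdConfig
+//! res.start_cpu()?;               // set CPU_RUN to release the ASC firmware coprocessor
+//!
+//! // Later, from the MMU fault handling path:
+//! if let Some(info) = res.get_fault_info(cfg) {
+//!     // info.address, info.unit and info.reason describe the faulting access.
+//! }
+//! ```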
+
+use crate::hw;
+use kernel::{c_str, devres::Devres, io::mem::IoMem, platform, prelude::*};
+
+/// Size of the ASC control MMIO region.
+pub(crate) const ASC_CTL_SIZE: usize = 0x4000;
+
+/// Size of the SGX MMIO region.
+pub(crate) const SGX_SIZE: usize = 0x1000000;
+
+const CPU_CONTROL: usize = 0x44;
+const CPU_RUN: u32 = 0x1 << 4; // BIT(4)
+
+const FAULT_INFO: usize = 0x17030;
+
+const ID_VERSION: usize = 0xd04000;
+const ID_UNK08: usize = 0xd04008;
+const ID_COUNTS_1: usize = 0xd04010;
+const ID_COUNTS_2: usize = 0xd04014;
+const ID_UNK18: usize = 0xd04018;
+const ID_CLUSTERS: usize = 0xd0401c;
+
+const CORE_MASK_0: usize = 0xd01500;
+const CORE_MASK_1: usize = 0xd01514;
+
+const CORE_MASKS_G14X: usize = 0xe01500;
+const FAULT_INFO_G14X: usize = 0xd8c0;
+const FAULT_ADDR_G14X: usize = 0xd8c8;
+
+/// Enum representing the unit that caused an MMU fault.
+#[allow(non_camel_case_types)]
+#[allow(clippy::upper_case_acronyms)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum FaultUnit {
+    /// Decompress / pixel fetch
+    DCMP(u8),
+    /// USC L1 Cache (device loads/stores)
+    UL1C(u8),
+    /// Compress / pixel store
+    CMP(u8),
+    GSL1(u8),
+    IAP(u8),
+    VCE(u8),
+    /// Tiling Engine
+    TE(u8),
+    RAS(u8),
+    /// Vertex Data Master
+    VDM(u8),
+    PPP(u8),
+    /// ISP Parameter Fetch
+    IPF(u8),
+    IPF_CPF(u8),
+    VF(u8),
+    VF_CPF(u8),
+    /// Depth/Stencil load/store
+    ZLS(u8),
+
+    /// Parameter Management
+    dPM,
+    /// Compute Data Master
+    dCDM_KS(u8),
+    dIPP,
+    dIPP_CS,
+    /// Vertex Data Master
+    dVDM_CSD,
+    dVDM_SSD,
+    dVDM_ILF,
+    dVDM_ILD,
+    dRDE(u8),
+    FC,
+    GSL2,
+
+    /// Graphics L2 Cache Control?
+    GL2CC_META(u8),
+    GL2CC_MB,
+
+    /// Parameter Management
+    gPM_SP(u8),
+    /// Vertex Data Master - CSD
+    gVDM_CSD_SP(u8),
+    gVDM_SSD_SP(u8),
+    gVDM_ILF_SP(u8),
+    gVDM_TFP_SP(u8),
+    gVDM_MMB_SP(u8),
+    /// Compute Data Master
+    gCDM_CS_KS0_SP(u8),
+    gCDM_CS_KS1_SP(u8),
+    gCDM_CS_KS2_SP(u8),
+    gCDM_KS0_SP(u8),
+    gCDM_KS1_SP(u8),
+    gCDM_KS2_SP(u8),
+    gIPP_SP(u8),
+    gIPP_CS_SP(u8),
+    gRDE0_SP(u8),
+    gRDE1_SP(u8),
+
+    gCDM_CS,
+    gCDM_ID,
+    gCDM_CSR,
+    gCDM_CSW,
+    gCDM_CTXR,
+    gCDM_CTXW,
+    gIPP,
+    gIPP_CS,
+    gKSM_RCE,
+
+    Unknown(u8),
+}
+
+/// Reason for an MMU fault.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum FaultReason {
+    Unmapped,
+    AfFault,
+    WriteOnly,
+    ReadOnly,
+    NoAccess,
+    Unknown(u8),
+}
+
+/// Collection of information about an MMU fault.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) struct FaultInfo {
+    pub(crate) address: u64,
+    pub(crate) sideband: u8,
+    pub(crate) vm_slot: u32,
+    pub(crate) unit_code: u8,
+    pub(crate) unit: FaultUnit,
+    pub(crate) level: u8,
+    pub(crate) unk_5: u8,
+    pub(crate) read: bool,
+    pub(crate) reason: FaultReason,
+}
+
+/// Device resources for this GPU instance.
+pub(crate) struct Resources {
+    dev: platform::Device,
+    asc: Devres<IoMem<ASC_CTL_SIZE>>,
+    sgx: Devres<IoMem<SGX_SIZE>>,
+}
+
+impl Resources {
+    /// Map the required resources given our platform device.
+    pub(crate) fn new(pdev: &mut platform::Device) -> Result<Resources> {
+        let asc_res = pdev.resource_by_name(c_str!("asc")).ok_or(EINVAL)?;
+        let asc_iomem = pdev.ioremap_resource_sized::<ASC_CTL_SIZE>(asc_res)?;
+
+        let sgx_res = pdev.resource_by_name(c_str!("sgx")).ok_or(EINVAL)?;
+        let sgx_iomem = pdev.ioremap_resource_sized::<SGX_SIZE>(sgx_res)?;
+
+        Ok(Resources {
+            // SAFETY: This device does DMA via the UAT IOMMU.
+            dev: pdev.clone(),
+            asc: asc_iomem,
+            sgx: sgx_iomem,
+        })
+    }
+
+    fn sgx_read32<const OFF: usize>(&self) -> u32 {
+        if let Some(sgx) = self.sgx.try_access() {
+            sgx.readl_relaxed(OFF)
+        } else {
+            0
+        }
+    }
+
+    /* Not yet used
+    fn sgx_write32<const OFF: usize>(&self, val: u32) {
+        if let Some(sgx) = self.sgx.try_access() {
+            sgx.writel_relaxed(val, OFF)
+        }
+    }
+    */
+
+    fn sgx_read64<const OFF: usize>(&self) -> u64 {
+        if let Some(sgx) = self.sgx.try_access() {
+            sgx.readq_relaxed(OFF)
+        } else {
+            0
+        }
+    }
+
+    /* Not yet used
+    fn sgx_write64<const OFF: usize>(&self, val: u64) {
+        if let Some(sgx) = self.sgx.try_access() {
+            sgx.writeq_relaxed(val, OFF)
+        }
+    }
+    */
+
+    /// Initialize the MMIO registers for the GPU.
+    pub(crate) fn init_mmio(&self) -> Result {
+        // Nothing to do for now...
+
+        Ok(())
+    }
+
+    /// Start the ASC coprocessor CPU.
+    pub(crate) fn start_cpu(&self) -> Result {
+        let res = self.asc.try_access().ok_or(ENXIO)?;
+        let val = res.readl_relaxed(CPU_CONTROL);
+
+        res.writel_relaxed(val | CPU_RUN, CPU_CONTROL);
+
+        Ok(())
+    }
+
+    /// Get the GPU identification info from registers.
+    ///
+    /// See [`hw::GpuIdConfig`] for the result.
+    pub(crate) fn get_gpu_id(&self) -> Result<hw::GpuIdConfig> {
+        let id_version = self.sgx_read32::<ID_VERSION>();
+        let id_unk08 = self.sgx_read32::<ID_UNK08>();
+        let id_counts_1 = self.sgx_read32::<ID_COUNTS_1>();
+        let id_counts_2 = self.sgx_read32::<ID_COUNTS_2>();
+        let id_unk18 = self.sgx_read32::<ID_UNK18>();
+        let id_clusters = self.sgx_read32::<ID_CLUSTERS>();
+
+        dev_info!(
+            self.dev.as_ref(),
+            "GPU ID registers: {:#x} {:#x} {:#x} {:#x} {:#x} {:#x}\n",
+            id_version,
+            id_unk08,
+            id_counts_1,
+            id_counts_2,
+            id_unk18,
+            id_clusters
+        );
+
+        let gpu_gen = (id_version >> 24) & 0xff;
+
+        let mut core_mask_regs = KVec::new();
+
+        let num_clusters = match gpu_gen {
+            4 | 5 => {
+                // G13 | G14G
+                core_mask_regs.push(self.sgx_read32::<CORE_MASK_0>(), GFP_KERNEL)?;
+                core_mask_regs.push(self.sgx_read32::<CORE_MASK_1>(), GFP_KERNEL)?;
+                (id_clusters >> 12) & 0xff
+            }
+            6 => {
+                // G14X
+                core_mask_regs.push(self.sgx_read32::<CORE_MASKS_G14X>(), GFP_KERNEL)?;
+                core_mask_regs.push(self.sgx_read32::<{ CORE_MASKS_G14X + 4 }>(), GFP_KERNEL)?;
+                core_mask_regs.push(self.sgx_read32::<{ CORE_MASKS_G14X + 8 }>(), GFP_KERNEL)?;
+                // Clusters per die * num dies
+                ((id_counts_1 >> 8) & 0xff) * ((id_counts_1 >> 16) & 0xf)
+            }
+            a => {
+                dev_err!(self.dev.as_ref(), "Unknown GPU generation {}\n", a);
+                return Err(ENODEV);
+            }
+        };
+
+        let mut core_masks_packed = KVec::new();
+        core_masks_packed.extend_from_slice(&core_mask_regs, GFP_KERNEL)?;
+
+        dev_info!(self.dev.as_ref(), "Core masks: {:#x?}\n", core_masks_packed);
+
+        let num_cores = id_counts_1 & 0xff;
+
+        if num_cores > 32 {
+            dev_err!(
+                self.dev.as_ref(),
+                "Too many cores per cluster ({} > 32)\n",
+                num_cores
+            );
+            return Err(ENODEV);
+        }
+
+        if num_cores * num_clusters > (core_mask_regs.len() * 32) as u32 {
+            dev_err!(
+                self.dev.as_ref(),
+                "Too many total cores ({} x {} > {})\n",
+                num_clusters,
+                num_cores,
+                core_mask_regs.len() * 32
+            );
+            return Err(ENODEV);
+        }
+
+        let mut core_masks = KVec::new();
+        let mut total_active_cores: u32 = 0;
+
+        let max_core_mask = ((1u64 << num_cores) - 1) as u32;
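+        // Each cluster occupies `num_cores` bits of the packed core mask registers; peel them
+        // off one cluster at a time, shifting the whole register array down as we go.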
+        for _ in 0..num_clusters {
+            let mask = core_mask_regs[0] & max_core_mask;
+            core_masks.push(mask, GFP_KERNEL)?;
+            for i in 0..core_mask_regs.len() {
+                core_mask_regs[i] >>= num_cores;
+                if i < (core_mask_regs.len() - 1) {
+                    core_mask_regs[i] |= core_mask_regs[i + 1] << (32 - num_cores);
+                }
+            }
+            total_active_cores += mask.count_ones();
+        }
+
+        if core_mask_regs.iter().any(|a| *a != 0) {
+            dev_err!(
+                self.dev.as_ref(),
+                "Leftover core mask: {:#x?}\n",
+                core_mask_regs
+            );
+            return Err(EIO);
+        }
+
+        let (gpu_rev, gpu_rev_id) = match (id_version >> 8) & 0xff {
+            0x00 => (hw::GpuRevision::A0, hw::GpuRevisionID::A0),
+            0x01 => (hw::GpuRevision::A1, hw::GpuRevisionID::A1),
+            0x10 => (hw::GpuRevision::B0, hw::GpuRevisionID::B0),
+            0x11 => (hw::GpuRevision::B1, hw::GpuRevisionID::B1),
+            0x20 => (hw::GpuRevision::C0, hw::GpuRevisionID::C0),
+            0x21 => (hw::GpuRevision::C1, hw::GpuRevisionID::C1),
+            a => {
+                dev_err!(self.dev.as_ref(), "Unknown GPU revision {}\n", a);
+                return Err(ENODEV);
+            }
+        };
+
+        Ok(hw::GpuIdConfig {
+            gpu_gen: match (id_version >> 24) & 0xff {
+                4 => hw::GpuGen::G13,
+                5 => hw::GpuGen::G14,
+                6 => hw::GpuGen::G14, // G14X has a separate ID
+                a => {
+                    dev_err!(self.dev.as_ref(), "Unknown GPU generation {}\n", a);
+                    return Err(ENODEV);
+                }
+            },
+            gpu_variant: match (id_version >> 16) & 0xff {
+                1 => hw::GpuVariant::P, // Guess
+                2 => hw::GpuVariant::G,
+                3 => hw::GpuVariant::S,
+                4 => {
+                    if num_clusters > 4 {
+                        hw::GpuVariant::D
+                    } else {
+                        hw::GpuVariant::C
+                    }
+                }
+                a => {
+                    dev_err!(self.dev.as_ref(), "Unknown GPU variant {}\n", a);
+                    return Err(ENODEV);
+                }
+            },
+            gpu_rev,
+            gpu_rev_id,
+            num_clusters,
+            num_cores,
+            num_frags: num_cores, // Used to be id_counts_1[15:8] but does not work for G14X
+            num_gps: (id_counts_2 >> 16) & 0xff,
+            total_active_cores,
+            core_masks,
+            core_masks_packed,
+        })
+    }
+
+    /// Get the fault information from the MMU status register, if one occurred.
+    pub(crate) fn get_fault_info(&self, cfg: &'static hw::HwConfig) -> Option<FaultInfo> {
+        let g14x = cfg.gpu_core as u32 >= hw::GpuCore::G14S as u32;
+
+        let fault_info = if g14x {
+            self.sgx_read64::<FAULT_INFO_G14X>()
+        } else {
+            self.sgx_read64::<FAULT_INFO>()
+        };
+
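+        // Layout of the fault info word, as decoded below: bit 0 = valid, bits [3:1] = reason,
+        // bit 4 = read, bits [6:5] = unk_5, bits [8:7] = level, bits [16:9] = unit code,
+        // bits [22:17] = VM slot, bits [29:23] = sideband; pre-G14X parts keep the fault
+        // address (in 64-byte units) in the remaining top bits.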
+        if fault_info & 1 == 0 {
+            return None;
+        }
+
+        let fault_addr = if g14x {
+            self.sgx_read64::<FAULT_ADDR_G14X>()
+        } else {
+            fault_info >> 30
+        };
+
+        let unit_code = ((fault_info >> 9) & 0xff) as u8;
+        let unit = match unit_code {
+            0x00..=0x9f => match unit_code & 0xf {
+                0x0 => FaultUnit::DCMP(unit_code >> 4),
+                0x1 => FaultUnit::UL1C(unit_code >> 4),
+                0x2 => FaultUnit::CMP(unit_code >> 4),
+                0x3 => FaultUnit::GSL1(unit_code >> 4),
+                0x4 => FaultUnit::IAP(unit_code >> 4),
+                0x5 => FaultUnit::VCE(unit_code >> 4),
+                0x6 => FaultUnit::TE(unit_code >> 4),
+                0x7 => FaultUnit::RAS(unit_code >> 4),
+                0x8 => FaultUnit::VDM(unit_code >> 4),
+                0x9 => FaultUnit::PPP(unit_code >> 4),
+                0xa => FaultUnit::IPF(unit_code >> 4),
+                0xb => FaultUnit::IPF_CPF(unit_code >> 4),
+                0xc => FaultUnit::VF(unit_code >> 4),
+                0xd => FaultUnit::VF_CPF(unit_code >> 4),
+                0xe => FaultUnit::ZLS(unit_code >> 4),
+                _ => FaultUnit::Unknown(unit_code),
+            },
+            0xa1 => FaultUnit::dPM,
+            0xa2 => FaultUnit::dCDM_KS(0),
+            0xa3 => FaultUnit::dCDM_KS(1),
+            0xa4 => FaultUnit::dCDM_KS(2),
+            0xa5 => FaultUnit::dIPP,
+            0xa6 => FaultUnit::dIPP_CS,
+            0xa7 => FaultUnit::dVDM_CSD,
+            0xa8 => FaultUnit::dVDM_SSD,
+            0xa9 => FaultUnit::dVDM_ILF,
+            0xaa => FaultUnit::dVDM_ILD,
+            0xab => FaultUnit::dRDE(0),
+            0xac => FaultUnit::dRDE(1),
+            0xad => FaultUnit::FC,
+            0xae => FaultUnit::GSL2,
+            0xb0..=0xb7 => FaultUnit::GL2CC_META(unit_code & 0xf),
+            0xb8 => FaultUnit::GL2CC_MB,
+            0xd0..=0xdf if g14x => match unit_code & 0xf {
+                0x0 => FaultUnit::gCDM_CS,
+                0x1 => FaultUnit::gCDM_ID,
+                0x2 => FaultUnit::gCDM_CSR,
+                0x3 => FaultUnit::gCDM_CSW,
+                0x4 => FaultUnit::gCDM_CTXR,
+                0x5 => FaultUnit::gCDM_CTXW,
+                0x6 => FaultUnit::gIPP,
+                0x7 => FaultUnit::gIPP_CS,
+                0x8 => FaultUnit::gKSM_RCE,
+                _ => FaultUnit::Unknown(unit_code),
+            },
+            0xe0..=0xff if g14x => match unit_code & 0xf {
+                0x0 => FaultUnit::gPM_SP((unit_code >> 4) & 1),
+                0x1 => FaultUnit::gVDM_CSD_SP((unit_code >> 4) & 1),
+                0x2 => FaultUnit::gVDM_SSD_SP((unit_code >> 4) & 1),
+                0x3 => FaultUnit::gVDM_ILF_SP((unit_code >> 4) & 1),
+                0x4 => FaultUnit::gVDM_TFP_SP((unit_code >> 4) & 1),
+                0x5 => FaultUnit::gVDM_MMB_SP((unit_code >> 4) & 1),
+                0x6 => FaultUnit::gRDE0_SP((unit_code >> 4) & 1),
+                _ => FaultUnit::Unknown(unit_code),
+            },
+            0xe0..=0xff if !g14x => match unit_code & 0xf {
+                0x0 => FaultUnit::gPM_SP((unit_code >> 4) & 1),
+                0x1 => FaultUnit::gVDM_CSD_SP((unit_code >> 4) & 1),
+                0x2 => FaultUnit::gVDM_SSD_SP((unit_code >> 4) & 1),
+                0x3 => FaultUnit::gVDM_ILF_SP((unit_code >> 4) & 1),
+                0x4 => FaultUnit::gVDM_TFP_SP((unit_code >> 4) & 1),
+                0x5 => FaultUnit::gVDM_MMB_SP((unit_code >> 4) & 1),
+                0x6 => FaultUnit::gCDM_CS_KS0_SP((unit_code >> 4) & 1),
+                0x7 => FaultUnit::gCDM_CS_KS1_SP((unit_code >> 4) & 1),
+                0x8 => FaultUnit::gCDM_CS_KS2_SP((unit_code >> 4) & 1),
+                0x9 => FaultUnit::gCDM_KS0_SP((unit_code >> 4) & 1),
+                0xa => FaultUnit::gCDM_KS1_SP((unit_code >> 4) & 1),
+                0xb => FaultUnit::gCDM_KS2_SP((unit_code >> 4) & 1),
+                0xc => FaultUnit::gIPP_SP((unit_code >> 4) & 1),
+                0xd => FaultUnit::gIPP_CS_SP((unit_code >> 4) & 1),
+                0xe => FaultUnit::gRDE0_SP((unit_code >> 4) & 1),
+                0xf => FaultUnit::gRDE1_SP((unit_code >> 4) & 1),
+                _ => FaultUnit::Unknown(unit_code),
+            },
+            _ => FaultUnit::Unknown(unit_code),
+        };
+
+        let reason = match (fault_info >> 1) & 0x7 {
+            0 => FaultReason::Unmapped,
+            1 => FaultReason::AfFault,
+            2 => FaultReason::WriteOnly,
+            3 => FaultReason::ReadOnly,
+            4 => FaultReason::NoAccess,
+            a => FaultReason::Unknown(a as u8),
+        };
+
+        Some(FaultInfo {
+            address: fault_addr << 6,
+            sideband: ((fault_info >> 23) & 0x7f) as u8,
+            vm_slot: ((fault_info >> 17) & 0x3f) as u32,
+            unit_code,
+            unit,
+            level: ((fault_info >> 7) & 3) as u8,
+            unk_5: ((fault_info >> 5) & 3) as u8,
+            read: (fault_info & (1 << 4)) != 0,
+            reason,
+        })
+    }
+}
diff --git a/drivers/gpu/drm/asahi/slotalloc.rs b/drivers/gpu/drm/asahi/slotalloc.rs
new file mode 100644
index 00000000000000..c6b57d4e1680fc
--- /dev/null
+++ b/drivers/gpu/drm/asahi/slotalloc.rs
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Generic slot allocator
+//!
+//! This is a simple allocator to manage fixed-size pools of GPU resources that are transiently
+//! required during command execution. Each item resides in a "slot" at a given index. Users borrow
+//! and return free items from the available pool.
+//!
+//! Allocations are "sticky", and return a token that callers can use to request the same slot
+//! again later. This allows slots to be lazily invalidated, so that multiple uses by the same user
+//! avoid any actual cleanup work.
+//!
+//! The allocation policy is currently a simple LRU mechanism, doing a full linear scan over the
+//! slots when no token was previously provided. This is probably good enough, since in the absence
+//! of serious system contention most allocation requests will be immediately fulfilled from the
+//! previous slot without doing an LRU scan.
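+//!
+//! A minimal usage sketch (the `MySlot` type, the slot count and the lock-class keys are
+//! illustrative assumptions; real users implement [`SlotItem`] for their own per-slot state):
+//!
+//! ```ignore
+//! struct MySlot;
+//! impl SlotItem for MySlot {
+//!     type Data = ();
+//! }
+//!
+//! // Eight slots, no shared allocator data, trivial constructor.
+//! let alloc: SlotAllocator<MySlot> =
+//!     SlotAllocator::new(8, (), |_data, _idx| Some(MySlot), c_str!("myslots"), key1, key2)?;
+//!
+//! let guard = alloc.get(None)?;        // blocks if all slots are busy
+//! let token = guard.token();           // remember which slot we got
+//! drop(guard);                         // return the item to the pool
+//!
+//! let guard = alloc.get(Some(token))?; // likely reuses the same slot
+//! if !guard.changed() {
+//!     // Previous slot contents are still valid; per-slot setup can be skipped.
+//! }
+//! ```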
+
+use core::num::NonZeroUsize;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+    error::{code::*, Result},
+    prelude::*,
+    str::CStr,
+    sync::{Arc, CondVar, LockClassKey, Mutex},
+};
+
+/// Trait representing a single item within a slot.
+pub(crate) trait SlotItem {
+    /// Arbitrary user data associated with the SlotAllocator.
+    type Data;
+
+    /// Called eagerly when this item is released back into the available pool.
+    fn release(&mut self, _data: &mut Self::Data, _slot: u32) {}
+}
+
+/// Trivial implementation for users that require neither per-slot data nor allocator data.
+impl SlotItem for () {
+    type Data = ();
+}
+
+/// Represents a current or previous allocation of an item from a slot. Users keep `SlotToken`s
+/// around across allocations to request that, if possible, the same slot be reused.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct SlotToken {
+    time: u64,
+    slot: u32,
+}
+
+impl SlotToken {
+    /// Returns the slot index that this token represents a past assignment to.
+    pub(crate) fn last_slot(&self) -> u32 {
+        self.slot
+    }
+}
+
+/// A guard representing active ownership of a slot.
+pub(crate) struct Guard<T: SlotItem> {
+    item: Option<T>,
+    changed: bool,
+    token: SlotToken,
+    alloc: Arc<SlotAllocatorOuter<T>>,
+}
+
+impl<T: SlotItem> Guard<T> {
+    /// Returns the active slot owned by this `Guard`.
+    pub(crate) fn slot(&self) -> u32 {
+        self.token.slot
+    }
+
+    /// Returns `true` if the slot changed since the last allocation (or no `SlotToken` was
+    /// provided), or `false` if the previously allocated slot was successfully re-acquired with
+    /// no other users in the interim.
+    pub(crate) fn changed(&self) -> bool {
+        self.changed
+    }
+
+    /// Returns a `SlotToken` that can be used to re-request the same slot at a later time, after
+    /// this `Guard` is dropped.
+    pub(crate) fn token(&self) -> SlotToken {
+        self.token
+    }
+}
+
+impl<T: SlotItem> Deref for Guard<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        self.item.as_ref().expect("SlotItem Guard lost our item!")
+    }
+}
+
+impl<T: SlotItem> DerefMut for Guard<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.item.as_mut().expect("SlotItem Guard lost our item!")
+    }
+}
+
+/// A slot item that is currently free.
+struct Entry<T: SlotItem> {
+    item: T,
+    get_time: u64,
+    drop_time: u64,
+}
+
+/// Inner data for the `SlotAllocator`, protected by a `Mutex`.
+struct SlotAllocatorInner<T: SlotItem> {
+    data: T::Data,
+    slots: KVec<Option<Entry<T>>>,
+    get_count: u64,
+    drop_count: u64,
+    slot_limit: usize,
+}
+
+/// A single slot allocator instance.
+#[pin_data]
+struct SlotAllocatorOuter<T: SlotItem> {
+    #[pin]
+    inner: Mutex<SlotAllocatorInner<T>>,
+    #[pin]
+    cond: CondVar,
+}
+
+/// A shared reference to a slot allocator instance.
+pub(crate) struct SlotAllocator<T: SlotItem>(Arc<SlotAllocatorOuter<T>>);
+
+impl<T: SlotItem> SlotAllocator<T> {
+    /// Creates a new `SlotAllocator`, with a fixed number of slots and arbitrary associated data.
+    ///
+    /// The caller provides a constructor callback, which takes a mutable reference to the
+    /// `T::Data` and a slot index and creates the item for that slot (or `None` to leave it
+    /// permanently unused). It is called once per slot during construction, and the resulting
+    /// items live for the lifetime of the `SlotAllocator`.
+    pub(crate) fn new(
+        num_slots: u32,
+        mut data: T::Data,
+        mut constructor: impl FnMut(&mut T::Data, u32) -> Option<T>,
+        name: &'static CStr,
+        lock_key1: LockClassKey,
+        lock_key2: LockClassKey,
+    ) -> Result<SlotAllocator<T>> {
+        let mut slots = KVec::with_capacity(num_slots as usize, GFP_KERNEL)?;
+
+        for i in 0..num_slots {
+            slots
+                .push(
+                    constructor(&mut data, i).map(|item| Entry {
+                        item,
+                        get_time: 0,
+                        drop_time: 0,
+                    }),
+                    GFP_KERNEL,
+                )
+                .expect("try_push() failed after reservation");
+        }
+
+        let inner = SlotAllocatorInner {
+            data,
+            slots,
+            get_count: 0,
+            drop_count: 0,
+            slot_limit: usize::MAX,
+        };
+
+        let alloc = Arc::pin_init(
+            pin_init!(SlotAllocatorOuter {
+                inner <- Mutex::new_with_key(inner, name, lock_key1),
+                cond <- CondVar::new(name, lock_key2),
+            }),
+            GFP_KERNEL,
+        )?;
+
+        Ok(SlotAllocator(alloc))
+    }
+
+    /// Calls a callback on the inner data associated with this allocator, taking the lock.
+    pub(crate) fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut T::Data) -> RetVal) -> RetVal {
+        let mut inner = self.0.inner.lock();
+        cb(&mut inner.data)
+    }
+
+    /// Set the slot limit for this allocator. New allocations will not use slots at or above
+    /// this threshold.
+    pub(crate) fn set_limit(&self, limit: Option<NonZeroUsize>) {
+        let mut inner = self.0.inner.lock();
+        inner.slot_limit = limit.unwrap_or(NonZeroUsize::MAX).get();
+    }
+
+    /// Gets a fresh slot, optionally reusing a previous allocation if a `SlotToken` is provided.
+    ///
+    /// Blocks if no slots are free.
+    pub(crate) fn get(&self, token: Option<SlotToken>) -> Result<Guard<T>> {
+        self.get_inner(token, |_a, _b| Ok(()))
+    }
+
+    /// Gets a fresh slot, optionally reusing a previous allocation if a `SlotToken` is provided.
+    ///
+    /// Blocks if no slots are free.
+    ///
+    /// This version additionally takes a callback, invoked before the allocator lock is released,
+    /// that receives a mutable reference to the allocator's user data and to the freshly acquired
+    /// slot guard. This can be used to perform bookkeeping associated with specific slots (such
+    /// as tracking their current owner).
+    pub(crate) fn get_inner(
+        &self,
+        token: Option<SlotToken>,
+        cb: impl FnOnce(&mut T::Data, &mut Guard<T>) -> Result<()>,
+    ) -> Result<Guard<T>> {
+        let mut inner = self.0.inner.lock();
+
+        if let Some(token) = token {
+            if (token.slot as usize) < inner.slot_limit {
+                let slot = &mut inner.slots[token.slot as usize];
+                if slot.is_some() {
+                    let count = slot.as_ref().unwrap().get_time;
+                    if count == token.time {
+                        let mut guard = Guard {
+                            item: Some(slot.take().unwrap().item),
+                            token,
+                            changed: false,
+                            alloc: self.0.clone(),
+                        };
+                        cb(&mut inner.data, &mut guard)?;
+                        return Ok(guard);
+                    }
+                }
+            }
+        }
+
+        let mut first = true;
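+        // No token matched: scan for the free slot with the oldest drop time (LRU), waiting on
+        // the condvar whenever every eligible slot is currently handed out.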
+        let slot = loop {
+            let mut oldest_time = u64::MAX;
+            let mut oldest_slot = 0u32;
+
+            for (i, slot) in inner.slots.iter().enumerate() {
+                if i >= inner.slot_limit {
+                    break;
+                }
+                if let Some(slot) = slot.as_ref() {
+                    if slot.drop_time < oldest_time {
+                        oldest_slot = i as u32;
+                        oldest_time = slot.drop_time;
+                    }
+                }
+            }
+
+            if oldest_time == u64::MAX {
+                if first && inner.slot_limit == usize::MAX {
+                    pr_warn!(
+                        "{}: out of slots, blocking\n",
+                        core::any::type_name::<Self>()
+                    );
+                }
+                first = false;
+                if self.0.cond.wait_interruptible(&mut inner) {
+                    return Err(ERESTARTSYS);
+                }
+            } else {
+                break oldest_slot;
+            }
+        };
+
+        inner.get_count += 1;
+
+        let item = inner.slots[slot as usize]
+            .take()
+            .expect("Someone stole our slot?")
+            .item;
+
+        let mut guard = Guard {
+            item: Some(item),
+            changed: true,
+            token: SlotToken {
+                time: inner.get_count,
+                slot,
+            },
+            alloc: self.0.clone(),
+        };
+
+        cb(&mut inner.data, &mut guard)?;
+        Ok(guard)
+    }
+}
+
+impl<T: SlotItem> Clone for SlotAllocator<T> {
+    fn clone(&self) -> Self {
+        SlotAllocator(self.0.clone())
+    }
+}
+
+impl<T: SlotItem> Drop for Guard<T> {
+    fn drop(&mut self) {
+        let mut inner = self.alloc.inner.lock();
+        if inner.slots[self.token.slot as usize].is_some() {
+            pr_crit!(
+                "{}: tried to return an item into a full slot ({})\n",
+                core::any::type_name::<Self>(),
+                self.token.slot
+            );
+        } else {
+            inner.drop_count += 1;
+            let mut item = self.item.take().expect("Guard lost its item");
+            item.release(&mut inner.data, self.token.slot);
+            inner.slots[self.token.slot as usize] = Some(Entry {
+                item,
+                get_time: self.token.time,
+                drop_time: inner.drop_count,
+            });
+            self.alloc.cond.notify_one();
+        }
+    }
+}
diff --git a/drivers/gpu/drm/asahi/util.rs b/drivers/gpu/drm/asahi/util.rs
new file mode 100644
index 00000000000000..499cfe96bf94b7
--- /dev/null
+++ b/drivers/gpu/drm/asahi/util.rs
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Miscellaneous utility functions
+
+use core::ops::{Add, BitAnd, Div, Not, Sub};
+use kernel::prelude::*;
+
+/// Aligns a value up to the given alignment, which must be a power of two.
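+///
+/// A quick illustrative example:
+///
+/// ```ignore
+/// assert_eq!(align(0x1234u32, 0x1000), 0x2000);
+/// assert_eq!(align(0x1000u32, 0x1000), 0x1000);
+/// ```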
+pub(crate) fn align<T>(a: T, b: T) -> T
+where
+    T: Copy
+        + Default
+        + BitAnd<Output = T>
+        + Not<Output = T>
+        + Add<Output = T>
+        + Sub<Output = T>
+        + Div<Output = T>
+        + core::cmp::PartialEq,
+{
+    let def: T = Default::default();
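+    // Compute the value 1 generically: `!0 / !0 == 1` for any integer type.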
+    #[allow(clippy::eq_op)]
+    let one: T = !def / !def;
+
+    assert!((b & (b - one)) == def);
+
+    (a + b - one) & !(b - one)
+}
+
+/// Aligns a value down to the given alignment, which must be a power of two.
+pub(crate) fn align_down<T>(a: T, b: T) -> T
+where
+    T: Copy
+        + Default
+        + BitAnd<Output = T>
+        + Not<Output = T>
+        + Sub<Output = T>
+        + Div<Output = T>
+        + core::cmp::PartialEq,
+{
+    let def: T = Default::default();
+    #[allow(clippy::eq_op)]
+    let one: T = !def / !def;
+
+    assert!((b & (b - one)) == def);
+
+    a & !(b - one)
+}
+
+pub(crate) trait RangeExt<T> {
+    fn overlaps(&self, other: Self) -> bool;
+    fn is_superset(&self, other: Self) -> bool;
+    // fn len(&self) -> usize;
+    fn range(&self) -> T;
+}
+
+impl<T: PartialOrd<T> + Default + Copy + Sub<Output = T>> RangeExt<T> for core::ops::Range<T>
+where
+    usize: core::convert::TryFrom<T>,
+    <usize as core::convert::TryFrom<T>>::Error: core::fmt::Debug,
+{
+    fn overlaps(&self, other: Self) -> bool {
+        !(self.is_empty() || other.is_empty() || self.end <= other.start || other.end <= self.start)
+    }
+    fn is_superset(&self, other: Self) -> bool {
+        !self.is_empty()
+            && (other.is_empty() || (other.start >= self.start && other.end <= self.end))
+    }
+    fn range(&self) -> T {
+        if self.is_empty() {
+            Default::default()
+        } else {
+            self.end - self.start
+        }
+    }
+    // fn len(&self) -> usize {
+    //     self.range().try_into().unwrap()
+    // }
+}
+
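+/// Computes the greatest common divisor of two integers using the Euclidean algorithm
+/// (for example, `gcd(12, 18) == 6` and `gcd(0, n) == n`).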
+pub(crate) fn gcd(in_n: u64, in_m: u64) -> u64 {
+    let mut n = in_n;
+    let mut m = in_m;
+
+    while n != 0 {
+        let remainder = m % n;
+        m = n;
+        n = remainder;
+    }
+
+    m
+}
+
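+/// Marker for types that can be safely materialized from arbitrary bytes.
+///
+/// # Safety
+///
+/// Implementers must guarantee that every bit pattern is a valid value of the type, since
+/// [`Reader`] constructs values by copying raw buffer bytes over a default-initialized object.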
+pub(crate) unsafe trait AnyBitPattern: Default + Sized + Copy + 'static {}
+
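+/// Sequential reader over a byte buffer, used to decode plain-old-data structures.
+///
+/// A minimal usage sketch (the `Header` type and `buffer` slice are illustrative assumptions):
+///
+/// ```ignore
+/// #[derive(Default, Clone, Copy)]
+/// #[repr(C)]
+/// struct Header {
+///     magic: u32,
+///     len: u32,
+/// }
+/// // SAFETY: Any combination of bits is a valid pair of `u32`s.
+/// unsafe impl AnyBitPattern for Header {}
+///
+/// let mut r = Reader::new(buffer);
+/// let hdr: Header = r.read()?;  // consumes size_of::<Header>() bytes
+/// r.skip(hdr.len as usize);     // advance past the payload
+/// ```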
+pub(crate) struct Reader<'a> {
+    buffer: &'a [u8],
+    offset: usize,
+}
+
+impl<'a> Reader<'a> {
+    pub(crate) fn new(buffer: &'a [u8]) -> Self {
+        Reader { buffer, offset: 0 }
+    }
+
+    pub(crate) fn read_up_to<T: AnyBitPattern>(&mut self, max_size: usize) -> Result<T> {
+        let mut obj: T = Default::default();
+        let size: usize = core::mem::size_of::<T>().min(max_size);
+        let range = self.offset..self.offset + size;
+        let src = self.buffer.get(range).ok_or(EINVAL)?;
+
+        // SAFETY: The output pointer is valid, and the size does not exceed
+        // the type size, and all bit patterns are valid.
+        let dst = unsafe { core::slice::from_raw_parts_mut(&mut obj as *mut _ as *mut u8, size) };
+
+        dst.copy_from_slice(src);
+        self.offset += size;
+        Ok(obj)
+    }
+
+    pub(crate) fn read<T: Default + AnyBitPattern>(&mut self) -> Result<T> {
+        self.read_up_to(!0)
+    }
+
+    pub(crate) fn is_empty(&self) -> bool {
+        self.offset >= self.buffer.len()
+    }
+
+    pub(crate) fn skip(&mut self, size: usize) {
+        self.offset += size
+    }
+
+    pub(crate) fn rewind(&mut self) {
+        self.offset = 0
+    }
+}
diff --git a/drivers/gpu/drm/asahi/workqueue.rs b/drivers/gpu/drm/asahi/workqueue.rs
new file mode 100644
index 00000000000000..47b757bb945625
--- /dev/null
+++ b/drivers/gpu/drm/asahi/workqueue.rs
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! GPU command execution queues
+//!
+//! The AGX GPU firmware schedules GPU work commands out of work queues, which are ring buffers of
+//! pointers to work commands. There can be an arbitrary number of work queues. Work queues have an
+//! associated type (vertex, fragment, or compute) and may only contain generic commands or commands
+//! specific to that type.
+//!
+//! This module manages queueing work commands into a work queue and submitting them for execution
+//! by the firmware. An active work queue needs an event to signal completion of its work, which is
+//! owned by what we call a batch. This event then notifies the work queue when work is completed,
+//! and that triggers freeing of all resources associated with that work. An idle work queue gives
+//! up its associated event.
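+//!
+//! The rough submission flow, as a sketch (the queue, command, fence and channel values are
+//! assumed to come from the surrounding queue/channel code):
+//!
+//! ```ignore
+//! let mut job = wq.new_job(fence)?;         // acquires or refreshes the queue's event
+//! job.add_cb(command, vm_slot, |_err| {})?; // queue a command with a completion callback
+//! job.commit()?;                            // reserve event values and fix ordering
+//! let submission = job.submit()?;           // write the commands into the ring buffer
+//! submission.run(&mut pipe_channel);        // tell the firmware to process the queue
+//! ```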
+
+use crate::debug::*;
+use crate::driver::AsahiDriver;
+use crate::fw::channels::{ChannelErrorType, PipeType};
+use crate::fw::types::*;
+use crate::fw::workqueue::*;
+use crate::no_debug;
+use crate::object::OpaqueGpuObject;
+use crate::{channel, driver, event, fw, gpu, regs};
+use core::any::Any;
+use core::num::NonZeroU64;
+use core::sync::atomic::Ordering;
+use kernel::{
+    c_str, dma_fence,
+    error::code::*,
+    prelude::*,
+    sync::{
+        lock::{mutex::MutexBackend, Guard},
+        Arc, Mutex,
+    },
+    types::ForeignOwnable,
+    workqueue::{self, impl_has_work, new_work, Work, WorkItem},
+};
+
+pub(crate) trait OpaqueCommandObject: OpaqueGpuObject {}
+
+impl<T: GpuStruct + Sync + Send> OpaqueCommandObject for GpuObject<T> where T: Command {}
+
+const DEBUG_CLASS: DebugFlags = DebugFlags::WorkQueue;
+
+const MAX_JOB_SLOTS: u32 = 127;
+
+/// An enum of possible errors that might cause a piece of work to fail execution.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub(crate) enum WorkError {
+    /// GPU timeout (command execution took too long).
+    Timeout,
+    /// GPU MMU fault (invalid access).
+    Fault(regs::FaultInfo),
+    /// Work failed due to an error caused by other concurrent GPU work.
+    Killed,
+    /// Channel error
+    ChannelError(ChannelErrorType),
+    /// The GPU crashed.
+    NoDevice,
+    /// Unknown reason.
+    Unknown,
+}
+
+impl From<WorkError> for kernel::error::Error {
+    fn from(err: WorkError) -> Self {
+        match err {
+            WorkError::Timeout => ETIMEDOUT,
+            // Not EFAULT because that's for userspace faults
+            WorkError::Fault(_) => EIO,
+            WorkError::Unknown => ENODATA,
+            WorkError::Killed => ECANCELED,
+            WorkError::NoDevice => ENODEV,
+            WorkError::ChannelError(_) => EIO,
+        }
+    }
+}
+
+/// A GPU context tracking structure, which must be explicitly invalidated when dropped.
+pub(crate) struct GpuContext {
+    dev: driver::AsahiDevRef,
+    data: Option<KBox<GpuObject<fw::workqueue::GpuContextData>>>,
+}
+no_debug!(GpuContext);
+
+impl GpuContext {
+    /// Allocate a new GPU context.
+    pub(crate) fn new(
+        dev: &driver::AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+        buffer: Arc<dyn core::any::Any + Send + Sync>,
+    ) -> Result<GpuContext> {
+        Ok(GpuContext {
+            dev: dev.into(),
+            data: Some(KBox::new(
+                alloc.shared.new_object(
+                    fw::workqueue::GpuContextData { _buffer: buffer },
+                    |_inner| Default::default(),
+                )?,
+                GFP_KERNEL,
+            )?),
+        })
+    }
+
+    /// Returns the GPU pointer to the inner GPU context data structure.
+    pub(crate) fn gpu_pointer(&self) -> GpuPointer<'_, fw::workqueue::GpuContextData> {
+        self.data.as_ref().unwrap().gpu_pointer()
+    }
+}
+
+impl Drop for GpuContext {
+    fn drop(&mut self) {
+        mod_dev_dbg!(self.dev, "GpuContext: Freeing GPU context\n");
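+        // SAFETY: The driver data for this device is always a `KBox<AsahiDriver>`, set when the
+        // driver is bound, so borrowing it here is sound.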
+        let dev_data =
+            unsafe { &<KBox<AsahiDriver>>::borrow(self.dev.as_ref().get_drvdata()).data };
+        let data = self.data.take().unwrap();
+        dev_data.gpu.free_context(data);
+    }
+}
+
+struct SubmittedWork<O, C>
+where
+    O: OpaqueCommandObject,
+    C: FnOnce(Option<WorkError>) + Send + Sync + 'static,
+{
+    object: O,
+    value: EventValue,
+    error: Option<WorkError>,
+    wptr: u32,
+    vm_slot: u32,
+    callback: Option<C>,
+    fence: dma_fence::Fence,
+}
+
+pub(crate) trait GenSubmittedWork: Send + Sync {
+    fn gpu_va(&self) -> NonZeroU64;
+    fn value(&self) -> event::EventValue;
+    fn wptr(&self) -> u32;
+    fn set_wptr(&mut self, wptr: u32);
+    fn mark_error(&mut self, error: WorkError);
+    fn complete(&mut self);
+    fn get_fence(&self) -> dma_fence::Fence;
+}
+
+#[pin_data]
+struct SubmittedWorkContainer {
+    #[pin]
+    work: Work<Self>,
+    inner: KBox<dyn GenSubmittedWork>,
+}
+
+impl_has_work! {
+    impl HasWork<Self> for SubmittedWorkContainer { self.work }
+}
+
+impl WorkItem for SubmittedWorkContainer {
+    type Pointer = Pin<KBox<SubmittedWorkContainer>>;
+
+    fn run(this: Pin<KBox<SubmittedWorkContainer>>) {
+        mod_pr_debug!("WorkQueue: Freeing command @ {:?}\n", this.inner.gpu_va());
+    }
+}
+
+impl SubmittedWorkContainer {
+    fn inner_mut(self: Pin<&mut Self>) -> &mut KBox<dyn GenSubmittedWork> {
+        // SAFETY: inner does not require structural pinning.
+        unsafe { &mut self.get_unchecked_mut().inner }
+    }
+}
+
+impl<O: OpaqueCommandObject, C: FnOnce(Option<WorkError>) + Send + Sync> GenSubmittedWork
+    for SubmittedWork<O, C>
+{
+    fn gpu_va(&self) -> NonZeroU64 {
+        self.object.gpu_va()
+    }
+
+    fn value(&self) -> event::EventValue {
+        self.value
+    }
+
+    fn wptr(&self) -> u32 {
+        self.wptr
+    }
+
+    fn set_wptr(&mut self, wptr: u32) {
+        self.wptr = wptr;
+    }
+
+    fn complete(&mut self) {
+        if let Some(cb) = self.callback.take() {
+            cb(self.error);
+        }
+    }
+
+    fn mark_error(&mut self, error: WorkError) {
+        mod_pr_debug!("WorkQueue: Command at value {:#x?} failed\n", self.value);
+        self.error = Some(match error {
+            WorkError::Fault(info) if info.vm_slot != self.vm_slot => WorkError::Killed,
+            err => err,
+        });
+    }
+
+    fn get_fence(&self) -> dma_fence::Fence {
+        self.fence.clone()
+    }
+}
+
+/// Inner data for managing a single work queue.
+#[versions(AGX)]
+struct WorkQueueInner {
+    dev: driver::AsahiDevRef,
+    event_manager: Arc<event::EventManager>,
+    info: GpuObject<QueueInfo::ver>,
+    new: bool,
+    pipe_type: PipeType,
+    size: u32,
+    wptr: u32,
+    pending: KVec<Pin<KBox<SubmittedWorkContainer>>>,
+    last_completed_work: Option<Pin<KBox<SubmittedWorkContainer>>>,
+    last_token: Option<event::Token>,
+    pending_jobs: usize,
+    last_submitted: Option<event::EventValue>,
+    last_completed: Option<event::EventValue>,
+    event: Option<(event::Event, event::EventValue)>,
+    priority: u32,
+    commit_seq: u64,
+    submit_seq: u64,
+    event_seq: u64,
+}
+
+/// An instance of a work queue.
+#[versions(AGX)]
+#[pin_data]
+pub(crate) struct WorkQueue {
+    info_pointer: GpuWeakPointer<QueueInfo::ver>,
+    #[pin]
+    inner: Mutex<WorkQueueInner::ver>,
+}
+
+#[versions(AGX)]
+impl WorkQueueInner::ver {
+    /// Return the GPU done pointer, i.e. the ring index up to which the GPU has completed work
+    /// items.
+    fn doneptr(&self) -> u32 {
+        self.info
+            .state
+            .with(|raw, _inner| raw.gpu_doneptr.load(Ordering::Acquire))
+    }
+}
+
+#[versions(AGX)]
+#[derive(Copy, Clone)]
+pub(crate) struct QueueEventInfo {
+    pub(crate) stamp_pointer: GpuWeakPointer<Stamp>,
+    pub(crate) fw_stamp_pointer: GpuWeakPointer<FwStamp>,
+    pub(crate) slot: u32,
+    pub(crate) value: event::EventValue,
+    pub(crate) cmd_seq: u64,
+    pub(crate) event_seq: u64,
+    pub(crate) info_ptr: GpuWeakPointer<QueueInfo::ver>,
+}
+
+#[versions(AGX)]
+pub(crate) struct Job {
+    wq: Arc<WorkQueue::ver>,
+    event_info: QueueEventInfo::ver,
+    start_value: EventValue,
+    pending: KVec<Pin<KBox<SubmittedWorkContainer>>>,
+    committed: bool,
+    submitted: bool,
+    event_count: usize,
+    fence: dma_fence::Fence,
+}
+
+#[versions(AGX)]
+pub(crate) struct JobSubmission<'a> {
+    inner: Option<Guard<'a, WorkQueueInner::ver, MutexBackend>>,
+    wptr: u32,
+    event_count: usize,
+    command_count: usize,
+}
+
+#[versions(AGX)]
+impl Job::ver {
+    pub(crate) fn event_info(&self) -> QueueEventInfo::ver {
+        let mut info = self.event_info;
+        info.cmd_seq += self.pending.len() as u64;
+        info.event_seq += self.event_count as u64;
+
+        info
+    }
+
+    pub(crate) fn next_seq(&mut self) {
+        self.event_count += 1;
+        self.event_info.value.increment();
+    }
+
+    pub(crate) fn add<O: OpaqueCommandObject + 'static>(
+        &mut self,
+        command: O,
+        vm_slot: u32,
+    ) -> Result {
+        self.add_cb(command, vm_slot, |_| {})
+    }
+
+    pub(crate) fn add_cb<O: OpaqueCommandObject + 'static>(
+        &mut self,
+        command: O,
+        vm_slot: u32,
+        callback: impl FnOnce(Option<WorkError>) + Sync + Send + 'static,
+    ) -> Result {
+        if self.committed {
+            pr_err!("WorkQueue: Tried to mutate committed Job\n");
+            return Err(EINVAL);
+        }
+
+        let fence = self.fence.clone();
+        let value = self.event_info.value.next();
+
+        self.pending.push(
+            KBox::try_pin_init(
+                try_pin_init!(SubmittedWorkContainer {
+                    work <- new_work!("SubmittedWorkContainer::work"),
+                    inner: KBox::new(SubmittedWork::<_, _> {
+                        object: command,
+                        value,
+                        error: None,
+                        callback: Some(callback),
+                        wptr: 0,
+                        vm_slot,
+                        fence,
+                    }, GFP_KERNEL)?
+                }),
+                GFP_KERNEL,
+            )?,
+            GFP_KERNEL,
+        )?;
+
+        Ok(())
+    }
+
+    pub(crate) fn commit(&mut self) -> Result {
+        if self.committed {
+            pr_err!("WorkQueue: Tried to commit committed Job\n");
+            return Err(EINVAL);
+        }
+
+        if self.pending.is_empty() {
+            pr_err!("WorkQueue: Job::commit() with no commands\n");
+            return Err(EINVAL);
+        }
+
+        let mut inner = self.wq.inner.lock();
+
+        let ev = inner.event.as_mut().expect("WorkQueue: Job lost its event");
+
+        if ev.1 != self.start_value {
+            pr_err!(
+                "WorkQueue: Job::commit() out of order (event slot {} {:?} != {:?}\n",
+                ev.0.slot(),
+                ev.1,
+                self.start_value
+            );
+            return Err(EINVAL);
+        }
+
+        ev.1 = self.event_info.value;
+        inner.commit_seq += self.pending.len() as u64;
+        inner.event_seq += self.event_count as u64;
+        self.committed = true;
+
+        Ok(())
+    }
+
+    pub(crate) fn can_submit(&self) -> Option<dma_fence::Fence> {
+        let inner = self.wq.inner.lock();
+        if inner.free_slots() > self.event_count && inner.free_space() > self.pending.len() {
+            None
+        } else if let Some(work) = inner.pending.first() {
+            Some(work.inner.get_fence())
+        } else {
+            pr_err!(
+                "WorkQueue: Cannot submit, but queue is empty? {} > {}, {} > {} (pend={} ls={:#x?} lc={:#x?}) ev={:#x?} cur={:#x?} slot {:?}\n",
+                inner.free_slots(),
+                self.event_count,
+                inner.free_space(),
+                self.pending.len(),
+                inner.pending.len(),
+                inner.last_submitted,
+                inner.last_completed,
+                inner.event.as_ref().map(|a| a.1),
+                inner.event.as_ref().map(|a| a.0.current()),
+                inner.event.as_ref().map(|a| a.0.slot()),
+            );
+            None
+        }
+    }
+
+    pub(crate) fn submit(&mut self) -> Result<JobSubmission::ver<'_>> {
+        if !self.committed {
+            pr_err!("WorkQueue: Tried to submit uncommitted Job\n");
+            return Err(EINVAL);
+        }
+
+        if self.submitted {
+            pr_err!("WorkQueue: Tried to submit Job twice\n");
+            return Err(EINVAL);
+        }
+
+        if self.pending.is_empty() {
+            pr_err!("WorkQueue: Job::submit() with no commands\n");
+            return Err(EINVAL);
+        }
+
+        let mut inner = self.wq.inner.lock();
+
+        if inner.submit_seq != self.event_info.cmd_seq {
+            pr_err!(
+                "WorkQueue: Job::submit() out of order (submit_seq {} != {})\n",
+                inner.submit_seq,
+                self.event_info.cmd_seq
+            );
+            return Err(EINVAL);
+        }
+
+        if inner.commit_seq < (self.event_info.cmd_seq + self.pending.len() as u64) {
+            pr_err!(
+                "WorkQueue: Job::submit() out of order (commit_seq {} != {})\n",
+                inner.commit_seq,
+                (self.event_info.cmd_seq + self.pending.len() as u64)
+            );
+            return Err(EINVAL);
+        }
+
+        let mut wptr = inner.wptr;
+        let command_count = self.pending.len();
+
+        if inner.free_space() <= command_count {
+            pr_err!("WorkQueue: Job does not fit in ring buffer\n");
+            return Err(EBUSY);
+        }
+
+        inner.pending.reserve(command_count, GFP_KERNEL)?;
+
+        inner.last_submitted = Some(self.event_info.value);
+        mod_dev_dbg!(
+            inner.dev,
+            "WorkQueue: submitting {} cmds at {:#x?}, lc {:#x?}, cur {:#x?}, pending {}, events {}\n",
+            self.pending.len(),
+            inner.last_submitted,
+            inner.last_completed,
+            inner.event.as_ref().map(|a| a.0.current()),
+            inner.pending.len(),
+            self.event_count,
+        );
+
+        for mut command in self.pending.drain(..) {
+            command.as_mut().inner_mut().set_wptr(wptr);
+
+            let next_wptr = (wptr + 1) % inner.size;
+            assert!(inner.doneptr() != next_wptr);
+            inner.info.ring[wptr as usize] = command.inner.gpu_va().get();
+            wptr = next_wptr;
+
+            // Cannot fail, since we did a reserve(1) above
+            inner
+                .pending
+                .push(command, GFP_KERNEL)
+                .expect("push() failed after reserve()");
+        }
+
+        self.submitted = true;
+
+        Ok(JobSubmission::ver {
+            inner: Some(inner),
+            wptr,
+            command_count,
+            event_count: self.event_count,
+        })
+    }
+}
+
+#[versions(AGX)]
+impl<'a> JobSubmission::ver<'a> {
+    pub(crate) fn run(mut self, channel: &mut channel::PipeChannel::ver) {
+        let command_count = self.command_count;
+        let mut inner = self.inner.take().expect("No inner?");
+        let wptr = self.wptr;
+        core::mem::forget(self);
+
+        inner
+            .info
+            .state
+            .with(|raw, _inner| raw.cpu_wptr.store(wptr, Ordering::Release));
+
+        inner.wptr = wptr;
+
+        let event = inner.event.as_mut().expect("JobSubmission lost its event");
+
+        let event_slot = event.0.slot();
+
+        let msg = fw::channels::RunWorkQueueMsg::ver {
+            pipe_type: inner.pipe_type,
+            work_queue: Some(inner.info.weak_pointer()),
+            wptr: inner.wptr,
+            event_slot,
+            is_new: inner.new,
+            __pad: Default::default(),
+        };
+        channel.send(&msg);
+        inner.new = false;
+
+        inner.submit_seq += command_count as u64;
+    }
+
+    pub(crate) fn pipe_type(&self) -> PipeType {
+        self.inner.as_ref().expect("No inner?").pipe_type
+    }
+
+    pub(crate) fn priority(&self) -> u32 {
+        self.inner.as_ref().expect("No inner?").priority
+    }
+}
+
+#[versions(AGX)]
+impl Drop for Job::ver {
+    fn drop(&mut self) {
+        mod_pr_debug!("WorkQueue: Dropping Job\n");
+        let mut inner = self.wq.inner.lock();
+
+        if !self.committed {
+            pr_info!(
+                "WorkQueue: Dropping uncommitted job with {} events\n",
+                self.event_count
+            );
+        }
+
+        if self.committed && !self.submitted {
+            let pipe_type = inner.pipe_type;
+            let event = inner.event.as_mut().expect("Job lost its event");
+            pr_info!(
+                "WorkQueue({:?}): Roll back {} events (slot {} val {:#x?}) and {} commands\n",
+                pipe_type,
+                self.event_count,
+                event.0.slot(),
+                event.1,
+                self.pending.len()
+            );
+            event.1.sub(self.event_count as u32);
+            inner.commit_seq -= self.pending.len() as u64;
+            inner.event_seq -= self.event_count as u64;
+        }
+
+        inner.pending_jobs -= 1;
+
+        if inner.pending.is_empty() && inner.pending_jobs == 0 {
+            mod_pr_debug!("WorkQueue({:?}): Dropping event\n", inner.pipe_type);
+            inner.event = None;
+            inner.last_submitted = None;
+            inner.last_completed = None;
+        }
+        mod_pr_debug!("WorkQueue({:?}): Dropped Job\n", inner.pipe_type);
+    }
+}
+
+#[versions(AGX)]
+impl<'a> Drop for JobSubmission::ver<'a> {
+    fn drop(&mut self) {
+        let inner = self.inner.as_mut().expect("No inner?");
+        mod_pr_debug!("WorkQueue({:?}): Dropping JobSubmission\n", inner.pipe_type);
+
+        let new_len = inner.pending.len() - self.command_count;
+        inner.pending.truncate(new_len);
+
+        let pipe_type = inner.pipe_type;
+        let event = inner.event.as_mut().expect("JobSubmission lost its event");
+        pr_info!(
+            "WorkQueue({:?}): JobSubmission: Roll back {} events (slot {} val {:#x?}) and {} commands\n",
+            pipe_type,
+            self.event_count,
+            event.0.slot(),
+            event.1,
+            self.command_count
+        );
+        event.1.sub(self.event_count as u32);
+        let val = event.1;
+        inner.commit_seq -= self.command_count as u64;
+        inner.event_seq -= self.event_count as u64;
+        inner.last_submitted = Some(val);
+        mod_pr_debug!("WorkQueue({:?}): Dropped JobSubmission\n", inner.pipe_type);
+    }
+}
+
+#[versions(AGX)]
+impl WorkQueueInner::ver {
+    /// Return the number of free entries in the workqueue
+    pub(crate) fn free_space(&self) -> usize {
+        self.size as usize - self.pending.len() - 1
+    }
+
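+    /// Return the number of free event (job) slots for this queue, i.e. the job-slot budget
+    /// minus the distance between the last submitted and last completed event values.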
+    pub(crate) fn free_slots(&self) -> usize {
+        let busy_slots = if let Some(ls) = self.last_submitted {
+            let lc = self
+                .last_completed
+                .expect("last_submitted but not completed?");
+            ls.delta(&lc)
+        } else {
+            0
+        };
+
+        ((MAX_JOB_SLOTS as i32) - busy_slots).max(0) as usize
+    }
+}
+
+#[versions(AGX)]
+impl WorkQueue::ver {
+    /// Create a new WorkQueue of a given type and priority.
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        dev: &driver::AsahiDevice,
+        alloc: &mut gpu::KernelAllocators,
+        event_manager: Arc<event::EventManager>,
+        gpu_context: Arc<GpuContext>,
+        notifier_list: Arc<GpuObject<fw::event::NotifierList>>,
+        pipe_type: PipeType,
+        id: u64,
+        priority: u32,
+        size: u32,
+    ) -> Result<Arc<WorkQueue::ver>> {
+        let gpu_buf = alloc.private.array_empty_tagged(0x2c18, b"GPBF")?;
+        let mut state = alloc.shared.new_default::<RingState>()?;
+        let ring = alloc.shared.array_empty(size as usize)?;
+        let mut prio = *raw::PRIORITY.get(priority as usize).ok_or(EINVAL)?;
+
+        if pipe_type == PipeType::Compute && !debug_enabled(DebugFlags::Debug0) {
+            // Hack to disable compute preemption until we fix it
+            prio.0 = 0;
+            prio.5 = 1;
+        }
+
+        let inner = WorkQueueInner::ver {
+            dev: dev.into(),
+            event_manager,
+            // Use shared (coherent) state with verbose faults so we can dump state correctly
+            info: if debug_enabled(DebugFlags::VerboseFaults) {
+                &mut alloc.shared
+            } else {
+                &mut alloc.private
+            }
+            .new_init(
+                try_init!(QueueInfo::ver {
+                    state: {
+                        state.with_mut(|raw, _inner| {
+                            raw.rb_size = size;
+                        });
+                        state
+                    },
+                    ring,
+                    gpu_buf,
+                    notifier_list,
+                    gpu_context,
+                }),
+                |inner, _p| {
+                    try_init!(raw::QueueInfo::ver {
+                        state: inner.state.gpu_pointer(),
+                        ring: inner.ring.gpu_pointer(),
+                        notifier_list: inner.notifier_list.gpu_pointer(),
+                        gpu_buf: inner.gpu_buf.gpu_pointer(),
+                        gpu_rptr1: Default::default(),
+                        gpu_rptr2: Default::default(),
+                        gpu_rptr3: Default::default(),
+                        event_id: AtomicI32::new(-1),
+                        priority: prio,
+                        unk_4c: -1,
+                        uuid: id as u32,
+                        unk_54: -1,
+                        unk_58: Default::default(),
+                        busy: Default::default(),
+                        __pad: Default::default(),
+                        #[ver(V >= V13_2 && G < G14X)]
+                        unk_84_0: 0,
+                        unk_84_state: Default::default(),
+                        error_count: Default::default(),
+                        unk_8c: 0,
+                        unk_90: 0,
+                        unk_94: 0,
+                        pending: Default::default(),
+                        unk_9c: 0,
+                        gpu_context: inner.gpu_context.gpu_pointer(),
+                        unk_a8: Default::default(),
+                        #[ver(V >= V13_2 && G < G14X)]
+                        unk_b0: 0,
+                    })
+                },
+            )?,
+            new: true,
+            pipe_type,
+            size,
+            wptr: 0,
+            pending: KVec::new(),
+            last_completed_work: None,
+            last_token: None,
+            event: None,
+            priority,
+            pending_jobs: 0,
+            commit_seq: 0,
+            submit_seq: 0,
+            event_seq: 0,
+            last_completed: None,
+            last_submitted: None,
+        };
+
+        let info_pointer = inner.info.weak_pointer();
+
+        Arc::pin_init(
+            pin_init!(Self {
+                info_pointer,
+                inner <- match pipe_type {
+                    PipeType::Vertex => Mutex::new_named(inner, c_str!("WorkQueue::inner (Vertex)")),
+                    PipeType::Fragment => Mutex::new_named(inner, c_str!("WorkQueue::inner (Fragment)")),
+                    PipeType::Compute => Mutex::new_named(inner, c_str!("WorkQueue::inner (Compute)")),
+                },
+            }),
+            GFP_KERNEL,
+        )
+    }
+
+    pub(crate) fn event_info(&self) -> Option<QueueEventInfo::ver> {
+        let inner = self.inner.lock();
+
+        inner.event.as_ref().map(|ev| QueueEventInfo::ver {
+            stamp_pointer: ev.0.stamp_pointer(),
+            fw_stamp_pointer: ev.0.fw_stamp_pointer(),
+            slot: ev.0.slot(),
+            value: ev.1,
+            cmd_seq: inner.commit_seq,
+            event_seq: inner.event_seq,
+            info_ptr: self.info_pointer,
+        })
+    }
+
+    pub(crate) fn new_job(self: &Arc<Self>, fence: dma_fence::Fence) -> Result<Job::ver> {
+        let mut inner = self.inner.lock();
+
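+        // Lazily grab an event slot the first time a job is created while none
+        // is held; the slot is released again once all pending work completes.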
+        if inner.event.is_none() {
+            mod_pr_debug!("WorkQueue({:?}): Grabbing event\n", inner.pipe_type);
+            let event = inner.event_manager.get(inner.last_token, self.clone())?;
+            let cur = event.current();
+            inner.last_token = Some(event.token());
+            mod_pr_debug!(
+                "WorkQueue({:?}): Grabbed event slot {}: {:#x?}\n",
+                inner.pipe_type,
+                event.slot(),
+                cur
+            );
+            inner.event = Some((event, cur));
+            inner.last_submitted = Some(cur);
+            inner.last_completed = Some(cur);
+        }
+
+        inner.pending_jobs += 1;
+
+        let ev = inner.event.as_ref().unwrap();
+
+        mod_pr_debug!(
+            "WorkQueue({:?}): New job at value {:#x?} slot {}\n",
+            inner.pipe_type,
+            ev.1,
+            ev.0.slot()
+        );
+        Ok(Job::ver {
+            wq: self.clone(),
+            event_info: QueueEventInfo::ver {
+                stamp_pointer: ev.0.stamp_pointer(),
+                fw_stamp_pointer: ev.0.fw_stamp_pointer(),
+                slot: ev.0.slot(),
+                value: ev.1,
+                cmd_seq: inner.commit_seq,
+                event_seq: inner.event_seq,
+                info_ptr: self.info_pointer,
+            },
+            start_value: ev.1,
+            pending: KVec::new(),
+            event_count: 0,
+            committed: false,
+            submitted: false,
+            fence,
+        })
+    }
+
+    pub(crate) fn pipe_type(&self) -> PipeType {
+        self.inner.lock().pipe_type
+    }
+
+    pub(crate) fn dump_info(&self) {
+        pr_info!("WorkQueue @ {:?}:", self.info_pointer);
+        self.inner.lock().info.with(|raw, _inner| {
+            pr_info!("  GPU rptr1: {:#x}", raw.gpu_rptr1.load(Ordering::Relaxed));
+            pr_info!("  GPU rptr2: {:#x}", raw.gpu_rptr2.load(Ordering::Relaxed));
+            pr_info!("  GPU rptr3: {:#x}", raw.gpu_rptr3.load(Ordering::Relaxed));
+            pr_info!("  Event ID: {:#x}", raw.event_id.load(Ordering::Relaxed));
+            pr_info!("  Busy: {:#x}", raw.busy.load(Ordering::Relaxed));
+            pr_info!("  Unk 84: {:#x}", raw.unk_84_state.load(Ordering::Relaxed));
+            pr_info!(
+                "  Error count: {:#x}",
+                raw.error_count.load(Ordering::Relaxed)
+            );
+            pr_info!("  Pending: {:#x}", raw.pending.load(Ordering::Relaxed));
+        });
+    }
+
+    pub(crate) fn info_pointer(&self) -> GpuWeakPointer<QueueInfo::ver> {
+        self.info_pointer
+    }
+}
+
+/// Trait used to erase the version-specific type of WorkQueues, to avoid leaking
+/// version-specificity into the event module.
+pub(crate) trait WorkQueue {
+    /// Cast as an Any type.
+    fn as_any(&self) -> &dyn Any;
+
+    fn signal(&self) -> bool;
+    fn mark_error(&self, value: event::EventValue, error: WorkError);
+    fn fail_all(&self, error: WorkError);
+}
+
+#[versions(AGX)]
+impl WorkQueue for WorkQueue::ver {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    /// Signal a workqueue that some work was completed.
+    ///
+    /// This will check the event stamp value to find out exactly how many commands were processed.
+    fn signal(&self) -> bool {
+        let mut inner = self.inner.lock();
+        let event = inner.event.as_ref();
+        let value = match event {
+            None => {
+                mod_pr_debug!("WorkQueue: signal() called but no event?\n");
+
+                if inner.pending_jobs > 0 || !inner.pending.is_empty() {
+                    pr_crit!("WorkQueue: signal() called with no event and pending jobs.\n");
+                }
+                return true;
+            }
+            Some(event) => event.0.current(),
+        };
+
+        if let Some(lc) = inner.last_completed {
+            if value < lc {
+                pr_err!(
+                    "WorkQueue: event rolled back? cur {:#x?}, lc {:#x?}, ls {:#x?}\n",
+                    value,
+                    inner.last_completed,
+                    inner.last_submitted
+                );
+            }
+        } else {
+            pr_crit!("WorkQueue: signal() called with no last_completed.\n");
+        }
+        inner.last_completed = Some(value);
+
+        mod_pr_debug!(
+            "WorkQueue({:?}): Signaling event {:?} value {:#x?}\n",
+            inner.pipe_type,
+            inner.last_token,
+            value
+        );
+
+        let mut completed_commands: usize = 0;
+
+        for cmd in inner.pending.iter() {
+            if cmd.inner.value() <= value {
+                mod_pr_debug!(
+                    "WorkQueue({:?}): Command at value {:#x?} complete\n",
+                    inner.pipe_type,
+                    cmd.inner.value()
+                );
+                completed_commands += 1;
+            } else {
+                break;
+            }
+        }
+
+        if completed_commands == 0 {
+            return inner.pending.is_empty();
+        }
+
+        let last_wptr = inner.pending[completed_commands - 1].inner.wptr();
+        let pipe_type = inner.pipe_type;
+
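+        // Queue completed commands for cleanup on the system workqueue, always
+        // keeping the most recently completed one around until the next signal
+        // (or until the WorkQueueInner itself is dropped).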
+        let mut last_cmd = inner.last_completed_work.take();
+
+        for mut cmd in inner.pending.drain(..completed_commands) {
+            mod_pr_debug!(
+                "WorkQueue({:?}): Queueing command @ {:?} for cleanup\n",
+                pipe_type,
+                cmd.inner.gpu_va()
+            );
+            cmd.as_mut().inner_mut().complete();
+            if let Some(last_cmd) = last_cmd.replace(cmd) {
+                workqueue::system().enqueue(last_cmd);
+            }
+        }
+
+        inner.last_completed_work = last_cmd;
+
+        mod_pr_debug!(
+            "WorkQueue({:?}): Completed {} commands, left pending {}, ls {:#x?}, lc {:#x?}\n",
+            inner.pipe_type,
+            completed_commands,
+            inner.pending.len(),
+            inner.last_submitted,
+            inner.last_completed,
+        );
+
+        inner
+            .info
+            .state
+            .with(|raw, _inner| raw.cpu_freeptr.store(last_wptr, Ordering::Release));
+
+        let empty = inner.pending.is_empty();
+        if empty && inner.pending_jobs == 0 {
+            inner.event = None;
+            inner.last_submitted = None;
+            inner.last_completed = None;
+        }
+
+        empty
+    }
+
+    /// Mark this queue's work up to a certain stamp value as having failed.
+    fn mark_error(&self, value: event::EventValue, error: WorkError) {
+        // If anything is marked completed, we can consider it successful
+        // at this point, even if we didn't get the signal event yet.
+        self.signal();
+
+        let mut inner = self.inner.lock();
+
+        if inner.event.is_none() {
+            mod_pr_debug!("WorkQueue: signal_fault() called but no event?\n");
+
+            if inner.pending_jobs > 0 || !inner.pending.is_empty() {
+                pr_crit!("WorkQueue: signal_fault() called with no event and pending jobs.\n");
+            }
+            return;
+        }
+
+        mod_pr_debug!(
+            "WorkQueue({:?}): Signaling fault for event {:?} at value {:#x?}\n",
+            inner.pipe_type,
+            inner.last_token,
+            value
+        );
+
+        for cmd in inner.pending.iter_mut() {
+            if cmd.inner.value() <= value {
+                cmd.as_mut().inner_mut().mark_error(error);
+            } else {
+                break;
+            }
+        }
+    }
+
+    /// Mark all of this queue's work as having failed, and complete it.
+    fn fail_all(&self, error: WorkError) {
+        // If anything is marked completed, we can consider it successful
+        // at this point, even if we didn't get the signal event yet.
+        self.signal();
+
+        let mut inner = self.inner.lock();
+
+        if inner.event.is_none() {
+            mod_pr_debug!("WorkQueue: fail_all() called but no event?\n");
+
+            if inner.pending_jobs > 0 || !inner.pending.is_empty() {
+                pr_crit!("WorkQueue: fail_all() called with no event and pending jobs.\n");
+            }
+            return;
+        }
+
+        mod_pr_debug!(
+            "WorkQueue({:?}): Failing all jobs {:?}\n",
+            inner.pipe_type,
+            error
+        );
+
+        let mut cmds = KVec::new();
+
+        core::mem::swap(&mut inner.pending, &mut cmds);
+
+        if inner.pending_jobs == 0 {
+            inner.event = None;
+        }
+
+        core::mem::drop(inner);
+
+        for mut cmd in cmds {
+            cmd.as_mut().inner_mut().mark_error(error);
+            cmd.as_mut().inner_mut().complete();
+        }
+    }
+}
+
+#[versions(AGX)]
+impl Drop for WorkQueueInner::ver {
+    fn drop(&mut self) {
+        if let Some(last_cmd) = self.last_completed_work.take() {
+            workqueue::system().enqueue(last_cmd);
+        }
+    }
+}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df4b..8f998fe6beecd2 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -195,6 +195,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
 
 	drm_vma_node_reset(&obj->vma_node);
 	INIT_LIST_HEAD(&obj->lru_node);
+	obj->exportable = true;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312b5..be310db5863871 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -338,6 +338,8 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
+	dma_resv_assert_held(obj->resv);
+
 	if (obj->import_attach) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
 		if (!ret) {
@@ -404,6 +406,8 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
 {
 	struct drm_gem_object *obj = &shmem->base;
 
+	dma_resv_assert_held(obj->resv);
+
 	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
@@ -531,7 +535,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -560,8 +564,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(drm_gem_shmem_fault);
 
-static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
+void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
@@ -582,8 +587,9 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 
 	drm_gem_vm_open(vma);
 }
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_open);
 
-static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
+void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
@@ -594,6 +600,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 
 	drm_gem_vm_close(vma);
 }
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_close);
 
 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
 	.fault = drm_gem_shmem_fault,
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef29..354ab208a4948f 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -670,6 +670,12 @@
  *	}
  */
 
+/**
+ * DRM_GPUVA_UNMERGEABLE_FLAGS - mask of flags which must match to consider a
+ * drm_gpuva eligible for merging with a new overlaid mapping
+ */
+#define DRM_GPUVA_UNMERGEABLE_FLAGS DRM_GPUVA_SINGLE_PAGE
+
 /**
  * get_next_vm_bo_from_list() - get the next vm_bo element
  * @__gpuvm: the &drm_gpuvm
@@ -2054,7 +2060,8 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
 static int
 op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
 	  u64 addr, u64 range,
-	  struct drm_gem_object *obj, u64 offset)
+	  struct drm_gem_object *obj, u64 offset,
+	  enum drm_gpuva_flags flags)
 {
 	struct drm_gpuva_op op = {};
 
@@ -2063,6 +2070,7 @@ op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
 	op.map.va.range = range;
 	op.map.gem.obj = obj;
 	op.map.gem.offset = offset;
+	op.map.flags = flags;
 
 	return fn->sm_step_map(&op, priv);
 }
@@ -2102,7 +2110,8 @@ static int
 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 		   const struct drm_gpuvm_ops *ops, void *priv,
 		   u64 req_addr, u64 req_range,
-		   struct drm_gem_object *req_obj, u64 req_offset)
+		   struct drm_gem_object *req_obj, u64 req_offset,
+		   enum drm_gpuva_flags req_flags)
 {
 	struct drm_gpuva *va, *next;
 	u64 req_end = req_addr + req_range;
@@ -2118,6 +2127,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 		u64 range = va->va.range;
 		u64 end = addr + range;
 		bool merge = !!va->gem.obj;
+		bool single_page = va->flags & DRM_GPUVA_SINGLE_PAGE;
+
+		merge &= !((va->flags ^ req_flags) & DRM_GPUVA_UNMERGEABLE_FLAGS);
 
 		if (addr == req_addr) {
 			merge &= obj == req_obj &&
@@ -2142,7 +2154,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 					.va.addr = req_end,
 					.va.range = range - req_range,
 					.gem.obj = obj,
-					.gem.offset = offset + req_range,
+					.gem.offset = offset +
+						(single_page ? 0 : req_range),
+					.flags = va->flags,
 				};
 				struct drm_gpuva_op_unmap u = {
 					.va = va,
@@ -2161,11 +2175,16 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 				.va.range = ls_range,
 				.gem.obj = obj,
 				.gem.offset = offset,
+				.flags = va->flags,
 			};
 			struct drm_gpuva_op_unmap u = { .va = va };
 
-			merge &= obj == req_obj &&
-				 offset + ls_range == req_offset;
+			merge &= obj == req_obj;
+			if (single_page)
+				merge &= offset == req_offset;
+			else
+				merge &= offset + ls_range == req_offset;
+
 			u.keep = merge;
 
 			if (end == req_end) {
@@ -2187,8 +2206,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 					.va.addr = req_end,
 					.va.range = end - req_end,
 					.gem.obj = obj,
-					.gem.offset = offset + ls_range +
-						      req_range,
+					.gem.offset = offset +
+						(single_page ? 0 :
+						 ls_range + req_range),
+					.flags = va->flags,
 				};
 
 				ret = op_remap_cb(ops, priv, &p, &n, &u);
@@ -2197,9 +2218,13 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 				break;
 			}
 		} else if (addr > req_addr) {
-			merge &= obj == req_obj &&
-				 offset == req_offset +
-					   (addr - req_addr);
+			merge &= obj == req_obj;
+
+			if (single_page)
+				merge &= offset == req_offset;
+			else
+				merge &= offset == req_offset +
+					 (addr - req_addr);
 
 			if (end == req_end) {
 				ret = op_unmap_cb(ops, priv, va, merge);
@@ -2220,7 +2245,10 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 					.va.addr = req_end,
 					.va.range = end - req_end,
 					.gem.obj = obj,
-					.gem.offset = offset + req_end - addr,
+					.gem.offset = offset +
+						(single_page ? 0 :
+						 req_end - addr),
+					.flags = va->flags,
 				};
 				struct drm_gpuva_op_unmap u = {
 					.va = va,
@@ -2237,7 +2265,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 
 	return op_map_cb(ops, priv,
 			 req_addr, req_range,
-			 req_obj, req_offset);
+			 req_obj, req_offset,
+			 req_flags);
 }
 
 static int
@@ -2260,12 +2289,14 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
 		u64 addr = va->va.addr;
 		u64 range = va->va.range;
 		u64 end = addr + range;
+		bool single_page = va->flags & DRM_GPUVA_SINGLE_PAGE;
 
 		if (addr < req_addr) {
 			prev.va.addr = addr;
 			prev.va.range = req_addr - addr;
 			prev.gem.obj = obj;
 			prev.gem.offset = offset;
+			prev.flags = va->flags;
 
 			prev_split = true;
 		}
@@ -2274,7 +2305,10 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
 			next.va.addr = req_end;
 			next.va.range = end - req_end;
 			next.gem.obj = obj;
-			next.gem.offset = offset + (req_end - addr);
+			next.gem.offset = offset;
+			if (!single_page)
+				next.gem.offset += req_end - addr;
+			next.flags = va->flags;
 
 			next_split = true;
 		}
@@ -2333,7 +2367,8 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
 int
 drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
 		 u64 req_addr, u64 req_range,
-		 struct drm_gem_object *req_obj, u64 req_offset)
+		 struct drm_gem_object *req_obj, u64 req_offset,
+		 enum drm_gpuva_flags req_flags)
 {
 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
 
@@ -2344,7 +2379,8 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
 
 	return __drm_gpuvm_sm_map(gpuvm, ops, priv,
 				  req_addr, req_range,
-				  req_obj, req_offset);
+				  req_obj, req_offset,
+				  req_flags);
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
 
@@ -2516,7 +2552,8 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
 struct drm_gpuva_ops *
 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
 			    u64 req_addr, u64 req_range,
-			    struct drm_gem_object *req_obj, u64 req_offset)
+			    struct drm_gem_object *req_obj, u64 req_offset,
+			    enum drm_gpuva_flags req_flags)
 {
 	struct drm_gpuva_ops *ops;
 	struct {
@@ -2536,7 +2573,8 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
 
 	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
 				 req_addr, req_range,
-				 req_obj, req_offset);
+				 req_obj, req_offset,
+				 req_flags);
 	if (ret)
 		goto err_free_ops;
 
@@ -2664,6 +2702,49 @@ drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
 
+/**
+ * drm_gpuvm_bo_unmap() - unmaps a GEM
+ * @vm_bo: the &drm_gpuvm_bo abstraction
+ *
+ * This function calls the unmap callback for every GPUVA attached to a GEM.
+ *
+ * It is the caller's responsibility to protect the GEM's GPUVA list against
+ * concurrent access using the GEM's dma_resv lock.
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+int
+drm_gpuvm_bo_unmap(struct drm_gpuvm_bo *vm_bo, void *priv)
+{
+	struct drm_gpuva_op *op;
+	int ret = 0;
+
+	if (unlikely(!vm_bo->vm))
+		return -EINVAL;
+
+	const struct drm_gpuvm_ops *vm_ops = vm_bo->vm->ops;
+
+	if (unlikely(!(vm_ops && vm_ops->sm_step_unmap)))
+		return -EINVAL;
+
+	struct drm_gpuva_ops *ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
+
+	drm_gpuva_for_each_op(op, ops) {
+		drm_WARN_ON(vm_bo->vm->drm, op->op != DRM_GPUVA_OP_UNMAP);
+
+		ret = op_unmap_cb(vm_ops, priv, op->unmap.va, false);
+		if (ret)
+			goto cleanup;
+	}
+
+cleanup:
+	drm_gpuva_ops_free(vm_bo->vm, ops);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap);
+
 /**
  * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
  * @vm_bo: the &drm_gpuvm_bo abstraction
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index f128d345b16dfb..dee5301dd72997 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -486,11 +486,6 @@ static void drm_panic_qr_exit(void)
 	stream.workspace = NULL;
 }
 
-extern size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
-
-extern u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
-				u8 *tmp, size_t tmp_size);
-
 static int drm_panic_get_qr_code_url(u8 **qr_image)
 {
 	struct kmsg_dump_iter iter;
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 6903e2010cb98b..2c436d8b55f373 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -27,7 +27,7 @@
 //! * <https://github.com/bjguillot/qr>
 
 use core::cmp;
-use kernel::str::CStr;
+use kernel::{prelude::*, str::CStr};
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
 struct Version(usize);
@@ -929,7 +929,7 @@ impl QrImage<'_> {
 /// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
 ///
 /// They must remain valid for the duration of the function call.
-#[no_mangle]
+#[export]
 pub unsafe extern "C" fn drm_panic_qr_generate(
     url: *const kernel::ffi::c_char,
     data: *mut u8,
@@ -980,8 +980,13 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
 /// * If `url_len` > 0, remove the 2 segments header/length and also count the
 ///   conversion to numeric segments.
 /// * If `url_len` = 0, only removes 3 bytes for 1 binary segment.
-#[no_mangle]
-pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+///
+/// # Safety
+///
+/// Always safe to call.
+// Required to be unsafe due to the `#[export]` annotation.
+#[export]
+pub unsafe extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
     #[expect(clippy::manual_range_contains)]
     if version < 1 || version > 40 {
         return 0;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 32a8781cfd67b8..20aa350280abe9 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -387,6 +387,11 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
 		return dmabuf;
 	}
 
+	if (!obj->exportable) {
+		dmabuf = ERR_PTR(-EINVAL);
+		return dmabuf;
+	}
+
 	if (obj->funcs && obj->funcs->export)
 		dmabuf = obj->funcs->export(obj, flags);
 	else
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 2896fa7501b1cc..f895d4aadc2668 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -190,7 +190,8 @@ static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
 					bind_op, bind_op->device_addr,
 					bind_op->size,
 					gem_from_pvr_gem(bind_op->pvr_obj),
-					bind_op->offset);
+					bind_op->offset,
+					0);
 
 	case PVR_VM_BIND_TYPE_UNMAP:
 		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42d8..d548154c0a38c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1304,7 +1304,8 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
 							      op->va.addr,
 							      op->va.range,
 							      op->gem.obj,
-							      op->gem.offset);
+							      op->gem.offset,
+							      0);
 			if (IS_ERR(op->ops)) {
 				ret = PTR_ERR(op->ops);
 				goto unwind_continue;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d7469c565d1db8..5085a82e4bc695 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -925,6 +925,15 @@ config DRM_PANEL_SIMPLE
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
+config DRM_PANEL_SUMMIT
+	tristate "Apple Summit display panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y if you want to enable support for the "Summit" display panel
+	  used as a Touch Bar on certain Apple laptops.
+
 config DRM_PANEL_SYNAPTICS_R63353
 	tristate "Synaptics R63353-based panels"
 	depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7dcf72646cacff..10ac2e850f5cd6 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
 obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SUMMIT) += panel-summit.o
 obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o
 obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
 obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
new file mode 100644
index 00000000000000..e780faee18570c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
+
+struct summit_data {
+	struct mipi_dsi_device *dsi;
+	struct backlight_device *bl;
+	struct drm_panel panel;
+};
+
+static int summit_set_brightness(struct device *dev)
+{
+	struct summit_data *s_data = dev_get_drvdata(dev);
+	int level = backlight_get_brightness(s_data->bl);
+
+	return mipi_dsi_dcs_set_display_brightness(s_data->dsi, level);
+}
+
+static int summit_bl_update_status(struct backlight_device *dev)
+{
+	return summit_set_brightness(&dev->dev);
+}
+
+static const struct backlight_ops summit_bl_ops = {
+	.update_status	= summit_bl_update_status,
+};
+
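+/* Fixed display mode of the Summit (Touch Bar) panel. */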
+static const struct drm_display_mode summit_mode = {
+	.vdisplay = 2008,
+	.hdisplay = 60,
+	.hsync_start = 60 + 8,
+	.hsync_end = 60 + 8 + 80,
+	.htotal = 60 + 8 + 80 + 40,
+	.vsync_start = 2008 + 1,
+	.vsync_end = 2008 + 1 + 15,
+	.vtotal = 2008 + 1 + 15 + 6,
+	.clock = ((60 + 8 + 80 + 40) * (2008 + 1 + 15 + 6) * 60) / 1000,
+	.type = DRM_MODE_TYPE_DRIVER,
+	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static int summit_get_modes(struct drm_panel *panel,
+			    struct drm_connector *connector)
+{
+	connector->display_info.non_desktop = true;
+	drm_object_property_set_value(&connector->base,
+				      connector->dev->mode_config.non_desktop_property,
+				      connector->display_info.non_desktop);
+
+	return drm_connector_helper_get_modes_fixed(connector, &summit_mode);
+}
+
+static const struct drm_panel_funcs summit_panel_funcs = {
+	.get_modes = summit_get_modes,
+};
+
+static int summit_probe(struct mipi_dsi_device *dsi)
+{
+	struct backlight_properties props = { 0 };
+	struct device *dev = &dsi->dev;
+	struct summit_data *s_data;
+	int ret;
+
+	s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
+	if (!s_data)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, s_data);
+	s_data->dsi = dsi;
+
+	ret = device_property_read_u32(dev, "max-brightness", &props.max_brightness);
+	if (ret)
+		return ret;
+	props.type = BACKLIGHT_RAW;
+
+	s_data->bl = devm_backlight_device_register(dev, dev_name(dev),
+						    dev, s_data, &summit_bl_ops, &props);
+	if (IS_ERR(s_data->bl))
+		return PTR_ERR(s_data->bl);
+
+	drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+	drm_panel_add(&s_data->panel);
+
+	return mipi_dsi_attach(dsi);
+}
+
+static void summit_remove(struct mipi_dsi_device *dsi)
+{
+	struct summit_data *s_data = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+	drm_panel_remove(&s_data->panel);
+}
+
+static int summit_suspend(struct device *dev)
+{
+	struct summit_data *s_data = dev_get_drvdata(dev);
+
+	return mipi_dsi_dcs_set_display_brightness(s_data->dsi, 0);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(summit_pm_ops, summit_suspend,
+				summit_set_brightness);
+
+static const struct of_device_id summit_of_match[] = {
+	{ .compatible = "apple,summit" },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, summit_of_match);
+
+static struct mipi_dsi_driver summit_driver = {
+	.probe = summit_probe,
+	.remove = summit_remove,
+	.driver = {
+		.name = "panel-summit",
+		.of_match_table = summit_of_match,
+		.pm = pm_sleep_ptr(&summit_pm_ops),
+	},
+};
+module_mipi_dsi_driver(summit_driver);
+
+MODULE_DESCRIPTION("Summit Display Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 1202de8811c2ae..74afa8b1243373 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -2189,7 +2189,8 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
 		}
 
 		ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
-				       op->map.vm_bo->obj, op->map.bo_offset);
+				       op->map.vm_bo->obj, op->map.bo_offset,
+				       0);
 		break;
 
 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index da00572d7d42e2..2ee5ba710578b4 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -436,7 +436,12 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 
 		/*
 		 * Fence is from the same scheduler, only need to wait for
-		 * it to be scheduled
+		 * it to be scheduled.
+		 *
+		 * Note: s_fence->sched could have been freed and reallocated
+		 * as another scheduler. This false positive case is okay, as if
+		 * the old scheduler was freed all of its jobs must have
+		 * signaled their completion fences.
 		 */
 		fence = dma_fence_get(&s_fence->scheduled);
 		dma_fence_put(entity->dependency);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 0f35f009b9d373..a12fef84a19d87 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -90,7 +90,7 @@ static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
 static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
-	return (const char *)fence->sched->name;
+	return (const char *)fence->sched_name;
 }
 
 static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
@@ -224,6 +224,8 @@ void drm_sched_fence_init(struct drm_sched_fence *fence,
 	unsigned seq;
 
 	fence->sched = entity->rq->sched;
+	strscpy(fence->sched_name, entity->rq->sched->name,
+		sizeof(fence->sched_name));
 	seq = atomic_inc_return(&entity->fence_seq);
 	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
 		       &fence->lock, entity->fence_context, seq);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 57da84908752c6..a89c863fe086a7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1368,8 +1368,33 @@ EXPORT_SYMBOL(drm_sched_init);
 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
 	struct drm_sched_entity *s_entity;
+	struct drm_sched_job *s_job, *tmp;
 	int i;
 
+	/*
+	 * Stop the scheduler, detaching all jobs from their hardware callbacks
+	 * and cleaning up complete jobs.
+	 */
+	drm_sched_stop(sched, NULL);
+
+	/*
+	 * Iterate through the pending job list and free all jobs.
+	 * This assumes the driver has either guaranteed jobs are already stopped, or that
+	 * otherwise it is responsible for keeping any necessary data structures for
+	 * in-progress jobs alive even when the free_job() callback is called early (e.g. by
+	 * putting them in its own queue or doing its own refcounting).
+	 */
+	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
+		spin_lock(&sched->job_list_lock);
+		list_del_init(&s_job->list);
+		spin_unlock(&sched->job_list_lock);
+
+		drm_sched_fence_finished(s_job->s_fence, -ESRCH);
+
+		WARN_ON(s_job->s_fence->parent);
+		sched->ops->free_job(s_job);
+	}
+
 	drm_sched_wqueue_stop(sched);
 
 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 5d9ab8adf80058..424f63ffd12915 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -843,6 +843,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
 		if (IS_ERR(mem))
 			return ERR_CAST(mem);
 		panel_node = of_parse_phandle(of_node, "panel", 0);
+		if (!panel_node)
+			panel_node = of_parse_phandle(of_node, "panel-dimensions", 0);
 		if (panel_node) {
 			simplefb_read_u32_of(dev, panel_node, "width-mm", &width_mm);
 			simplefb_read_u32_of(dev, panel_node, "height-mm", &height_mm);
@@ -1030,6 +1032,12 @@ static int simpledrm_probe(struct platform_device *pdev)
 	struct drm_device *dev;
 	int ret;
 
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret)
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to set dma mask\n");
+
 	sdev = simpledrm_device_create(&simpledrm_driver, pdev);
 	if (IS_ERR(sdev))
 		return PTR_ERR(sdev);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5956631c0d40a4..6019c839b93313 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1981,7 +1981,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 	case DRM_XE_VM_BIND_OP_MAP:
 	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
 		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
-						  obj, bo_offset_or_userptr);
+						  obj, bo_offset_or_userptr,
+						  0);
 		break;
 	case DRM_XE_VM_BIND_OP_UNMAP:
 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4cfea399ebab2d..d0089459e18b00 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -129,7 +129,7 @@ config HID_APPLE
 	tristate "Apple {i,Power,Mac}Books"
 	depends on LEDS_CLASS
 	depends on NEW_LEDS
-	default !EXPERT
+	default !EXPERT || SPI_HID_APPLE
 	help
 	Support for some Apple devices which less or more break
 	HID specification.
@@ -570,8 +570,7 @@ config HID_LED
 
 config HID_LENOVO
 	tristate "Lenovo / Thinkpad devices"
-	depends on ACPI
-	select ACPI_PLATFORM_PROFILE
+	select ACPI_PLATFORM_PROFILE if ACPI
 	select NEW_LEDS
 	select LEDS_CLASS
 	help
@@ -689,11 +688,13 @@ config LOGIWHEELS_FF
 
 config HID_MAGICMOUSE
 	tristate "Apple Magic Mouse/Trackpad multi-touch support"
+	default SPI_HID_APPLE
 	help
 	Support for the Apple Magic Mouse/Trackpad multi-touch.
 
 	Say Y here if you want support for the multi-touch features of the
-	Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+	Apple Wireless "Magic" Mouse, the Apple Wireless "Magic" Trackpad and
+	the Force Touch trackpads found in MacBooks since 2015.
 
 config HID_MALTRON
 	tristate "Maltron L90 keyboard"
@@ -1407,4 +1408,8 @@ endif # HID
 
 source "drivers/hid/usbhid/Kconfig"
 
+source "drivers/hid/spi-hid/Kconfig"
+
+source "drivers/hid/dockchannel-hid/Kconfig"
+
 endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index c7ecfbb3e2280c..b540b6d179b2d9 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -170,6 +170,10 @@ obj-$(CONFIG_INTEL_ISH_HID)	+= intel-ish-hid/
 
 obj-$(CONFIG_AMD_SFH_HID)       += amd-sfh-hid/
 
+obj-$(CONFIG_HID_DOCKCHANNEL)   += dockchannel-hid/
+
+obj-$(CONFIG_SPI_HID_APPLE_CORE)	+= spi-hid/
+
 obj-$(CONFIG_SURFACE_HID_CORE)  += surface-hid/
 
 obj-$(CONFIG_INTEL_THC_HID)     += intel-thc-hid/
diff --git a/drivers/hid/dockchannel-hid/Kconfig b/drivers/hid/dockchannel-hid/Kconfig
new file mode 100644
index 00000000000000..8a81d551a83d51
--- /dev/null
+++ b/drivers/hid/dockchannel-hid/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+menu "DockChannel HID support"
+	depends on APPLE_DOCKCHANNEL
+
+config HID_DOCKCHANNEL
+	tristate "HID over DockChannel transport layer for Apple Silicon SoCs"
+	default ARCH_APPLE
+	depends on APPLE_DOCKCHANNEL && INPUT && OF && HID
+	help
+	  Say Y here if you use an M2 or later Apple Silicon based laptop.
+	  The keyboard and touchpad are HID based devices connected via the
+	  proprietary DockChannel interface.
+
+endmenu
diff --git a/drivers/hid/dockchannel-hid/Makefile b/drivers/hid/dockchannel-hid/Makefile
new file mode 100644
index 00000000000000..7dba766b047fcc
--- /dev/null
+++ b/drivers/hid/dockchannel-hid/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+#
+# Makefile for DockChannel HID transport drivers
+#
+
+obj-$(CONFIG_HID_DOCKCHANNEL)	+= dockchannel-hid.o
diff --git a/drivers/hid/dockchannel-hid/dockchannel-hid.c b/drivers/hid/dockchannel-hid/dockchannel-hid.c
new file mode 100644
index 00000000000000..a712a724ded30b
--- /dev/null
+++ b/drivers/hid/dockchannel-hid/dockchannel-hid.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Apple DockChannel HID transport driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hid.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/dockchannel.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
+#include <linux/of.h>
+#include "../hid-ids.h"
+
+#define COMMAND_TIMEOUT_MS 1000
+#define START_TIMEOUT_MS 2000
+
+#define MAX_INTERFACES 16
+
+/* Data + checksum */
+#define MAX_PKT_SIZE (0xffff + 4)
+
+#define DCHID_CHANNEL_CMD 0x11
+#define DCHID_CHANNEL_REPORT 0x12
+
+struct dchid_hdr {
+	u8 hdr_len;
+	u8 channel;
+	u16 length;
+	u8 seq;
+	u8 iface;
+	u16 pad;
+} __packed;
+
+#define IFACE_COMM 0
+
+#define FLAGS_GROUP GENMASK(7, 6)
+#define FLAGS_REQ GENMASK(5, 0)
+
+#define REQ_SET_REPORT 0
+#define REQ_GET_REPORT 1
+
+struct dchid_subhdr {
+	u8 flags;
+	u8 unk;
+	u16 length;
+	u32 retcode;
+} __packed;
+
+#define EVENT_GPIO_CMD	0xa0
+#define EVENT_INIT	0xf0
+#define EVENT_READY	0xf1
+
+struct dchid_init_hdr {
+	u8 type;
+	u8 unk1;
+	u8 unk2;
+	u8 iface;
+	char name[16];
+	u8 more_packets;
+	u8 unkpad;
+} __packed;
+
+#define INIT_HID_DESCRIPTOR	0
+#define INIT_GPIO_REQUEST	1
+#define INIT_TERMINATOR		2
+#define INIT_PRODUCT_NAME	7
+
+#define CMD_RESET_INTERFACE 0x40
+#define CMD_SEND_FIRMWARE 0x95
+#define CMD_ENABLE_INTERFACE 0xb4
+#define CMD_ACK_GPIO_CMD 0xa1
+
+struct dchid_init_block_hdr {
+	u16 type;
+	u16 length;
+} __packed;
+
+#define MAX_GPIO_NAME 32
+
+struct dchid_gpio_request {
+	u16 unk;
+	u16 id;
+	char name[MAX_GPIO_NAME];
+} __packed;
+
+struct dchid_gpio_cmd {
+	u8 type;
+	u8 iface;
+	u8 gpio;
+	u8 unk;
+	u8 cmd;
+} __packed;
+
+struct dchid_gpio_ack {
+	u8 type;
+	u32 retcode;
+	u8 cmd[];
+} __packed;
+
+#define STM_REPORT_ID		0x10
+#define STM_REPORT_SERIAL	0x11
+#define STM_REPORT_KEYBTYPE	0x14
+
+struct dchid_stm_id {
+	u8 unk;
+	u16 vendor_id;
+	u16 product_id;
+	u16 version_number;
+	u8 unk2;
+	u8 unk3;
+	u8 keyboard_type;
+	u8 serial_length;
+	/* Serial follows, but we grab it with a different report. */
+} __packed;
+
+#define FW_MAGIC 0x46444948
+#define FW_VER 1
+
+struct fw_header {
+	u32 magic;
+	u32 version;
+	u32 hdr_length;
+	u32 data_length;
+	u32 iface_offset;
+} __packed;
+
+struct dchid_work {
+	struct work_struct work;
+	struct dchid_iface *iface;
+
+	struct dchid_hdr hdr;
+	u8 data[];
+};
+
+struct dchid_iface {
+	struct dockchannel_hid *dchid;
+	struct hid_device *hid;
+	struct workqueue_struct *wq;
+
+	bool creating;
+	struct work_struct create_work;
+
+	int index;
+	const char *name;
+	const struct device_node *of_node;
+
+	uint8_t tx_seq;
+	bool deferred;
+	bool starting;
+	bool open;
+	struct completion ready;
+
+	void *hid_desc;
+	size_t hid_desc_len;
+
+	struct gpio_desc *gpio;
+	char gpio_name[MAX_GPIO_NAME];
+	int gpio_id;
+
+	struct mutex out_mutex;
+	u32 out_flags;
+	int out_report;
+	u32 retcode;
+	void *resp_buf;
+	size_t resp_size;
+	struct completion out_complete;
+
+	u32 keyboard_layout_id;
+};
+
+struct dockchannel_hid {
+	struct device *dev;
+	struct dockchannel *dc;
+	struct device_link *helper_link;
+
+	bool id_ready;
+	struct dchid_stm_id device_id;
+	char serial[64];
+
+	struct dchid_iface *comm;
+	struct dchid_iface *ifaces[MAX_INTERFACES];
+
+	u8 pkt_buf[MAX_PKT_SIZE];
+
+	/* Workqueue to asynchronously create HID devices */
+	struct workqueue_struct *new_iface_wq;
+};
+
+static ssize_t apple_layout_id_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct hid_device *hdev = to_hid_device(dev);
+	struct dchid_iface *iface = hdev->driver_data;
+
+	return sysfs_emit(buf, "%u\n", iface->keyboard_layout_id);
+}
+
+static DEVICE_ATTR_RO(apple_layout_id);
+
+static struct dchid_iface *
+dchid_get_interface(struct dockchannel_hid *dchid, int index, const char *name)
+{
+	struct dchid_iface *iface;
+
+	if (index >= MAX_INTERFACES) {
+		dev_err(dchid->dev, "Interface index %d out of range\n", index);
+		return NULL;
+	}
+
+	if (dchid->ifaces[index])
+		return dchid->ifaces[index];
+
+	iface = devm_kzalloc(dchid->dev, sizeof(struct dchid_iface), GFP_KERNEL);
+	if (!iface)
+		return NULL;
+
+	iface->index = index;
+	iface->name = devm_kstrdup(dchid->dev, name, GFP_KERNEL);
+	iface->dchid = dchid;
+	iface->out_report = -1;
+	init_completion(&iface->out_complete);
+	init_completion(&iface->ready);
+	mutex_init(&iface->out_mutex);
+	iface->wq = alloc_ordered_workqueue("dchid-%s", WQ_MEM_RECLAIM, iface->name);
+	if (!iface->wq)
+		return NULL;
+
+	/* Comm is not a HID subdevice */
+	if (!strcmp(name, "comm")) {
+		dchid->ifaces[index] = iface;
+		return iface;
+	}
+
+	iface->of_node = of_get_child_by_name(dchid->dev->of_node, name);
+	if (!iface->of_node) {
+		dev_warn(dchid->dev, "No OF node for subdevice %s, ignoring.\n", name);
+		return NULL;
+	}
+
+	dchid->ifaces[index] = iface;
+	return iface;
+}
+
+static u32 dchid_checksum(void *p, size_t length)
+{
+	u32 sum = 0;
+
+	while (length >= 4) {
+		sum += get_unaligned_le32(p);
+		p += 4;
+		length -= 4;
+	}
+
+	WARN_ON_ONCE(length);
+	return sum;
+}
+
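+/*
+ * Send one command packet: header and sub-header, the payload padded up to a
+ * 4-byte boundary, then a trailing checksum chosen so that the 32-bit words of
+ * the whole packet sum to 0xffffffff.
+ */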
+static int dchid_send(struct dchid_iface *iface, u32 flags, void *msg, size_t size)
+{
+	u32 checksum = 0xffffffff;
+	size_t wsize = round_down(size, 4);
+	size_t tsize = size - wsize;
+	int ret;
+	struct {
+		struct dchid_hdr hdr;
+		struct dchid_subhdr sub;
+	} __packed h;
+
+	memset(&h, 0, sizeof(h));
+	h.hdr.hdr_len = sizeof(h.hdr);
+	h.hdr.channel = DCHID_CHANNEL_CMD;
+	h.hdr.length = round_up(size, 4) + sizeof(h.sub);
+	h.hdr.seq = iface->tx_seq;
+	h.hdr.iface = iface->index;
+	h.sub.flags = flags;
+	h.sub.length = size;
+
+	ret = dockchannel_send(iface->dchid->dc, &h, sizeof(h));
+	if (ret < 0)
+		return ret;
+	checksum -= dchid_checksum(&h, sizeof(h));
+
+	ret = dockchannel_send(iface->dchid->dc, msg, wsize);
+	if (ret < 0)
+		return ret;
+	checksum -= dchid_checksum(msg, wsize);
+
+	if (tsize) {
+		u8 tail[4] = {0, 0, 0, 0};
+
+		memcpy(tail, msg + wsize, tsize);
+		ret = dockchannel_send(iface->dchid->dc, tail, sizeof(tail));
+		if (ret < 0)
+			return ret;
+		checksum -= dchid_checksum(tail, sizeof(tail));
+	}
+
+	ret = dockchannel_send(iface->dchid->dc, &checksum, sizeof(checksum));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int dchid_cmd(struct dchid_iface *iface, u32 type, u32 req,
+		     void *data, size_t size, void *resp_buf, size_t resp_size)
+{
+	int ret;
+	int report_id = *(u8 *)data;
+
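+	/*
+	 * Commands are synchronous: record the expected report and response
+	 * buffer, send the request, then wait for the response handler to
+	 * signal out_complete (or time out).
+	 */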
+	mutex_lock(&iface->out_mutex);
+
+	WARN_ON(iface->out_report != -1);
+	iface->out_report = report_id;
+	iface->out_flags = FIELD_PREP(FLAGS_GROUP, type) | FIELD_PREP(FLAGS_REQ, req);
+	iface->resp_buf = resp_buf;
+	iface->resp_size = resp_size;
+	reinit_completion(&iface->out_complete);
+
+	ret = dchid_send(iface, iface->out_flags, data, size);
+	if (ret < 0)
+		goto done;
+
+	if (!wait_for_completion_timeout(&iface->out_complete, msecs_to_jiffies(COMMAND_TIMEOUT_MS))) {
+		dev_err(iface->dchid->dev, "output report 0x%x to iface %d (%s) timed out\n",
+			report_id, iface->index, iface->name);
+		ret = -ETIMEDOUT;
+		goto done;
+	}
+
+	ret = iface->resp_size;
+	if (iface->retcode) {
+		dev_err(iface->dchid->dev,
+			"output report 0x%x to iface %d (%s) failed with err 0x%x\n",
+			report_id, iface->index, iface->name, iface->retcode);
+		ret = -EIO;
+	}
+
+done:
+	iface->tx_seq++;
+	iface->out_report = -1;
+	iface->out_flags = 0;
+	iface->resp_buf = NULL;
+	iface->resp_size = 0;
+	mutex_unlock(&iface->out_mutex);
+	return ret;
+}
+
+static int dchid_comm_cmd(struct dockchannel_hid *dchid, void *cmd, size_t size)
+{
+	return dchid_cmd(dchid->comm, HID_FEATURE_REPORT, REQ_SET_REPORT, cmd, size, NULL, 0);
+}
+
+static int dchid_enable_interface(struct dchid_iface *iface)
+{
+	u8 msg[] = { CMD_ENABLE_INTERFACE, iface->index };
+
+	return dchid_comm_cmd(iface->dchid, msg, sizeof(msg));
+}
+
+static int dchid_reset_interface(struct dchid_iface *iface, int state)
+{
+	u8 msg[] = { CMD_RESET_INTERFACE, 1, iface->index, state };
+
+	return dchid_comm_cmd(iface->dchid, msg, sizeof(msg));
+}
+
+static int dchid_send_firmware(struct dchid_iface *iface, void *firmware, size_t size)
+{
+	struct {
+		u8 cmd;
+		u8 unk1;
+		u8 unk2;
+		u8 iface;
+		u64 addr;
+		u32 size;
+	} __packed msg = {
+		.cmd = CMD_SEND_FIRMWARE,
+		.unk1 = 2,
+		.unk2 = 0,
+		.iface = iface->index,
+		.size = size,
+	};
+	dma_addr_t addr;
+	void *buf = dmam_alloc_coherent(iface->dchid->dev, size, &addr, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	msg.addr = addr;
+	memcpy(buf, firmware, size);
+	wmb();
+
+	return dchid_comm_cmd(iface->dchid, &msg, sizeof(msg));
+}
+
+static int dchid_get_firmware(struct dchid_iface *iface, void **firmware, size_t *size)
+{
+	int ret;
+	const char *fw_name;
+	const struct firmware *fw;
+	struct fw_header *hdr;
+	u8 *fw_data;
+
+	ret = of_property_read_string(iface->of_node, "firmware-name", &fw_name);
+	if (ret) {
+		/* Firmware is only for some devices */
+		*firmware = NULL;
+		*size = 0;
+		return 0;
+	}
+
+	ret = request_firmware(&fw, fw_name, iface->dchid->dev);
+	if (ret)
+		return ret;
+
+	hdr = (struct fw_header *)fw->data;
+
+	if (hdr->magic != FW_MAGIC || hdr->version != FW_VER ||
+		hdr->hdr_length < sizeof(*hdr) || hdr->hdr_length > fw->size ||
+		(hdr->hdr_length + (size_t)hdr->data_length) > fw->size ||
+		hdr->iface_offset >= hdr->data_length) {
+		dev_warn(iface->dchid->dev, "%s: invalid firmware header\n",
+			 fw_name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	fw_data = devm_kmemdup(iface->dchid->dev, fw->data + hdr->hdr_length,
+			       hdr->data_length, GFP_KERNEL);
+	if (!fw_data) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (hdr->iface_offset)
+		fw_data[hdr->iface_offset] = iface->index;
+
+	*firmware = fw_data;
+	*size = hdr->data_length;
+
+done:
+	release_firmware(fw);
+	return ret;
+}
+
+static int dchid_request_gpio(struct dchid_iface *iface)
+{
+	char prop_name[MAX_GPIO_NAME + 16];
+
+	if (iface->gpio)
+		return 0;
+
+	dev_info(iface->dchid->dev, "Requesting GPIO %s#%d: %s\n",
+		 iface->name, iface->gpio_id, iface->gpio_name);
+
+	snprintf(prop_name, sizeof(prop_name), "apple,%s", iface->gpio_name);
+
+	iface->gpio = devm_gpiod_get_index(iface->dchid->dev, prop_name, 0, GPIOD_OUT_LOW);
+
+	if (IS_ERR_OR_NULL(iface->gpio)) {
+		dev_err(iface->dchid->dev, "Failed to request GPIO %s-gpios\n", prop_name);
+		iface->gpio = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int dchid_start_interface(struct dchid_iface *iface)
+{
+	void *fw;
+	size_t size;
+	int ret;
+
+	if (iface->starting) {
+		dev_warn(iface->dchid->dev, "Interface %s is already starting\n", iface->name);
+		return -EINPROGRESS;
+	}
+
+	dev_info(iface->dchid->dev, "Starting interface %s\n", iface->name);
+
+	iface->starting = true;
+
+	/* Look to see if we need firmware */
+	ret = dchid_get_firmware(iface, &fw, &size);
+	if (ret < 0)
+		goto err;
+
+	/* If we need a GPIO, make sure we have it. */
+	if (iface->gpio_id) {
+		ret = dchid_request_gpio(iface);
+		if (ret < 0)
+			goto err;
+	}
+
+	/* Only multi-touch has firmware */
+	if (fw && size) {
+
+		/* Send firmware to the device */
+		dev_info(iface->dchid->dev, "Sending firmware for %s\n", iface->name);
+		ret = dchid_send_firmware(iface, fw, size);
+		if (ret < 0) {
+			dev_err(iface->dchid->dev, "Failed to send %s firmware\n", iface->name);
+			goto err;
+		}
+
+		/* After loading firmware, multi-touch needs a reset */
+		dev_info(iface->dchid->dev, "Resetting %s\n", iface->name);
+		dchid_reset_interface(iface, 0);
+		dchid_reset_interface(iface, 2);
+	}
+
+	return 0;
+
+err:
+	iface->starting = false;
+	return ret;
+}
+
+static int dchid_start(struct hid_device *hdev)
+{
+	struct dchid_iface *iface = hdev->driver_data;
+
+	if (iface->keyboard_layout_id) {
+		int ret = device_create_file(&hdev->dev, &dev_attr_apple_layout_id);
+		if (ret) {
+			dev_warn(iface->dchid->dev, "Failed to create apple_layout_id: %d\n", ret);
+			iface->keyboard_layout_id = 0;
+		}
+	}
+
+	return 0;
+}
+
+static void dchid_stop(struct hid_device *hdev)
+{
+	struct dchid_iface *iface = hdev->driver_data;
+
+	if (iface->keyboard_layout_id)
+		device_remove_file(&hdev->dev, &dev_attr_apple_layout_id);
+}
+
+static int dchid_open(struct hid_device *hdev)
+{
+	struct dchid_iface *iface = hdev->driver_data;
+	int ret;
+
+	if (!completion_done(&iface->ready)) {
+		ret = dchid_start_interface(iface);
+		if (ret < 0)
+			return ret;
+
+		if (!wait_for_completion_timeout(&iface->ready, msecs_to_jiffies(START_TIMEOUT_MS))) {
+			dev_err(iface->dchid->dev, "iface %s start timed out\n", iface->name);
+			return -ETIMEDOUT;
+		}
+	}
+
+	iface->open = true;
+	return 0;
+}
+
+static void dchid_close(struct hid_device *hdev)
+{
+	struct dchid_iface *iface = hdev->driver_data;
+
+	iface->open = false;
+}
+
+static int dchid_parse(struct hid_device *hdev)
+{
+	struct dchid_iface *iface = hdev->driver_data;
+
+	return hid_parse_report(hdev, iface->hid_desc, iface->hid_desc_len);
+}
+
+/* Note: buf excludes report number! For ease of fetching strings/etc. */
+static int dchid_get_report_cmd(struct dchid_iface *iface, u8 reportnum, void *buf, size_t len)
+{
+	int ret = dchid_cmd(iface, HID_FEATURE_REPORT, REQ_GET_REPORT, &reportnum, 1, buf, len);
+
+	return ret <= 0 ? ret : ret - 1;
+}
+
+/* Note: buf includes report number! */
+static int dchid_set_report(struct dchid_iface *iface, void *buf, size_t len)
+{
+	return dchid_cmd(iface, HID_OUTPUT_REPORT, REQ_SET_REPORT, buf, len, NULL, 0);
+}
+
+static int dchid_raw_request(struct hid_device *hdev,
+				unsigned char reportnum, __u8 *buf, size_t len,
+				unsigned char rtype, int reqtype)
+{
+	struct dchid_iface *iface = hdev->driver_data;
+
+	switch (reqtype) {
+	case HID_REQ_GET_REPORT:
+		buf[0] = reportnum;
+		return dchid_cmd(iface, rtype, REQ_GET_REPORT, &reportnum, 1, buf + 1, len - 1);
+	case HID_REQ_SET_REPORT:
+		return dchid_set_report(iface, buf, len);
+	default:
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static struct hid_ll_driver dchid_ll = {
+	.start = &dchid_start,
+	.stop = &dchid_stop,
+	.open = &dchid_open,
+	.close = &dchid_close,
+	.parse = &dchid_parse,
+	.raw_request = &dchid_raw_request,
+};
+
+static void dchid_create_interface_work(struct work_struct *ws)
+{
+	struct dchid_iface *iface = container_of(ws, struct dchid_iface, create_work);
+	struct dockchannel_hid *dchid = iface->dchid;
+	struct hid_device *hid;
+	int ret;
+
+	if (iface->hid) {
+		dev_warn(dchid->dev, "Interface %s already created!\n",
+			 iface->name);
+		return;
+	}
+
+	dev_info(dchid->dev, "New interface %s\n", iface->name);
+
+	/* Enable the interface; firmware (if any) is loaded later, on device open. */
+	ret = dchid_enable_interface(iface);
+	if (ret < 0) {
+		dev_warn(dchid->dev, "Failed to enable %s: %d\n", iface->name, ret);
+		return;
+	}
+
+	iface->deferred = false;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return;
+
+	snprintf(hid->name, sizeof(hid->name), "Apple MTP %s", iface->name);
+	snprintf(hid->phys, sizeof(hid->phys), "%s.%d (%s)",
+		 dev_name(dchid->dev), iface->index, iface->name);
+	strscpy(hid->uniq, dchid->serial, sizeof(hid->uniq));
+
+	hid->ll_driver = &dchid_ll;
+	hid->bus = BUS_HOST;
+	hid->vendor = dchid->device_id.vendor_id;
+	hid->product = dchid->device_id.product_id;
+	hid->version = dchid->device_id.version_number;
+	hid->type = HID_TYPE_OTHER;
+	if (!strcmp(iface->name, "multi-touch")) {
+		hid->type = HID_TYPE_SPI_MOUSE;
+	} else if (!strcmp(iface->name, "keyboard")) {
+		u32 country_code = 0;
+
+		hid->type = HID_TYPE_SPI_KEYBOARD;
+
+		/*
+		 * We have to get the country code from the device tree, since the
+		 * device provides no reliable way to get this info.
+		 */
+		if (!of_property_read_u32(iface->of_node, "hid-country-code", &country_code))
+			hid->country = country_code;
+
+		of_property_read_u32(iface->of_node, "apple,keyboard-layout-id",
+			&iface->keyboard_layout_id);
+	}
+
+	hid->dev.parent = iface->dchid->dev;
+	hid->driver_data = iface;
+
+	iface->hid = hid;
+
+	ret = hid_add_device(hid);
+	if (ret < 0) {
+		iface->hid = NULL;
+		hid_destroy_device(hid);
+		dev_warn(iface->dchid->dev, "Failed to register HID device %s\n", iface->name);
+	}
+}
+
+static int dchid_create_interface(struct dchid_iface *iface)
+{
+	if (iface->creating)
+		return -EBUSY;
+
+	iface->creating = true;
+	INIT_WORK(&iface->create_work, dchid_create_interface_work);
+	return queue_work(iface->dchid->new_iface_wq, &iface->create_work);
+}
+
+static void dchid_handle_descriptor(struct dchid_iface *iface, void *hid_desc, size_t desc_len)
+{
+	if (iface->hid) {
+		dev_warn(iface->dchid->dev, "Tried to initialize already started interface %s!\n",
+			 iface->name);
+		return;
+	}
+
+	iface->hid_desc = devm_kmemdup(iface->dchid->dev, hid_desc, desc_len, GFP_KERNEL);
+	if (!iface->hid_desc)
+		return;
+
+	iface->hid_desc_len = desc_len;
+}
+
+static void dchid_handle_ready(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+	struct dchid_iface *iface;
+	u8 *pkt = data;
+	u8 index;
+	int i, ret;
+
+	if (length < 2) {
+		dev_err(dchid->dev, "Bad length for ready message: %zu\n", length);
+		return;
+	}
+
+	index = pkt[1];
+
+	if (index >= MAX_INTERFACES) {
+		dev_err(dchid->dev, "Got ready notification for bad iface %d\n", index);
+		return;
+	}
+
+	iface = dchid->ifaces[index];
+	if (!iface) {
+		dev_err(dchid->dev, "Got ready notification for unknown iface %d\n", index);
+		return;
+	}
+
+	dev_info(dchid->dev, "Interface %s is now ready\n", iface->name);
+	complete_all(&iface->ready);
+
+	/* When STM is ready, grab global device info */
+	if (!strcmp(iface->name, "stm")) {
+		ret = dchid_get_report_cmd(iface, STM_REPORT_ID, &dchid->device_id,
+					   sizeof(dchid->device_id));
+		if (ret < sizeof(dchid->device_id)) {
+			dev_warn(iface->dchid->dev, "Failed to get device ID from STM!\n");
+			/* Fake it and keep going. Things might still work... */
+			memset(&dchid->device_id, 0, sizeof(dchid->device_id));
+			dchid->device_id.vendor_id = HOST_VENDOR_ID_APPLE;
+		}
+		ret = dchid_get_report_cmd(iface, STM_REPORT_SERIAL, dchid->serial,
+					   sizeof(dchid->serial) - 1);
+		if (ret < 0) {
+			dev_warn(iface->dchid->dev, "Failed to get serial from STM!\n");
+			dchid->serial[0] = 0;
+		}
+
+		dchid->id_ready = true;
+		for (i = 0; i < MAX_INTERFACES; i++) {
+			if (!dchid->ifaces[i] || !dchid->ifaces[i]->deferred)
+				continue;
+			dchid_create_interface(dchid->ifaces[i]);
+		}
+	}
+}
+
+static void dchid_handle_init(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+	struct dchid_init_hdr *hdr = data;
+	struct dchid_iface *iface;
+	struct dchid_init_block_hdr *blk;
+
+	if (length < sizeof(*hdr))
+		return;
+
+	iface = dchid_get_interface(dchid, hdr->iface, hdr->name);
+	if (!iface)
+		return;
+
+	data += sizeof(*hdr);
+	length -= sizeof(*hdr);
+
+	while (length >= sizeof(*blk)) {
+		blk = data;
+		data += sizeof(*blk);
+		length -= sizeof(*blk);
+
+		if (blk->length > length)
+			break;
+
+		switch (blk->type) {
+		case INIT_HID_DESCRIPTOR:
+			dchid_handle_descriptor(iface, data, blk->length);
+			break;
+
+		case INIT_GPIO_REQUEST: {
+			struct dchid_gpio_request *req = data;
+
+			if (sizeof(*req) > length)
+				break;
+
+			if (iface->gpio_id) {
+				dev_err(dchid->dev,
+					"Cannot request more than one GPIO per interface!\n");
+				break;
+			}
+
+			strscpy(iface->gpio_name, req->name, MAX_GPIO_NAME);
+			iface->gpio_id = req->id;
+			break;
+		}
+
+		case INIT_TERMINATOR:
+			break;
+
+		case INIT_PRODUCT_NAME: {
+			char *product = data;
+
+			if (product[blk->length - 1] != 0) {
+				dev_warn(dchid->dev, "Unterminated product name for %s\n",
+					 iface->name);
+			} else {
+				dev_info(dchid->dev, "Product name for %s: %s\n",
+					 iface->name, product);
+			}
+			break;
+		}
+
+		default:
+			dev_warn(dchid->dev, "Unknown init packet %d for %s\n",
+				 blk->type, iface->name);
+			break;
+		}
+
+		data += blk->length;
+		length -= blk->length;
+
+		if (blk->type == INIT_TERMINATOR)
+			break;
+	}
+
+	if (hdr->more_packets)
+		return;
+
+	/* We need to enable STM first, since it'll give us the device IDs */
+	if (iface->dchid->id_ready || !strcmp(iface->name, "stm")) {
+		dchid_create_interface(iface);
+	} else {
+		iface->deferred = true;
+	}
+}
+
+static void dchid_handle_gpio(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+	struct dchid_gpio_cmd *cmd = data;
+	struct dchid_iface *iface;
+	u32 retcode = 0xe000f00d; /* Give it a random Apple-style error code */
+	struct dchid_gpio_ack *ack;
+
+	if (length < sizeof(*cmd))
+		return;
+
+	if (cmd->iface >= MAX_INTERFACES || !(iface = dchid->ifaces[cmd->iface])) {
+		dev_err(dchid->dev, "Got GPIO command for bad interface %d\n", cmd->iface);
+		goto err;
+	}
+
+	if (dchid_request_gpio(iface) < 0)
+		goto err;
+
+	if (!iface->gpio || cmd->gpio != iface->gpio_id) {
+		dev_err(dchid->dev, "Got GPIO command for bad GPIO %s#%d\n",
+			iface->name, cmd->gpio);
+		goto err;
+	}
+
+	dev_info(dchid->dev, "GPIO command: %s#%d: %d\n", iface->name, cmd->gpio, cmd->cmd);
+
+	switch (cmd->cmd) {
+	case 3:
+		/* Pulse.  */
+		gpiod_set_value_cansleep(iface->gpio, 1);
+		msleep(10); /* Random guess... */
+		gpiod_set_value_cansleep(iface->gpio, 0);
+		retcode = 0;
+		break;
+	default:
+		dev_err(dchid->dev, "Unknown GPIO command %d\n", cmd->cmd);
+		break;
+	}
+
+err:
+	/* Ack it */
+	ack = kzalloc(sizeof(*ack) + length, GFP_KERNEL);
+	if (!ack)
+		return;
+
+	ack->type = CMD_ACK_GPIO_CMD;
+	ack->retcode = retcode;
+	memcpy(ack->cmd, data, length);
+
+	if (dchid_comm_cmd(dchid, ack, sizeof(*ack) + length) < 0)
+		dev_err(dchid->dev, "Failed to ACK GPIO command\n");
+
+	kfree(ack);
+}
+
+static void dchid_handle_event(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+	u8 *p = data;
+	switch (*p) {
+	case EVENT_INIT:
+		dchid_handle_init(dchid, data, length);
+		break;
+	case EVENT_READY:
+		dchid_handle_ready(dchid, data, length);
+		break;
+	case EVENT_GPIO_CMD:
+		dchid_handle_gpio(dchid, data, length);
+		break;
+	}
+}
+
+static void dchid_handle_report(struct dchid_iface *iface, void *data, size_t length)
+{
+	struct dockchannel_hid *dchid = iface->dchid;
+
+	if (!iface->hid) {
+		dev_warn(dchid->dev, "Report received but %s is not initialized!\n", iface->name);
+		return;
+	}
+
+	if (!iface->open)
+		return;
+
+	hid_input_report(iface->hid, HID_INPUT_REPORT, data, length, 1);
+}
+
+static void dchid_packet_work(struct work_struct *ws)
+{
+	struct dchid_work *work = container_of(ws, struct dchid_work, work);
+	struct dchid_subhdr *shdr = (void *)work->data;
+	struct dockchannel_hid *dchid = work->iface->dchid;
+	int type = FIELD_GET(FLAGS_GROUP, shdr->flags);
+	u8 *payload = work->data + sizeof(*shdr);
+
+	if (shdr->length + sizeof(*shdr) > work->hdr.length) {
+		dev_err(dchid->dev, "Bad sub header length (%d > %zu)\n",
+			shdr->length, work->hdr.length - sizeof(*shdr));
+		return;
+	}
+
+	switch (type) {
+	case HID_INPUT_REPORT:
+		if (work->hdr.iface == IFACE_COMM)
+			dchid_handle_event(dchid, payload, shdr->length);
+		else
+			dchid_handle_report(work->iface, payload, shdr->length);
+		break;
+	default:
+		dev_err(dchid->dev, "Received unknown packet type %d\n", type);
+		break;
+	}
+
+	kfree(work);
+}
+
+static void dchid_handle_ack(struct dchid_iface *iface, struct dchid_hdr *hdr, void *data)
+{
+	struct dchid_subhdr *shdr = (void *)data;
+	u8 *payload = data + sizeof(*shdr);
+
+	if (shdr->length + sizeof(*shdr) > hdr->length) {
+		dev_err(iface->dchid->dev, "Bad sub header length (%d > %zu)\n",
+			shdr->length, hdr->length - sizeof(*shdr));
+		return;
+	}
+	if (shdr->flags != iface->out_flags) {
+		dev_err(iface->dchid->dev,
+			"Received unexpected flags 0x%x on ACK channel (expected 0x%x)\n",
+			shdr->flags, iface->out_flags);
+		return;
+	}
+
+	if (shdr->length < 1) {
+		dev_err(iface->dchid->dev, "Received length 0 output report ack\n");
+		return;
+	}
+	if (iface->tx_seq != hdr->seq) {
+		dev_err(iface->dchid->dev, "Received ACK with bad seq (expected %d, got %d)\n",
+			iface->tx_seq, hdr->seq);
+		return;
+	}
+	if (iface->out_report != payload[0]) {
+		dev_err(iface->dchid->dev, "Received ACK with bad report (expected %d, got %d)\n",
+			iface->out_report, payload[0]);
+		return;
+	}
+
+	if (iface->resp_buf && iface->resp_size)
+		memcpy(iface->resp_buf, payload + 1, min((size_t)shdr->length - 1, iface->resp_size));
+
+	iface->resp_size = shdr->length;
+	iface->out_report = -1;
+	iface->retcode = shdr->retcode;
+	complete(&iface->out_complete);
+}
+
+static void dchid_handle_packet(void *cookie, size_t avail)
+{
+	struct dockchannel_hid *dchid = cookie;
+	struct dchid_hdr hdr;
+	struct dchid_work *work;
+	struct dchid_iface *iface;
+	u32 checksum;
+
+	if (dockchannel_recv(dchid->dc, &hdr, sizeof(hdr)) != sizeof(hdr)) {
+		dev_err(dchid->dev, "Read failed (header)\n");
+		return;
+	}
+
+	if (hdr.hdr_len != sizeof(hdr)) {
+		dev_err(dchid->dev, "Bad header length %d\n", hdr.hdr_len);
+		goto done;
+	}
+
+	if (dockchannel_recv(dchid->dc, dchid->pkt_buf, hdr.length + 4) != (hdr.length + 4)) {
+		dev_err(dchid->dev, "Read failed (body)\n");
+		goto done;
+	}
+
+	checksum = dchid_checksum(&hdr, sizeof(hdr));
+	checksum += dchid_checksum(dchid->pkt_buf, hdr.length + 4);
+
+	if (checksum != 0xffffffff) {
+		dev_err(dchid->dev, "Checksum mismatch (iface %d): 0x%08x != 0xffffffff\n",
+			hdr.iface, checksum);
+		goto done;
+	}
+
+	if (hdr.iface >= MAX_INTERFACES) {
+		dev_err(dchid->dev, "Bad iface %d\n", hdr.iface);
+		goto done;
+	}
+
+	iface = dchid->ifaces[hdr.iface];
+
+	if (!iface) {
+		dev_err(dchid->dev, "Received packet for uninitialized iface %d\n", hdr.iface);
+		goto done;
+	}
+
+	switch (hdr.channel) {
+	case DCHID_CHANNEL_CMD:
+		dchid_handle_ack(iface, &hdr, dchid->pkt_buf);
+		goto done;
+	case DCHID_CHANNEL_REPORT:
+		break;
+	default:
+		dev_warn(dchid->dev, "Unknown channel 0x%x, treating as report...\n",
+			 hdr.channel);
+		break;
+	}
+
+	work = kzalloc(sizeof(*work) + hdr.length, GFP_KERNEL);
+	if (!work)
+		return;
+
+	work->hdr = hdr;
+	work->iface = iface;
+	memcpy(work->data, dchid->pkt_buf, hdr.length);
+	INIT_WORK(&work->work, dchid_packet_work);
+
+	queue_work(iface->wq, &work->work);
+
+done:
+	dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr));
+}
+
+static int dockchannel_hid_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dockchannel_hid *dchid;
+	struct device_node *child, *helper;
+	struct platform_device *helper_pdev;
+	struct property *prop;
+	int ret;
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	dchid = devm_kzalloc(dev, sizeof(*dchid), GFP_KERNEL);
+	if (!dchid) {
+		return -ENOMEM;
+	}
+
+	dchid->dev = dev;
+
+	/*
+	 * First make sure all the GPIOs are available, in case we need to defer.
+	 * This is necessary because MTP will request them by name later, and by then
+	 * it's too late to defer the probe.
+	 */
+
+	for_each_child_of_node(dev->of_node, child) {
+		for_each_property_of_node(child, prop) {
+			size_t len = strlen(prop->name);
+			struct gpio_desc *gpio;
+
+			if (len < 12 || strncmp("apple,", prop->name, 6) ||
+			    strcmp("-gpios", prop->name + len - 6))
+				continue;
+
+			gpio = fwnode_gpiod_get_index(&child->fwnode, prop->name, 0, GPIOD_ASIS,
+						      prop->name);
+			if (IS_ERR_OR_NULL(gpio)) {
+				if (PTR_ERR(gpio) == -EPROBE_DEFER) {
+					of_node_put(child);
+					return -EPROBE_DEFER;
+				}
+			} else {
+				gpiod_put(gpio);
+			}
+		}
+	}
+
+	/*
+	 * Make sure we also have the MTP coprocessor available, and
+	 * defer probe if the helper hasn't probed yet.
+	 */
+	helper = of_parse_phandle(dev->of_node, "apple,helper-cpu", 0);
+	if (!helper) {
+		dev_err(dev, "Missing apple,helper-cpu property\n");
+		return -EINVAL;
+	}
+
+	helper_pdev = of_find_device_by_node(helper);
+	of_node_put(helper);
+	if (!helper_pdev) {
+		dev_err(dev, "Failed to find helper device\n");
+		return -EINVAL;
+	}
+
+	dchid->helper_link = device_link_add(dev, &helper_pdev->dev,
+					     DL_FLAG_AUTOREMOVE_CONSUMER);
+	put_device(&helper_pdev->dev);
+	if (!dchid->helper_link) {
+		dev_err(dev, "Failed to link to helper device\n");
+		return -EINVAL;
+	}
+
+	if (dchid->helper_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+		return -EPROBE_DEFER;
+
+	/* Now it is safe to begin initializing */
+	dchid->dc = dockchannel_init(pdev);
+	if (IS_ERR_OR_NULL(dchid->dc)) {
+		return PTR_ERR(dchid->dc);
+	}
+	dchid->new_iface_wq = alloc_workqueue("dchid-new", WQ_MEM_RECLAIM, 0);
+	if (!dchid->new_iface_wq)
+		return -ENOMEM;
+
+	dchid->comm = dchid_get_interface(dchid, IFACE_COMM, "comm");
+	if (!dchid->comm) {
+		dev_err(dchid->dev, "Failed to initialize comm interface\n");
+		return -EIO;
+	}
+
+	dev_info(dchid->dev, "Initialized, awaiting packets\n");
+	dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr));
+
+	return 0;
+}
+
+static void dockchannel_hid_remove(struct platform_device *pdev)
+{
+	BUG_ON(1);
+}
+
+static const struct of_device_id dockchannel_hid_of_match[] = {
+	{ .compatible = "apple,dockchannel-hid" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dockchannel_hid_of_match);
+MODULE_FIRMWARE("apple/tpmtfw-*.bin");
+
+static struct platform_driver dockchannel_hid_driver = {
+	.driver = {
+		.name = "dockchannel-hid",
+		.of_match_table = dockchannel_hid_of_match,
+	},
+	.probe = dockchannel_hid_probe,
+	.remove = dockchannel_hid_remove,
+};
+module_platform_driver(dockchannel_hid_driver);
+
+MODULE_DESCRIPTION("Apple DockChannel HID transport driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
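For context on how the raw_request hook above is reached: a HID client driver bound to one of these interfaces goes through the usual HID core entry point, and for GET_REPORT the transport returns the report number in buf[0] with the payload after it. A minimal, hypothetical caller is sketched below; the report number 0x10 and the buffer size are illustrative only, not values used by this driver.

	#include <linux/hid.h>

	static int example_get_feature(struct hid_device *hdev)
	{
		u8 buf[64];
		int ret;

		/* buf[0] comes back as the report number; the payload follows. */
		ret = hid_hw_raw_request(hdev, 0x10, buf, sizeof(buf),
					 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
		return ret < 0 ? ret : 0;
	}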
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d900dd05c335c3..e00dd3d145dbc8 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -52,10 +52,16 @@
 #define APPLE_MAGIC_REPORT_ID_POWER		3
 #define APPLE_MAGIC_REPORT_ID_BRIGHTNESS	1
 
+// DO NOT UPSTREAM:
+// temporary Fn key mode until xkeyboard-config has keyboard layouts with media
+// key mappings. At that point auto mode can drop function key mappings and this
+// mode can be dropped.
+#define FKEYS_IGNORE	4
+
 static unsigned int fnmode = 3;
 module_param(fnmode, uint, 0644);
 MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
-		"1 = fkeyslast, 2 = fkeysfirst, [3] = auto)");
+		"1 = fkeyslast, 2 = fkeysfirst, [3] = auto, 4 = fkeysignore)");
 
 static int iso_layout = -1;
 module_param(iso_layout, int, 0644);
@@ -276,6 +282,16 @@ static const struct apple_key_translation apple_fn_keys[] = {
 	{ }
 };
 
+static const struct apple_key_translation apple_fn_keys_minimal[] = {
+	{ KEY_BACKSPACE, KEY_DELETE },
+	{ KEY_ENTER,	KEY_INSERT },
+	{ KEY_UP,	KEY_PAGEUP },
+	{ KEY_DOWN,	KEY_PAGEDOWN },
+	{ KEY_LEFT,	KEY_HOME },
+	{ KEY_RIGHT,	KEY_END },
+	{ }
+};
+
 static const struct apple_key_translation powerbook_fn_keys[] = {
 	{ KEY_BACKSPACE, KEY_DELETE },
 	{ KEY_F1,	KEY_BRIGHTNESSDOWN,     APPLE_FLAG_FKEY },
@@ -426,6 +442,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
 
 	if (fnmode == 3) {
 		real_fnmode = (asc->quirks & APPLE_IS_NON_APPLE) ? 2 : 1;
+	} else if (fnmode == FKEYS_IGNORE) {
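+		/*
+		 * fkeysignore behaves like fkeysfirst; on BUS_HOST/BUS_SPI
+		 * keyboards it selects the minimal translation table below so
+		 * only the navigation keys are remapped.
+		 */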
+		real_fnmode = 2;
 	} else {
 		real_fnmode = fnmode;
 	}
@@ -498,6 +516,18 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
 		else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
 				hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
 			table = macbookair_fn_keys;
+		else if (hid->bus == BUS_HOST || hid->bus == BUS_SPI)
+			switch (hid->product) {
+			case SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020:
+			case HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022:
+				table = macbookpro_dedicated_esc_fn_keys;
+				break;
+			default:
+				table = (fnmode == FKEYS_IGNORE) ?
+					apple_fn_keys_minimal :
+					apple2021_fn_keys;
+				break;
+			}
 		else if (hid->product < 0x21d || hid->product >= 0x300)
 			table = powerbook_fn_keys;
 		else
@@ -677,6 +707,7 @@ static void apple_setup_input(struct input_dev *input)
 
 	/* Enable all needed keys */
 	apple_setup_key_translation(input, apple_fn_keys);
+	apple_setup_key_translation(input, apple_fn_keys_minimal);
 	apple_setup_key_translation(input, powerbook_fn_keys);
 	apple_setup_key_translation(input, powerbook_numlock_keys);
 	apple_setup_key_translation(input, apple_iso_keyboard);
@@ -910,6 +941,15 @@ static int apple_probe(struct hid_device *hdev,
 	struct apple_sc *asc;
 	int ret;
 
+	if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE &&
+	    hdev->type != HID_TYPE_SPI_KEYBOARD)
+		return -ENODEV;
+
+	// key remapping will happen in xkeyboard-config so ignore
+	// APPLE_ISO_TILDE_QUIRK
+	if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && fnmode == FKEYS_IGNORE)
+		quirks &= ~APPLE_ISO_TILDE_QUIRK;
+
 	asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL);
 	if (asc == NULL) {
 		hid_err(hdev, "can't alloc apple descriptor\n");
@@ -1169,6 +1209,10 @@ static const struct hid_device_id apple_devices[] = {
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
 	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+	{ HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID),
+		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, // TODO: remove APPLE_ISO_TILDE_QUIRK
+	{ HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, HID_ANY_ID),
+		.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, // TODO: remove APPLE_ISO_TILDE_QUIRK
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT),
 		.driver_data = APPLE_MAGIC_BACKLIGHT },
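As a reminder of how the translation tables added earlier in this file are consumed: each table is a zero-terminated array of from/to pairs, and hid-apple walks it until it hits the empty terminator entry. Below is a rough, self-contained sketch of that lookup, with simplified field names rather than the driver's exact apple_key_translation struct.

	struct key_translation {
		unsigned short from;
		unsigned short to;
	};

	/* Returns the replacement usage, or 0 if the key is not in the table. */
	static unsigned short find_translation(const struct key_translation *table,
					       unsigned short keycode)
	{
		for (; table->from; table++)
			if (table->from == keycode)
				return table->to;
		return 0;
	}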
 
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4497b50799dbfa..a79fd45c7a2c11 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -464,7 +464,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 
 	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
 		parser->global.report_size = item_udata(item);
-		if (parser->global.report_size > 256) {
+		/* Arbitrary maximum. Some Apple devices have 16384 here.
+		 * This * HID_MAX_USAGES must fit in a signed integer.
+		 */
+		if (parser->global.report_size > 16384) {
 			hid_err(parser->device, "invalid report_size %d\n",
 					parser->global.report_size);
 			return -1;
@@ -2290,6 +2293,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
 	case BUS_I2C:
 		bus = "I2C";
 		break;
+	case BUS_SPI:
+		bus = "SPI";
+		break;
+	case BUS_HOST:
+		bus = "HOST";
+		break;
 	case BUS_VIRTUAL:
 		bus = "VIRTUAL";
 		break;
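On the report_size change earlier in this file: the 16384 limit is picked so that report_size multiplied by the maximum usage count still fits in a signed 32-bit integer. Assuming HID_MAX_USAGES is 12288 (its value in current kernels), 16384 * 12288 = 201,326,592, well below INT_MAX. A compile-time restatement of that constraint:

	#include <limits.h>

	/* Assumes HID_MAX_USAGES == 12288, per include/linux/hid.h. */
	_Static_assert(16384LL * 12288 <= INT_MAX,
		       "report_size * HID_MAX_USAGES must fit in a signed int");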
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 288a2b864cc41d..f83a543a831327 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -89,6 +89,8 @@
 
 #define USB_VENDOR_ID_APPLE		0x05ac
 #define BT_VENDOR_ID_APPLE		0x004c
+#define SPI_VENDOR_ID_APPLE		0x05ac
+#define HOST_VENDOR_ID_APPLE		0x05ac
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE	0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE	0x030d
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE2	0x0269
@@ -189,6 +191,12 @@
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021   0x029f
 #define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
 #define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
+#define SPI_DEVICE_ID_APPLE_MACBOOK_AIR_2020	0x0281
+#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020	0x0341
+#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO14_2021	0x0342
+#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO16_2021	0x0343
+#define HOST_DEVICE_ID_APPLE_MACBOOK_AIR13_2022	0x0351
+#define HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022	0x0354
 
 #define USB_VENDOR_ID_ASETEK			0x2433
 #define USB_DEVICE_ID_ASETEK_INVICTA		0xf300
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 04508c36bdc823..73c6a26638a22a 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -728,7 +728,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
 			if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
 				report_key_event(input, KEY_RFKILL);
 				return 1;
-			} else {
+			} else if (IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)) {
 				platform_profile_cycle();
 				return 1;
 			}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a76f171585399f..62955889ed90c0 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -60,8 +60,14 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define MOUSE_REPORT_ID    0x29
 #define MOUSE2_REPORT_ID   0x12
 #define DOUBLE_REPORT_ID   0xf7
+#define SPI_REPORT_ID      0x02
+#define SPI_RESET_REPORT_ID 0x60
+#define MTP_REPORT_ID      0x75
+#define SENSOR_DIMENSIONS_REPORT_ID 0xd9
 #define USB_BATTERY_TIMEOUT_MS 60000
 
+#define MAX_CONTACTS 16
+
 /* These definitions are not precise, but they're close enough.  (Bits
  * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
  * to be some kind of bit mask -- 0x20 may be a near-field reading,
@@ -112,30 +118,53 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define TRACKPAD2_RES_Y \
 	((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100))
 
+/* These are fallback values, since the real values will be queried from the device. */
+#define J314_TP_DIMENSION_X (float)13000
+#define J314_TP_MIN_X -5900
+#define J314_TP_MAX_X 6500
+#define J314_TP_RES_X \
+	((J314_TP_MAX_X - J314_TP_MIN_X) / (J314_TP_DIMENSION_X / 100))
+#define J314_TP_DIMENSION_Y (float)8100
+#define J314_TP_MIN_Y -200
+#define J314_TP_MAX_Y 7400
+#define J314_TP_RES_Y \
+	((J314_TP_MAX_Y - J314_TP_MIN_Y) / (J314_TP_DIMENSION_Y / 100))
+
+#define J314_TP_MAX_FINGER_ORIENTATION 16384
+
+struct magicmouse_input_ops {
+	int (*raw_event)(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size);
+	int (*setup_input)(struct input_dev *input, struct hid_device *hdev);
+};
+
 /**
  * struct magicmouse_sc - Tracks Magic Mouse-specific data.
  * @input: Input device through which we report events.
  * @quirks: Currently unused.
+ * @query_dimensions: Whether to query and update dimensions on first open
  * @ntouches: Number of touches in most recent touch report.
  * @scroll_accel: Number of consecutive scroll motions.
  * @scroll_jiffies: Time of last scroll motion.
+ * @pos: multi touch position data of the last report.
  * @touches: Most recent data for a touch, indexed by tracking ID.
  * @tracking_ids: Mapping of current touch input data to @touches.
  * @hdev: Pointer to the underlying HID device.
  * @work: Workqueue to handle initialization retry for quirky devices.
  * @battery_timer: Timer for obtaining battery level information.
+ * @input_ops: Input ops based on device type.
  */
 struct magicmouse_sc {
 	struct input_dev *input;
 	unsigned long quirks;
+	bool query_dimensions;
 
 	int ntouches;
 	int scroll_accel;
 	unsigned long scroll_jiffies;
 
+	struct input_mt_pos pos[MAX_CONTACTS];
 	struct {
-		short x;
-		short y;
 		short scroll_x;
 		short scroll_y;
 		short scroll_x_hr;
@@ -143,14 +172,164 @@ struct magicmouse_sc {
 		u8 size;
 		bool scroll_x_active;
 		bool scroll_y_active;
-	} touches[16];
-	int tracking_ids[16];
+	} touches[MAX_CONTACTS];
+	int tracking_ids[MAX_CONTACTS];
 
 	struct hid_device *hdev;
 	struct delayed_work work;
 	struct timer_list battery_timer;
+	struct magicmouse_input_ops input_ops;
 };
 
+static inline int le16_to_int(__le16 x)
+{
+	return (signed short)le16_to_cpu(x);
+}
+
+static int magicmouse_enable_multitouch(struct hid_device *hdev)
+{
+	const u8 *feature;
+	const u8 feature_mt[] = { 0xD7, 0x01 };
+	const u8 feature_mt_mouse2[] = { 0xF1, 0x02, 0x01 };
+	const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
+	const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
+	u8 *buf;
+	int ret;
+	int feature_size;
+
+	if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+	    hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
+		if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+			feature_size = sizeof(feature_mt_trackpad2_bt);
+			feature = feature_mt_trackpad2_bt;
+		} else { /* USB_VENDOR_ID_APPLE */
+			feature_size = sizeof(feature_mt_trackpad2_usb);
+			feature = feature_mt_trackpad2_usb;
+		}
+	} else if (hdev->vendor == SPI_VENDOR_ID_APPLE) {
+		feature_size = sizeof(feature_mt_trackpad2_usb);
+		feature = feature_mt_trackpad2_usb;
+	} else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+		feature_size = sizeof(feature_mt_mouse2);
+		feature = feature_mt_mouse2;
+	} else {
+		feature_size = sizeof(feature_mt);
+		feature = feature_mt;
+	}
+
+	buf = kmemdup(feature, feature_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
+				HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+	kfree(buf);
+	return ret;
+}
+
+static void magicmouse_enable_mt_work(struct work_struct *work)
+{
+	struct magicmouse_sc *msc =
+		container_of(work, struct magicmouse_sc, work.work);
+	int ret;
+
+	ret = magicmouse_enable_multitouch(msc->hdev);
+	if (ret < 0)
+		hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
+}
+
+static int magicmouse_open(struct input_dev *dev)
+{
+	struct hid_device *hdev = input_get_drvdata(dev);
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+	int ret;
+
+	ret = hid_hw_open(hdev);
+	if (ret)
+		return ret;
+
+	/*
+	 * Some devices respond with 'invalid report id' when feature
+	 * report switching it into multitouch mode is sent to it.
+	 *
+	 * This results in -EIO from the _raw low-level transport callback,
+	 * but there seems to be no other way of switching the mode.
+	 * Thus the super-ugly hacky success check below.
+	 *
+	 * MTP devices do not need this.
+	 */
+	if (hdev->bus != BUS_HOST) {
+		ret = magicmouse_enable_multitouch(hdev);
+		if (ret == -EIO && hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+			schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
+			return 0;
+		}
+		if (ret < 0)
+			hid_err(hdev, "unable to request touch data (%d)\n", ret);
+	}
+	/*
+	 * MT enable is usually not required after the first time, so don't
+	 * consider it fatal.
+	 */
+
+	/*
+	 * For Apple Silicon trackpads, we want to query the dimensions on
+	 * device open. This is because doing so requires the firmware, but
+	 * we don't want to force a firmware load until the device is opened
+	 * for the first time. So do that here and update the input properties
+	 * just in time before userspace queries them.
+	 */
+	if (msc->query_dimensions) {
+		struct input_dev *input = msc->input;
+		u8 buf[32];
+		struct {
+			__le32 width;
+			__le32 height;
+			__le16 min_x;
+			__le16 min_y;
+			__le16 max_x;
+			__le16 max_y;
+		} dim;
+		uint32_t x_span, y_span;
+
+		ret = hid_hw_raw_request(hdev, SENSOR_DIMENSIONS_REPORT_ID, buf,
+					 sizeof(buf), HID_FEATURE_REPORT,
+					 HID_REQ_GET_REPORT);
+		if (ret < (int)(1 + sizeof(dim))) {
+			hid_err(hdev, "unable to request dimensions (%d)\n", ret);
+			return ret;
+		}
+
+		memcpy(&dim, buf + 1, sizeof(dim));
+
+		/* finger position */
+		input_set_abs_params(input, ABS_MT_POSITION_X,
+				     le16_to_int(dim.min_x), le16_to_int(dim.max_x), 0, 0);
+		/* Y axis is inverted */
+		input_set_abs_params(input, ABS_MT_POSITION_Y,
+				     -le16_to_int(dim.max_y), -le16_to_int(dim.min_y), 0, 0);
+		x_span = le16_to_int(dim.max_x) - le16_to_int(dim.min_x);
+		y_span = le16_to_int(dim.max_y) - le16_to_int(dim.min_y);
+
+		/* X/Y resolution */
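+		/*
+		 * The reported width/height appear to be in hundredths of a
+		 * millimetre, so 100 * span / width yields units per mm,
+		 * matching the J314_TP_RES_* fallbacks above.
+		 */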
+		input_abs_set_res(input, ABS_MT_POSITION_X, 100 * x_span / le32_to_cpu(dim.width) );
+		input_abs_set_res(input, ABS_MT_POSITION_X, 100 * x_span / le32_to_cpu(dim.width));
+		input_abs_set_res(input, ABS_MT_POSITION_Y, 100 * y_span / le32_to_cpu(dim.height));
+		/* copy info, as input_mt_init_slots() does */
+		dev->absinfo[ABS_X] = dev->absinfo[ABS_MT_POSITION_X];
+		dev->absinfo[ABS_Y] = dev->absinfo[ABS_MT_POSITION_Y];
+
+		msc->query_dimensions = false;
+	}
+
+	return 0;
+}
+
+static void magicmouse_close(struct input_dev *dev)
+{
+	struct hid_device *hdev = input_get_drvdata(dev);
+
+	hid_hw_close(hdev);
+}
+
 static int magicmouse_firm_touch(struct magicmouse_sc *msc)
 {
 	int touch = -1;
@@ -192,7 +371,7 @@ static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state)
 		} else if (last_state != 0) {
 			state = last_state;
 		} else if ((id = magicmouse_firm_touch(msc)) >= 0) {
-			int x = msc->touches[id].x;
+			int x = msc->pos[id].x;
 			if (x < middle_button_start)
 				state = 1;
 			else if (x > middle_button_stop)
@@ -255,8 +434,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
 
 	/* Store tracking ID and other fields. */
 	msc->tracking_ids[raw_id] = id;
-	msc->touches[id].x = x;
-	msc->touches[id].y = y;
+	msc->pos[id].x = x;
+	msc->pos[id].y = y;
 	msc->touches[id].size = size;
 
 	/* If requested, emulate a scroll wheel by detecting small
@@ -385,6 +564,14 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 		struct hid_report *report, u8 *data, int size)
 {
 	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+	return msc->input_ops.raw_event(hdev, report, data, size);
+}
+
+static int magicmouse_raw_event_usb(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
 	struct input_dev *input = msc->input;
 	int x = 0, y = 0, ii, clicks = 0, npoints;
 
@@ -515,6 +702,177 @@ static int magicmouse_raw_event(struct hid_device *hdev,
 	return 1;
 }
 
+/**
+ * struct tp_finger - single trackpad finger structure, le16-aligned
+ *
+ * @unknown1:		unknown
+ * @unknown2:		unknown
+ * @abs_x:		absolute x coordinate
+ * @abs_y:		absolute y coordinate
+ * @rel_x:		relative x coordinate
+ * @rel_y:		relative y coordinate
+ * @tool_major:		tool area, major axis
+ * @tool_minor:		tool area, minor axis
+ * @orientation:	16384 when point, else 15 bit angle
+ * @touch_major:	touch area, major axis
+ * @touch_minor:	touch area, minor axis
+ * @unused:		zeros
+ * @pressure:		pressure on forcetouch touchpad
+ * @multi:		one finger: varies, more fingers: constant
+ * @crc16:		on last finger: crc over the whole message struct
+ *			(i.e. message header + this struct) minus the last
+ *			@crc16 field; unknown on all other fingers.
+ */
+struct tp_finger {
+	__le16 unknown1;
+	__le16 unknown2;
+	__le16 abs_x;
+	__le16 abs_y;
+	__le16 rel_x;
+	__le16 rel_y;
+	__le16 tool_major;
+	__le16 tool_minor;
+	__le16 orientation;
+	__le16 touch_major;
+	__le16 touch_minor;
+	__le16 unused[2];
+	__le16 pressure;
+	__le16 multi;
+} __attribute__((packed, aligned(2)));
+
+/**
+ * struct tp_header - vendor trackpad report header
+ *
+ * @num_fingers:	the number of finger records that follow this header
+ * @buttons:		same as HID buttons
+ */
+struct tp_header {
+	// HID vendor part, up to 1751 bytes
+	u8 unknown[22];
+	u8 num_fingers;
+	u8 buttons;
+	u8 unknown3[14];
+};
+
+/**
+ * struct tp_mouse_report - standard HID mouse report
+ *
+ * @report_id:		report ID
+ * @buttons:		HID Usage Buttons 3 1-bit reports
+ */
+struct tp_mouse_report {
+	// HID mouse report
+	u8 report_id;
+	u8 buttons;
+	u8 rel_x;
+	u8 rel_y;
+	u8 padding[4];
+};
+
+static void report_finger_data(struct input_dev *input, int slot,
+			       const struct input_mt_pos *pos,
+			       const struct tp_finger *f)
+{
+	input_mt_slot(input, slot);
+	input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+
+	input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+			 le16_to_int(f->touch_major) << 1);
+	input_report_abs(input, ABS_MT_TOUCH_MINOR,
+			 le16_to_int(f->touch_minor) << 1);
+	input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+			 le16_to_int(f->tool_major) << 1);
+	input_report_abs(input, ABS_MT_WIDTH_MINOR,
+			 le16_to_int(f->tool_minor) << 1);
+	input_report_abs(input, ABS_MT_ORIENTATION,
+			 J314_TP_MAX_FINGER_ORIENTATION - le16_to_int(f->orientation));
+	input_report_abs(input, ABS_MT_PRESSURE, le16_to_int(f->pressure));
+	input_report_abs(input, ABS_MT_POSITION_X, pos->x);
+	input_report_abs(input, ABS_MT_POSITION_Y, pos->y);
+}
+
+static int magicmouse_raw_event_mtp(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+	struct input_dev *input = msc->input;
+	struct tp_header *tp_hdr;
+	struct tp_finger *f;
+	int i, n;
+	u32 npoints;
+	const size_t hdr_sz = sizeof(struct tp_header);
+	const size_t touch_sz = sizeof(struct tp_finger);
+	u8 map_contacts[MAX_CONTACTS];
+
+	// hid_warn(hdev, "%s\n", __func__);
+	// print_hex_dump_debug("appleft ev: ", DUMP_PREFIX_OFFSET, 16, 1, data,
+	// 		     size, false);
+
+	/* Expect a tp_header, followed by N tp_finger records of touch data. */
+	if (size < hdr_sz || ((size - hdr_sz) % touch_sz) != 0)
+		return 0;
+
+	tp_hdr = (struct tp_header *)data;
+
+	npoints = (size - hdr_sz) / touch_sz;
+	if (npoints < tp_hdr->num_fingers || npoints > MAX_CONTACTS) {
+		hid_warn(hdev,
+			 "unexpected number of touches (%u) for "
+			 "report\n",
+			 npoints);
+		return 0;
+	}
+
+	n = 0;
+	for (i = 0; i < tp_hdr->num_fingers; i++) {
+		f = (struct tp_finger *)(data + hdr_sz + i * touch_sz);
+		if (le16_to_int(f->touch_major) == 0)
+			continue;
+
+		hid_dbg(hdev, "ev x:%04x y:%04x\n", le16_to_int(f->abs_x),
+			le16_to_int(f->abs_y));
+		msc->pos[n].x = le16_to_int(f->abs_x);
+		msc->pos[n].y = -le16_to_int(f->abs_y);
+		map_contacts[n] = i;
+		n++;
+	}
+
+	input_mt_assign_slots(input, msc->tracking_ids, msc->pos, n, 0);
+
+	for (i = 0; i < n; i++) {
+		int idx = map_contacts[i];
+		f = (struct tp_finger *)(data + hdr_sz + idx * touch_sz);
+		report_finger_data(input, msc->tracking_ids[i], &msc->pos[i], f);
+	}
+
+	input_mt_sync_frame(input);
+	input_report_key(input, BTN_MOUSE, tp_hdr->buttons & 1);
+
+	input_sync(input);
+	return 1;
+}
+
+static int magicmouse_raw_event_spi(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+	const size_t hdr_sz = sizeof(struct tp_mouse_report);
+
+	if (!size)
+		return 0;
+
+	if (data[0] == SPI_RESET_REPORT_ID) {
+		hid_info(hdev, "Touch controller was reset, re-enabling touch mode\n");
+		schedule_delayed_work(&msc->work, msecs_to_jiffies(10));
+		return 1;
+	}
+
+	if (data[0] != TRACKPAD2_USB_REPORT_ID || size < hdr_sz)
+		return 0;
+
+	return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz);
+}
+
 static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
 		struct hid_usage *usage, __s32 value)
 {
@@ -532,7 +890,17 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
 	return 0;
 }
 
-static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+
+static int magicmouse_setup_input(struct input_dev *input,
+				  struct hid_device *hdev)
+{
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+	return msc->input_ops.setup_input(input, hdev);
+}
+
+static int magicmouse_setup_input_usb(struct input_dev *input,
+				      struct hid_device *hdev)
 {
 	int error;
 	int mt_flags = 0;
@@ -610,7 +978,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
 
 	__set_bit(EV_ABS, input->evbit);
 
-	error = input_mt_init_slots(input, 16, mt_flags);
+	error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags);
 	if (error)
 		return error;
 	input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
@@ -689,6 +1057,109 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
 	 */
 	__clear_bit(EV_REP, input->evbit);
 
+	/*
+	 * This isn't strictly speaking needed for USB, but enabling MT on
+	 * device open is probably more robust than only doing it once on probe
+	 * device open is probably more robust than doing it only once at probe,
+	 * even though USB devices are not known to suffer from the SPI reset issue.
+	input->open = magicmouse_open;
+	input->close = magicmouse_close;
+	return 0;
+}
+
+static int magicmouse_setup_input_mtp(struct input_dev *input,
+				      struct hid_device *hdev)
+{
+	int error;
+	int mt_flags = 0;
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+	__clear_bit(BTN_0, input->keybit);
+	__clear_bit(BTN_RIGHT, input->keybit);
+	__clear_bit(BTN_MIDDLE, input->keybit);
+	__clear_bit(EV_REL, input->evbit);
+	__clear_bit(REL_X, input->relbit);
+	__clear_bit(REL_Y, input->relbit);
+
+	mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK;
+
+	/* finger touch area */
+	input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 5000, 0, 0);
+	input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 5000, 0, 0);
+
+	/* finger approach area */
+	input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 5000, 0, 0);
+	input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 5000, 0, 0);
+
+	/* Note: Touch Y position from the device is inverted relative
+	 * to how pointer motion is reported (and relative to how USB
+	 * HID recommends the coordinates work).  This driver keeps
+	 * the origin at the same position, and just uses the additive
+	 * inverse of the reported Y.
+	 */
+
+	input_set_abs_params(input, ABS_MT_PRESSURE, 0, 6000, 0, 0);
+
+	/*
+	 * This makes libinput recognize this as a PressurePad and
+	 * stop trying to use pressure for touch size. Pressure unit
+	 * seems to be ~grams on these touchpads.
+	 */
+	input_abs_set_res(input, ABS_MT_PRESSURE, 1);
+
+	/* finger orientation */
+	input_set_abs_params(input, ABS_MT_ORIENTATION, -J314_TP_MAX_FINGER_ORIENTATION,
+			     J314_TP_MAX_FINGER_ORIENTATION, 0, 0);
+
+	/* finger position */
+	input_set_abs_params(input, ABS_MT_POSITION_X, J314_TP_MIN_X, J314_TP_MAX_X,
+			     0, 0);
+	/* Y axis is inverted */
+	input_set_abs_params(input, ABS_MT_POSITION_Y, -J314_TP_MAX_Y, -J314_TP_MIN_Y,
+			     0, 0);
+
+	/* X/Y resolution */
+	input_abs_set_res(input, ABS_MT_POSITION_X, J314_TP_RES_X);
+	input_abs_set_res(input, ABS_MT_POSITION_Y, J314_TP_RES_Y);
+
+	input_set_events_per_packet(input, 60);
+
+	/* touchpad button */
+	input_set_capability(input, EV_KEY, BTN_MOUSE);
+
+	/*
+	 * hid-input may mark device as using autorepeat, but the trackpad does
+	 * not actually want it.
+	 */
+	__clear_bit(EV_REP, input->evbit);
+
+	error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags);
+	if (error)
+		return error;
+
+	/*
+	 * Override the default input->open function to send the MT
+	 * enable every time the device is opened. This ensures it works
+	 * even if we missed a reset event due to the device being closed.
+	 * input->close is overridden for symmetry.
+	 *
+	 * This also takes care of the dimensions query.
+	 */
+	input->open = magicmouse_open;
+	input->close = magicmouse_close;
+	msc->query_dimensions = true;
+
+	return 0;
+}
+
+static int magicmouse_setup_input_spi(struct input_dev *input,
+				      struct hid_device *hdev)
+{
+	int ret = magicmouse_setup_input_mtp(input, hdev);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -730,55 +1201,6 @@ static int magicmouse_input_configured(struct hid_device *hdev,
 	return 0;
 }
 
-static int magicmouse_enable_multitouch(struct hid_device *hdev)
-{
-	const u8 *feature;
-	const u8 feature_mt[] = { 0xD7, 0x01 };
-	const u8 feature_mt_mouse2[] = { 0xF1, 0x02, 0x01 };
-	const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
-	const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
-	u8 *buf;
-	int ret;
-	int feature_size;
-
-	if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
-	    hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
-		if (hdev->vendor == BT_VENDOR_ID_APPLE) {
-			feature_size = sizeof(feature_mt_trackpad2_bt);
-			feature = feature_mt_trackpad2_bt;
-		} else { /* USB_VENDOR_ID_APPLE */
-			feature_size = sizeof(feature_mt_trackpad2_usb);
-			feature = feature_mt_trackpad2_usb;
-		}
-	} else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
-		feature_size = sizeof(feature_mt_mouse2);
-		feature = feature_mt_mouse2;
-	} else {
-		feature_size = sizeof(feature_mt);
-		feature = feature_mt;
-	}
-
-	buf = kmemdup(feature, feature_size, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
-				HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
-	kfree(buf);
-	return ret;
-}
-
-static void magicmouse_enable_mt_work(struct work_struct *work)
-{
-	struct magicmouse_sc *msc =
-		container_of(work, struct magicmouse_sc, work.work);
-	int ret;
-
-	ret = magicmouse_enable_multitouch(msc->hdev);
-	if (ret < 0)
-		hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
-}
-
 static int magicmouse_fetch_battery(struct hid_device *hdev)
 {
 #ifdef CONFIG_HID_BATTERY_STRENGTH
@@ -825,12 +1247,30 @@ static int magicmouse_probe(struct hid_device *hdev,
 	struct hid_report *report;
 	int ret;
 
+	if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE &&
+	    hdev->type != HID_TYPE_SPI_MOUSE)
+		return -ENODEV;
+
 	msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
 	if (msc == NULL) {
 		hid_err(hdev, "can't alloc magicmouse descriptor\n");
 		return -ENOMEM;
 	}
 
+	// Internal trackpads use a different data format; select input ops
+	// per bus to avoid conflicts with the report ID handling.
+	if (id->bus == BUS_HOST) {
+		msc->input_ops.raw_event = magicmouse_raw_event_mtp;
+		msc->input_ops.setup_input = magicmouse_setup_input_mtp;
+	} else if (id->bus == BUS_SPI) {
+		msc->input_ops.raw_event = magicmouse_raw_event_spi;
+		msc->input_ops.setup_input = magicmouse_setup_input_spi;
+
+	} else {
+		msc->input_ops.raw_event = magicmouse_raw_event_usb;
+		msc->input_ops.setup_input = magicmouse_setup_input_usb;
+	}
+
 	msc->scroll_accel = SCROLL_ACCEL_DEFAULT;
 	msc->hdev = hdev;
 	INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);
@@ -882,6 +1322,10 @@ static int magicmouse_probe(struct hid_device *hdev,
 		else /* USB_VENDOR_ID_APPLE */
 			report = hid_register_report(hdev, HID_INPUT_REPORT,
 				TRACKPAD2_USB_REPORT_ID, 0);
+	} else if (id->bus == BUS_SPI) {
+		report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_REPORT_ID, 0);
+	} else if (id->bus == BUS_HOST) {
+		report = hid_register_report(hdev, HID_INPUT_REPORT, MTP_REPORT_ID, 0);
 	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
 		report = hid_register_report(hdev, HID_INPUT_REPORT,
 			TRACKPAD_REPORT_ID, 0);
@@ -896,21 +1340,14 @@ static int magicmouse_probe(struct hid_device *hdev,
 	}
 	report->size = 6;
 
-	/*
-	 * Some devices repond with 'invalid report id' when feature
-	 * report switching it into multitouch mode is sent to it.
-	 *
-	 * This results in -EIO from the _raw low-level transport callback,
-	 * but there seems to be no other way of switching the mode.
-	 * Thus the super-ugly hacky success check below.
-	 */
-	ret = magicmouse_enable_multitouch(hdev);
-	if (ret != -EIO && ret < 0) {
-		hid_err(hdev, "unable to request touch data (%d)\n", ret);
-		goto err_stop_hw;
-	}
-	if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
-		schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
+	/* MTP devices do not need the MT enable; this is handled by the MTP driver */
+	if (id->bus == BUS_HOST)
+		return 0;
+
+	/* SPI devices need to watch for reset events to re-send the MT enable */
+	if (id->bus == BUS_SPI) {
+		report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_RESET_REPORT_ID, 0);
+		report->size = 2;
 	}
 
 	return 0;
@@ -981,10 +1418,24 @@ static const struct hid_device_id magic_mice[] = {
 		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
 		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
+	{ HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID),
+	  .driver_data = 0 },
+	{ HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, HID_ANY_ID),
+	  .driver_data = 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, magic_mice);
 
+#ifdef CONFIG_PM
+static int magicmouse_reset_resume(struct hid_device *hdev)
+{
+	if (hdev->bus == BUS_SPI)
+		return magicmouse_enable_multitouch(hdev);
+
+	return 0;
+}
+#endif
+
 static struct hid_driver magicmouse_driver = {
 	.name = "magicmouse",
 	.id_table = magic_mice,
@@ -995,6 +1446,10 @@ static struct hid_driver magicmouse_driver = {
 	.event = magicmouse_event,
 	.input_mapping = magicmouse_input_mapping,
 	.input_configured = magicmouse_input_configured,
+#ifdef CONFIG_PM
+	.reset_resume = magicmouse_reset_resume,
+#endif
+
 };
 module_hid_driver(magicmouse_driver);
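As a sanity check on the J314 fallback values defined near the top of this file: with MIN_X/MAX_X of -5900/6500 and a reported width of 13000 (apparently hundredths of a millimetre, i.e. 130 mm), the X resolution works out to about 95 units per millimetre, which is the same quantity magicmouse_open() recomputes from the device-reported dimensions. A small standalone check:

	#include <stdio.h>

	int main(void)
	{
		int min_x = -5900, max_x = 6500;
		int width = 13000;	/* hundredths of a millimetre (assumed) */

		/* Same formula as J314_TP_RES_X and the runtime query path. */
		int res_x = 100 * (max_x - min_x) / width;

		printf("X resolution: %d units/mm\n", res_x);	/* prints 95 */
		return 0;
	}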
 
diff --git a/drivers/hid/spi-hid/Kconfig b/drivers/hid/spi-hid/Kconfig
new file mode 100644
index 00000000000000..8e37f0fec28ac9
--- /dev/null
+++ b/drivers/hid/spi-hid/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "SPI HID support"
+	depends on SPI
+
+config SPI_HID_APPLE_OF
+	tristate "HID over SPI transport layer for Apple Silicon SoCs"
+	default ARCH_APPLE
+	depends on SPI && INPUT && OF
+	help
+	  Say Y here if you use an Apple Silicon based laptop. The keyboard and
+	  touchpad are HID based devices connected via SPI.
+
+	  If unsure, say N.
+
+	  This support is also available as a module.  If so, the module
+	  will be called spi-hid-apple-of. It will also build/depend on the
+	  module spi-hid-apple.
+
+endmenu
+
+config SPI_HID_APPLE_CORE
+	tristate
+	default y if SPI_HID_APPLE_OF=y
+	default m if SPI_HID_APPLE_OF=m
+	select HID
+	select CRC16
diff --git a/drivers/hid/spi-hid/Makefile b/drivers/hid/spi-hid/Makefile
new file mode 100644
index 00000000000000..f276ee12cb94fc
--- /dev/null
+++ b/drivers/hid/spi-hid/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for SPI HID transport drivers
+#
+
+obj-$(CONFIG_SPI_HID_APPLE_CORE)		+= spi-hid-apple.o
+
+spi-hid-apple-objs				=  spi-hid-apple-core.o
+
+obj-$(CONFIG_SPI_HID_APPLE_OF)			+= spi-hid-apple-of.o
diff --git a/drivers/hid/spi-hid/spi-hid-apple-core.c b/drivers/hid/spi-hid/spi-hid-apple-core.c
new file mode 100644
index 00000000000000..1f8fa64d6d8627
--- /dev/null
+++ b/drivers/hid/spi-hid/spi-hid-apple-core.c
@@ -0,0 +1,1194 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Apple SPI HID transport driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Based on: drivers/input/applespi.c
+ *
+ * MacBook (Pro) SPI keyboard and touchpad driver
+ *
+ * Copyright (c) 2015-2018 Federico Lorenzi
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ *
+ */
+
+//#define DEBUG 2
+
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device/driver.h>
+#include <linux/hid.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+#include <linux/wait.h>
+
+#include "spi-hid-apple.h"
+
+#define SPIHID_DEF_WAIT msecs_to_jiffies(1000)
+
+#define SPIHID_MAX_INPUT_REPORT_SIZE 0x800
+
+/* support only keyboard, trackpad and management dev for now */
+#define SPIHID_MAX_DEVICES 3
+
+#define SPIHID_DEVICE_ID_MNGT 0x0
+#define SPIHID_DEVICE_ID_KBD 0x1
+#define SPIHID_DEVICE_ID_TP 0x2
+#define SPIHID_DEVICE_ID_INFO 0xd0
+
+#define SPIHID_READ_PACKET 0x20
+#define SPIHID_WRITE_PACKET 0x40
+
+#define SPIHID_DESC_MAX 512
+
+#define SPIHID_SET_LEDS 0x0151 /* caps lock */
+
+#define SPI_RW_CHG_DELAY_US 200 /* 'Inter Stage Us'? */
+
+static const u8 spi_hid_apple_booted[4] = { 0xa0, 0x80, 0x00, 0x00 };
+static const u8 spi_hid_apple_status_ok[4] = { 0xac, 0x27, 0x68, 0xd5 };
+
+struct spihid_interface {
+	struct hid_device *hid;
+	u8 *hid_desc;
+	u32 hid_desc_len;
+	u32 id;
+	unsigned country;
+	u32 max_control_report_len;
+	u32 max_input_report_len;
+	u32 max_output_report_len;
+	u8 name[32];
+	u8 reply_buf[SPIHID_DESC_MAX];
+	u32 reply_len;
+	bool ready;
+};
+
+struct spihid_input_report {
+	u8 *buf;
+	u32 length;
+	u32 offset;
+	u8 device;
+	u8 flags;
+};
+
+struct spihid_apple {
+	struct spi_device *spidev;
+
+	struct spihid_apple_ops *ops;
+
+	struct spihid_interface mngt;
+	struct spihid_interface kbd;
+	struct spihid_interface tp;
+
+	wait_queue_head_t wait;
+	struct mutex tx_lock; //< protects against concurrent SPI writes
+
+	struct spi_message rx_msg;
+	struct spi_message tx_msg;
+	struct spi_transfer rx_transfer;
+	struct spi_transfer tx_transfer;
+	struct spi_transfer status_transfer;
+
+	u8 *rx_buf;
+	u8 *tx_buf;
+	u8 *status_buf;
+
+	u8 vendor[32];
+	u8 product[64];
+	u8 serial[32];
+
+	u32 num_devices;
+
+	u32 vendor_id;
+	u32 product_id;
+	u32 version_number;
+
+	u8 msg_id;
+
+	/* fragmented HID report */
+	struct spihid_input_report report;
+
+	/* state tracking flags */
+	bool status_booted;
+
+#ifdef IRQ_WAKE_SUPPORT
+	bool irq_wake_enabled;
+#endif
+};
+
+/**
+ * struct spihid_msg_hdr - common header of protocol messages.
+ *
+ * Each message begins with fixed header, followed by a message-type specific
+ * payload, and ends with a 16-bit crc. Because of the varying lengths of the
+ * payload, the crc is defined at the end of each payload struct, rather than
+ * in this struct.
+ *
+ * @unknown0:	request type? output, input (0x10), feature, protocol
+ * @unknown1:	maybe report id?
+ * @unknown2:	mostly zero, in info request maybe device num
+ * @id:		incremented on each message, rolls over after 255; there is a
+ *		separate counter for each message type.
+ * @rsplen:	response length (the exact nature of this field is quite
+ *		speculative). On a request/write this is often the same as
+ *		@length, though in some cases it has been seen to be much larger
+ *		(e.g. 0x400); on a response/read this the same as on the
+ *		request; for reads that are not responses it is 0.
+ * @length:	length of the remainder of the data in the whole message
+ *		structure (after re-assembly in case of being split over
+ *		multiple spi-packets), minus the trailing crc. The total size
+ *		of a message is therefore @length + 10.
+ */
+
+struct spihid_msg_hdr {
+	u8 unknown0;
+	u8 unknown1;
+	u8 unknown2;
+	u8 id;
+	__le16 rsplen;
+	__le16 length;
+};
+
+/**
+ * struct spihid_transfer_packet - a complete spi packet; always 256 bytes. This carries
+ * the (parts of the) message in the data. But note that this does not
+ * necessarily contain a complete message, as in some cases (e.g. many
+ * fingers pressed) the message is split over multiple packets (see the
+ * @offset, @remain, and @length fields). In general the data parts in
+ * spihid_transfer_packet's are concatenated until @remaining is 0, and the
+ * result is an message.
+ *
+ * @flags:	0x40 = write (to device), 0x20 = read (from device); note that
+ *		the response to a write still has 0x40.
+ * @device:	1 = keyboard, 2 = touchpad
+ * @offset:	specifies the offset of this packet's data in the complete
+ *		message; i.e. > 0 indicates this is a continuation packet (in
+ *		the second packet for a message split over multiple packets
+ *		this would then be the same as the @length in the first packet)
+ * @remain:	number of message bytes remaining in subsequents packets (in
+ *		the first packet of a message split over two packets this would
+ *		then be the same as the @length in the second packet)
+ * @length:	length of the valid data in the @data in this packet
+ * @data:	all or part of a message
+ * @crc16:	crc over this whole structure minus this @crc16 field. This
+ *		covers just this packet, even on multi-packet messages (in
+ *		contrast to the crc in the message).
+ */
+struct spihid_transfer_packet {
+	u8 flags;
+	u8 device;
+	__le16 offset;
+	__le16 remain;
+	__le16 length;
+	u8 data[246];
+	__le16 crc16;
+};
+
+/*
+ * How HID is mapped onto the protocol is not fully clear. These are the known
+ * reports/requests:
+ *
+ *			pkt.flags	pkt.dev?	msg.u0	msg.u1	msg.u2
+ * info			0x40		0xd0		0x20	0x01	0xd0
+ *
+ * info mngt:		0x40		0xd0		0x20	0x10	0x00
+ * info kbd:		0x40		0xd0		0x20	0x10	0x01
+ * info tp:		0x40		0xd0		0x20	0x10	0x02
+ *
+ * desc kbd:		0x40		0xd0		0x20	0x10	0x01
+ * desc trackpad:	0x40		0xd0		0x20	0x10	0x02
+ *
+ * mt mode:		0x40		0x02		0x52	0x02	0x00	set protocol?
+ * capslock led		0x40		0x01		0x51	0x01	0x00	output report
+ *
+ * report kbd:		0x20		0x01		0x10	0x01	0x00	input report
+ * report tp:		0x20		0x02		0x10	0x02	0x00	input report
+ *
+ */
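+
+/*
+ * Worked example from the table above: the caps lock LED is set with an
+ * output report to the keyboard device, i.e. pkt.device = 0x01, msg.u0 = 0x51,
+ * msg.u1 = 0x01 (presumably where the SPIHID_SET_LEDS value 0x0151 comes from).
+ */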
+
+
+static int spihid_apple_request(struct spihid_apple *spihid, u8 target, u8 unk0,
+				u8 unk1, u8 unk2, u16 resp_len, u8 *buf,
+				size_t len)
+{
+	struct spihid_transfer_packet *pkt;
+	struct spihid_msg_hdr *hdr;
+	u16 crc;
+	int err;
+
+	/* known reports are small enough to fit in a single packet */
+	if (len > sizeof(pkt->data) - sizeof(*hdr) - sizeof(__le16))
+		return -EINVAL;
+
+	err = mutex_lock_interruptible(&spihid->tx_lock);
+	if (err < 0)
+		return err;
+
+	pkt = (struct spihid_transfer_packet *)spihid->tx_buf;
+
+	memset(pkt, 0, sizeof(*pkt));
+	pkt->flags = SPIHID_WRITE_PACKET;
+	pkt->device = target;
+	pkt->length = cpu_to_le16(sizeof(*hdr) + len + sizeof(__le16));
+
+	hdr = (struct spihid_msg_hdr *)&pkt->data[0];
+	hdr->unknown0 = unk0;
+	hdr->unknown1 = unk1;
+	hdr->unknown2 = unk2;
+	hdr->id = spihid->msg_id++;
+	hdr->rsplen = cpu_to_le16(resp_len);
+	hdr->length = cpu_to_le16(len);
+
+	if (len)
+		memcpy(pkt->data + sizeof(*hdr), buf, len);
+	crc = crc16(0, &pkt->data[0], sizeof(*hdr) + len);
+	put_unaligned_le16(crc, pkt->data + sizeof(*hdr) + len);
+
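+	/*
+	 * Two separate CRCs: the message-level crc16 appended just above, and
+	 * the packet-level crc16 below, covering the whole transfer packet up
+	 * to (but not including) its own crc16 field.
+	 */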
+	pkt->crc16 = cpu_to_le16(crc16(0, spihid->tx_buf,
+				 offsetof(struct spihid_transfer_packet, crc16)));
+
+	memset(spihid->status_buf, 0, sizeof(spi_hid_apple_status_ok));
+
+	err = spi_sync(spihid->spidev, &spihid->tx_msg);
+
+	if (memcmp(spihid->status_buf, spi_hid_apple_status_ok,
+		   sizeof(spi_hid_apple_status_ok))) {
+		u8 *b = spihid->status_buf;
+		dev_warn_ratelimited(&spihid->spidev->dev, "status message "
+				     "mismatch: %02x %02x %02x %02x\n",
+				     b[0], b[1], b[2], b[3]);
+	}
+	mutex_unlock(&spihid->tx_lock);
+	if (err < 0)
+		return err;
+
+	return (int)len;
+}
+
+static struct spihid_apple *spihid_get_data(struct spihid_interface *idev)
+{
+	switch (idev->id) {
+	case SPIHID_DEVICE_ID_KBD:
+		return container_of(idev, struct spihid_apple, kbd);
+	case SPIHID_DEVICE_ID_TP:
+		return container_of(idev, struct spihid_apple, tp);
+	default:
+		return NULL;
+	}
+}
+
+static int apple_ll_start(struct hid_device *hdev)
+{
+	/* no-op, the SPI transport is already set up */
+	return 0;
+}
+
+static void apple_ll_stop(struct hid_device *hdev)
+{
+	/* no-op, devices will be destroyed on driver destruction */
+}
+
+static int apple_ll_open(struct hid_device *hdev)
+{
+	struct spihid_apple *spihid;
+	struct spihid_interface *idev = hdev->driver_data;
+
+	if (idev->hid_desc_len == 0) {
+		spihid = spihid_get_data(idev);
+		dev_warn(&spihid->spidev->dev,
+			 "HID descriptor missing for dev %u", idev->id);
+	} else
+		idev->ready = true;
+
+	return 0;
+}
+
+static void apple_ll_close(struct hid_device *hdev)
+{
+	struct spihid_interface *idev = hdev->driver_data;
+	idev->ready = false;
+}
+
+static int apple_ll_parse(struct hid_device *hdev)
+{
+	struct spihid_interface *idev = hdev->driver_data;
+
+	return hid_parse_report(hdev, idev->hid_desc, idev->hid_desc_len);
+}
+
+static int apple_ll_raw_request(struct hid_device *hdev,
+				unsigned char reportnum, __u8 *buf, size_t len,
+				unsigned char rtype, int reqtype)
+{
+	struct spihid_interface *idev = hdev->driver_data;
+	struct spihid_apple *spihid = spihid_get_data(idev);
+	int ret;
+
+	dev_dbg(&spihid->spidev->dev,
+		"apple_ll_raw_request: device:%u reportnum:%hhu rtype:%hhu",
+		idev->id, reportnum, rtype);
+
+	switch (reqtype) {
+	case HID_REQ_GET_REPORT:
+		if (rtype != HID_FEATURE_REPORT)
+			return -EINVAL;
+
+		idev->reply_len = 0;
+		ret = spihid_apple_request(spihid, idev->id, 0x32, reportnum, 0x00, len, NULL, 0);
+		if (ret < 0)
+			return ret;
+
+		ret = wait_event_interruptible_timeout(spihid->wait, idev->reply_len,
+						       SPIHID_DEF_WAIT);
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		if (ret < 0) {
+			dev_err(&spihid->spidev->dev, "waiting for get report failed: %d", ret);
+			return ret;
+		}
+		memcpy(buf, idev->reply_buf, min_t(size_t, len, idev->reply_len));
+		return idev->reply_len;
+
+	case HID_REQ_SET_REPORT:
+		if (buf[0] != reportnum)
+			return -EINVAL;
+		if (reportnum != idev->id) {
+			dev_warn(&spihid->spidev->dev,
+				 "device:%u reportnum:"
+				 "%hhu mismatch",
+				 idev->id, reportnum);
+			return -EINVAL;
+		}
+		return spihid_apple_request(spihid, idev->id, 0x52, reportnum, 0x00, 2, buf, len);
+	default:
+		return -EIO;
+	}
+}
+
+static int apple_ll_output_report(struct hid_device *hdev, __u8 *buf,
+				  size_t len)
+{
+	struct spihid_interface *idev = hdev->driver_data;
+	struct spihid_apple *spihid = spihid_get_data(idev);
+	if (!spihid)
+		return -ENODEV;
+
+	dev_dbg(&spihid->spidev->dev,
+		"apple_ll_output_report: device:%u len:%zu:",
+		idev->id, len);
+	// second idev->id should maybe be buf[0]?
+	return spihid_apple_request(spihid, idev->id, 0x51, idev->id, 0x00, 0, buf, len);
+}
+
+static struct hid_ll_driver apple_hid_ll = {
+	.start = &apple_ll_start,
+	.stop = &apple_ll_stop,
+	.open = &apple_ll_open,
+	.close = &apple_ll_close,
+	.parse = &apple_ll_parse,
+	.raw_request = &apple_ll_raw_request,
+	.output_report = &apple_ll_output_report,
+	.max_buffer_size = SPIHID_MAX_INPUT_REPORT_SIZE,
+};
+
+static struct spihid_interface *spihid_get_iface(struct spihid_apple *spihid,
+						 u32 iface)
+{
+	switch (iface) {
+	case SPIHID_DEVICE_ID_MNGT:
+		return &spihid->mngt;
+	case SPIHID_DEVICE_ID_KBD:
+		return &spihid->kbd;
+	case SPIHID_DEVICE_ID_TP:
+		return &spihid->tp;
+	default:
+		return NULL;
+	}
+}
+
+static int spihid_verify_msg(struct spihid_apple *spihid, u8 *buf, size_t len)
+{
+	u16 msg_crc, crc;
+	struct device *dev = &spihid->spidev->dev;
+
+	crc = crc16(0, buf, len - sizeof(__le16));
+	msg_crc = get_unaligned_le16(buf + len - sizeof(__le16));
+	if (crc != msg_crc) {
+		dev_warn_ratelimited(dev, "Read message crc mismatch\n");
+		return 0;
+	}
+	return 1;
+}
+
+static bool spihid_status_report(struct spihid_apple *spihid, u8 *pl,
+				 size_t len)
+{
+	struct device *dev = &spihid->spidev->dev;
+	dev_dbg(dev, "%s: len: %zu", __func__, len);
+	if (len == 5 && pl[0] == 0xe0)
+		return true;
+
+	return false;
+}
+
+static bool spihid_process_input_report(struct spihid_apple *spihid, u32 device,
+					struct spihid_msg_hdr *hdr, u8 *payload,
+					size_t len)
+{
+	// dev_dbg(&spihid->spidev->dev, "input report: req:%hx iface:%u ", hdr->unknown0, device);
+	if (hdr->unknown0 != 0x10)
+		return false;
+
+	/* Device 0 is a HID device as well, but vendor usage only; handle it internally for now */
+	if (device == 0) {
+		if (hdr->unknown1 == 0xe0) {
+			return spihid_status_report(spihid, payload, len);
+		}
+	} else if (device < SPIHID_MAX_DEVICES) {
+		struct spihid_interface *iface =
+			spihid_get_iface(spihid, device);
+		if (iface && iface->hid && iface->ready) {
+			hid_input_report(iface->hid, HID_INPUT_REPORT, payload,
+					 len, 1);
+			return true;
+		}
+	} else {
+		dev_dbg(&spihid->spidev->dev,
+			"unexpected iface:%u for input report", device);
+	}
+
+	return false;
+}
+
+struct spihid_device_info {
+	__le16 u0[2];
+	__le16 num_devices;
+	__le16 vendor_id;
+	__le16 product_id;
+	__le16 version_number;
+	__le16 vendor_str[2]; /* offset and string length */
+	__le16 product_str[2]; /* offset and string length */
+	__le16 serial_str[2]; /* offset and string length */
+};
+
+static bool spihid_process_device_info(struct spihid_apple *spihid, u32 iface,
+				       u8 *payload, size_t len)
+{
+	struct device *dev = &spihid->spidev->dev;
+
+	if (iface != SPIHID_DEVICE_ID_INFO)
+		return false;
+
+	if (spihid->vendor_id == 0 &&
+	    len >= sizeof(struct spihid_device_info)) {
+		struct spihid_device_info *info =
+			(struct spihid_device_info *)payload;
+		u16 voff, vlen, poff, plen, soff, slen;
+		u32 num_devices;
+
+		num_devices = __le16_to_cpu(info->num_devices);
+
+		if (num_devices < SPIHID_MAX_DEVICES) {
+			dev_err(dev,
+				"Device info reports %u devices, expecting at least 3",
+				num_devices);
+			return false;
+		}
+		spihid->num_devices = num_devices;
+
+		if (spihid->num_devices > SPIHID_MAX_DEVICES) {
+			dev_info(dev,
+				 "limiting the number of devices to mngt, kbd and mouse");
+			spihid->num_devices = SPIHID_MAX_DEVICES;
+		}
+
+		spihid->vendor_id = __le16_to_cpu(info->vendor_id);
+		spihid->product_id = __le16_to_cpu(info->product_id);
+		spihid->version_number = __le16_to_cpu(info->version_number);
+
+		voff = __le16_to_cpu(info->vendor_str[0]);
+		vlen = __le16_to_cpu(info->vendor_str[1]);
+
+		if (voff < len && vlen <= len - voff &&
+		    vlen < sizeof(spihid->vendor)) {
+			memcpy(spihid->vendor, payload + voff, vlen);
+			spihid->vendor[vlen] = '\0';
+		}
+
+		poff = __le16_to_cpu(info->product_str[0]);
+		plen = __le16_to_cpu(info->product_str[1]);
+
+		if (poff < len && plen <= len - poff &&
+		    plen < sizeof(spihid->product)) {
+			memcpy(spihid->product, payload + poff, plen);
+			spihid->product[plen] = '\0';
+		}
+
+		soff = __le16_to_cpu(info->serial_str[0]);
+		slen = __le16_to_cpu(info->serial_str[1]);
+
+		if (soff < len && slen <= len - soff &&
+		    slen < sizeof(spihid->serial)) {
+			memcpy(spihid->serial, payload + soff, slen);
+			spihid->serial[slen] = '\0';
+		}
+
+		wake_up_interruptible(&spihid->wait);
+	}
+	return true;
+}
+
+struct spihid_iface_info {
+	u8 u_0;
+	u8 interface_num;
+	u8 u_2;
+	u8 u_3;
+	u8 u_4;
+	u8 country_code;
+	__le16 max_input_report_len;
+	__le16 max_output_report_len;
+	__le16 max_control_report_len;
+	__le16 name_offset;
+	__le16 name_length;
+};
+
+static bool spihid_process_iface_info(struct spihid_apple *spihid, u32 num,
+				      u8 *payload, size_t len)
+{
+	struct spihid_iface_info *info;
+	struct spihid_interface *iface = spihid_get_iface(spihid, num);
+	u32 name_off, name_len;
+
+	if (!iface)
+		return false;
+
+	if (!iface->max_input_report_len) {
+		if (len < sizeof(*info))
+			return false;
+
+		info = (struct spihid_iface_info *)payload;
+
+		iface->max_input_report_len =
+			le16_to_cpu(info->max_input_report_len);
+		iface->max_output_report_len =
+			le16_to_cpu(info->max_output_report_len);
+		iface->max_control_report_len =
+			le16_to_cpu(info->max_control_report_len);
+		iface->country = info->country_code;
+
+		name_off = le16_to_cpu(info->name_offset);
+		name_len = le16_to_cpu(info->name_length);
+
+		if (name_off < len && name_len <= len - name_off &&
+		    name_len < sizeof(iface->name)) {
+			memcpy(iface->name, payload + name_off, name_len);
+			iface->name[name_len] = '\0';
+		}
+
+		dev_dbg(&spihid->spidev->dev, "Info for %s, country code: 0x%x",
+			iface->name, iface->country);
+
+		wake_up_interruptible(&spihid->wait);
+	}
+
+	return true;
+}
+
+static int spihid_register_hid_device(struct spihid_apple *spihid,
+				      struct spihid_interface *idev, u8 device);
+
+static bool spihid_process_iface_hid_report_desc(struct spihid_apple *spihid,
+						 u32 num, u8 *payload,
+						 size_t len)
+{
+	struct spihid_interface *iface = spihid_get_iface(spihid, num);
+
+	if (!iface)
+		return false;
+
+	if (iface->hid_desc_len == 0) {
+		if (len > SPIHID_DESC_MAX)
+			return false;
+		memcpy(iface->hid_desc, payload, len);
+		iface->hid_desc_len = len;
+
+		/* do not register the mngt iface as HID device */
+		if (num > 0)
+			spihid_register_hid_device(spihid, iface, num);
+
+		wake_up_interruptible(&spihid->wait);
+	}
+	return true;
+}
+
+static bool spihid_process_iface_get_report(struct spihid_apple *spihid,
+					    u32 device, u8 report,
+					    u8 *payload, size_t len)
+{
+	struct spihid_interface *iface = spihid_get_iface(spihid, device);
+
+	if (!iface)
+		return false;
+
+	if (len > sizeof(iface->reply_buf) || len < 1)
+		return false;
+
+	memcpy(iface->reply_buf, payload, len);
+	iface->reply_len = len;
+
+	wake_up_interruptible(&spihid->wait);
+
+	return true;
+}
+
+static bool spihid_process_response(struct spihid_apple *spihid, u32 device,
+				    struct spihid_msg_hdr *hdr, u8 *payload,
+				    size_t len)
+{
+	if (hdr->unknown0 == 0x20) {
+		switch (hdr->unknown1) {
+		case 0x01:
+			return spihid_process_device_info(spihid, hdr->unknown2,
+							  payload, len);
+		case 0x02:
+			return spihid_process_iface_info(spihid, hdr->unknown2,
+							 payload, len);
+		case 0x10:
+			return spihid_process_iface_hid_report_desc(
+				spihid, hdr->unknown2, payload, len);
+		default:
+			break;
+		}
+	}
+
+	if (hdr->unknown0 == 0x32) {
+		return spihid_process_iface_get_report(spihid, device, hdr->unknown1, payload, len);
+	}
+
+	return false;
+}
+
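+/*
+ * A message consists of a struct spihid_msg_hdr, the payload and a trailing
+ * crc16 over everything before it. Messages read back with the READ packet
+ * flag carry input reports from the HID devices; messages read back with the
+ * WRITE packet flag are responses to requests previously issued via
+ * spihid_apple_request().
+ */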
+static void spihid_process_message(struct spihid_apple *spihid, u8 *data,
+				   size_t length, u8 device, u8 flags)
+{
+	struct device *dev = &spihid->spidev->dev;
+	struct spihid_msg_hdr *hdr;
+	bool handled = false;
+	size_t payload_len;
+	u8 *payload;
+
+	if (!spihid_verify_msg(spihid, data, length))
+		return;
+
+	hdr = (struct spihid_msg_hdr *)data;
+	payload_len = le16_to_cpu(hdr->length);
+
+	if (payload_len == 0 ||
+		(payload_len + sizeof(struct spihid_msg_hdr) + 2) > length)
+		return;
+
+	payload = data + sizeof(struct spihid_msg_hdr);
+
+	switch (flags) {
+	case SPIHID_READ_PACKET:
+		handled = spihid_process_input_report(spihid, device, hdr,
+						      payload, payload_len);
+		break;
+	case SPIHID_WRITE_PACKET:
+		handled = spihid_process_response(spihid, device, hdr, payload,
+						  payload_len);
+		break;
+	default:
+		break;
+	}
+
+#if defined(DEBUG) && DEBUG > 1
+	{
+		dev_dbg(dev,
+			"R msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n",
+			hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id,
+			hdr->length);
+		print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+				     payload, le16_to_cpu(hdr->length), true);
+	}
+#else
+	if (!handled) {
+		dev_dbg(dev,
+			"R unhandled msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n",
+			hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id,
+			hdr->length);
+		print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+				     payload, le16_to_cpu(hdr->length), true);
+	}
+#endif
+}
+
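+/*
+ * Messages larger than a single transfer packet arrive split across several
+ * packets. Each packet carries the chunk's offset into the message and the
+ * number of bytes still outstanding; chunks are collected in report.buf and
+ * the complete message is processed once the offset catches up with the
+ * expected total length.
+ */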
+static void spihid_assemble_message(struct spihid_apple *spihid,
+				    struct spihid_transfer_packet *pkt)
+{
+	size_t length, offset, remain;
+	struct device *dev = &spihid->spidev->dev;
+	struct spihid_input_report *rep = &spihid->report;
+
+	length = le16_to_cpu(pkt->length);
+	remain = le16_to_cpu(pkt->remain);
+	offset = le16_to_cpu(pkt->offset);
+
+	if (offset + length + remain > U16_MAX) {
+		return;
+	}
+
+	if (pkt->device != rep->device || pkt->flags != rep->flags ||
+	    offset != rep->offset) {
+		rep->device = 0;
+		rep->flags = 0;
+		rep->offset = 0;
+		rep->length = 0;
+	}
+
+	if (offset == 0) {
+		if (rep->offset != 0) {
+			dev_warn(dev, "incomplete report off:%u len:%u",
+				 rep->offset, rep->length);
+		}
+		memcpy(rep->buf, pkt->data, length);
+		rep->offset = length;
+		rep->length = length + remain;
+		rep->device = pkt->device;
+		rep->flags = pkt->flags;
+	} else if (offset == rep->offset) {
+		if (offset + length + remain != rep->length) {
+			dev_warn(dev, "incomplete report off:%u len:%u",
+				 rep->offset, rep->length);
+			return;
+		}
+		memcpy(rep->buf + offset, pkt->data, length);
+		rep->offset += length;
+
+		if (rep->offset == rep->length) {
+			spihid_process_message(spihid, rep->buf, rep->length,
+					       rep->device, rep->flags);
+			rep->device = 0;
+			rep->flags = 0;
+			rep->offset = 0;
+			rep->length = 0;
+		}
+	}
+}
+
+static void spihid_process_read(struct spihid_apple *spihid)
+{
+	u16 crc;
+	size_t length;
+	struct device *dev = &spihid->spidev->dev;
+	struct spihid_transfer_packet *pkt;
+
+	pkt = (struct spihid_transfer_packet *)spihid->rx_buf;
+
+	/* check transfer packet crc */
+	crc = crc16(0, spihid->rx_buf,
+		    offsetof(struct spihid_transfer_packet, crc16));
+	if (crc != le16_to_cpu(pkt->crc16)) {
+		dev_warn_ratelimited(dev, "Read package crc mismatch\n");
+		return;
+	}
+
+	length = le16_to_cpu(pkt->length);
+
+	if (length < sizeof(struct spihid_msg_hdr) + 2) {
+		if (length == sizeof(spi_hid_apple_booted) &&
+		    !memcmp(pkt->data, spi_hid_apple_booted, length)) {
+			if (!spihid->status_booted) {
+				spihid->status_booted = true;
+				wake_up_interruptible(&spihid->wait);
+			}
+		} else {
+			dev_info(dev, "R short packet: len:%zu\n", length);
+			print_hex_dump(KERN_INFO, "spihid pkt:",
+				       DUMP_PREFIX_OFFSET, 16, 1, pkt->data,
+				       length, false);
+		}
+		return;
+	}
+
+#if defined(DEBUG) && DEBUG > 1
+	dev_dbg(dev,
+		"R pkt: flags:%02hhx dev:%02hhx off:%hu remain:%hu, len:%zu\n",
+		pkt->flags, pkt->device, pkt->offset, pkt->remain, length);
+#if defined(DEBUG) && DEBUG > 2
+	print_hex_dump_debug("spihid pkt: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     spihid->rx_buf,
+			     sizeof(struct spihid_transfer_packet), true);
+#endif
+#endif
+
+	if (length > sizeof(pkt->data)) {
+		dev_warn_ratelimited(dev, "Invalid pkt len:%zu", length);
+		return;
+	}
+
+	/* short message */
+	if (pkt->offset == 0 && pkt->remain == 0) {
+		spihid_process_message(spihid, pkt->data, length, pkt->device,
+				       pkt->flags);
+	} else {
+		spihid_assemble_message(spihid, pkt);
+	}
+}
+
+static void spihid_read_packet_sync(struct spihid_apple *spihid)
+{
+	int err;
+
+	err = spi_sync(spihid->spidev, &spihid->rx_msg);
+	if (!err) {
+		spihid_process_read(spihid);
+	} else {
+		dev_warn(&spihid->spidev->dev, "RX failed: %d\n", err);
+	}
+}
+
+irqreturn_t spihid_apple_core_irq(int irq, void *data)
+{
+	struct spi_device *spi = data;
+	struct spihid_apple *spihid = spi_get_drvdata(spi);
+
+	spihid_read_packet_sync(spihid);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_irq);
+
+static void spihid_apple_setup_spi_msgs(struct spihid_apple *spihid)
+{
+	memset(&spihid->rx_transfer, 0, sizeof(spihid->rx_transfer));
+
+	spihid->rx_transfer.rx_buf = spihid->rx_buf;
+	spihid->rx_transfer.len = sizeof(struct spihid_transfer_packet);
+
+	spi_message_init(&spihid->rx_msg);
+	spi_message_add_tail(&spihid->rx_transfer, &spihid->rx_msg);
+
+	memset(&spihid->tx_transfer, 0, sizeof(spihid->tx_transfer));
+	memset(&spihid->status_transfer, 0, sizeof(spihid->status_transfer));
+
+	spihid->tx_transfer.tx_buf = spihid->tx_buf;
+	spihid->tx_transfer.len = sizeof(struct spihid_transfer_packet);
+	spihid->tx_transfer.delay.unit = SPI_DELAY_UNIT_USECS;
+	spihid->tx_transfer.delay.value = SPI_RW_CHG_DELAY_US;
+
+	spihid->status_transfer.rx_buf = spihid->status_buf;
+	spihid->status_transfer.len = sizeof(spi_hid_apple_status_ok);
+
+	spi_message_init(&spihid->tx_msg);
+	spi_message_add_tail(&spihid->tx_transfer, &spihid->tx_msg);
+	spi_message_add_tail(&spihid->status_transfer, &spihid->tx_msg);
+}
+
+static int spihid_apple_setup_spi(struct spihid_apple *spihid)
+{
+	spihid_apple_setup_spi_msgs(spihid);
+
+	return spihid->ops->power_on(spihid->ops);
+}
+
+static int spihid_register_hid_device(struct spihid_apple *spihid,
+				      struct spihid_interface *iface, u8 device)
+{
+	int ret;
+	char *suffix;
+	struct hid_device *hid;
+
+	iface->id = device;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	/*
+	 * Use 'Apple SPI Keyboard' and 'Apple SPI Trackpad' as input device
+	 * names. The device names need to be distinct since at least KWin uses
+	 * the triple (Vendor ID, Product ID, Name) to identify devices.
+	 */
+	snprintf(hid->name, sizeof(hid->name), "Apple SPI %s", iface->name);
+	// strip ' / Boot' suffix from the name
+	suffix = strstr(hid->name, " / Boot");
+	if (suffix)
+		suffix[0] = '\0';
+	snprintf(hid->phys, sizeof(hid->phys), "%s (%hhx)",
+		 dev_name(&spihid->spidev->dev), device);
+	strscpy(hid->uniq, spihid->serial, sizeof(hid->uniq));
+
+	hid->ll_driver = &apple_hid_ll;
+	hid->bus = BUS_SPI;
+	hid->vendor = spihid->vendor_id;
+	hid->product = spihid->product_id;
+	hid->version = spihid->version_number;
+
+	if (device == SPIHID_DEVICE_ID_KBD)
+		hid->type = HID_TYPE_SPI_KEYBOARD;
+	else if (device == SPIHID_DEVICE_ID_TP)
+		hid->type = HID_TYPE_SPI_MOUSE;
+
+	hid->country = iface->country;
+	hid->dev.parent = &spihid->spidev->dev;
+	hid->driver_data = iface;
+
+	ret = hid_add_device(hid);
+	if (ret < 0) {
+		hid_destroy_device(hid);
+		dev_warn(&spihid->spidev->dev,
+			 "Failed to register hid device %hhu", device);
+		return ret;
+	}
+
+	iface->hid = hid;
+
+	return 0;
+}
+
+static void spihid_destroy_hid_device(struct spihid_interface *iface)
+{
+	if (iface->hid) {
+		hid_destroy_device(iface->hid);
+		iface->hid = NULL;
+	}
+	iface->ready = false;
+}
+
+int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops)
+{
+	struct device *dev = &spi->dev;
+	struct spihid_apple *spihid;
+	int err, i;
+
+	if (!ops || !ops->power_on || !ops->power_off || !ops->enable_irq || !ops->disable_irq)
+		return -EINVAL;
+
+	spihid = devm_kzalloc(dev, sizeof(*spihid), GFP_KERNEL);
+	if (!spihid)
+		return -ENOMEM;
+
+	spihid->ops = ops;
+	spihid->spidev = spi;
+
+	// init spi
+	spi_set_drvdata(spi, spihid);
+
+	/*
+	 * Allocate SPI buffers.
+	 * Overallocate the receive buffer since it is passed directly into
+	 * hid_input_report / hid_report_raw_event. The latter expects the buffer
+	 * to be HID_MAX_BUFFER_SIZE (16k) or hid_ll_driver.max_buffer_size if
+	 * set.
+	 */
+	spihid->rx_buf = devm_kmalloc(
+		&spi->dev, SPIHID_MAX_INPUT_REPORT_SIZE, GFP_KERNEL);
+	spihid->tx_buf = devm_kmalloc(
+		&spi->dev, sizeof(struct spihid_transfer_packet), GFP_KERNEL);
+	spihid->status_buf = devm_kmalloc(
+		&spi->dev, sizeof(spi_hid_apple_status_ok), GFP_KERNEL);
+
+	if (!spihid->rx_buf || !spihid->tx_buf || !spihid->status_buf)
+		return -ENOMEM;
+
+	spihid->report.buf =
+		devm_kmalloc(dev, SPIHID_MAX_INPUT_REPORT_SIZE, GFP_KERNEL);
+
+	spihid->kbd.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL);
+	spihid->tp.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL);
+
+	if (!spihid->report.buf || !spihid->kbd.hid_desc ||
+	    !spihid->tp.hid_desc)
+		return -ENOMEM;
+
+	init_waitqueue_head(&spihid->wait);
+
+	mutex_init(&spihid->tx_lock);
+
+	/* Init spi transfer buffers and power device on */
+	err = spihid_apple_setup_spi(spihid);
+	if (err < 0)
+		goto error;
+
+	/* enable HID irq */
+	spihid->ops->enable_irq(spihid->ops);
+
+	// wait for boot message
+	err = wait_event_interruptible_timeout(spihid->wait,
+					       spihid->status_booted,
+					       msecs_to_jiffies(1000));
+	if (err == 0)
+		err = -ENODEV;
+	if (err < 0) {
+		dev_err(dev, "waiting for device boot failed: %d", err);
+		goto error;
+	}
+
+	/* request device information */
+	dev_dbg(dev, "request device info");
+	spihid_apple_request(spihid, 0xd0, 0x20, 0x01, 0xd0, 0, NULL, 0);
+	err = wait_event_interruptible_timeout(spihid->wait, spihid->vendor_id,
+					       SPIHID_DEF_WAIT);
+	if (err == 0)
+		err = -ENODEV;
+	if (err < 0) {
+		dev_err(dev, "waiting for device info failed: %d", err);
+		goto error;
+	}
+
+	/* request interface information */
+	for (i = 0; i < spihid->num_devices; i++) {
+		struct spihid_interface *iface = spihid_get_iface(spihid, i);
+		if (!iface)
+			continue;
+		dev_dbg(dev, "request interface info 0x%02x", i);
+		spihid_apple_request(spihid, 0xd0, 0x20, 0x02, i,
+				     SPIHID_DESC_MAX, NULL, 0);
+		err = wait_event_interruptible_timeout(
+			spihid->wait, iface->max_input_report_len,
+			SPIHID_DEF_WAIT);
+	}
+
+	/* request HID report descriptors */
+	for (i = 1; i < spihid->num_devices; i++) {
+		struct spihid_interface *iface = spihid_get_iface(spihid, i);
+		if (!iface)
+			continue;
+		dev_dbg(dev, "request hid report desc 0x%02x", i);
+		spihid_apple_request(spihid, 0xd0, 0x20, 0x10, i,
+				     SPIHID_DESC_MAX, NULL, 0);
+		wait_event_interruptible_timeout(
+			spihid->wait, iface->hid_desc_len, SPIHID_DEF_WAIT);
+	}
+
+	return 0;
+error:
+	return err;
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_probe);
+
+void spihid_apple_core_remove(struct spi_device *spi)
+{
+	struct spihid_apple *spihid = spi_get_drvdata(spi);
+
+	/* destroy input devices */
+
+	spihid_destroy_hid_device(&spihid->tp);
+	spihid_destroy_hid_device(&spihid->kbd);
+
+	/* disable irq */
+	spihid->ops->disable_irq(spihid->ops);
+
+	/* power SPI device down */
+	spihid->ops->power_off(spihid->ops);
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_remove);
+
+void spihid_apple_core_shutdown(struct spi_device *spi)
+{
+	struct spihid_apple *spihid = spi_get_drvdata(spi);
+
+	/* disable irq */
+	spihid->ops->disable_irq(spihid->ops);
+
+	/* power SPI device down */
+	spihid->ops->power_off(spihid->ops);
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
+static int spihid_apple_core_suspend(struct device *dev)
+{
+	int ret;
+#ifdef IRQ_WAKE_SUPPORT
+	int wake_status;
+#endif
+	struct spihid_apple *spihid = spi_get_drvdata(to_spi_device(dev));
+
+	if (spihid->tp.hid) {
+		ret = hid_driver_suspend(spihid->tp.hid, PMSG_SUSPEND);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (spihid->kbd.hid) {
+		ret = hid_driver_suspend(spihid->kbd.hid, PMSG_SUSPEND);
+		if (ret < 0) {
+			if (spihid->tp.hid)
+				hid_driver_resume(spihid->tp.hid);
+			return ret;
+		}
+	}
+
+	/* Save some power */
+	spihid->ops->disable_irq(spihid->ops);
+
+#ifdef IRQ_WAKE_SUPPORT
+	if (device_may_wakeup(dev)) {
+		wake_status = spihid->ops->enable_irq_wake(spihid->ops);
+		if (!wake_status)
+			spihid->irq_wake_enabled = true;
+		else
+			dev_warn(dev, "Failed to enable irq wake: %d\n",
+				wake_status);
+	} else {
+		spihid->ops->power_off(spihid->ops);
+	}
+#else
+	spihid->ops->power_off(spihid->ops);
+#endif
+
+	return 0;
+}
+
+static int spihid_apple_core_resume(struct device *dev)
+{
+	int ret_tp = 0, ret_kbd = 0;
+	struct spihid_apple *spihid = spi_get_drvdata(to_spi_device(dev));
+#ifdef IRQ_WAKE_SUPPORT
+	int wake_status;
+
+	if (!device_may_wakeup(dev)) {
+		spihid->ops->power_on(spihid->ops);
+	} else if (spihid->irq_wake_enabled) {
+		wake_status = spihid->ops->disable_irq_wake(spihid->ops);
+		if (!wake_status)
+			spihid->irq_wake_enabled = false;
+		else
+			dev_warn(dev, "Failed to disable irq wake: %d\n",
+				wake_status);
+	}
+#endif
+
+	spihid->ops->enable_irq(spihid->ops);
+	spihid->ops->power_on(spihid->ops);
+
+	if (spihid->tp.hid)
+		ret_tp = hid_driver_reset_resume(spihid->tp.hid);
+	if (spihid->kbd.hid)
+		ret_kbd = hid_driver_reset_resume(spihid->kbd.hid);
+
+	if (ret_tp < 0)
+		return ret_tp;
+
+	return ret_kbd;
+}
+#endif
+
+const struct dev_pm_ops spihid_apple_core_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(spihid_apple_core_suspend,
+				spihid_apple_core_resume)
+};
+EXPORT_SYMBOL_GPL(spihid_apple_core_pm);
+
+MODULE_DESCRIPTION("Apple SPI HID transport driver");
+MODULE_AUTHOR("Janne Grunau <j@jannau.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/spi-hid/spi-hid-apple-of.c b/drivers/hid/spi-hid/spi-hid-apple-of.c
new file mode 100644
index 00000000000000..3f87b299351dfd
--- /dev/null
+++ b/drivers/hid/spi-hid/spi-hid-apple-of.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple SPI HID transport driver - Open Firmware
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include "spi-hid-apple.h"
+
+struct spihid_apple_of {
+	struct spihid_apple_ops ops;
+
+	struct gpio_desc *enable_gpio;
+	int irq;
+};
+
+static int spihid_apple_of_power_on(struct spihid_apple_ops *ops)
+{
+	struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+	/* reset the controller on boot */
+	gpiod_direction_output(sh_of->enable_gpio, 1);
+	msleep(5);
+	gpiod_direction_output(sh_of->enable_gpio, 0);
+	msleep(5);
+	/* turn SPI device on */
+	gpiod_direction_output(sh_of->enable_gpio, 1);
+	msleep(50);
+
+	return 0;
+}
+
+static int spihid_apple_of_power_off(struct spihid_apple_ops *ops)
+{
+	struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+	/* turn SPI device off */
+	gpiod_direction_output(sh_of->enable_gpio, 0);
+
+	return 0;
+}
+
+static int spihid_apple_of_enable_irq(struct spihid_apple_ops *ops)
+{
+	struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+	enable_irq(sh_of->irq);
+
+	return 0;
+}
+
+static int spihid_apple_of_disable_irq(struct spihid_apple_ops *ops)
+{
+	struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+	disable_irq(sh_of->irq);
+
+	return 0;
+}
+
+static int spihid_apple_of_enable_irq_wake(struct spihid_apple_ops *ops)
+{
+	struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+	return enable_irq_wake(sh_of->irq);
+}
+
+static int spihid_apple_of_disable_irq_wake(struct spihid_apple_ops *ops)
+{
+	struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+	return disable_irq_wake(sh_of->irq);
+}
+
+static int spihid_apple_of_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct spihid_apple_of *spihid_of;
+	int err;
+
+	spihid_of = devm_kzalloc(dev, sizeof(*spihid_of), GFP_KERNEL);
+	if (!spihid_of)
+		return -ENOMEM;
+
+	spihid_of->ops.power_on = spihid_apple_of_power_on;
+	spihid_of->ops.power_off = spihid_apple_of_power_off;
+	spihid_of->ops.enable_irq = spihid_apple_of_enable_irq;
+	spihid_of->ops.disable_irq = spihid_apple_of_disable_irq;
+	spihid_of->ops.enable_irq_wake = spihid_apple_of_enable_irq_wake;
+	spihid_of->ops.disable_irq_wake = spihid_apple_of_disable_irq_wake;
+
+	spihid_of->enable_gpio = devm_gpiod_get_index(dev, "spien", 0, GPIOD_ASIS);
+	if (IS_ERR(spihid_of->enable_gpio)) {
+		err = PTR_ERR(spihid_of->enable_gpio);
+		dev_err(dev, "failed to get 'spien' gpio pin: %d", err);
+		return err;
+	}
+
+	spihid_of->irq = of_irq_get(dev->of_node, 0);
+	if (spihid_of->irq < 0) {
+		err = spihid_of->irq;
+		dev_err(dev, "failed to get 'extended-irq': %d", err);
+		return err;
+	}
+	err = devm_request_threaded_irq(dev, spihid_of->irq, NULL,
+					spihid_apple_core_irq, IRQF_ONESHOT | IRQF_NO_AUTOEN,
+					"spi-hid-apple-irq", spi);
+	if (err < 0) {
+		dev_err(dev, "failed to request extended-irq %d: %d",
+			spihid_of->irq, err);
+		return err;
+	}
+
+	return spihid_apple_core_probe(spi, &spihid_of->ops);
+}
+
+static const struct of_device_id spihid_apple_of_match[] = {
+	{ .compatible = "apple,spi-hid-transport" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, spihid_apple_of_match);
+
+static const struct spi_device_id spihid_apple_of_id[] = {
+	{ "spi-hid-transport", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, spihid_apple_of_id);
+
+static struct spi_driver spihid_apple_of_driver = {
+	.driver = {
+		.name	= "spi-hid-apple-of",
+		.pm	= &spihid_apple_core_pm,
+		.of_match_table = of_match_ptr(spihid_apple_of_match),
+	},
+
+	.id_table	= spihid_apple_of_id,
+	.probe		= spihid_apple_of_probe,
+	.remove		= spihid_apple_core_remove,
+	.shutdown	= spihid_apple_core_shutdown,
+};
+
+module_spi_driver(spihid_apple_of_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/spi-hid/spi-hid-apple.h b/drivers/hid/spi-hid/spi-hid-apple.h
new file mode 100644
index 00000000000000..9abecd1ba78028
--- /dev/null
+++ b/drivers/hid/spi-hid/spi-hid-apple.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef SPI_HID_APPLE_H
+#define SPI_HID_APPLE_H
+
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+
+/**
+ * struct spihid_apple_ops - Ops to control the device from the core driver.
+ *
+ * @power_on: reset and power the device on.
+ * @power_off: power the device off.
+ * @enable_irq: enable irq or ACPI gpe.
+ * @disable_irq: disable irq or ACPI gpe.
+ * @enable_irq_wake: configure the irq as a system wakeup source.
+ * @disable_irq_wake: stop using the irq as a system wakeup source.
+ */
+struct spihid_apple_ops {
+	int (*power_on)(struct spihid_apple_ops *ops);
+	int (*power_off)(struct spihid_apple_ops *ops);
+	int (*enable_irq)(struct spihid_apple_ops *ops);
+	int (*disable_irq)(struct spihid_apple_ops *ops);
+	int (*enable_irq_wake)(struct spihid_apple_ops *ops);
+	int (*disable_irq_wake)(struct spihid_apple_ops *ops);
+};
+
+irqreturn_t spihid_apple_core_irq(int irq, void *data);
+
+int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops);
+void spihid_apple_core_remove(struct spi_device *spi);
+void spihid_apple_core_shutdown(struct spi_device *spi);
+
+extern const struct dev_pm_ops spihid_apple_core_pm;
+
+#endif /* SPI_HID_APPLE_H */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4cbaba15d86ef4..c82e8ea880a39e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1580,6 +1580,19 @@ config SENSORS_LM95245
 	  This driver can also be built as a module. If so, the module
 	  will be called lm95245.
 
+config SENSORS_MACSMC
+	tristate "Apple SMC (Apple Silicon)"
+	depends on APPLE_SMC && OF
+	depends on ARCH_APPLE && ARM64
+	help
+	  This driver exposes the temperature, voltage, current, power, and fan
+	  sensors present on Apple Silicon devices, such as the M-series Macs.
+
+	  Say Y here if you have an Apple Silicon device.
+
+	  This driver can also be built as a module. If so, the module will be called
+	  macsmc_hwmon.
+
 config SENSORS_PC87360
 	tristate "National Semiconductor PC87360 family"
 	depends on HAS_IOPORT
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b7ef0f0562d37e..d84c336a0b177b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -143,6 +143,7 @@ obj-$(CONFIG_SENSORS_LTC4260)	+= ltc4260.o
 obj-$(CONFIG_SENSORS_LTC4261)	+= ltc4261.o
 obj-$(CONFIG_SENSORS_LTC4282)	+= ltc4282.o
 obj-$(CONFIG_SENSORS_LTQ_CPUTEMP) += ltq-cputemp.o
+obj-$(CONFIG_SENSORS_MACSMC) += macsmc-hwmon.o
 obj-$(CONFIG_SENSORS_MAX1111)	+= max1111.o
 obj-$(CONFIG_SENSORS_MAX127)	+= max127.o
 obj-$(CONFIG_SENSORS_MAX16065)	+= max16065.o
diff --git a/drivers/hwmon/macsmc-hwmon.c b/drivers/hwmon/macsmc-hwmon.c
new file mode 100644
index 00000000000000..53f0264d88d079
--- /dev/null
+++ b/drivers/hwmon/macsmc-hwmon.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC hwmon driver for Apple Silicon platforms
+ *
+ * The System Management Controller on Apple Silicon devices is responsible for
+ * measuring data from sensors across the SoC and machine. These include power,
+ * temperature, voltage and current sensors. Some "sensors" actually expose
+ * derived values. An example of this is the key PHPC, which is an estimate
+ * of the heat energy being dissipated by the SoC.
+ *
+ * While each SoC only has one SMC variant, each platform exposes a different
+ * set of sensors. For example, M1 MacBooks expose battery telemetry sensors
+ * which are not present on the M1 Mac mini. For this reason, the available
+ * sensors for a given platform are described in the device tree in a child
+ * node of the SMC device. We must walk this list of available sensors and
+ * populate the required hwmon data structures at runtime.
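+ *
+ * As a purely illustrative sketch (the key IDs and node names below are
+ * placeholders, not taken from any real machine; the devicetree binding is
+ * authoritative), the child node parsed by this driver looks roughly like:
+ *
+ *	hwmon {
+ *		apple,temp-keys {
+ *			sensor-0 {
+ *				apple,key-id = "Tp01";
+ *				label = "Example temperature";
+ *			};
+ *		};
+ *		apple,fan-keys {
+ *			fan-0 {
+ *				apple,key-id = "F0Ac";
+ *				apple,fan-minimum = "F0Mn";
+ *				apple,fan-maximum = "F0Mx";
+ *				apple,fan-target = "F0Tg";
+ *				apple,fan-mode = "F0Md";
+ *				label = "Fan 1";
+ *			};
+ *		};
+ *	};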
+ *
+ * Originally based on a prototype by Jean-Francois Bortolotti <jeff@borto.fr>
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define MAX_LABEL_LENGTH 32
+#define NUM_SENSOR_TYPES 5 /* temp, volt, current, power, fan */
+
+static bool melt_my_mac;
+module_param_unsafe(melt_my_mac, bool, 0644);
+MODULE_PARM_DESC(melt_my_mac, "Override the SMC to set your own fan speeds on supported machines");
+
+struct macsmc_hwmon_sensor {
+	struct apple_smc_key_info info;
+	smc_key macsmc_key;
+	char label[MAX_LABEL_LENGTH];
+};
+
+struct macsmc_hwmon_fan {
+	struct macsmc_hwmon_sensor now;
+	struct macsmc_hwmon_sensor min;
+	struct macsmc_hwmon_sensor max;
+	struct macsmc_hwmon_sensor set;
+	struct macsmc_hwmon_sensor mode;
+	char label[MAX_LABEL_LENGTH];
+	u32 attrs;
+	bool manual;
+};
+
+struct macsmc_hwmon_sensors {
+	struct hwmon_channel_info channel_info;
+	struct macsmc_hwmon_sensor *sensors;
+	u32 n_sensors;
+};
+
+struct macsmc_hwmon_fans {
+	struct hwmon_channel_info channel_info;
+	struct macsmc_hwmon_fan *fans;
+	u32 n_fans;
+};
+
+struct macsmc_hwmon {
+	struct device *dev;
+	struct apple_smc *smc;
+	struct device *hwmon_dev;
+	struct hwmon_chip_info chip_info;
+	/* Chip + sensor types + NULL */
+	const struct hwmon_channel_info *channel_infos[1 + NUM_SENSOR_TYPES + 1];
+	struct macsmc_hwmon_sensors temp;
+	struct macsmc_hwmon_sensors volt;
+	struct macsmc_hwmon_sensors curr;
+	struct macsmc_hwmon_sensors power;
+	struct macsmc_hwmon_fans fan;
+};
+
+static int macsmc_hwmon_read_label(struct device *dev,
+				enum hwmon_sensor_types type, u32 attr,
+				int channel, const char **str)
+{
+	struct macsmc_hwmon *hwmon = dev_get_drvdata(dev);
+
+	switch (type) {
+	case hwmon_temp:
+		if (channel >= hwmon->temp.n_sensors)
+			return -EINVAL;
+		*str = hwmon->temp.sensors[channel].label;
+		break;
+	case hwmon_in:
+		if (channel >= hwmon->volt.n_sensors)
+			return -EINVAL;
+		*str = hwmon->volt.sensors[channel].label;
+		break;
+	case hwmon_curr:
+		if (channel >= hwmon->curr.n_sensors)
+			return -EINVAL;
+		*str = hwmon->curr.sensors[channel].label;
+		break;
+	case hwmon_power:
+		if (channel >= hwmon->power.n_sensors)
+			return -EINVAL;
+		*str = hwmon->power.sensors[channel].label;
+		break;
+	case hwmon_fan:
+		if (channel >= hwmon->fan.n_fans)
+			return -EINVAL;
+		*str = hwmon->fan.fans[channel].label;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/*
+ * The SMC has keys of multiple types, denoted by a FourCC of the same format
+ * as the key ID. We don't know what data type a key encodes until we poke at it.
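+ * The two types handled below are 'flt ' (a 32-bit IEEE 754 float) and
+ * 'ioft' (a 48.16 fixed point value), matched via their __SMC_KEY() FourCCs.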
+ *
+ * TODO: support other key types
+ */
+static int macsmc_hwmon_read_key(struct apple_smc *smc,
+				struct macsmc_hwmon_sensor *sensor, int scale,
+				long *val)
+{
+	int ret = 0;
+
+	switch (sensor->info.type_code) {
+	/* 32-bit IEEE 754 float */
+	case __SMC_KEY('f', 'l', 't', ' '): {
+		u32 flt_ = 0;
+
+		ret = apple_smc_read_f32_scaled(smc, sensor->macsmc_key, &flt_,
+						scale);
+		*val = flt_;
+		break;
+	}
+	/* 48.16 fixed point decimal */
+	case __SMC_KEY('i', 'o', 'f', 't'): {
+		u64 ioft = 0;
+
+		ret = apple_smc_read_ioft_scaled(smc, sensor->macsmc_key, &ioft,
+						scale);
+		*val = (long)ioft;
+		break;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int macsmc_hwmon_write_key(struct apple_smc *smc,
+				  struct macsmc_hwmon_sensor *sensor, long val,
+				  int scale)
+{
+	switch (sensor->info.type_code) {
+	/* 32-bit IEEE 754 float */
+	case __SMC_KEY('f', 'l', 't', ' '):
+		return apple_smc_write_f32_scaled(smc, sensor->macsmc_key, val, scale);
+	case __SMC_KEY('u', 'i', '8', ' '):
+		return apple_smc_write_u8(smc, sensor->macsmc_key, val);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int macsmc_hwmon_read_fan(struct macsmc_hwmon *hwmon, u32 attr, int chan, long *val)
+{
+	if (!(hwmon->fan.fans[chan].attrs & BIT(attr)))
+		return -EINVAL;
+
+	switch (attr) {
+	case hwmon_fan_input:
+		return macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[chan].now,
+					     1, val);
+	case hwmon_fan_min:
+		return macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[chan].min,
+					     1, val);
+	case hwmon_fan_max:
+		return macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[chan].max,
+					     1, val);
+	case hwmon_fan_target:
+		return macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[chan].set,
+					     1, val);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int macsmc_hwmon_write_fan(struct device *dev, u32 attr, int channel, long val)
+{
+	struct macsmc_hwmon *hwmon = dev_get_drvdata(dev);
+	int ret = 0;
+	long min = 0;
+	long max = 0;
+
+	if (!melt_my_mac ||
+	    hwmon->fan.fans[channel].mode.macsmc_key == 0)
+		return -EOPNOTSUPP;
+
+	if ((channel >= hwmon->fan.n_fans) ||
+	    !(hwmon->fan.fans[channel].attrs & BIT(attr)) ||
+	    (attr != hwmon_fan_target))
+		return -EINVAL;
+
+	/*
+	 * The SMC does no sanity checks on requested fan speeds, so we need to.
+	 */
+	ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[channel].min, 1, &min);
+	if (ret)
+		return ret;
+	ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[channel].max, 1, &max);
+	if (ret)
+		return ret;
+
+	if (val >= min && val <= max) {
+		if (!hwmon->fan.fans[channel].manual) {
+			/* Write 1 to mode key for manual control */
+			ret = macsmc_hwmon_write_key(hwmon->smc, &hwmon->fan.fans[channel].mode, 1, 1);
+			if (ret < 0)
+				return ret;
+
+			hwmon->fan.fans[channel].manual = true;
+			dev_info(dev, "Fan %d now under manual control! Set target speed to 0 for automatic control.\n",
+				channel + 1);
+		}
+		return macsmc_hwmon_write_key(hwmon->smc, &hwmon->fan.fans[channel].set, val, 1);
+	} else if (!val) {
+		if (hwmon->fan.fans[channel].manual) {
+			dev_info(dev, "Returning control of fan %d to SMC.\n", channel + 1);
+			ret = macsmc_hwmon_write_key(hwmon->smc, &hwmon->fan.fans[channel].mode, 0, 1);
+			if (ret < 0)
+				return ret;
+
+			hwmon->fan.fans[channel].manual = false;
+		}
+	} else {
+		dev_err(dev, "Requested fan speed %ld out of range [%ld, %ld]", val, min, max);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int macsmc_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+			u32 attr, int channel, long *val)
+{
+	struct macsmc_hwmon *hwmon = dev_get_drvdata(dev);
+	int ret = 0;
+
+	switch (type) {
+	case hwmon_temp:
+		ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->temp.sensors[channel],
+					    1000, val);
+		break;
+	case hwmon_in:
+		ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->volt.sensors[channel],
+					    1000, val);
+		break;
+	case hwmon_curr:
+		ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->curr.sensors[channel],
+					    1000, val);
+		break;
+	case hwmon_power:
+		/* SMC returns power in Watts with acceptable precision to scale to uW */
+		ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->power.sensors[channel],
+					    1000000, val);
+		break;
+	case hwmon_fan:
+		ret = macsmc_hwmon_read_fan(hwmon, attr, channel, val);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static int macsmc_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+			u32 attr, int channel, long val)
+{
+	switch (type) {
+	case hwmon_fan:
+		return macsmc_hwmon_write_fan(dev, attr, channel, val);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static umode_t macsmc_hwmon_fan_is_visible(const void *data, u32 attr, int channel)
+{
+	const struct macsmc_hwmon *hwmon = data;
+
+	if (channel >= hwmon->fan.n_fans)
+		return -EINVAL;
+
+	if (melt_my_mac && attr == hwmon_fan_target && hwmon->fan.fans[channel].mode.macsmc_key != 0)
+		return 0644;
+
+	return 0444;
+}
+
+static umode_t macsmc_hwmon_is_visible(const void *data,
+				enum hwmon_sensor_types type, u32 attr,
+				int channel)
+{
+	switch (type) {
+	case hwmon_fan:
+		return macsmc_hwmon_fan_is_visible(data, attr, channel);
+	default:
+		break;
+	}
+
+	return 0444;
+}
+
+static const struct hwmon_ops macsmc_hwmon_ops = {
+	.is_visible = macsmc_hwmon_is_visible,
+	.read = macsmc_hwmon_read,
+	.read_string = macsmc_hwmon_read_label,
+	.write = macsmc_hwmon_write,
+};
+
+/*
+ * Get the key metadata, including key data type, from the SMC.
+ */
+static int macsmc_hwmon_parse_key(struct device *dev, struct apple_smc *smc,
+			struct macsmc_hwmon_sensor *sensor, const char *key)
+{
+	int ret = 0;
+
+	ret = apple_smc_get_key_info(smc, _SMC_KEY(key), &sensor->info);
+	if (ret) {
+		dev_err(dev, "Failed to retrieve key info for %s\n", key);
+		return ret;
+	}
+	sensor->macsmc_key = _SMC_KEY(key);
+
+	return 0;
+}
+
+/*
+ * A sensor is a single key-value pair as made available by the SMC.
+ * The devicetree gives us the SMC key ID and a friendly name where the
+ * purpose of the sensor is known.
+ */
+static int macsmc_hwmon_create_sensor(struct device *dev, struct apple_smc *smc,
+				struct device_node *sensor_node,
+				struct macsmc_hwmon_sensor *sensor)
+{
+	const char *key, *label;
+	int ret = 0;
+
+	ret = of_property_read_string(sensor_node, "apple,key-id", &key);
+	if (ret) {
+		dev_err(dev, "Could not find apple,key-id in sensor node");
+		return ret;
+	}
+
+	ret = macsmc_hwmon_parse_key(dev, smc, sensor, key);
+	if (ret)
+		return ret;
+
+	if (!of_property_read_string(sensor_node, "label", &label))
+		strscpy_pad(sensor->label, label, sizeof(sensor->label));
+	else
+		strscpy_pad(sensor->label, key, sizeof(sensor->label));
+
+	return 0;
+}
+
+/*
+ * Fan data is exposed by the SMC as multiple sensors.
+ *
+ * The devicetree schema reuses apple,key-id for the actual fan speed sensor.
+ * Min, max and target keys do not need labels, so we can reuse label
+ * for naming the entire fan.
+ */
+static int macsmc_hwmon_create_fan(struct device *dev, struct apple_smc *smc,
+				struct device_node *fan_node, struct macsmc_hwmon_fan *fan)
+{
+	const char *label;
+	const char *now;
+	const char *min;
+	const char *max;
+	const char *set;
+	const char *mode;
+	int ret = 0;
+
+	ret = of_property_read_string(fan_node, "apple,key-id", &now);
+	if (ret) {
+		dev_err(dev, "apple,key-id not found in fan node!");
+		return -EINVAL;
+	}
+
+	ret = macsmc_hwmon_parse_key(dev, smc, &fan->now, now);
+	if (ret)
+		return ret;
+
+	if (!of_property_read_string(fan_node, "label", &label))
+		strscpy_pad(fan->label, label, sizeof(fan->label));
+	else
+		strscpy_pad(fan->label, now, sizeof(fan->label));
+
+	fan->attrs = HWMON_F_LABEL | HWMON_F_INPUT;
+
+	ret = of_property_read_string(fan_node, "apple,fan-minimum", &min);
+	if (ret)
+		dev_warn(dev, "No minimum fan speed key for %s", fan->label);
+	else {
+		if (!macsmc_hwmon_parse_key(dev, smc, &fan->min, min))
+			fan->attrs |= HWMON_F_MIN;
+	}
+
+	ret = of_property_read_string(fan_node, "apple,fan-maximum", &max);
+	if (ret)
+		dev_warn(dev, "No maximum fan speed key for %s", fan->label);
+	else {
+		if (!macsmc_hwmon_parse_key(dev, smc, &fan->max, max))
+			fan->attrs |= HWMON_F_MAX;
+	}
+
+	ret = of_property_read_string(fan_node, "apple,fan-target", &set);
+	if (ret)
+		dev_warn(dev, "No target fan speed key for %s", fan->label);
+	else {
+		if (!macsmc_hwmon_parse_key(dev, smc, &fan->set, set))
+			fan->attrs |= HWMON_F_TARGET;
+	}
+
+	ret = of_property_read_string(fan_node, "apple,fan-mode", &mode);
+	if (ret)
+		dev_warn(dev, "No fan mode key for %s", fan->label);
+	else {
+		ret = macsmc_hwmon_parse_key(dev, smc, &fan->mode, mode);
+		if (ret)
+			return ret;
+	}
+
+	/* Initialise fan control mode to automatic */
+	fan->manual = false;
+
+	return 0;
+}
+
+static int macsmc_hwmon_populate_sensors(struct macsmc_hwmon *hwmon,
+					struct device_node *hwmon_node)
+{
+	struct device_node *group_node = NULL;
+
+	for_each_child_of_node(hwmon_node, group_node) {
+		struct device_node *key_node = NULL;
+		struct macsmc_hwmon_sensors *sensor_group = NULL;
+		struct macsmc_hwmon_fans *fan_group = NULL;
+		u32 n_keys = 0;
+		int i = 0;
+
+		n_keys = of_get_child_count(group_node);
+		if (!n_keys) {
+			dev_err(hwmon->dev, "No keys found in %s!\n", group_node->name);
+			continue;
+		}
+
+		if (strcmp(group_node->name, "apple,temp-keys") == 0)
+			sensor_group = &hwmon->temp;
+		else if (strcmp(group_node->name, "apple,volt-keys") == 0)
+			sensor_group = &hwmon->volt;
+		else if (strcmp(group_node->name, "apple,current-keys") == 0)
+			sensor_group = &hwmon->curr;
+		else if (strcmp(group_node->name, "apple,power-keys") == 0)
+			sensor_group = &hwmon->power;
+		else if (strcmp(group_node->name, "apple,fan-keys") == 0)
+			fan_group = &hwmon->fan;
+		else {
+			dev_err(hwmon->dev, "Invalid group node: %s", group_node->name);
+			continue;
+		}
+
+		if (sensor_group) {
+			sensor_group->sensors = devm_kzalloc(hwmon->dev,
+					sizeof(struct macsmc_hwmon_sensor) * n_keys,
+					GFP_KERNEL);
+			if (!sensor_group->sensors) {
+				of_node_put(group_node);
+				return -ENOMEM;
+			}
+
+			for_each_child_of_node(group_node, key_node) {
+				if (!macsmc_hwmon_create_sensor(hwmon->dev, hwmon->smc,
+							key_node, &sensor_group->sensors[i]))
+					i++;
+			}
+
+			sensor_group->n_sensors = i;
+			if (!sensor_group->n_sensors) {
+				dev_err(hwmon->dev,
+					"No valid sensor keys found in %s\n",
+					group_node->name);
+				continue;
+			}
+		} else if (fan_group) {
+			fan_group->fans = devm_kzalloc(hwmon->dev,
+					sizeof(struct macsmc_hwmon_fan) * n_keys,
+					GFP_KERNEL);
+
+			if (!fan_group->fans) {
+				of_node_put(group_node);
+				return -ENOMEM;
+			}
+
+			for_each_child_of_node(group_node, key_node) {
+				if (!macsmc_hwmon_create_fan(hwmon->dev,
+					hwmon->smc, key_node,
+					&fan_group->fans[i]))
+					i++;
+			}
+
+			fan_group->n_fans = i;
+			if (!fan_group->n_fans) {
+				dev_err(hwmon->dev,
+					"No valid fan keys found in %s\n",
+					group_node->name);
+				continue;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Create NULL-terminated config arrays
+ */
+static void macsmc_hwmon_populate_configs(u32 *configs,
+					u32 num_keys, u32 flags)
+{
+	int idx = 0;
+
+	for (idx = 0; idx < num_keys; idx++)
+		configs[idx] = flags;
+
+	configs[idx] = 0;
+}
+
+static void macsmc_hwmon_populate_fan_configs(u32 *configs,
+					u32 num_keys, struct macsmc_hwmon_fans *fans)
+{
+	int idx = 0;
+
+	for (idx = 0; idx < num_keys; idx++)
+		configs[idx] = fans->fans[idx].attrs;
+
+	configs[idx] = 0;
+}
+
+static const struct hwmon_channel_info * const macsmc_chip_channel_info =
+	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ);
+
+static int macsmc_hwmon_create_infos(struct macsmc_hwmon *hwmon)
+{
+	int i = 0;
+	struct hwmon_channel_info *channel_info;
+
+	/* chip */
+	hwmon->channel_infos[i++] = macsmc_chip_channel_info;
+
+	if (hwmon->temp.n_sensors) {
+		channel_info = &hwmon->temp.channel_info;
+		channel_info->type = hwmon_temp;
+		channel_info->config = devm_kzalloc(hwmon->dev,
+						sizeof(u32) * (hwmon->temp.n_sensors + 1),
+						GFP_KERNEL);
+		if (!channel_info->config)
+			return -ENOMEM;
+
+		macsmc_hwmon_populate_configs((u32 *)channel_info->config,
+						hwmon->temp.n_sensors,
+						(HWMON_T_INPUT | HWMON_T_LABEL));
+		hwmon->channel_infos[i++] = channel_info;
+	}
+
+	if (hwmon->volt.n_sensors) {
+		channel_info = &hwmon->volt.channel_info;
+		channel_info->type = hwmon_in;
+		channel_info->config = devm_kzalloc(hwmon->dev,
+						sizeof(u32) * (hwmon->volt.n_sensors + 1),
+						GFP_KERNEL);
+		if (!channel_info->config)
+			return -ENOMEM;
+
+		macsmc_hwmon_populate_configs((u32 *)channel_info->config,
+						hwmon->volt.n_sensors,
+						(HWMON_I_INPUT | HWMON_I_LABEL));
+		hwmon->channel_infos[i++] = channel_info;
+	}
+
+	if (hwmon->curr.n_sensors) {
+		channel_info = &hwmon->curr.channel_info;
+		channel_info->type = hwmon_curr;
+		channel_info->config = devm_kzalloc(hwmon->dev,
+						sizeof(u32) * (hwmon->curr.n_sensors + 1),
+						GFP_KERNEL);
+		if (!channel_info->config)
+			return -ENOMEM;
+
+		macsmc_hwmon_populate_configs((u32 *)channel_info->config,
+						hwmon->curr.n_sensors,
+						(HWMON_C_INPUT | HWMON_C_LABEL));
+		hwmon->channel_infos[i++] = channel_info;
+	}
+
+	if (hwmon->power.n_sensors) {
+		channel_info = &hwmon->power.channel_info;
+		channel_info->type = hwmon_power;
+		channel_info->config = devm_kzalloc(hwmon->dev,
+						sizeof(u32) * (hwmon->power.n_sensors + 1),
+						GFP_KERNEL);
+		if (!channel_info->config)
+			return -ENOMEM;
+
+		macsmc_hwmon_populate_configs((u32 *)channel_info->config,
+						hwmon->power.n_sensors,
+						(HWMON_P_INPUT | HWMON_P_LABEL));
+		hwmon->channel_infos[i++] = channel_info;
+	}
+
+	if (hwmon->fan.n_fans) {
+		channel_info = &hwmon->fan.channel_info;
+		channel_info->type = hwmon_fan;
+		channel_info->config = devm_kzalloc(hwmon->dev,
+						sizeof(u32) * (hwmon->fan.n_fans + 1),
+						GFP_KERNEL);
+		if (!channel_info->config)
+			return -ENOMEM;
+
+		macsmc_hwmon_populate_fan_configs((u32 *)channel_info->config,
+							hwmon->fan.n_fans, &hwmon->fan);
+		hwmon->channel_infos[i++] = channel_info;
+	}
+
+	return 0;
+}
+
+static int macsmc_hwmon_probe(struct platform_device *pdev)
+{
+	struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+	struct macsmc_hwmon *hwmon;
+	struct device_node *hwmon_node;
+	int ret = 0;
+
+	hwmon = devm_kzalloc(&pdev->dev, sizeof(struct macsmc_hwmon), GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+
+	hwmon->dev = &pdev->dev;
+	hwmon->smc = smc;
+
+	hwmon_node = of_get_child_by_name(pdev->dev.parent->of_node, "hwmon");
+	if (!hwmon_node) {
+		dev_err(hwmon->dev, "macsmc-hwmon not found in devicetree!\n");
+		return -ENODEV;
+	}
+
+	ret = macsmc_hwmon_populate_sensors(hwmon, hwmon_node);
+	if (ret)
+		dev_info(hwmon->dev, "Could not populate keys!\n");
+
+	of_node_put(hwmon_node);
+
+	if (!hwmon->temp.n_sensors && !hwmon->volt.n_sensors &&
+		!hwmon->curr.n_sensors && !hwmon->power.n_sensors &&
+		!hwmon->fan.n_fans) {
+		dev_err(hwmon->dev, "No valid keys found of any supported type");
+		return -ENODEV;
+	}
+
+	ret = macsmc_hwmon_create_infos(hwmon);
+	if (ret)
+		return ret;
+
+	hwmon->chip_info.ops = &macsmc_hwmon_ops;
+	hwmon->chip_info.info = (const struct hwmon_channel_info *const *)&hwmon->channel_infos;
+
+	hwmon->hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+						"macsmc_hwmon", hwmon,
+						&hwmon->chip_info, NULL);
+	if (IS_ERR(hwmon->hwmon_dev))
+		return dev_err_probe(hwmon->dev, PTR_ERR(hwmon->hwmon_dev),
+				     "Probing SMC hwmon device failed!\n");
+
+	dev_info(hwmon->dev, "Registered SMC hwmon device. Sensors:");
+	dev_info(hwmon->dev, "Temperature: %d, Voltage: %d, Current: %d, Power: %d, Fans: %d",
+		hwmon->temp.n_sensors, hwmon->volt.n_sensors,
+		hwmon->curr.n_sensors, hwmon->power.n_sensors, hwmon->fan.n_fans);
+
+	return 0;
+}
+
+static struct platform_driver macsmc_hwmon_driver = {
+	.probe = macsmc_hwmon_probe,
+	.driver = {
+		.name = "macsmc_hwmon",
+	},
+};
+module_platform_driver(macsmc_hwmon_driver);
+
+MODULE_DESCRIPTION("Apple Silicon SMC hwmon driver");
+MODULE_AUTHOR("James Calligeros <jcalligeros99@gmail.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_ALIAS("platform:macsmc_hwmon");
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index dac694a9d781f8..8f2538c8768771 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,6 +5,7 @@
  * SMBus host driver for PA Semi PWRficient
  */
 
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
@@ -20,27 +21,39 @@
 /* Register offsets */
 #define REG_MTXFIFO	0x00
 #define REG_MRXFIFO	0x04
+#define REG_XFSTA	0x0c
 #define REG_SMSTA	0x14
 #define REG_IMASK	0x18
 #define REG_CTL		0x1c
 #define REG_REV		0x28
 
 /* Register defs */
-#define MTXFIFO_READ	0x00000400
-#define MTXFIFO_STOP	0x00000200
-#define MTXFIFO_START	0x00000100
-#define MTXFIFO_DATA_M	0x000000ff
-
-#define MRXFIFO_EMPTY	0x00000100
-#define MRXFIFO_DATA_M	0x000000ff
-
-#define SMSTA_XEN	0x08000000
-#define SMSTA_MTN	0x00200000
-
-#define CTL_MRR		0x00000400
-#define CTL_MTR		0x00000200
-#define CTL_EN		0x00000800
-#define CTL_CLK_M	0x000000ff
+#define MTXFIFO_READ	BIT(10)
+#define MTXFIFO_STOP	BIT(9)
+#define MTXFIFO_START	BIT(8)
+#define MTXFIFO_DATA_M	GENMASK(7, 0)
+
+#define MRXFIFO_EMPTY	BIT(8)
+#define MRXFIFO_DATA_M	GENMASK(7, 0)
+
+#define SMSTA_XIP	BIT(28)
+#define SMSTA_XEN	BIT(27)
+#define SMSTA_JMD	BIT(25)
+#define SMSTA_JAM	BIT(24)
+#define SMSTA_MTO	BIT(23)
+#define SMSTA_MTA	BIT(22)
+#define SMSTA_MTN	BIT(21)
+#define SMSTA_MRNE	BIT(19)
+#define SMSTA_MTE	BIT(16)
+#define SMSTA_TOM	BIT(6)
+
+#define CTL_EN		BIT(11)
+#define CTL_MRR		BIT(10)
+#define CTL_MTR		BIT(9)
+#define CTL_UJM		BIT(8)
+#define CTL_CLK_M	GENMASK(7, 0)
+
+#define TRANSFER_TIMEOUT_MS	100
 
 static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
 {
@@ -61,7 +74,7 @@ static inline int reg_read(struct pasemi_smbus *smbus, int reg)
 
 static void pasemi_reset(struct pasemi_smbus *smbus)
 {
-	u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M));
+	u32 val = (CTL_MTR | CTL_MRR | CTL_UJM | (smbus->clk_div & CTL_CLK_M));
 
 	if (smbus->hw_rev >= 6)
 		val |= CTL_EN;
@@ -70,23 +83,51 @@ static void pasemi_reset(struct pasemi_smbus *smbus)
 	reinit_completion(&smbus->irq_completion);
 }
 
-static void pasemi_smb_clear(struct pasemi_smbus *smbus)
+static int pasemi_smb_clear(struct pasemi_smbus *smbus)
 {
-	unsigned int status;
+	unsigned int status, xfstatus;
+	int timeout = TRANSFER_TIMEOUT_MS;
 
 	status = reg_read(smbus, REG_SMSTA);
+
+	/* First wait for the bus to go idle */
+	while ((status & (SMSTA_XIP | SMSTA_JAM)) && timeout--) {
+		msleep(1);
+		status = reg_read(smbus, REG_SMSTA);
+	}
+
+	xfstatus = reg_read(smbus, REG_XFSTA);
+
+	if (timeout < 0) {
+		dev_warn(smbus->dev, "Bus is still stuck (status 0x%08x xfstatus 0x%08x)\n",
+			 status, xfstatus);
+		return -EIO;
+	}
+
+	/* If any badness happened or there is data in the FIFOs, reset the FIFOs */
+	if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) ||
+		!(status & SMSTA_MTE)) {
+		dev_warn(smbus->dev, "Issuing reset due to status 0x%08x (xfstatus 0x%08x)\n",
+			 status, xfstatus);
+		pasemi_reset(smbus);
+	}
+
+	/* Clear the flags */
 	reg_write(smbus, REG_SMSTA, status);
+
+	return 0;
 }
 
 static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
 {
-	int timeout = 100;
+	int timeout = TRANSFER_TIMEOUT_MS;
 	unsigned int status;
 
 	if (smbus->use_irq) {
 		reinit_completion(&smbus->irq_completion);
-		reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN);
-		wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100));
+		/* XEN should be set when a transaction terminates, whether due to error or not */
+		reg_write(smbus, REG_IMASK, SMSTA_XEN);
+		wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(timeout));
 		reg_write(smbus, REG_IMASK, 0);
 		status = reg_read(smbus, REG_SMSTA);
 	} else {
@@ -97,16 +138,32 @@ static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
 		}
 	}
 
-	/* Got NACK? */
-	if (status & SMSTA_MTN)
-		return -ENXIO;
+	/* Controller timeout? */
+	if (status & SMSTA_TOM) {
+		dev_warn(smbus->dev, "Controller timeout, status 0x%08x\n", status);
+		return -EIO;
+	}
 
-	if (timeout < 0) {
-		dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status);
-		reg_write(smbus, REG_SMSTA, status);
+	/* Peripheral timeout? */
+	if (status & SMSTA_MTO) {
+		dev_warn(smbus->dev, "Peripheral timeout, status 0x%08x\n", status);
 		return -ETIME;
 	}
 
+	/* Still stuck in a transaction? */
+	if (status & SMSTA_XIP) {
+		dev_warn(smbus->dev, "Bus stuck, status 0x%08x\n", status);
+		return -EIO;
+	}
+
+	/* Arbitration loss? */
+	if (status & SMSTA_MTA)
+		return -EBUSY;
+
+	/* Got NACK? */
+	if (status & SMSTA_MTN)
+		return -ENXIO;
+
 	/* Clear XEN */
 	reg_write(smbus, REG_SMSTA, SMSTA_XEN);
 
@@ -167,7 +224,8 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter,
 	struct pasemi_smbus *smbus = adapter->algo_data;
 	int ret, i;
 
-	pasemi_smb_clear(smbus);
+	if (pasemi_smb_clear(smbus))
+		return -EIO;
 
 	ret = 0;
 
@@ -190,7 +248,8 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
 	addr <<= 1;
 	read_flag = read_write == I2C_SMBUS_READ;
 
-	pasemi_smb_clear(smbus);
+	if (pasemi_smb_clear(smbus))
+		return -EIO;
 
 	switch (size) {
 	case I2C_SMBUS_QUICK:
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 1ccb5ccf370660..e3818ef567822b 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -3,6 +3,7 @@
 # IIO common modules
 #
 
+source "drivers/iio/common/aop_sensors/Kconfig"
 source "drivers/iio/common/cros_ec_sensors/Kconfig"
 source "drivers/iio/common/hid-sensors/Kconfig"
 source "drivers/iio/common/inv_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index d3e952239a6219..5f99a429725d66 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -8,6 +8,7 @@
 #
 
 # When adding new entries keep the list in alphabetical order
+obj-y += aop_sensors/
 obj-y += cros_ec_sensors/
 obj-y += hid-sensors/
 obj-y += inv_sensors/
diff --git a/drivers/iio/common/aop_sensors/Kconfig b/drivers/iio/common/aop_sensors/Kconfig
new file mode 100644
index 00000000000000..10d6e720057609
--- /dev/null
+++ b/drivers/iio/common/aop_sensors/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+config IIO_AOP_SENSOR_LAS
+	tristate "AOP Lid angle sensor"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on RUST
+	depends on SYSFS
+	select APPLE_AOP
+	default m if ARCH_APPLE
+	help
+	  Module to handle the lid angle sensor attached to the AOP
+	  coprocessor on Apple laptops.
+
+config IIO_AOP_SENSOR_ALS
+	tristate "AOP Ambient light sensor"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on RUST
+	depends on SYSFS
+	select APPLE_AOP
+	default m if ARCH_APPLE
+	help
+	  Module to handle the ambient light sensor attached to the AOP
+	  coprocessor on Apple laptops.
diff --git a/drivers/iio/common/aop_sensors/Makefile b/drivers/iio/common/aop_sensors/Makefile
new file mode 100644
index 00000000000000..8da5a19efe0f0c
--- /dev/null
+++ b/drivers/iio/common/aop_sensors/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+obj-$(CONFIG_IIO_AOP_SENSOR_LAS) += aop_las.o
+obj-$(CONFIG_IIO_AOP_SENSOR_ALS) += aop_als.o
diff --git a/drivers/iio/common/aop_sensors/aop_als.rs b/drivers/iio/common/aop_sensors/aop_als.rs
new file mode 100644
index 00000000000000..5a4d91969b0c15
--- /dev/null
+++ b/drivers/iio/common/aop_sensors/aop_als.rs
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Apple AOP ambient light sensor driver
+//!
+//! Copyright (C) The Asahi Linux Contributors
+
+use kernel::{
+    bindings, c_str,
+    iio::common::aop_sensors::{AopSensorData, IIORegistration, MessageProcessor},
+    module_platform_driver,
+    of::{self, Node},
+    platform,
+    prelude::*,
+    soc::apple::aop::{EPICService, AOP},
+    sync::Arc,
+    types::ForeignOwnable,
+};
+
+const EPIC_SUBTYPE_SET_ALS_PROPERTY: u16 = 0x4;
+
+fn enable_als(aop: &dyn AOP, dev: &platform::Device, of: &Node, svc: &EPICService) -> Result<()> {
+    if let Some(prop) = of.find_property(c_str!("apple,als-calibration")) {
+        set_als_property(aop, svc, 0xb, prop.value())?;
+        set_als_property(aop, svc, 0, &200000u32.to_le_bytes())?;
+    } else {
+        dev_warn!(
+            dev.as_ref(),
+            "ALS Calibration not found, will not enable it"
+        );
+    }
+    Ok(())
+}
+fn set_als_property(aop: &dyn AOP, svc: &EPICService, tag: u32, data: &[u8]) -> Result<u32> {
+    let mut buf = KVec::new();
+    buf.resize(data.len() + 8, 0, GFP_KERNEL)?;
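+    // Buffer layout used by this call: four zero bytes, then the little-endian
+    // property tag, then the raw property data.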
+    buf[8..].copy_from_slice(data);
+    buf[4..8].copy_from_slice(&tag.to_le_bytes());
+    aop.epic_call(svc, EPIC_SUBTYPE_SET_ALS_PROPERTY, &buf)
+}
+
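+/// Converts an IEEE-754 binary32 bit pattern to an unsigned integer,
+/// truncating towards zero. Negative values, NaNs and subnormals map to 0,
+/// and values too large for 32 bits saturate to u32::MAX.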
+fn f32_to_u32(f: u32) -> u32 {
+    if f & 0x80000000 != 0 {
+        return 0;
+    }
+    let exp = ((f & 0x7f800000) >> 23) as i32 - 127;
+    if exp < 0 {
+        return 0;
+    }
+    if exp == 128 && f & 0x7fffff != 0 {
+        return 0;
+    }
+    let mant = f & 0x7fffff | 0x800000;
+    if exp <= 23 {
+        return mant >> (23 - exp);
+    }
+    if exp >= 32 {
+        return u32::MAX;
+    }
+    mant << (exp - 23)
+}
+
+struct MsgProc(usize);
+
+impl MessageProcessor for MsgProc {
+    fn process(&self, message: &[u8]) -> u32 {
+        let offset = self.0;
+        let raw = u32::from_le_bytes(message[offset..offset + 4].try_into().unwrap());
+        f32_to_u32(raw)
+    }
+}
+
+#[repr(transparent)]
+struct IIOAopAlsDriver(IIORegistration<MsgProc>);
+
+kernel::of_device_table!(OF_TABLE, MODULE_OF_TABLE, (), [] as [(of::DeviceId, ()); 0]);
+
+impl platform::Driver for IIOAopAlsDriver {
+    type IdInfo = ();
+
+    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+    fn probe(
+        pdev: &mut platform::Device,
+        _info: Option<&()>,
+    ) -> Result<Pin<KBox<IIOAopAlsDriver>>> {
+        let dev = pdev.as_ref();
+        let parent = dev.parent().unwrap();
+        // SAFETY: our parent is AOP, and AopDriver is repr(transparent) for Arc<dyn AOP>
+        let adata_ptr = unsafe { Pin::<KBox<Arc<dyn AOP>>>::borrow(parent.get_drvdata()) };
+        let adata = (&*adata_ptr).clone();
+        // SAFETY: AOP sets the platform data correctly
+        let service = unsafe { *((*dev.as_raw()).platform_data as *const EPICService) };
+        let of = parent
+            .of_node()
+            .ok_or(EIO)?
+            .get_child_by_name(c_str!("als"))
+            .ok_or(EIO)?;
+        let ty = bindings::BINDINGS_IIO_LIGHT;
+        let data = AopSensorData::new(pdev.clone(), ty, MsgProc(40))?;
+        adata.add_fakehid_listener(service, data.clone())?;
+        enable_als(adata.as_ref(), pdev, &of, &service)?;
+        let info_mask = 1 << bindings::BINDINGS_IIO_CHAN_INFO_PROCESSED;
+        Ok(KBox::pin(
+            IIOAopAlsDriver(IIORegistration::<MsgProc>::new(
+                data,
+                c_str!("aop-sensors-als"),
+                ty,
+                info_mask,
+                &THIS_MODULE,
+            )?),
+            GFP_KERNEL,
+        )?)
+    }
+}
+
+module_platform_driver! {
+    type: IIOAopAlsDriver,
+    name: "iio_aop_als",
+    license: "Dual MIT/GPL",
+    alias: ["platform:iio_aop_als"],
+}
diff --git a/drivers/iio/common/aop_sensors/aop_las.rs b/drivers/iio/common/aop_sensors/aop_las.rs
new file mode 100644
index 00000000000000..cf3d4c45678728
--- /dev/null
+++ b/drivers/iio/common/aop_sensors/aop_las.rs
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Apple AOP lid angle sensor driver
+//!
+//! Copyright (C) The Asahi Linux Contributors
+
+use kernel::{
+    bindings, c_str,
+    iio::common::aop_sensors::{AopSensorData, IIORegistration, MessageProcessor},
+    module_platform_driver, of, platform,
+    prelude::*,
+    soc::apple::aop::{EPICService, AOP},
+    sync::Arc,
+    types::ForeignOwnable,
+};
+
+struct MsgProc;
+
+impl MessageProcessor for MsgProc {
+    fn process(&self, message: &[u8]) -> u32 {
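+        // The raw lid angle reading is byte 1 of the fakehid report.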
+        message[1] as u32
+    }
+}
+
+#[repr(transparent)]
+struct IIOAopLasDriver(IIORegistration<MsgProc>);
+
+kernel::of_device_table!(OF_TABLE, MODULE_OF_TABLE, (), [] as [(of::DeviceId, ()); 0]);
+
+impl platform::Driver for IIOAopLasDriver {
+    type IdInfo = ();
+
+    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+    fn probe(
+        pdev: &mut platform::Device,
+        _info: Option<&()>,
+    ) -> Result<Pin<KBox<IIOAopLasDriver>>> {
+        let dev = pdev.as_ref();
+        let parent = dev.parent().unwrap();
+        // SAFETY: our parent is AOP, and AopDriver is repr(transparent) for Arc<dyn AOP>
+        let adata_ptr = unsafe { Pin::<KBox<Arc<dyn AOP>>>::borrow(parent.get_drvdata()) };
+        let adata = (&*adata_ptr).clone();
+        // SAFETY: AOP sets the platform data correctly
+        let service = unsafe { *((*dev.as_raw()).platform_data as *const EPICService) };
+
+        let ty = bindings::BINDINGS_IIO_ANGL;
+        let data = AopSensorData::new(pdev.clone(), ty, MsgProc)?;
+        adata.add_fakehid_listener(service, data.clone())?;
+        let info_mask = 1 << bindings::BINDINGS_IIO_CHAN_INFO_RAW;
+        Ok(KBox::pin(
+            IIOAopLasDriver(IIORegistration::<MsgProc>::new(
+                data,
+                c_str!("aop-sensors-las"),
+                ty,
+                info_mask,
+                &THIS_MODULE,
+            )?),
+            GFP_KERNEL,
+        )?)
+    }
+}
+
+module_platform_driver! {
+    type: IIOAopLasDriver,
+    name: "iio_aop_las",
+    license: "Dual MIT/GPL",
+    alias: ["platform:iio_aop_las"],
+}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 13d135257e0601..1e4b9f752856e3 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -968,4 +968,16 @@ config INPUT_STPMIC1_ONKEY
 	  To compile this driver as a module, choose M here: the
 	  module will be called stpmic1_onkey.
 
+config INPUT_MACSMC_HID
+	tristate "Apple Mac SMC lid/buttons"
+	depends on APPLE_SMC
+	default ARCH_APPLE
+	help
+	  Say Y here if you want to use the input events delivered via the
+	  SMC controller on Apple Mac machines using the macsmc driver.
+	  This includes lid open/close and the power button.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called macsmc-hid.
+
 endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 6d91804d0a6f76..8c998ae0f919e9 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_INPUT_IQS7222)		+= iqs7222.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
+obj-$(CONFIG_INPUT_MACSMC_HID)		+= macsmc-hid.o
 obj-$(CONFIG_INPUT_MAX77650_ONKEY)	+= max77650-onkey.o
 obj-$(CONFIG_INPUT_MAX77693_HAPTIC)	+= max77693-haptic.o
 obj-$(CONFIG_INPUT_MAX8925_ONKEY)	+= max8925_onkey.o
diff --git a/drivers/input/misc/macsmc-hid.c b/drivers/input/misc/macsmc-hid.c
new file mode 100644
index 00000000000000..aeb658a5321e32
--- /dev/null
+++ b/drivers/input/misc/macsmc-hid.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC input event driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver exposes HID events from the SMC as an input device.
+ * This includes the lid open/close and power button notifications.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/reboot.h>
+
+struct macsmc_hid {
+	struct device *dev;
+	struct apple_smc *smc;
+	struct input_dev *input;
+	struct notifier_block nb;
+	bool wakeup_mode;
+};
+
+#define SMC_EV_BTN 0x7201
+#define SMC_EV_LID 0x7203
+
+#define BTN_POWER	0x01
+#define BTN_TOUCHID	0x06
+#define BTN_POWER_HELD1	0xfe
+#define BTN_POWER_HELD2	0x00
+
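+/*
+ * SMC notification events arrive packed in a single word: the event type in
+ * bits 31:16 and two data bytes (d1, d2) in bits 15:8 and 7:0.
+ */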
+static int macsmc_hid_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct macsmc_hid *smchid = container_of(nb, struct macsmc_hid, nb);
+	u16 type = event >> 16;
+	u8 d1 = (event >> 8) & 0xff;
+	u8 d2 = event & 0xff;
+
+	switch (type) {
+	case SMC_EV_BTN:
+		switch (d1) {
+		case BTN_POWER:
+		case BTN_TOUCHID:
+			if (smchid->wakeup_mode) {
+				if (d2) {
+					dev_info(smchid->dev, "Button wakeup\n");
+					pm_wakeup_hard_event(smchid->dev);
+				}
+			} else {
+				input_report_key(smchid->input, KEY_POWER, d2);
+				input_sync(smchid->input);
+			}
+			break;
+		case BTN_POWER_HELD1:
+			/*
+			 * TODO: is this pre-warning useful?
+			 */
+			if (d2)
+				dev_warn(smchid->dev, "Power button held down\n");
+			break;
+		case BTN_POWER_HELD2:
+			/*
+			 * If we get here, we have about 4 seconds before forced shutdown.
+			 * Try to do an emergency shutdown to make sure the NVMe cache is
+			 * flushed. macOS actually does this by panicking (!)...
+			 */
+			if (d2) {
+				dev_crit(smchid->dev, "Triggering forced shutdown!\n");
+				if (kernel_can_power_off())
+					kernel_power_off();
+				else /* Missing macsmc-reboot driver? */
+					kernel_restart("SMC power button triggered restart");
+			}
+			break;
+		default:
+			dev_info(smchid->dev, "Unknown SMC button event: %02x %02x\n", d1, d2);
+			break;
+		}
+		return NOTIFY_OK;
+	case SMC_EV_LID:
+		if (smchid->wakeup_mode && !d1) {
+			dev_info(smchid->dev, "Lid wakeup\n");
+			pm_wakeup_hard_event(smchid->dev);
+		}
+		input_report_switch(smchid->input, SW_LID, d1);
+		input_sync(smchid->input);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int macsmc_hid_probe(struct platform_device *pdev)
+{
+	struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+	struct macsmc_hid *smchid;
+	bool have_lid, have_power;
+	int ret;
+
+	have_lid = apple_smc_key_exists(smc, SMC_KEY(MSLD));
+	have_power = apple_smc_key_exists(smc, SMC_KEY(bHLD));
+
+	if (!have_lid && !have_power)
+		return -ENODEV;
+
+	smchid = devm_kzalloc(&pdev->dev, sizeof(*smchid), GFP_KERNEL);
+	if (!smchid)
+		return -ENOMEM;
+
+	smchid->dev = &pdev->dev;
+	smchid->smc = smc;
+	platform_set_drvdata(pdev, smchid);
+
+	smchid->input = devm_input_allocate_device(&pdev->dev);
+	if (!smchid->input)
+		return -ENOMEM;
+
+	smchid->input->phys = "macsmc-hid (0)";
+	smchid->input->name = "Apple SMC power/lid events";
+
+	if (have_lid)
+		input_set_capability(smchid->input, EV_SW, SW_LID);
+	if (have_power)
+		input_set_capability(smchid->input, EV_KEY, KEY_POWER);
+
+	ret = input_register_device(smchid->input);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register input device: %d\n", ret);
+		return ret;
+	}
+
+	if (have_lid) {
+		u8 val;
+
+		ret = apple_smc_read_u8(smc, SMC_KEY(MSLD), &val);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to read initial lid state\n");
+		} else {
+			input_report_switch(smchid->input, SW_LID, val);
+		}
+	}
+	if (have_power) {
+		u32 val;
+
+		ret = apple_smc_read_u32(smc, SMC_KEY(bHLD), &val);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to read initial power button state\n");
+		} else {
+			input_report_key(smchid->input, KEY_POWER, val & 1);
+		}
+	}
+
+	input_sync(smchid->input);
+
+	smchid->nb.notifier_call = macsmc_hid_event;
+	apple_smc_register_notifier(smc, &smchid->nb);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	return 0;
+}
+
+static int macsmc_hid_pm_prepare(struct device *dev)
+{
+	struct macsmc_hid *smchid = dev_get_drvdata(dev);
+
+	smchid->wakeup_mode = true;
+	return 0;
+}
+
+static void macsmc_hid_pm_complete(struct device *dev)
+{
+	struct macsmc_hid *smchid = dev_get_drvdata(dev);
+
+	smchid->wakeup_mode = false;
+}
+
+static const struct dev_pm_ops macsmc_hid_pm_ops = {
+	.prepare = macsmc_hid_pm_prepare,
+	.complete = macsmc_hid_pm_complete,
+};
+
+static struct platform_driver macsmc_hid_driver = {
+	.driver = {
+		.name = "macsmc-hid",
+		.pm = &macsmc_hid_pm_ops,
+	},
+	.probe = macsmc_hid_probe,
+};
+module_platform_driver(macsmc_hid_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC input event driver");
+MODULE_ALIAS("platform:macsmc-hid");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 1a03de7fcfa66c..91a2b584dab146 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -103,6 +103,19 @@ config TOUCHSCREEN_ADC
 	  To compile this driver as a module, choose M here: the
 	  module will be called resistive-adc-touch.ko.
 
+config TOUCHSCREEN_APPLE_Z2
+	tristate "Apple Z2 touchscreens"
+	default ARCH_APPLE
+	depends on SPI && (ARCH_APPLE || COMPILE_TEST)
+	help
+	  Say Y here if you have an ARM Apple device with
+	  a touchscreen or a Touch Bar.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called apple_z2.
+
 config TOUCHSCREEN_AR1021_I2C
 	tristate "Microchip AR1020/1021 i2c touchscreen"
 	depends on I2C && OF
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 82bc837ca01e2e..97a025c6a3770f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C)	+= ad7879-i2c.o
 obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI)	+= ad7879-spi.o
 obj-$(CONFIG_TOUCHSCREEN_ADC)		+= resistive-adc-touch.o
 obj-$(CONFIG_TOUCHSCREEN_ADS7846)	+= ads7846.o
+obj-$(CONFIG_TOUCHSCREEN_APPLE_Z2)	+= apple_z2.o
 obj-$(CONFIG_TOUCHSCREEN_AR1021_I2C)	+= ar1021_i2c.o
 obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT)	+= atmel_mxt_ts.o
 obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR)	+= auo-pixcir-ts.o
diff --git a/drivers/input/touchscreen/apple_z2.c b/drivers/input/touchscreen/apple_z2.c
new file mode 100644
index 00000000000000..a996cf28e4d25f
--- /dev/null
+++ b/drivers/input/touchscreen/apple_z2.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Z2 touchscreen driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+
+#define APPLE_Z2_NUM_FINGERS_OFFSET      16
+#define APPLE_Z2_FINGERS_OFFSET          24
+#define APPLE_Z2_TOUCH_STARTED           3
+#define APPLE_Z2_TOUCH_MOVED             4
+#define APPLE_Z2_CMD_READ_INTERRUPT_DATA 0xEB
+#define APPLE_Z2_HBPP_CMD_BLOB           0x3001
+#define APPLE_Z2_FW_MAGIC                0x5746325A
+#define LOAD_COMMAND_INIT_PAYLOAD        0
+#define LOAD_COMMAND_SEND_BLOB           1
+#define LOAD_COMMAND_SEND_CALIBRATION    2
+#define CAL_PROP_NAME                    "apple,z2-cal-blob"
+
+struct apple_z2 {
+	struct spi_device *spidev;
+	struct gpio_desc *reset_gpio;
+	struct input_dev *input_dev;
+	struct completion boot_irq;
+	bool booted;
+	int index_parity;
+	struct touchscreen_properties props;
+	const char *fw_name;
+	u8 *tx_buf;
+	u8 *rx_buf;
+};
+
+struct apple_z2_finger {
+	u8 finger;
+	u8 state;
+	__le16 unknown2;
+	__le16 abs_x;
+	__le16 abs_y;
+	__le16 rel_x;
+	__le16 rel_y;
+	__le16 tool_major;
+	__le16 tool_minor;
+	__le16 orientation;
+	__le16 touch_major;
+	__le16 touch_minor;
+	__le16 unused[2];
+	__le16 pressure;
+	__le16 multi;
+} __packed;
+
+struct apple_z2_hbpp_blob_hdr {
+	__le16 cmd;
+	__le16 len;
+	__le32 addr;
+	__le16 checksum;
+};
+
+struct apple_z2_fw_hdr {
+	__le32 magic;
+	__le32 version;
+};
+
+struct apple_z2_read_interrupt_cmd {
+	u8 cmd;
+	u8 counter;
+	u8 unused[12];
+	__le16 checksum;
+};
+
+static void apple_z2_parse_touches(struct apple_z2 *z2,
+				   const u8 *msg, size_t msg_len)
+{
+	int i;
+	int nfingers;
+	int slot;
+	int slot_valid;
+	struct apple_z2_finger *fingers;
+
+	if (msg_len <= APPLE_Z2_NUM_FINGERS_OFFSET)
+		return;
+	nfingers = msg[APPLE_Z2_NUM_FINGERS_OFFSET];
+	fingers = (struct apple_z2_finger *)(msg + APPLE_Z2_FINGERS_OFFSET);
+	for (i = 0; i < nfingers; i++) {
+		slot = input_mt_get_slot_by_key(z2->input_dev, fingers[i].finger);
+		if (slot < 0) {
+			dev_warn(&z2->spidev->dev, "unable to get slot for finger\n");
+			continue;
+		}
+		slot_valid = fingers[i].state == APPLE_Z2_TOUCH_STARTED ||
+			     fingers[i].state == APPLE_Z2_TOUCH_MOVED;
+		input_mt_slot(z2->input_dev, slot);
+		if (!input_mt_report_slot_state(z2->input_dev, MT_TOOL_FINGER, slot_valid))
+			continue;
+		touchscreen_report_pos(z2->input_dev, &z2->props,
+				       le16_to_cpu(fingers[i].abs_x),
+				       le16_to_cpu(fingers[i].abs_y),
+				       true);
+		input_report_abs(z2->input_dev, ABS_MT_WIDTH_MAJOR,
+				 le16_to_cpu(fingers[i].tool_major));
+		input_report_abs(z2->input_dev, ABS_MT_WIDTH_MINOR,
+				 le16_to_cpu(fingers[i].tool_minor));
+		input_report_abs(z2->input_dev, ABS_MT_ORIENTATION,
+				 le16_to_cpu(fingers[i].orientation));
+		input_report_abs(z2->input_dev, ABS_MT_TOUCH_MAJOR,
+				 le16_to_cpu(fingers[i].touch_major));
+		input_report_abs(z2->input_dev, ABS_MT_TOUCH_MINOR,
+				 le16_to_cpu(fingers[i].touch_minor));
+	}
+	input_mt_sync_frame(z2->input_dev);
+	input_sync(z2->input_dev);
+}
+
+static int apple_z2_read_packet(struct apple_z2 *z2)
+{
+	struct apple_z2_read_interrupt_cmd *len_cmd = (void *)z2->tx_buf;
+	struct spi_transfer xfer;
+	int error;
+	size_t pkt_len;
+
+	memset(&xfer, 0, sizeof(xfer));
+	len_cmd->cmd = APPLE_Z2_CMD_READ_INTERRUPT_DATA;
+	len_cmd->counter = z2->index_parity + 1;
+	len_cmd->checksum =
+		cpu_to_le16(APPLE_Z2_CMD_READ_INTERRUPT_DATA + len_cmd->counter);
+	z2->index_parity = !z2->index_parity;
+	xfer.tx_buf = z2->tx_buf;
+	xfer.rx_buf = z2->rx_buf;
+	xfer.len = sizeof(*len_cmd);
+
+	error = spi_sync_transfer(z2->spidev, &xfer, 1);
+	if (error)
+		return error;
+
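+	/*
+	 * The 16-bit payload length is at offset 1 of the reply; add 8 bytes
+	 * for the surrounding framing (header/checksum, assumed) and align the
+	 * read length down to a 4-byte boundary.
+	 */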
+	pkt_len = (get_unaligned_le16(z2->rx_buf + 1) + 8) & 0xfffffffc;
+
+	error = spi_read(z2->spidev, z2->rx_buf, pkt_len);
+	if (error)
+		return error;
+
+	apple_z2_parse_touches(z2, z2->rx_buf + 5, pkt_len - 5);
+
+	return 0;
+}
+
+static irqreturn_t apple_z2_irq(int irq, void *data)
+{
+	struct apple_z2 *z2 = data;
+
+	if (unlikely(!z2->booted))
+		complete(&z2->boot_irq);
+	else
+		apple_z2_read_packet(z2);
+
+	return IRQ_HANDLED;
+}
+
+/* Build calibration blob, caller is responsible for freeing the blob data. */
+static const u8 *apple_z2_build_cal_blob(struct apple_z2 *z2,
+					 u32 address, size_t *size)
+{
+	u8 *cal_data;
+	int cal_size;
+	size_t blob_size;
+	u32 checksum;
+	u16 checksum_hdr;
+	int i;
+	struct apple_z2_hbpp_blob_hdr *hdr;
+	int error;
+
+	if (!device_property_present(&z2->spidev->dev, CAL_PROP_NAME))
+		return NULL;
+
+	cal_size = device_property_count_u8(&z2->spidev->dev, CAL_PROP_NAME);
+	if (cal_size < 0)
+		return ERR_PTR(cal_size);
+
+	blob_size = sizeof(struct apple_z2_hbpp_blob_hdr) + cal_size + sizeof(__le32);
+	u8 *blob_data __free(kfree) = kzalloc(blob_size, GFP_KERNEL);
+	if (!blob_data)
+		return ERR_PTR(-ENOMEM);
+
+	hdr = (struct apple_z2_hbpp_blob_hdr *)blob_data;
+	hdr->cmd = cpu_to_le16(APPLE_Z2_HBPP_CMD_BLOB);
+	hdr->len = cpu_to_le16(round_up(cal_size, 4) / 4);
+	hdr->addr = cpu_to_le32(address);
+
+	checksum_hdr = 0;
+	for (i = 2; i < 8; i++)
+		checksum_hdr += blob_data[i];
+	hdr->checksum = cpu_to_le16(checksum_hdr);
+
+	cal_data = blob_data + sizeof(struct apple_z2_hbpp_blob_hdr);
+	error = device_property_read_u8_array(&z2->spidev->dev, CAL_PROP_NAME,
+					      cal_data, cal_size);
+	if (error)
+		return ERR_PTR(error);
+
+	checksum = 0;
+	for (i = 0; i < cal_size; i++)
+		checksum += cal_data[i];
+	put_unaligned_le32(checksum, cal_data + cal_size);
+
+	*size = blob_size;
+	return no_free_ptr(blob_data);
+}
+
+static int apple_z2_send_firmware_blob(struct apple_z2 *z2, const u8 *data,
+				       u32 size, bool init)
+{
+	struct spi_message msg;
+	struct spi_transfer blob_xfer, ack_xfer;
+	int error;
+
+	z2->tx_buf[0] = 0x1a;
+	z2->tx_buf[1] = 0xa1;
+
+	spi_message_init(&msg);
+	memset(&blob_xfer, 0, sizeof(blob_xfer));
+	memset(&ack_xfer, 0, sizeof(ack_xfer));
+
+	blob_xfer.tx_buf = data;
+	blob_xfer.len = size;
+	blob_xfer.bits_per_word = init ? 8 : 16;
+	spi_message_add_tail(&blob_xfer, &msg);
+
+	ack_xfer.tx_buf = z2->tx_buf;
+	ack_xfer.len = 2;
+	spi_message_add_tail(&ack_xfer, &msg);
+
+	reinit_completion(&z2->boot_irq);
+	error = spi_sync(z2->spidev, &msg);
+	if (error)
+		return error;
+
+	/* The IRQ is only raised sometimes, but the controller boots reliably regardless */
+	wait_for_completion_timeout(&z2->boot_irq, msecs_to_jiffies(20));
+
+	return 0;
+}
+
+static int apple_z2_upload_firmware(struct apple_z2 *z2)
+{
+	const struct apple_z2_fw_hdr *fw_hdr;
+	size_t fw_idx = sizeof(struct apple_z2_fw_hdr);
+	int error;
+	u32 load_cmd;
+	u32 address;
+	bool init;
+	size_t size;
+
+	const struct firmware *fw __free(firmware) = NULL;
+	error = request_firmware(&fw, z2->fw_name, &z2->spidev->dev);
+	if (error) {
+		dev_err(&z2->spidev->dev, "unable to load firmware\n");
+		return error;
+	}
+
+	fw_hdr = (const struct apple_z2_fw_hdr *)fw->data;
+	if (le32_to_cpu(fw_hdr->magic) != APPLE_Z2_FW_MAGIC || le32_to_cpu(fw_hdr->version) != 1) {
+		dev_err(&z2->spidev->dev, "invalid firmware header\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * This will interrupt the upload half-way if the file is malformed.
+	 * As the device has no non-volatile storage to corrupt, and gets reset
+	 * on boot anyway, this is fine.
+	 */
+	while (fw_idx < fw->size) {
+		if (fw->size - fw_idx < 8) {
+			dev_err(&z2->spidev->dev, "firmware malformed\n");
+			return -EINVAL;
+		}
+
+		load_cmd = le32_to_cpup((__force __le32 *)(fw->data + fw_idx));
+		fw_idx += sizeof(u32);
+		if (load_cmd == LOAD_COMMAND_INIT_PAYLOAD || load_cmd == LOAD_COMMAND_SEND_BLOB) {
+			size = le32_to_cpup((__force __le32 *)(fw->data + fw_idx));
+			fw_idx += sizeof(u32);
+			if (fw->size - fw_idx < size) {
+				dev_err(&z2->spidev->dev, "firmware malformed\n");
+				return -EINVAL;
+			}
+			init = load_cmd == LOAD_COMMAND_INIT_PAYLOAD;
+			error = apple_z2_send_firmware_blob(z2, fw->data + fw_idx,
+							    size, init);
+			if (error)
+				return error;
+			fw_idx += size;
+		} else if (load_cmd == LOAD_COMMAND_SEND_CALIBRATION) {
+			address = le32_to_cpup((__force __le32 *)(fw->data + fw_idx));
+			fw_idx += sizeof(u32);
+
+			const u8 *data __free(kfree) =
+				apple_z2_build_cal_blob(z2, address, &size);
+			if (IS_ERR(data))
+				return PTR_ERR(data);
+
+			if (data) {
+				error = apple_z2_send_firmware_blob(z2, data, size, false);
+				if (error)
+					return error;
+			}
+		} else {
+			dev_err(&z2->spidev->dev, "firmware malformed\n");
+			return -EINVAL;
+		}
+		fw_idx = round_up(fw_idx, 4);
+	}
+
+	z2->booted = true;
+	apple_z2_read_packet(z2);
+	return 0;
+}
+
+static int apple_z2_boot(struct apple_z2 *z2)
+{
+	int error;
+
+	reinit_completion(&z2->boot_irq);
+	enable_irq(z2->spidev->irq);
+	gpiod_set_value(z2->reset_gpio, 0);
+	if (!wait_for_completion_timeout(&z2->boot_irq, msecs_to_jiffies(20)))
+		return -ETIMEDOUT;
+
+	error = apple_z2_upload_firmware(z2);
+	if (error) {
+		gpiod_set_value(z2->reset_gpio, 1);
+		disable_irq(z2->spidev->irq);
+		return error;
+	}
+	return 0;
+}
+
+static int apple_z2_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct apple_z2 *z2;
+	int error;
+
+	z2 = devm_kzalloc(dev, sizeof(*z2), GFP_KERNEL);
+	if (!z2)
+		return -ENOMEM;
+
+	z2->tx_buf = devm_kzalloc(dev, sizeof(struct apple_z2_read_interrupt_cmd), GFP_KERNEL);
+	if (!z2->tx_buf)
+		return -ENOMEM;
+	/* 4096 will end up being rounded up to 8192 due to devres header */
+	z2->rx_buf = devm_kzalloc(dev, 4000, GFP_KERNEL);
+	if (!z2->rx_buf)
+		return -ENOMEM;
+
+	z2->spidev = spi;
+	init_completion(&z2->boot_irq);
+	spi_set_drvdata(spi, z2);
+
+	/* Reset the device on boot */
+	z2->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(z2->reset_gpio))
+		return dev_err_probe(dev, PTR_ERR(z2->reset_gpio), "unable to get reset\n");
+
+	error = devm_request_threaded_irq(dev, z2->spidev->irq, NULL,
+					  apple_z2_irq, IRQF_ONESHOT | IRQF_NO_AUTOEN,
+					  "apple-z2-irq", z2);
+	if (error)
+		return dev_err_probe(dev, error, "unable to request irq\n");
+
+	error = device_property_read_string(dev, "firmware-name", &z2->fw_name);
+	if (error)
+		return dev_err_probe(dev, error, "unable to get firmware name\n");
+
+	z2->input_dev = devm_input_allocate_device(dev);
+	if (!z2->input_dev)
+		return -ENOMEM;
+	z2->input_dev->name = (char *)spi_get_device_id(spi)->driver_data;
+	z2->input_dev->phys = "apple_z2";
+	z2->input_dev->id.bustype = BUS_SPI;
+
+	/* Allocate the axes before setting from DT */
+	input_set_abs_params(z2->input_dev, ABS_MT_POSITION_X, 0, 0, 0, 0);
+	input_set_abs_params(z2->input_dev, ABS_MT_POSITION_Y, 0, 0, 0, 0);
+	touchscreen_parse_properties(z2->input_dev, true, &z2->props);
+	input_abs_set_res(z2->input_dev, ABS_MT_POSITION_X, 100);
+	input_abs_set_res(z2->input_dev, ABS_MT_POSITION_Y, 100);
+	input_set_abs_params(z2->input_dev, ABS_MT_WIDTH_MAJOR, 0, 65535, 0, 0);
+	input_set_abs_params(z2->input_dev, ABS_MT_WIDTH_MINOR, 0, 65535, 0, 0);
+	input_set_abs_params(z2->input_dev, ABS_MT_TOUCH_MAJOR, 0, 65535, 0, 0);
+	input_set_abs_params(z2->input_dev, ABS_MT_TOUCH_MINOR, 0, 65535, 0, 0);
+	input_set_abs_params(z2->input_dev, ABS_MT_ORIENTATION, -32768, 32767, 0, 0);
+
+	error = input_mt_init_slots(z2->input_dev, 256, INPUT_MT_DIRECT);
+	if (error)
+		return dev_err_probe(dev, error, "unable to initialize multitouch slots\n");
+
+	error = input_register_device(z2->input_dev);
+	if (error)
+		return dev_err_probe(dev, error, "unable to register input device\n");
+
+	/* Wait for device reset to finish */
+	usleep_range(5000, 10000);
+	error = apple_z2_boot(z2);
+	if (error)
+		return error;
+	return 0;
+}
+
+static void apple_z2_shutdown(struct spi_device *spi)
+{
+	struct apple_z2 *z2 = spi_get_drvdata(spi);
+
+	disable_irq(z2->spidev->irq);
+	gpiod_direction_output(z2->reset_gpio, 1);
+	z2->booted = false;
+}
+
+static int apple_z2_suspend(struct device *dev)
+{
+	apple_z2_shutdown(to_spi_device(dev));
+	return 0;
+}
+
+static int apple_z2_resume(struct device *dev)
+{
+	struct apple_z2 *z2 = spi_get_drvdata(to_spi_device(dev));
+
+	return apple_z2_boot(z2);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_z2_pm, apple_z2_suspend, apple_z2_resume);
+
+static const struct of_device_id apple_z2_of_match[] = {
+	{ .compatible = "apple,j293-touchbar" },
+	{ .compatible = "apple,j493-touchbar" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, apple_z2_of_match);
+
+static const struct spi_device_id apple_z2_of_id[] = {
+	{ .name = "j293-touchbar", .driver_data = (kernel_ulong_t)"MacBookPro17,1 Touch Bar" },
+	{ .name = "j493-touchbar", .driver_data = (kernel_ulong_t)"Mac14,7 Touch Bar" },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, apple_z2_of_id);
+
+static struct spi_driver apple_z2_driver = {
+	.driver = {
+		.name	= "apple-z2",
+		.pm	= pm_sleep_ptr(&apple_z2_pm),
+		.of_match_table = apple_z2_of_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.id_table = apple_z2_of_id,
+	.probe    = apple_z2_probe,
+	.remove   = apple_z2_shutdown,
+};
+
+module_spi_driver(apple_z2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("apple/dfrmtfw-*.bin");
+MODULE_DESCRIPTION("Apple Z2 touchscreen driver");
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 95ba3caeb40177..ddc51b019ca432 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -21,6 +21,7 @@
 #include <linux/io-pgtable.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
+#include <linux/minmax.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -28,6 +29,7 @@
 #include <linux/of_platform.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/swab.h>
 #include <linux/types.h>
@@ -36,7 +38,7 @@
 
 #define DART_MAX_STREAMS 256
 #define DART_MAX_TTBR 4
-#define MAX_DARTS_PER_DEVICE 2
+#define MAX_DARTS_PER_DEVICE 3
 
 /* Common registers */
 
@@ -122,6 +124,8 @@
 #define DART_T8110_ERROR_ADDR_LO 0x170
 #define DART_T8110_ERROR_ADDR_HI 0x174
 
+#define DART_T8110_ERROR_STREAMS 0x1c0
+
 #define DART_T8110_PROTECT 0x200
 #define DART_T8110_UNPROTECT 0x204
 #define DART_T8110_PROTECT_LOCK 0x208
@@ -133,6 +137,7 @@
 #define DART_T8110_TCR                  0x1000
 #define DART_T8110_TCR_REMAP            GENMASK(11, 8)
 #define DART_T8110_TCR_REMAP_EN         BIT(7)
+#define DART_T8110_TCR_FOUR_LEVEL       BIT(3)
 #define DART_T8110_TCR_BYPASS_DAPF      BIT(2)
 #define DART_T8110_TCR_BYPASS_DART      BIT(1)
 #define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
@@ -166,22 +171,23 @@ struct apple_dart_hw {
 
 	int max_sid_count;
 
-	u64 lock;
-	u64 lock_bit;
+	u32 lock;
+	u32 lock_bit;
 
-	u64 error;
+	u32 error;
 
-	u64 enable_streams;
+	u32 enable_streams;
 
-	u64 tcr;
-	u64 tcr_enabled;
-	u64 tcr_disabled;
-	u64 tcr_bypass;
+	u32 tcr;
+	u32 tcr_enabled;
+	u32 tcr_disabled;
+	u32 tcr_bypass;
+	u32 tcr_4level;
 
-	u64 ttbr;
-	u64 ttbr_valid;
-	u64 ttbr_addr_field_shift;
-	u64 ttbr_shift;
+	u32 ttbr;
+	u32 ttbr_valid;
+	u32 ttbr_addr_field_shift;
+	u32 ttbr_shift;
 	int ttbr_count;
 };
 
@@ -197,6 +203,7 @@ struct apple_dart_hw {
  * @lock: lock for hardware operations involving this dart
  * @pgsize: pagesize supported by this DART
  * @supports_bypass: indicates if this DART supports bypass mode
+ * @locked: indicates if this DART is locked
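+ * @four_level: indicates if this DART supports four-level page tables
+ * @dma_min: lower bound of the DMA range usable by this DART
+ * @dma_max: upper bound of the DMA range usable by this DART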
  * @sid2group: maps stream ids to iommu_groups
  * @iommu: iommu core device
  */
@@ -217,12 +224,19 @@ struct apple_dart {
 	u32 pgsize;
 	u32 num_streams;
 	u32 supports_bypass : 1;
+	u32 locked : 1;
+	u32 four_level : 1;
+
+	dma_addr_t dma_min;
+	dma_addr_t dma_max;
 
 	struct iommu_group *sid2group[DART_MAX_STREAMS];
 	struct iommu_device iommu;
 
 	u32 save_tcr[DART_MAX_STREAMS];
 	u32 save_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR];
+
+	u64 *locked_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR];
 };
 
 /*
@@ -262,6 +276,7 @@ struct apple_dart_domain {
 	struct io_pgtable_ops *pgtbl_ops;
 
 	bool finalized;
+	u64 mask;
 	struct mutex init_lock;
 	struct apple_dart_atomic_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
 
@@ -277,6 +292,10 @@ struct apple_dart_domain {
  * @streams: streams for this device
  */
 struct apple_dart_master_cfg {
+	/* Intersection of DART capabilities */
+	u32 supports_bypass : 1;
+	u32 locked : 1;
+
 	struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
 };
 
@@ -302,13 +321,16 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
 }
 
 static void
-apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
+apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map, int levels)
 {
 	struct apple_dart *dart = stream_map->dart;
 	int sid;
 
+	WARN_ON(levels != 3 && levels != 4);
+	WARN_ON(levels == 4 && !dart->four_level);
 	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
-		writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
+		writel(dart->hw->tcr_enabled | (levels == 4 ? dart->hw->tcr_4level : 0),
+		       dart->regs + DART_TCR(dart, sid));
 }
 
 static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
@@ -364,6 +386,82 @@ apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
 		apple_dart_hw_clear_ttbr(stream_map, i);
 }
 
+static int
+apple_dart_hw_map_locked_ttbr(struct apple_dart_stream_map *stream_map, u8 idx)
+{
+	struct apple_dart *dart = stream_map->dart;
+	int sid;
+
+	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) {
+		u32 ttbr;
+		phys_addr_t phys;
+		u64 *l1_tbl;
+
+		ttbr = readl(dart->regs + DART_TTBR(dart, sid, idx));
+
+		if (!(ttbr & dart->hw->ttbr_valid)) {
+			dev_err(dart->dev, "Invalid ttbr[%u] for locked dart\n",
+				idx);
+			return -EIO;
+		}
+
+		ttbr &= ~dart->hw->ttbr_valid;
+
+		if (dart->hw->ttbr_addr_field_shift)
+			ttbr >>= dart->hw->ttbr_addr_field_shift;
+		phys = ((phys_addr_t) ttbr) << dart->hw->ttbr_shift;
+
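+		/*
+		 * The TTBR was programmed (and locked) by the bootloader;
+		 * remap the L1 table it points at so that
+		 * apple_dart_hw_sync_locked() can mirror the kernel's
+		 * io-pgtable entries into it.
+		 */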
+		l1_tbl = devm_memremap(dart->dev, phys, dart->pgsize,
+				       MEMREMAP_WB);
+		if (!l1_tbl)
+			return -ENOMEM;
+
+		dart->locked_ttbr[sid][idx] = l1_tbl;
+	}
+
+	return 0;
+}
+
+static int
+apple_dart_hw_unmap_locked_ttbr(struct apple_dart_stream_map *stream_map,
+				u8 idx)
+{
+	struct apple_dart *dart = stream_map->dart;
+	int sid;
+
+	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) {
+		/* TODO: locked L1 table might need to be restored to boot state */
+		if (dart->locked_ttbr[sid][idx]) {
+			memset(dart->locked_ttbr[sid][idx], 0, dart->pgsize);
+			devm_memunmap(dart->dev, dart->locked_ttbr[sid][idx]);
+		}
+		dart->locked_ttbr[sid][idx] = NULL;
+	}
+
+	return 0;
+}
+
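+/*
+ * Copy the current io-pgtable top-level entries into the locked,
+ * bootloader-provided L1 tables so the hardware, whose TTBRs cannot be
+ * repointed, still sees the kernel's mappings.
+ */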
+static int
+apple_dart_hw_sync_locked(struct io_pgtable_cfg *cfg,
+			  struct apple_dart_stream_map *stream_map)
+{
+	struct apple_dart *dart = stream_map->dart;
+	int sid;
+
+	for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) {
+		for (int idx = 0; idx < dart->hw->ttbr_count; idx++) {
+			u64 *ttbrep = dart->locked_ttbr[sid][idx];
+			u64 *ptep = cfg->apple_dart_cfg.ttbr[idx];
+			if (!ttbrep || !ptep)
+				continue;
+			for (int entry = 0; entry < dart->pgsize / sizeof(*ptep); entry++)
+				ttbrep[entry] = ptep[entry];
+		}
+	}
+
+	return 0;
+}
+
 static int
 apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
 			     u32 command)
@@ -450,17 +548,9 @@ apple_dart_t8110_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
 
 static int apple_dart_hw_reset(struct apple_dart *dart)
 {
-	u32 config;
 	struct apple_dart_stream_map stream_map;
 	int i;
 
-	config = readl(dart->regs + dart->hw->lock);
-	if (config & dart->hw->lock_bit) {
-		dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
-			config);
-		return -EINVAL;
-	}
-
 	stream_map.dart = dart;
 	bitmap_zero(stream_map.sidmap, DART_MAX_STREAMS);
 	bitmap_set(stream_map.sidmap, 0, dart->num_streams);
@@ -485,6 +575,8 @@ static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
 	int i, j;
 	struct apple_dart_atomic_stream_map *domain_stream_map;
 	struct apple_dart_stream_map stream_map;
+	struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;
 
 	for_each_stream_map(i, domain, domain_stream_map) {
 		stream_map.dart = domain_stream_map->dart;
@@ -492,7 +584,13 @@ static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
 		for (j = 0; j < BITS_TO_LONGS(stream_map.dart->num_streams); j++)
 			stream_map.sidmap[j] = atomic_long_read(&domain_stream_map->sidmap[j]);
 
+		WARN_ON(pm_runtime_get_sync(stream_map.dart->dev) < 0);
+
+		if (stream_map.dart->locked)
+			apple_dart_hw_sync_locked(pgtbl_cfg, &stream_map);
+
 		stream_map.dart->hw->invalidate_tlb(&stream_map);
+		pm_runtime_put(stream_map.dart->dev);
 	}
 }
 
@@ -523,7 +621,7 @@ static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
 	if (!ops)
 		return 0;
 
-	return ops->iova_to_phys(ops, iova);
+	return ops->iova_to_phys(ops, iova & dart_domain->mask);
 }
 
 static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova,
@@ -537,8 +635,8 @@ static int apple_dart_map_pages(struct iommu_domain *domain, unsigned long iova,
 	if (!ops)
 		return -ENODEV;
 
-	return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp,
-			      mapped);
+	return ops->map_pages(ops, iova & dart_domain->mask, paddr, pgsize,
+			      pgcount, prot, gfp, mapped);
 }
 
 static size_t apple_dart_unmap_pages(struct iommu_domain *domain,
@@ -549,7 +647,8 @@ static size_t apple_dart_unmap_pages(struct iommu_domain *domain,
 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
 	struct io_pgtable_ops *ops = dart_domain->pgtbl_ops;
 
-	return ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
+	return ops->unmap_pages(ops, iova & dart_domain->mask, pgsize, pgcount,
+				gather);
 }
 
 static void
@@ -560,13 +659,33 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
 	struct io_pgtable_cfg *pgtbl_cfg =
 		&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;
 
-	for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
-		apple_dart_hw_set_ttbr(stream_map, i,
-				       pgtbl_cfg->apple_dart_cfg.ttbr[i]);
+	for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i) {
+		u64 ttbr = virt_to_phys(pgtbl_cfg->apple_dart_cfg.ttbr[i]);
+		apple_dart_hw_set_ttbr(stream_map, i, ttbr);
+	}
 	for (; i < stream_map->dart->hw->ttbr_count; ++i)
 		apple_dart_hw_clear_ttbr(stream_map, i);
 
-	apple_dart_hw_enable_translation(stream_map);
+	apple_dart_hw_enable_translation(stream_map,
+					 pgtbl_cfg->apple_dart_cfg.n_levels);
+	stream_map->dart->hw->invalidate_tlb(stream_map);
+}
+
+static void
+apple_dart_setup_translation_locked(struct apple_dart_domain *domain,
+				    struct apple_dart_stream_map *stream_map)
+{
+	int i;
+	struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;
+
+	/* Locked DARTs are set up by the bootloader. */
+	for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
+		apple_dart_hw_map_locked_ttbr(stream_map, i);
+	for (; i < stream_map->dart->hw->ttbr_count; ++i)
+		apple_dart_hw_unmap_locked_ttbr(stream_map, i);
+
+	apple_dart_hw_sync_locked(pgtbl_cfg, stream_map);
 	stream_map->dart->hw->invalidate_tlb(stream_map);
 }
 
@@ -575,6 +694,8 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
 {
 	struct apple_dart *dart = cfg->stream_maps[0].dart;
 	struct io_pgtable_cfg pgtbl_cfg;
+	dma_addr_t dma_max = dart->dma_max;
+	u32 ias = min_t(u32, dart->ias, fls64(dma_max));
 	int ret = 0;
 	int i, j;
 
@@ -595,12 +716,48 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
 
 	pgtbl_cfg = (struct io_pgtable_cfg){
 		.pgsize_bitmap = dart->pgsize,
-		.ias = dart->ias,
+		.ias = ias,
 		.oas = dart->oas,
 		.coherent_walk = 1,
 		.iommu_dev = dart->dev,
 	};
 
+	if (dart->locked) {
+		unsigned long *sidmap;
+		int sid;
+		u32 ttbr;
+
+		/* Locked DARTs can only have a single stream bound */
+		sidmap = cfg->stream_maps[0].sidmap;
+		sid = find_first_bit(sidmap, dart->num_streams);
+
+		WARN_ON(sid >= dart->num_streams || bitmap_weight(sidmap, dart->num_streams) > 1);
+		ttbr = readl(dart->regs + DART_TTBR(dart, sid, 0));
+
+		WARN_ON(!(ttbr & dart->hw->ttbr_valid));
+
+		/* If the DART is locked, we need to keep the translation level count. */
+		if (dart->hw->tcr_4level && dart->ias > 36) {
+			if (readl(dart->regs + DART_TCR(dart, sid)) & dart->hw->tcr_4level) {
+				if (ias < 37) {
+					dev_info(dart->dev, "Expanded to ias=37 due to lock\n");
+					pgtbl_cfg.ias = 37;
+				}
+			} else if (ias > 36) {
+				dev_info(dart->dev, "Limited to ias=36 due to lock\n");
+				pgtbl_cfg.ias = 36;
+				if (dart->dma_min == 0 && dma_max == DMA_BIT_MASK(dart->ias)) {
+					dma_max = DMA_BIT_MASK(pgtbl_cfg.ias);
+				} else if ((dart->dma_min ^ dma_max) & ~DMA_BIT_MASK(36)) {
+					dev_err(dart->dev,
+						"Invalid DMA range for locked 3-level PT\n");
+					ret = -ENOMEM;
+					goto done;
+				}
+			}
+		}
+	}
+
 	dart_domain->pgtbl_ops = alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg,
 						      &dart_domain->domain);
 	if (!dart_domain->pgtbl_ops) {
@@ -608,10 +765,16 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
 		goto done;
 	}
 
+	if (pgtbl_cfg.pgsize_bitmap == SZ_4K)
+		dart_domain->mask = DMA_BIT_MASK(min_t(u32, dart->ias, 32));
+	else if (pgtbl_cfg.apple_dart_cfg.n_levels == 3)
+		dart_domain->mask = DMA_BIT_MASK(min_t(u32, dart->ias, 36));
+	else if (pgtbl_cfg.apple_dart_cfg.n_levels == 4)
+		dart_domain->mask = DMA_BIT_MASK(min_t(u32, dart->ias, 47));
+
 	dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
-	dart_domain->domain.geometry.aperture_start = 0;
-	dart_domain->domain.geometry.aperture_end =
-		(dma_addr_t)DMA_BIT_MASK(dart->ias);
+	dart_domain->domain.geometry.aperture_start = dart->dma_min;
+	dart_domain->domain.geometry.aperture_end = dma_max;
 	dart_domain->domain.geometry.force_aperture = true;
 
 	dart_domain->finalized = true;
@@ -664,17 +827,29 @@ static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 	struct apple_dart_domain *dart_domain = to_dart_domain(domain);
 
+	for_each_stream_map(i, cfg, stream_map)
+		WARN_ON(pm_runtime_get_sync(stream_map->dart->dev) < 0);
+
 	ret = apple_dart_finalize_domain(dart_domain, cfg);
 	if (ret)
-		return ret;
+		goto err;
 
 	ret = apple_dart_domain_add_streams(dart_domain, cfg);
 	if (ret)
-		return ret;
+		goto err;
+
+	for_each_stream_map(i, cfg, stream_map) {
+		if (!stream_map->dart->locked)
+			apple_dart_setup_translation(dart_domain, stream_map);
+		else
+			apple_dart_setup_translation_locked(dart_domain,
+							    stream_map);
+	}
 
+err:
 	for_each_stream_map(i, cfg, stream_map)
-		apple_dart_setup_translation(dart_domain, stream_map);
-	return 0;
+		pm_runtime_put(stream_map->dart->dev);
+	return ret;
 }
 
 static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
@@ -684,11 +859,20 @@ static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
 	struct apple_dart_stream_map *stream_map;
 	int i;
 
-	if (!cfg->stream_maps[0].dart->supports_bypass)
+	if (!cfg->supports_bypass)
 		return -EINVAL;
 
+	if (cfg->locked)
+		return -EINVAL;
+
+	for_each_stream_map(i, cfg, stream_map)
+		WARN_ON(pm_runtime_get_sync(stream_map->dart->dev) < 0);
+
 	for_each_stream_map(i, cfg, stream_map)
 		apple_dart_hw_enable_bypass(stream_map);
+
+	for_each_stream_map(i, cfg, stream_map)
+		pm_runtime_put(stream_map->dart->dev);
 	return 0;
 }
 
@@ -708,8 +892,17 @@ static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
 	struct apple_dart_stream_map *stream_map;
 	int i;
 
+	if (cfg->locked)
+		return -EINVAL;
+
+	for_each_stream_map(i, cfg, stream_map)
+		WARN_ON(pm_runtime_get_sync(stream_map->dart->dev) < 0);
+
 	for_each_stream_map(i, cfg, stream_map)
 		apple_dart_hw_disable_dma(stream_map);
+
+	for_each_stream_map(i, cfg, stream_map)
+		pm_runtime_put(stream_map->dart->dev);
 	return 0;
 }
 
@@ -728,21 +921,29 @@ static struct iommu_device *apple_dart_probe_device(struct device *dev)
 	struct apple_dart_stream_map *stream_map;
 	int i;
 
-	if (!cfg)
+	if (!dev_iommu_fwspec_get(dev) || !cfg)
 		return ERR_PTR(-ENODEV);
 
 	for_each_stream_map(i, cfg, stream_map)
-		device_link_add(
-			dev, stream_map->dart->dev,
-			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+		device_link_add(dev, stream_map->dart->dev,
+			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER |
+			DL_FLAG_RPM_ACTIVE);
 
 	return &cfg->stream_maps[0].dart->iommu;
 }
 
 static void apple_dart_release_device(struct device *dev)
 {
+	int i, j;
+	struct apple_dart_stream_map *stream_map;
 	struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 
+	for_each_stream_map(j, cfg, stream_map) {
+		if (stream_map->dart->locked)
+			for (i = 0; i < stream_map->dart->hw->ttbr_count; ++i)
+				apple_dart_hw_unmap_locked_ttbr(stream_map, i);
+	}
+
 	kfree(cfg);
 }
 
@@ -792,20 +993,26 @@ static int apple_dart_of_xlate(struct device *dev,
 		return -EINVAL;
 	sid = args->args[0];
 
-	if (!cfg)
+	if (!cfg) {
 		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
+		if (!cfg)
+			return -ENOMEM;
+		/* Will be ANDed with DART capabilities */
+		cfg->supports_bypass = true;
+		/* Will be ORed with DART capabilities */
+		cfg->locked = false;
+	}
 	dev_iommu_priv_set(dev, cfg);
 
 	cfg_dart = cfg->stream_maps[0].dart;
 	if (cfg_dart) {
-		if (cfg_dart->supports_bypass != dart->supports_bypass)
-			return -EINVAL;
 		if (cfg_dart->pgsize != dart->pgsize)
 			return -EINVAL;
 	}
 
+	cfg->supports_bypass &= dart->supports_bypass;
+	cfg->locked |= dart->locked;
+
 	for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
 		if (cfg->stream_maps[i].dart == dart) {
 			set_bit(sid, cfg->stream_maps[i].sidmap);
@@ -945,7 +1152,9 @@ static int apple_dart_def_domain_type(struct device *dev)
 
 	if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
 		return IOMMU_DOMAIN_IDENTITY;
-	if (!cfg->stream_maps[0].dart->supports_bypass)
+	if (!cfg->supports_bypass)
+		return IOMMU_DOMAIN_DMA;
+	if (cfg->locked)
 		return IOMMU_DOMAIN_DMA;
 
 	return 0;
@@ -979,12 +1188,12 @@ static void apple_dart_get_resv_regions(struct device *dev,
 static const struct iommu_ops apple_dart_iommu_ops = {
 	.identity_domain = &apple_dart_identity_domain,
 	.blocked_domain = &apple_dart_blocked_domain,
+	.def_domain_type = apple_dart_def_domain_type,
 	.domain_alloc_paging = apple_dart_domain_alloc_paging,
 	.probe_device = apple_dart_probe_device,
 	.release_device = apple_dart_release_device,
 	.device_group = apple_dart_device_group,
 	.of_xlate = apple_dart_of_xlate,
-	.def_domain_type = apple_dart_def_domain_type,
 	.get_resv_regions = apple_dart_get_resv_regions,
 	.pgsize_bitmap = -1UL, /* Restricted during dart probe */
 	.owner = THIS_MODULE,
@@ -1047,6 +1256,7 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
 	u32 addr_hi = readl(dart->regs + DART_T8110_ERROR_ADDR_HI);
 	u64 addr = addr_lo | (((u64)addr_hi) << 32);
 	u8 stream_idx = FIELD_GET(DART_T8110_ERROR_STREAM, error);
+	int i;
 
 	if (!(error & DART_T8110_ERROR_FLAG))
 		return IRQ_NONE;
@@ -1073,9 +1283,28 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
 		error, stream_idx, error_code, fault_name, addr);
 
 	writel(error, dart->regs + DART_T8110_ERROR);
+	for (i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+		writel(U32_MAX, dart->regs + DART_T8110_ERROR_STREAMS + 4 * i);
+
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t apple_dart_irq(int irq, void *dev)
+{
+	irqreturn_t ret;
+	struct apple_dart *dart = dev;
+
+	WARN_ON(pm_runtime_get_sync(dart->dev) < 0);
+	ret = dart->hw->irq_handler(irq, dev);
+	pm_runtime_put(dart->dev);
+	return ret;
+}
+
+static bool apple_dart_is_locked(struct apple_dart *dart)
+{
+	return !!(readl(dart->regs + dart->hw->lock) & dart->hw->lock_bit);
+}
+
 static int apple_dart_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -1083,6 +1312,7 @@ static int apple_dart_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct apple_dart *dart;
 	struct device *dev = &pdev->dev;
+	u64 dma_range[2];
 
 	dart = devm_kzalloc(dev, sizeof(*dart), GFP_KERNEL);
 	if (!dart)
@@ -1114,6 +1344,14 @@ static int apple_dart_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_irq_safe(dev);
+
+	ret = devm_pm_runtime_enable(dev);
+	if (ret)
+		goto err_clk_disable;
+
 	dart_params[0] = readl(dart->regs + DART_PARAMS1);
 	dart_params[1] = readl(dart->regs + DART_PARAMS2);
 	dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]);
@@ -1133,9 +1371,30 @@ static int apple_dart_probe(struct platform_device *pdev)
 		dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
 		dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
 		dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+		dart->four_level = dart->ias > 36;
 		break;
 	}
 
+	dart->dma_min = 0;
+	dart->dma_max = DMA_BIT_MASK(dart->ias);
+
+	ret = of_property_read_u64_array(dev->of_node, "apple,dma-range", dma_range, 2);
+	if (ret == -EINVAL) {
+		ret = 0;
+	} else if (ret) {
+		goto err_clk_disable;
+	} else {
+		dart->dma_min = dma_range[0];
+		dart->dma_max = dma_range[0] + dma_range[1] - 1;
+		if ((dart->dma_min ^ dart->dma_max) & ~DMA_BIT_MASK(dart->ias)) {
+			dev_err(&pdev->dev, "Invalid DMA range for ias=%d\n",
+				dart->ias);
+			ret = -EINVAL;
+			goto err_clk_disable;
+		}
+		dev_info(&pdev->dev, "Limiting DMA range to %pad..%pad\n",
+			 &dart->dma_min, &dart->dma_max);
+	}
+
 	if (dart->num_streams > DART_MAX_STREAMS) {
 		dev_err(&pdev->dev, "Too many streams (%d > %d)\n",
 			dart->num_streams, DART_MAX_STREAMS);
@@ -1143,11 +1402,14 @@ static int apple_dart_probe(struct platform_device *pdev)
 		goto err_clk_disable;
 	}
 
-	ret = apple_dart_hw_reset(dart);
-	if (ret)
-		goto err_clk_disable;
+	dart->locked = apple_dart_is_locked(dart);
+	if (!dart->locked) {
+		ret = apple_dart_hw_reset(dart);
+		if (ret)
+			goto err_clk_disable;
+	}
 
-	ret = request_irq(dart->irq, dart->hw->irq_handler, IRQF_SHARED,
+	ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
 			  "apple-dart fault handler", dart);
 	if (ret)
 		goto err_clk_disable;
@@ -1163,11 +1425,13 @@ static int apple_dart_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_sysfs_remove;
 
+	pm_runtime_put(dev);
+
 	dev_info(
 		&pdev->dev,
-		"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
+		"DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, locked: %d, AS %d -> %d] initialized\n",
 		dart->pgsize, dart->num_streams, dart->supports_bypass,
-		dart->pgsize > PAGE_SIZE);
+		dart->pgsize > PAGE_SIZE, dart->locked, dart->ias, dart->oas);
 	return 0;
 
 err_sysfs_remove:
@@ -1175,6 +1439,7 @@ static int apple_dart_probe(struct platform_device *pdev)
 err_free_irq:
 	free_irq(dart->irq, dart);
 err_clk_disable:
+	pm_runtime_put(dev);
 	clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
 
 	return ret;
@@ -1184,7 +1449,9 @@ static void apple_dart_remove(struct platform_device *pdev)
 {
 	struct apple_dart *dart = platform_get_drvdata(pdev);
 
-	apple_dart_hw_reset(dart);
+	if (!dart->locked)
+		apple_dart_hw_reset(dart);
+
 	free_irq(dart->irq, dart);
 
 	iommu_device_unregister(&dart->iommu);
@@ -1288,6 +1555,7 @@ static const struct apple_dart_hw apple_dart_hw_t8110 = {
 	.tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
 	.tcr_disabled = 0,
 	.tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+	.tcr_4level = DART_T8110_TCR_FOUR_LEVEL,
 
 	.ttbr = DART_T8110_TTBR,
 	.ttbr_valid = DART_T8110_TTBR_VALID,
@@ -1301,6 +1569,10 @@ static __maybe_unused int apple_dart_suspend(struct device *dev)
 	struct apple_dart *dart = dev_get_drvdata(dev);
 	unsigned int sid, idx;
 
+	/* Locked DARTs can't be restored so skip saving their registers. */
+	if (dart->locked)
+		return 0;
+
 	for (sid = 0; sid < dart->num_streams; sid++) {
 		dart->save_tcr[sid] = readl(dart->regs + DART_TCR(dart, sid));
 		for (idx = 0; idx < dart->hw->ttbr_count; idx++)
@@ -1317,6 +1589,10 @@ static __maybe_unused int apple_dart_resume(struct device *dev)
 	unsigned int sid, idx;
 	int ret;
 
+	/* Locked DARTs can't be restored, and they should not need it */
+	if (dart->locked)
+		return 0;
+
 	ret = apple_dart_hw_reset(dart);
 	if (ret) {
 		dev_err(dev, "Failed to reset DART on resume\n");
@@ -1333,7 +1609,7 @@ static __maybe_unused int apple_dart_resume(struct device *dev)
 	return 0;
 }
 
-static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
+static DEFINE_RUNTIME_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume, NULL);
 
 static const struct of_device_id apple_dart_of_match[] = {
 	{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
@@ -1349,7 +1625,7 @@ static struct platform_driver apple_dart_driver = {
 		.name			= "apple-dart",
 		.of_match_table		= apple_dart_of_match,
 		.suppress_bind_attrs    = true,
-		.pm			= pm_sleep_ptr(&apple_dart_pm_ops),
+		.pm			= pm_ptr(&apple_dart_pm_ops),
 	},
 	.probe	= apple_dart_probe,
 	.remove = apple_dart_remove,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2a9fa0c8cc00fe..5ac3f824b5121a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -583,8 +583,13 @@ static int iova_reserve_iommu_regions(struct device *dev,
 		if (region->type == IOMMU_RESV_SW_MSI)
 			continue;
 
-		lo = iova_pfn(iovad, region->start);
-		hi = iova_pfn(iovad, region->start + region->length - 1);
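+		/*
+		 * IOMMU_RESV_TRANSLATED regions are remapped rather than
+		 * identity-mapped: reserve their IOVA window (dva) instead of
+		 * the physical address range.
+		 */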
+		if (region->type == IOMMU_RESV_TRANSLATED) {
+			lo = iova_pfn(iovad, region->dva);
+			hi = iova_pfn(iovad, region->dva + region->length - 1);
+		} else {
+			lo = iova_pfn(iovad, region->start);
+			hi = iova_pfn(iovad, region->start + region->length - 1);
+		}
 		reserve_iova(iovad, lo, hi);
 
 		if (region->type == IOMMU_RESV_MSI)
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 06aca9ab52f9a8..7e1b0fc3b8e3e8 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -27,8 +27,9 @@
 
 #define DART1_MAX_ADDR_BITS	36
 
-#define DART_MAX_TABLES		4
-#define DART_LEVELS		2
+#define DART_MAX_TABLE_BITS	2
+#define DART_MAX_TABLES		BIT(DART_MAX_TABLE_BITS)
+#define DART_MAX_LEVELS		4 /* Includes TTBR level */
 
 /* Struct accessors */
 #define io_pgtable_to_data(x)						\
@@ -68,6 +69,7 @@
 struct dart_io_pgtable {
 	struct io_pgtable	iop;
 
+	int			levels;
 	int			tbl_bits;
 	int			bits_per_level;
 
@@ -164,44 +166,45 @@ static dart_iopte dart_install_table(dart_iopte *table,
 	return old;
 }
 
-static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
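+/*
+ * Index of @iova within the table at @level. The topmost level
+ * (data->levels) selects among the top-level tables, lower levels index the
+ * intermediate page tables.
+ */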
+static int dart_get_index(struct dart_io_pgtable *data, unsigned long iova, int level)
 {
-	return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
-		((1 << data->tbl_bits) - 1);
+	return (iova >> (level * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+		((1 << data->bits_per_level) - 1);
 }
 
-static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
-{
-
-	return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
-		 ((1 << data->bits_per_level) - 1);
-}
-
-static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_last_index(struct dart_io_pgtable *data, unsigned long iova)
 {
 
 	return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
 		 ((1 << data->bits_per_level) - 1);
 }
 
-static  dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+static dart_iopte *dart_get_last(struct dart_io_pgtable *data, unsigned long iova)
 {
 	dart_iopte pte, *ptep;
-	int tbl = dart_get_table(data, iova);
+	int level = data->levels;
+	int tbl = dart_get_index(data, iova, level);
+
+	if (tbl >= (1 << data->tbl_bits))
+		return NULL;
 
 	ptep = data->pgd[tbl];
 	if (!ptep)
 		return NULL;
 
-	ptep += dart_get_l1_index(data, iova);
-	pte = READ_ONCE(*ptep);
+	while (--level > 1) {
+		ptep += dart_get_index(data, iova, level);
+		pte = READ_ONCE(*ptep);
 
-	/* Valid entry? */
-	if (!pte)
-		return NULL;
+		/* Valid entry? */
+		if (!pte)
+			return NULL;
 
-	/* Deref to get level 2 table */
-	return iopte_deref(pte, data);
+		/* Deref to get next level table */
+		ptep = iopte_deref(pte, data);
+	}
+
+	return ptep;
 }
 
 static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
@@ -238,6 +241,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	int ret = 0, tbl, num_entries, max_entries, map_idx_start;
 	dart_iopte pte, *cptep, *ptep;
 	dart_iopte prot;
+	int level = data->levels;
 
 	if (WARN_ON(pgsize != cfg->pgsize_bitmap))
 		return -EINVAL;
@@ -248,31 +252,36 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 		return -EINVAL;
 
-	tbl = dart_get_table(data, iova);
+	tbl = dart_get_index(data, iova, level);
+
+	if (tbl >= (1 << data->tbl_bits))
+		return -ENOMEM;
 
 	ptep = data->pgd[tbl];
-	ptep += dart_get_l1_index(data, iova);
-	pte = READ_ONCE(*ptep);
+	while (--level > 1) {
+		ptep += dart_get_index(data, iova, level);
+		pte = READ_ONCE(*ptep);
 
-	/* no L2 table present */
-	if (!pte) {
-		cptep = __dart_alloc_pages(tblsz, gfp);
-		if (!cptep)
-			return -ENOMEM;
+		/* no table present */
+		if (!pte) {
+			cptep = __dart_alloc_pages(tblsz, gfp);
+			if (!cptep)
+				return -ENOMEM;
 
-		pte = dart_install_table(cptep, ptep, 0, data);
-		if (pte)
-			iommu_free_pages(cptep, get_order(tblsz));
+			pte = dart_install_table(cptep, ptep, 0, data);
+			if (pte)
+				iommu_free_pages(cptep, get_order(tblsz));
 
-		/* L2 table is present (now) */
-		pte = READ_ONCE(*ptep);
-	}
+			/* Next level table is present (now) */
+			pte = READ_ONCE(*ptep);
+		}
 
-	ptep = iopte_deref(pte, data);
+		ptep = iopte_deref(pte, data);
+	}
 
 	/* install a leaf entries into L2 table */
 	prot = dart_prot_to_pte(data, iommu_prot);
-	map_idx_start = dart_get_l2_index(data, iova);
+	map_idx_start = dart_get_last_index(data, iova);
 	max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
 	num_entries = min_t(int, pgcount, max_entries);
 	ptep += map_idx_start;
@@ -301,13 +310,13 @@ static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
 		return 0;
 
-	ptep = dart_get_l2(data, iova);
+	ptep = dart_get_last(data, iova);
 
 	/* Valid L2 IOPTE pointer? */
 	if (WARN_ON(!ptep))
 		return 0;
 
-	unmap_idx_start = dart_get_l2_index(data, iova);
+	unmap_idx_start = dart_get_last_index(data, iova);
 	ptep += unmap_idx_start;
 
 	max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
@@ -338,13 +347,13 @@ static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
 	struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	dart_iopte pte, *ptep;
 
-	ptep = dart_get_l2(data, iova);
+	ptep = dart_get_last(data, iova);
 
 	/* Valid L2 IOPTE pointer? */
 	if (!ptep)
 		return 0;
 
-	ptep += dart_get_l2_index(data, iova);
+	ptep += dart_get_last_index(data, iova);
 
 	pte = READ_ONCE(*ptep);
 	/* Found translation */
@@ -361,21 +370,37 @@ static struct dart_io_pgtable *
 dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
 {
 	struct dart_io_pgtable *data;
-	int tbl_bits, bits_per_level, va_bits, pg_shift;
+	int levels, max_tbl_bits, tbl_bits, bits_per_level, va_bits, pg_shift;
+
+	/*
+	 * Old 4K page DARTs can use up to 4 top-level tables.
+	 * Newer ones only ever use a maximum of 1.
+	 */
+	if (cfg->pgsize_bitmap == SZ_4K)
+		max_tbl_bits = DART_MAX_TABLE_BITS;
+	else
+		max_tbl_bits = 0;
 
 	pg_shift = __ffs(cfg->pgsize_bitmap);
 	bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
 
 	va_bits = cfg->ias - pg_shift;
 
-	tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
-	if ((1 << tbl_bits) > DART_MAX_TABLES)
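+	/* Page-table levels (below the top-level table) needed to cover va_bits, two at minimum */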
+	levels = max_t(int, 2, (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level);
+
+	if (levels > (DART_MAX_LEVELS - 1))
+		return NULL;
+
+	tbl_bits = max_t(int, 0, va_bits - (bits_per_level * levels));
+
+	if (tbl_bits > max_tbl_bits)
 		return NULL;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return NULL;
 
+	data->levels = levels + 1; /* Table level counts as one level */
 	data->tbl_bits = tbl_bits;
 	data->bits_per_level = bits_per_level;
 
@@ -411,12 +436,13 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 		return NULL;
 
 	cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+	cfg->apple_dart_cfg.n_levels = data->levels;
 
 	for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
 		data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
 		if (!data->pgd[i])
 			goto out_free_data;
-		cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
+		cfg->apple_dart_cfg.ttbr[i] = data->pgd[i];
 	}
 
 	return &data->iop;
@@ -430,24 +456,32 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	return NULL;
 }
 
-static void apple_dart_free_pgtable(struct io_pgtable *iop)
+static void apple_dart_free_pgtables(struct dart_io_pgtable *data, dart_iopte *ptep, int level)
 {
-	struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+	dart_iopte *end;
+	dart_iopte *start = ptep;
 	int order = get_order(DART_GRANULE(data));
-	dart_iopte *ptep, *end;
-	int i;
 
-	for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
-		ptep = data->pgd[i];
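+	/* Recurse into any child tables before freeing this table itself */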
+	if (level > 1) {
 		end = (void *)ptep + DART_GRANULE(data);
 
 		while (ptep != end) {
 			dart_iopte pte = *ptep++;
 
 			if (pte)
-				iommu_free_pages(iopte_deref(pte, data), order);
+				apple_dart_free_pgtables(data, iopte_deref(pte, data), level - 1);
 		}
-		iommu_free_pages(data->pgd[i], order);
+	}
+	iommu_free_pages(start, order);
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+	struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+	int i;
+
+	for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
+		apple_dart_free_pgtables(data, data->pgd[i], data->levels - 1);
 	}
 
 	kfree(data);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 1efe7cddb4fe33..498f8f4075ad40 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -86,6 +86,7 @@ static const char * const iommu_group_resv_type_string[] = {
 	[IOMMU_RESV_RESERVED]			= "reserved",
 	[IOMMU_RESV_MSI]			= "msi",
 	[IOMMU_RESV_SW_MSI]			= "msi",
+	[IOMMU_RESV_TRANSLATED]			= "translated",
 };
 
 #define IOMMU_CMD_LINE_DMA_API		BIT(0)
@@ -128,8 +129,8 @@ static void __iommu_group_set_domain_nofail(struct iommu_group *group,
 
 static int iommu_setup_default_domain(struct iommu_group *group,
 				      int target_type);
-static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
-					       struct device *dev);
+static int iommu_create_device_fw_mappings(struct iommu_domain *domain,
+					   struct device *dev);
 static ssize_t iommu_group_store_type(struct iommu_group *group,
 				      const char *buf, size_t count);
 static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
@@ -565,7 +566,7 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 	list_add_tail(&gdev->list, &group->devices);
 	WARN_ON(group->default_domain && !group->domain);
 	if (group->default_domain)
-		iommu_create_device_direct_mappings(group->default_domain, dev);
+		iommu_create_device_fw_mappings(group->default_domain, dev);
 	if (group->domain) {
 		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
 		if (ret)
@@ -1093,8 +1094,8 @@ int iommu_group_set_name(struct iommu_group *group, const char *name)
 }
 EXPORT_SYMBOL_GPL(iommu_group_set_name);
 
-static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
-					       struct device *dev)
+static int iommu_create_device_fw_mappings(struct iommu_domain *domain,
+					   struct device *dev)
 {
 	struct iommu_resv_region *entry;
 	struct list_head mappings;
@@ -1111,27 +1112,35 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
 
 	/* We need to consider overlapping regions for different devices */
 	list_for_each_entry(entry, &mappings, list) {
-		dma_addr_t start, end, addr;
+		dma_addr_t start, end, addr, iova;
 		size_t map_size = 0;
 
 		if (entry->type == IOMMU_RESV_DIRECT)
 			dev->iommu->require_direct = 1;
+		if (entry->type == IOMMU_RESV_TRANSLATED)
+			dev->iommu->require_translated = 1;
 
 		if ((entry->type != IOMMU_RESV_DIRECT &&
-		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
+		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE &&
+		     entry->type != IOMMU_RESV_TRANSLATED) ||
 		    !iommu_is_dma_domain(domain))
 			continue;
 
 		start = ALIGN(entry->start, pg_size);
 		end   = ALIGN(entry->start + entry->length, pg_size);
 
-		for (addr = start; addr <= end; addr += pg_size) {
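+		/* Translated regions map the firmware-provided device VA; other types are 1:1 */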
+		if (entry->type == IOMMU_RESV_TRANSLATED)
+			iova = ALIGN(entry->dva, pg_size);
+		else
+			iova = start;
+
+		for (addr = start; addr <= end; addr += pg_size, iova += pg_size) {
 			phys_addr_t phys_addr;
 
 			if (addr == end)
 				goto map_end;
 
-			phys_addr = iommu_iova_to_phys(domain, addr);
+			phys_addr = iommu_iova_to_phys(domain, iova);
 			if (!phys_addr) {
 				map_size += pg_size;
 				continue;
@@ -1139,7 +1148,7 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
 
 map_end:
 			if (map_size) {
-				ret = iommu_map(domain, addr - map_size,
+				ret = iommu_map(domain, iova - map_size,
 						addr - map_size, map_size,
 						entry->prot, GFP_KERNEL);
 				if (ret)
@@ -2237,6 +2246,19 @@ static int __iommu_device_set_domain(struct iommu_group *group,
 			 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
 		return -EINVAL;
 	}
+	/*
+	 * If the device requires IOMMU_RESV_TRANSLATED then we cannot allow
+	 * the identity or blocking domain to be attached, as neither contains
+	 * the required translated mapping.
+	 */
+	if (dev->iommu->require_translated &&
+	    (new_domain->type == IOMMU_DOMAIN_IDENTITY ||
+	     new_domain->type == IOMMU_DOMAIN_BLOCKED ||
+	     new_domain == group->blocking_domain)) {
+		dev_warn(dev,
+			 "Firmware has requested this device have a translated IOMMU mapping, rejecting configuring the device without a translated mapping. Contact your platform vendor.\n");
+		return -EINVAL;
+	}
 
 	if (dev->iommu->attach_deferred) {
 		if (new_domain == group->default_domain)
@@ -2761,10 +2783,11 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 }
 EXPORT_SYMBOL(iommu_put_resv_regions);
 
-struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
-						  size_t length, int prot,
-						  enum iommu_resv_type type,
-						  gfp_t gfp)
+struct iommu_resv_region *iommu_alloc_resv_region_tr(phys_addr_t start,
+						     dma_addr_t dva_start,
+						     size_t length, int prot,
+						     enum iommu_resv_type type,
+						     gfp_t gfp)
 {
 	struct iommu_resv_region *region;
 
@@ -2774,11 +2797,25 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
 
 	INIT_LIST_HEAD(&region->list);
 	region->start = start;
+	if (type == IOMMU_RESV_TRANSLATED)
+		region->dva = dva_start;
 	region->length = length;
 	region->prot = prot;
 	region->type = type;
 	return region;
 }
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region_tr);
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+						  size_t length, int prot,
+						  enum iommu_resv_type type,
+						  gfp_t gfp)
+{
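+	/* Translated regions carry a device VA; callers must use iommu_alloc_resv_region_tr() */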
+	if (type == IOMMU_RESV_TRANSLATED)
+		return NULL;
+
+	return iommu_alloc_resv_region_tr(start, 0, length, prot, type, gfp);
+}
 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
 
 void iommu_set_default_passthrough(bool cmd_line)
@@ -2928,7 +2965,7 @@ static int iommu_setup_default_domain(struct iommu_group *group,
 	struct iommu_domain *old_dom = group->default_domain;
 	struct group_device *gdev;
 	struct iommu_domain *dom;
-	bool direct_failed;
+	bool fw_failed;
 	int req_type;
 	int ret;
 
@@ -2958,10 +2995,10 @@ static int iommu_setup_default_domain(struct iommu_group *group,
 	 * mapped before their device is attached, in order to guarantee
 	 * continuity with any FW activity
 	 */
-	direct_failed = false;
+	fw_failed = false;
 	for_each_group_device(group, gdev) {
-		if (iommu_create_device_direct_mappings(dom, gdev->dev)) {
-			direct_failed = true;
+		if (iommu_create_device_fw_mappings(dom, gdev->dev)) {
+			fw_failed = true;
 			dev_warn_once(
 				gdev->dev->iommu->iommu_dev->dev,
 				"IOMMU driver was not able to establish FW requested direct mapping.");
@@ -2993,9 +3030,9 @@ static int iommu_setup_default_domain(struct iommu_group *group,
 	 * trying again after attaching. If this happens it means the device
 	 * will not continuously have the IOMMU_RESV_DIRECT map.
 	 */
-	if (direct_failed) {
+	if (fw_failed) {
 		for_each_group_device(group, gdev) {
-			ret = iommu_create_device_direct_mappings(dom, gdev->dev);
+			ret = iommu_create_device_fw_mappings(dom, gdev->dev);
 			if (ret)
 				goto err_restore_domain;
 		}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 97987cd78da934..89d64f1a269a3f 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -145,6 +145,8 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
 		of_pci_check_device_ats(dev, master_np);
 	} else {
 		err = of_iommu_configure_device(master_np, dev, id);
+		if (err == -EPROBE_DEFER)
+			iommu_fwspec_free(dev);
 	}
 
 	if (err)
@@ -178,9 +180,7 @@ iommu_resv_region_get_type(struct device *dev,
 	if (start == phys->start && end == phys->end)
 		return IOMMU_RESV_DIRECT;
 
-	dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
-		 &start, &end);
-	return IOMMU_RESV_RESERVED;
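+	/* Non-identity firmware mappings are preserved as translated regions */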
+	return IOMMU_RESV_TRANSLATED;
 }
 
 /**
@@ -251,8 +251,13 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
 				}
 				type = iommu_resv_region_get_type(dev, &phys, iova, length);
 
-				region = iommu_alloc_resv_region(iova, length, prot, type,
+				if (type == IOMMU_RESV_TRANSLATED)
+					region = iommu_alloc_resv_region_tr(phys.start, iova, length, prot, type,
+								    GFP_KERNEL);
+				else
+					region = iommu_alloc_resv_region(iova, length, prot, type,
 								 GFP_KERNEL);
+
 				if (region)
 					list_add_tail(&region->list, list);
 			}
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2b1684c60e3cac..a767cc1dd47aee 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -54,6 +54,7 @@
 #include <linux/irqdomain.h>
 #include <linux/jump_label.h>
 #include <linux/limits.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <asm/apple_m1_pmu.h>
@@ -251,6 +252,9 @@ struct aic_info {
 	u32 mask_set;
 	u32 mask_clr;
 
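+	/* Capability/IRQ-count register offsets: fixed for AIC2, read from the DT for AIC3 */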
+	u32 cap0_off;
+	u32 maxnumirq_off;
+
 	u32 die_stride;
 
 	/* Features */
@@ -288,6 +292,14 @@ static const struct aic_info aic2_info __initconst = {
 	.version	= 2,
 
 	.irq_cfg	= AIC2_IRQ_CFG,
+	.cap0_off	= AIC2_INFO1,
+	.maxnumirq_off	= AIC2_INFO3,
+
+	.fast_ipi	= true,
+};
+
+static const struct aic_info aic3_info __initconst = {
+	.version	= 3,
 
 	.fast_ipi	= true,
 	.local_fast_ipi = true,
@@ -310,6 +322,10 @@ static const struct of_device_id aic_info_match[] = {
 		.compatible = "apple,aic2",
 		.data = &aic2_info,
 	},
+	{
+		.compatible = "apple,aic3",
+		.data = &aic3_info,
+	},
 	{}
 };
 
@@ -624,7 +640,7 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
 	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
 	struct irq_chip *chip = &aic_chip;
 
-	if (ic->info.version == 2)
+	if (ic->info.version == 2 || ic->info.version == 3)
 		chip = &aic2_chip;
 
 	if (type == AIC_EVENT_TYPE_IRQ) {
@@ -931,6 +947,7 @@ static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
 
 static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
 {
+	int ret;
 	int i, die;
 	u32 off, start_off;
 	void __iomem *regs;
@@ -974,11 +991,24 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 
 		break;
 	}
+	case 3:
+		/* AIC version 3: read the register offsets from the device tree */
+		/* TODO: extint-baseaddress? */
+		ret = of_property_read_u32(node, "config-offset", &irqc->info.irq_cfg);
+		if (ret < 0)
+			return ret;
+		ret = of_property_read_u32(node, "cap0-offset", &irqc->info.cap0_off);
+		if (ret < 0)
+			return ret;
+		ret = of_property_read_u32(node, "maxnumirq-offset", &irqc->info.maxnumirq_off);
+		if (ret < 0)
+			return ret;
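+		/* The remaining setup is shared with the AIC2 path below */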
+		fallthrough;
 	case 2: {
 		u32 info1, info3;
 
-		info1 = aic_ic_read(irqc, AIC2_INFO1);
-		info3 = aic_ic_read(irqc, AIC2_INFO3);
+		info1 = aic_ic_read(irqc, irqc->info.cap0_off);
+		info3 = aic_ic_read(irqc, irqc->info.maxnumirq_off);
 
 		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
 		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
@@ -1048,7 +1078,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 		off += irqc->info.die_stride;
 	}
 
-	if (irqc->info.version == 2) {
+	if (irqc->info.version == 2 || irqc->info.version == 3) {
 		u32 config = aic_ic_read(irqc, AIC2_CONFIG);
 
 		config |= AIC2_CONFIG_ENABLE;
@@ -1099,3 +1129,4 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
 
 IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
 IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
+IRQCHIP_DECLARE(apple_aic3, "apple,aic3", aic_of_ic_init);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 85d2627776b6a4..ba75cfdb57f710 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -65,6 +65,7 @@ config VIDEO_MUX
 source "drivers/media/platform/allegro-dvt/Kconfig"
 source "drivers/media/platform/amlogic/Kconfig"
 source "drivers/media/platform/amphion/Kconfig"
+source "drivers/media/platform/apple/Kconfig"
 source "drivers/media/platform/aspeed/Kconfig"
 source "drivers/media/platform/atmel/Kconfig"
 source "drivers/media/platform/broadcom/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index ace4e34483ddce..e59e4259064bf0 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -8,6 +8,7 @@
 obj-y += allegro-dvt/
 obj-y += amlogic/
 obj-y += amphion/
+obj-y += apple/
 obj-y += aspeed/
 obj-y += atmel/
 obj-y += broadcom/
diff --git a/drivers/media/platform/apple/Kconfig b/drivers/media/platform/apple/Kconfig
new file mode 100644
index 00000000000000..f16508bff5242a
--- /dev/null
+++ b/drivers/media/platform/apple/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Apple media platform drivers"
+
+source "drivers/media/platform/apple/isp/Kconfig"
diff --git a/drivers/media/platform/apple/Makefile b/drivers/media/platform/apple/Makefile
new file mode 100644
index 00000000000000..d8fe985b0e6c37
--- /dev/null
+++ b/drivers/media/platform/apple/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += isp/
diff --git a/drivers/media/platform/apple/isp/.gitignore b/drivers/media/platform/apple/isp/.gitignore
new file mode 100644
index 00000000000000..bd7fab40e0d98a
--- /dev/null
+++ b/drivers/media/platform/apple/isp/.gitignore
@@ -0,0 +1 @@
+.clang-format
diff --git a/drivers/media/platform/apple/isp/Kconfig b/drivers/media/platform/apple/isp/Kconfig
new file mode 100644
index 00000000000000..5695bef44adf5b
--- /dev/null
+++ b/drivers/media/platform/apple/isp/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_APPLE_ISP
+	tristate "Apple Silicon Image Signal Processor driver"
+	select VIDEOBUF2_CORE
+	select VIDEOBUF2_V4L2
+	select VIDEOBUF2_DMA_SG
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on OF_ADDRESS
+	depends on V4L_PLATFORM_DRIVERS
+	depends on VIDEO_DEV
diff --git a/drivers/media/platform/apple/isp/Makefile b/drivers/media/platform/apple/isp/Makefile
new file mode 100644
index 00000000000000..4649f32987f025
--- /dev/null
+++ b/drivers/media/platform/apple/isp/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+apple-isp-y := isp-cam.o isp-cmd.o isp-drv.o isp-fw.o isp-iommu.o isp-ipc.o isp-v4l2.o
+obj-$(CONFIG_VIDEO_APPLE_ISP) += apple-isp.o
diff --git a/drivers/media/platform/apple/isp/isp-cam.c b/drivers/media/platform/apple/isp/isp-cam.c
new file mode 100644
index 00000000000000..c889173bd348f3
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-cam.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#include <linux/firmware.h>
+
+#include "isp-cam.h"
+#include "isp-cmd.h"
+#include "isp-fw.h"
+#include "isp-iommu.h"
+
+#define ISP_MAX_PRESETS 32
+
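+/* Per-sensor calibration ("setfile") blobs; magic is the big-endian word at the start of the file */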
+struct isp_setfile {
+	u32 version;
+	u32 magic;
+	const char *path;
+	size_t size;
+};
+
+// clang-format off
+static const struct isp_setfile isp_setfiles[] = {
+	[ISP_IMX248_1820_01] = {0x248, 0x18200103, "apple/isp_1820_01XX.dat", 0x442c},
+	[ISP_IMX248_1822_02] = {0x248, 0x18220201, "apple/isp_1822_02XX.dat", 0x442c},
+	[ISP_IMX343_5221_02] = {0x343, 0x52210211, "apple/isp_5221_02XX.dat", 0x4870},
+	[ISP_IMX354_9251_02] = {0x354, 0x92510208, "apple/isp_9251_02XX.dat", 0xa5ec},
+	[ISP_IMX356_4820_01] = {0x356, 0x48200107, "apple/isp_4820_01XX.dat", 0x9324},
+	[ISP_IMX356_4820_02] = {0x356, 0x48200206, "apple/isp_4820_02XX.dat", 0x9324},
+	[ISP_IMX364_8720_01] = {0x364, 0x87200103, "apple/isp_8720_01XX.dat", 0x36ac},
+	[ISP_IMX364_8723_01] = {0x364, 0x87230101, "apple/isp_8723_01XX.dat", 0x361c},
+	[ISP_IMX372_3820_01] = {0x372, 0x38200108, "apple/isp_3820_01XX.dat", 0xfdb0},
+	[ISP_IMX372_3820_02] = {0x372, 0x38200205, "apple/isp_3820_02XX.dat", 0xfdb0},
+	[ISP_IMX372_3820_11] = {0x372, 0x38201104, "apple/isp_3820_11XX.dat", 0xfdb0},
+	[ISP_IMX372_3820_12] = {0x372, 0x38201204, "apple/isp_3820_12XX.dat", 0xfdb0},
+	[ISP_IMX405_9720_01] = {0x405, 0x97200102, "apple/isp_9720_01XX.dat", 0x92c8},
+	[ISP_IMX405_9721_01] = {0x405, 0x97210102, "apple/isp_9721_01XX.dat", 0x9818},
+	[ISP_IMX405_9723_01] = {0x405, 0x97230101, "apple/isp_9723_01XX.dat", 0x92c8},
+	[ISP_IMX414_2520_01] = {0x414, 0x25200102, "apple/isp_2520_01XX.dat", 0xa444},
+	[ISP_IMX503_7820_01] = {0x503, 0x78200109, "apple/isp_7820_01XX.dat", 0xb268},
+	[ISP_IMX503_7820_02] = {0x503, 0x78200206, "apple/isp_7820_02XX.dat", 0xb268},
+	[ISP_IMX505_3921_01] = {0x505, 0x39210102, "apple/isp_3921_01XX.dat", 0x89b0},
+	[ISP_IMX514_2820_01] = {0x514, 0x28200108, "apple/isp_2820_01XX.dat", 0xa198},
+	[ISP_IMX514_2820_02] = {0x514, 0x28200205, "apple/isp_2820_02XX.dat", 0xa198},
+	[ISP_IMX514_2820_03] = {0x514, 0x28200305, "apple/isp_2820_03XX.dat", 0xa198},
+	[ISP_IMX514_2820_04] = {0x514, 0x28200405, "apple/isp_2820_04XX.dat", 0xa198},
+	[ISP_IMX558_1921_01] = {0x558, 0x19210106, "apple/isp_1921_01XX.dat", 0xad40},
+	[ISP_IMX558_1922_02] = {0x558, 0x19220201, "apple/isp_1922_02XX.dat", 0xad40},
+	[ISP_IMX603_7920_01] = {0x603, 0x79200109, "apple/isp_7920_01XX.dat", 0xad2c},
+	[ISP_IMX603_7920_02] = {0x603, 0x79200205, "apple/isp_7920_02XX.dat", 0xad2c},
+	[ISP_IMX603_7921_01] = {0x603, 0x79210104, "apple/isp_7921_01XX.dat", 0xad90},
+	[ISP_IMX613_4920_01] = {0x613, 0x49200108, "apple/isp_4920_01XX.dat", 0x9324},
+	[ISP_IMX613_4920_02] = {0x613, 0x49200204, "apple/isp_4920_02XX.dat", 0x9324},
+	[ISP_IMX614_2921_01] = {0x614, 0x29210107, "apple/isp_2921_01XX.dat", 0xed6c},
+	[ISP_IMX614_2921_02] = {0x614, 0x29210202, "apple/isp_2921_02XX.dat", 0xed6c},
+	[ISP_IMX614_2922_02] = {0x614, 0x29220201, "apple/isp_2922_02XX.dat", 0xed6c},
+	[ISP_IMX633_3622_01] = {0x633, 0x36220111, "apple/isp_3622_01XX.dat", 0x100d4},
+	[ISP_IMX703_7721_01] = {0x703, 0x77210106, "apple/isp_7721_01XX.dat", 0x936c},
+	[ISP_IMX703_7722_01] = {0x703, 0x77220106, "apple/isp_7722_01XX.dat", 0xac20},
+	[ISP_IMX713_4721_01] = {0x713, 0x47210107, "apple/isp_4721_01XX.dat", 0x936c},
+	[ISP_IMX713_4722_01] = {0x713, 0x47220109, "apple/isp_4722_01XX.dat", 0x9218},
+	[ISP_IMX714_2022_01] = {0x714, 0x20220107, "apple/isp_2022_01XX.dat", 0xa198},
+	[ISP_IMX772_3721_01] = {0x772, 0x37210106, "apple/isp_3721_01XX.dat", 0xfdf8},
+	[ISP_IMX772_3721_11] = {0x772, 0x37211106, "apple/isp_3721_11XX.dat", 0xfe14},
+	[ISP_IMX772_3722_01] = {0x772, 0x37220104, "apple/isp_3722_01XX.dat", 0xfca4},
+	[ISP_IMX772_3723_01] = {0x772, 0x37230106, "apple/isp_3723_01XX.dat", 0xfca4},
+	[ISP_IMX814_2123_01] = {0x814, 0x21230101, "apple/isp_2123_01XX.dat", 0xed54},
+	[ISP_IMX853_7622_01] = {0x853, 0x76220112, "apple/isp_7622_01XX.dat", 0x247f8},
+	[ISP_IMX913_7523_01] = {0x913, 0x75230107, "apple/isp_7523_01XX.dat", 0x247f8},
+	[ISP_VD56G0_6221_01] = {0xd56, 0x62210102, "apple/isp_6221_01XX.dat", 0x1b80},
+	[ISP_VD56G0_6222_01] = {0xd56, 0x62220102, "apple/isp_6222_01XX.dat", 0x1b80},
+};
+// clang-format on
+
+static int isp_ch_get_sensor_id(struct apple_isp *isp, u32 ch)
+{
+	struct isp_format *fmt = isp_get_format(isp, ch);
+	enum isp_sensor_id id;
+	int err = 0;
+
+	/*
+	 * TODO: more datapoints are needed to figure out the sub-versions.
+	 * Default to the first release for now; the calibration files aren't
+	 * too different between them.
+	 */
+	switch (fmt->version) {
+	case 0x248:
+		id = ISP_IMX248_1820_01;
+		break;
+	case 0x343:
+		id = ISP_IMX343_5221_02;
+		break;
+	case 0x354:
+		id = ISP_IMX354_9251_02;
+		break;
+	case 0x356:
+		id = ISP_IMX356_4820_01;
+		break;
+	case 0x364:
+		id = ISP_IMX364_8720_01;
+		break;
+	case 0x372:
+		id = ISP_IMX372_3820_01;
+		break;
+	case 0x405:
+		id = ISP_IMX405_9720_01;
+		break;
+	case 0x414:
+		id = ISP_IMX414_2520_01;
+		break;
+	case 0x503:
+		id = ISP_IMX503_7820_01;
+		break;
+	case 0x505:
+		id = ISP_IMX505_3921_01;
+		break;
+	case 0x514:
+		id = ISP_IMX514_2820_01;
+		break;
+	case 0x558:
+		id = ISP_IMX558_1921_01;
+		break;
+	case 0x603:
+		id = ISP_IMX603_7920_01;
+		break;
+	case 0x613:
+		id = ISP_IMX613_4920_01;
+		break;
+	case 0x614:
+		id = ISP_IMX614_2921_01;
+		break;
+	case 0x633:
+		id = ISP_IMX633_3622_01;
+		break;
+	case 0x703:
+		id = ISP_IMX703_7721_01;
+		break;
+	case 0x713:
+		id = ISP_IMX713_4721_01;
+		break;
+	case 0x714:
+		id = ISP_IMX714_2022_01;
+		break;
+	case 0x772:
+		id = ISP_IMX772_3721_01;
+		break;
+	case 0x814:
+		id = ISP_IMX814_2123_01;
+		break;
+	case 0x853:
+		id = ISP_IMX853_7622_01;
+		break;
+	case 0x913:
+		id = ISP_IMX913_7523_01;
+		break;
+	case 0xd56:
+		id = ISP_VD56G0_6221_01;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (err)
+		dev_err(isp->dev, "invalid sensor version: 0x%x\n",
+			fmt->version);
+	else
+		fmt->id = id;
+
+	return err;
+}
+
+static int isp_ch_get_camera_preset(struct apple_isp *isp, u32 ch, u32 ps)
+{
+	int err = 0;
+
+	struct cmd_ch_camera_config *args; /* Too big to allocate on stack */
+	args = kzalloc(sizeof(*args), GFP_KERNEL);
+	if (!args)
+		return -ENOMEM;
+
+	err = isp_cmd_ch_camera_config_get(isp, ch, ps, args);
+	if (err)
+		goto exit;
+
+	pr_info("apple-isp: ps: CISP_CMD_CH_CAMERA_CONFIG_GET: %d\n", ps);
+	print_hex_dump(KERN_INFO, "apple-isp: ps: ", DUMP_PREFIX_NONE, 32, 4,
+		       args, sizeof(*args), false);
+
+exit:
+	kfree(args);
+
+	return err;
+}
+
+static int isp_ch_cache_sensor_info(struct apple_isp *isp, u32 ch)
+{
+	struct isp_format *fmt = isp_get_format(isp, ch);
+	int err = 0;
+
+	struct cmd_ch_info *args; /* Too big to allocate on stack */
+	args = kzalloc(sizeof(*args), GFP_KERNEL);
+	if (!args)
+		return -ENOMEM;
+
+	err = isp_cmd_ch_info_get(isp, ch, args);
+	if (err)
+		goto exit;
+
+	dev_info(isp->dev, "found sensor %x %s on ch %d\n", args->version,
+		 args->module_sn, ch);
+
+	fmt->version = args->version;
+
+	pr_info("apple-isp: ch: CISP_CMD_CH_INFO_GET: %d\n", ch);
+	print_hex_dump(KERN_INFO, "apple-isp: ch: ", DUMP_PREFIX_NONE, 32, 4,
+		       args, sizeof(*args), false);
+
+	for (u32 ps = 0; ps < args->num_presets; ps++) {
+		isp_ch_get_camera_preset(isp, ch, ps);
+	}
+
+	err = isp_ch_get_sensor_id(isp, ch);
+	if (err ||
+	    (fmt->id != ISP_IMX248_1820_01 && fmt->id != ISP_IMX558_1921_01 &&
+	     fmt->id != ISP_IMX364_8720_01)) {
+		dev_err(isp->dev,
+			"ch %d: unsupported sensor. Please file a bug report with hardware info & dmesg trace.\n",
+			ch);
+		err = -ENODEV; /* fall through to exit so args is freed */
+	}
+
+exit:
+	kfree(args);
+
+	return err;
+}
+
+static int isp_detect_camera(struct apple_isp *isp)
+{
+	int err;
+
+	struct cmd_config_get args;
+	memset(&args, 0, sizeof(args));
+
+	err = isp_cmd_config_get(isp, &args);
+	if (err)
+		return err;
+
+	pr_info("apple-isp: CISP_CMD_CONFIG_GET: \n");
+	print_hex_dump(KERN_INFO, "apple-isp: ", DUMP_PREFIX_NONE, 32, 4, &args,
+		       sizeof(args), false);
+
+	if (!args.num_channels) {
+		dev_err(isp->dev, "did not detect any channels\n");
+		return -ENODEV;
+	}
+
+	if (args.num_channels > ISP_MAX_CHANNELS) {
+		dev_warn(isp->dev, "found %d channels when maximum is %d\n",
+			 args.num_channels, ISP_MAX_CHANNELS);
+		args.num_channels = ISP_MAX_CHANNELS;
+	}
+
+	if (args.num_channels > 1)
+		dev_warn(isp->dev,
+			 "driver doesn't support multiple channels. Please file a bug report with hardware info & dmesg trace.\n");
+
+	isp->num_channels = args.num_channels;
+	isp->current_ch = 0;
+
+	err = isp_ch_cache_sensor_info(isp, isp->current_ch);
+	if (err) {
+		dev_err(isp->dev, "failed to cache sensor info\n");
+		return err;
+	}
+
+	return 0;
+}
+
+int apple_isp_detect_camera(struct apple_isp *isp)
+{
+	int err;
+
+	/* RPM must be enabled prior to calling this */
+	err = apple_isp_firmware_boot(isp);
+	if (err) {
+		dev_err(isp->dev,
+			"failed to boot firmware for initial sensor detection: %d\n",
+			err);
+		return -EPROBE_DEFER;
+	}
+
+	err = isp_detect_camera(isp);
+
+	isp_cmd_flicker_sensor_set(isp, 0);
+
+	isp_cmd_ch_stop(isp, 0);
+	isp_cmd_ch_buffer_return(isp, isp->current_ch);
+
+	apple_isp_firmware_shutdown(isp);
+
+	return err;
+}
+
+static int isp_ch_load_setfile(struct apple_isp *isp, u32 ch)
+{
+	struct isp_format *fmt = isp_get_format(isp, ch);
+	const struct isp_setfile *setfile = &isp_setfiles[fmt->id];
+	const struct firmware *fw;
+	u32 magic;
+	int err;
+
+	err = request_firmware(&fw, setfile->path, isp->dev);
+	if (err) {
+		dev_err(isp->dev, "failed to request setfile '%s': %d\n",
+			setfile->path, err);
+		return err;
+	}
+
+	if (fw->size < setfile->size) {
+		dev_err(isp->dev, "setfile too small (0x%lx/0x%zx)\n", fw->size,
+			setfile->size);
+		release_firmware(fw);
+		return -EINVAL;
+	}
+
+	magic = be32_to_cpup((__be32 *)fw->data);
+	if (magic != setfile->magic) {
+		dev_err(isp->dev, "setfile '%s' corrupted?\n", setfile->path);
+		release_firmware(fw);
+		return -EINVAL;
+	}
+
+	memcpy(isp->data_surf->virt, (void *)fw->data, setfile->size);
+	release_firmware(fw);
+
+	return isp_cmd_ch_set_file_load(isp, ch, isp->data_surf->iova,
+					setfile->size);
+}
+
+static int isp_ch_configure_capture(struct apple_isp *isp, u32 ch)
+{
+	struct isp_format *fmt = isp_get_format(isp, ch);
+	int err;
+
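+	/* Bring the channel up: calibration data, LPDP tuning, crop/output format, AE parameters, temporal filtering and buffer pools */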
+	isp_cmd_flicker_sensor_set(isp, 0);
+
+	/* The setfile isn't required, but without it we don't get calibration data */
+	err = isp_ch_load_setfile(isp, ch);
+	if (err) {
+		dev_warn(isp->dev, "calibration data not loaded: %d\n", err);
+
+		/* If this failed due to a signal, propagate */
+		if (err == -EINTR)
+			return err;
+	}
+
+	if (isp->hw->lpdp) {
+		err = isp_cmd_ch_lpdp_hs_receiver_tuning_set(isp, ch, 1, 15);
+		if (err)
+			return err;
+	}
+
+	err = isp_cmd_ch_sbs_enable(isp, ch, 1);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_camera_config_select(isp, ch, fmt->preset->index);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_buffer_recycle_mode_set(
+		isp, ch, CISP_BUFFER_RECYCLE_MODE_EMPTY_ONLY);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_buffer_recycle_start(isp, ch);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_crop_set(isp, ch, fmt->preset->crop_offset.x,
+				  fmt->preset->crop_offset.y,
+				  fmt->preset->crop_size.x,
+				  fmt->preset->crop_size.y);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_output_config_set(isp, ch, fmt->preset->output_dim.x,
+					   fmt->preset->output_dim.y,
+					   fmt->strides, CISP_COLORSPACE_REC709,
+					   CISP_OUTPUT_FORMAT_YUV_2PLANE);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_preview_stream_set(isp, ch, 1);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_cnr_start(isp, ch);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_mbnr_enable(isp, ch, 0, ISP_MBNR_MODE_ENABLE, 1);
+	if (err)
+		return err;
+
+	err = isp_cmd_apple_ch_ae_fd_scene_metering_config_set(isp, ch);
+	if (err)
+		return err;
+
+	err = isp_cmd_apple_ch_ae_metering_mode_set(isp, ch, 3);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_ae_stability_set(isp, ch, 32);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_ae_stability_to_stable_set(isp, ch, 20);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_sif_pixel_format_set(isp, ch);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_ae_frame_rate_max_set(isp, ch, ISP_FRAME_RATE_DEN);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_ae_frame_rate_min_set(isp, ch, ISP_FRAME_RATE_DEN2);
+	if (err)
+		return err;
+
+	err = isp_cmd_apple_ch_temporal_filter_start(isp, ch, isp->temporal_filter);
+	if (err)
+		return err;
+
+	err = isp_cmd_apple_ch_motion_history_start(isp, ch);
+	if (err)
+		return err;
+
+	err = isp_cmd_apple_ch_temporal_filter_enable(isp, ch);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_buffer_pool_config_set(isp, ch, CISP_POOL_TYPE_META);
+	if (err)
+		return err;
+
+	err = isp_cmd_ch_buffer_pool_config_set(isp, ch,
+						CISP_POOL_TYPE_META_CAPTURE);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int isp_configure_capture(struct apple_isp *isp)
+{
+	return isp_ch_configure_capture(isp, isp->current_ch);
+}
+
+int apple_isp_start_camera(struct apple_isp *isp)
+{
+	int err;
+
+	err = apple_isp_firmware_boot(isp);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to boot firmware: %d\n", err);
+		return err;
+	}
+
+	err = isp_configure_capture(isp);
+	if (err) {
+		dev_err(isp->dev, "failed to configure capture: %d\n", err);
+		apple_isp_firmware_shutdown(isp);
+		return err;
+	}
+
+	return 0;
+}
+
+void apple_isp_stop_camera(struct apple_isp *isp)
+{
+	apple_isp_firmware_shutdown(isp);
+}
+
+int apple_isp_start_capture(struct apple_isp *isp)
+{
+	return isp_cmd_ch_start(isp, 0); // TODO channel mask
+}
+
+void apple_isp_stop_capture(struct apple_isp *isp)
+{
+	isp_cmd_ch_stop(isp, 0); // TODO channel mask
+	isp_cmd_ch_buffer_return(isp, isp->current_ch);
+}
diff --git a/drivers/media/platform/apple/isp/isp-cam.h b/drivers/media/platform/apple/isp/isp-cam.h
new file mode 100644
index 00000000000000..f4fa4224c7a934
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-cam.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_CAM_H__
+#define __ISP_CAM_H__
+
+#include "isp-drv.h"
+
+#define ISP_FRAME_RATE_NUM 256
+#define ISP_FRAME_RATE_DEN 7680
+#define ISP_FRAME_RATE_DEN2 3840
+
+int apple_isp_detect_camera(struct apple_isp *isp);
+
+int apple_isp_start_camera(struct apple_isp *isp);
+void apple_isp_stop_camera(struct apple_isp *isp);
+
+int apple_isp_start_capture(struct apple_isp *isp);
+void apple_isp_stop_capture(struct apple_isp *isp);
+
+#endif /* __ISP_CAM_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-cmd.c b/drivers/media/platform/apple/isp/isp-cmd.c
new file mode 100644
index 00000000000000..ee491d2cb42c5b
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-cmd.c
@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#include "isp-cmd.h"
+#include "isp-drv.h"
+#include "isp-iommu.h"
+#include "isp-ipc.h"
+
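+/* Every command struct starts with a u64 whose upper 32 bits carry the CISP opcode */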
+#define CISP_OPCODE_SHIFT     32UL
+#define CISP_OPCODE(x)	      (((u64)(x)) << CISP_OPCODE_SHIFT)
+#define CISP_OPCODE_GET(x)    (((u64)(x)) >> CISP_OPCODE_SHIFT)
+
+#define CISP_TIMEOUT	      msecs_to_jiffies(3000)
+#define CISP_SEND_IN(x, a)    (cisp_send((x), &(a), sizeof(a), 0, CISP_TIMEOUT))
+#define CISP_SEND_INOUT(x, a) (cisp_send((x), &(a), sizeof(a), sizeof(a), CISP_TIMEOUT))
+#define CISP_SEND_OUT(x, a)   (cisp_send_read((x), (a), sizeof(*a), sizeof(*a)))
+#define CISP_POST_IN(x, a)    (cisp_send((x), &(a), sizeof(a), 0, 0))
+#define CISP_POST_INOUT(x, a) (cisp_send((x), &(a), sizeof(a), sizeof(a), 0))
+
+static int cisp_send(struct apple_isp *isp, void *args, u32 insize, u32 outsize, int timeout)
+{
+	struct isp_channel *chan = isp->chan_io;
+	struct isp_message *req = &chan->req;
+	int err;
+
+	req->arg0 = isp->cmd_iova;
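+	/* Marshal the command into the shared command buffer and pass its IOVA and sizes in the IPC message */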
+	req->arg1 = insize;
+	req->arg2 = outsize;
+
+	memcpy(isp->cmd_virt, args, insize);
+	err = ipc_chan_send(isp, chan, timeout);
+	if (err) {
+		u64 opcode;
+		memcpy(&opcode, args, sizeof(opcode));
+		dev_err(isp->dev,
+			"%s: failed to send OPCODE 0x%04llx: [0x%llx, 0x%llx, 0x%llx]\n",
+			chan->name, CISP_OPCODE_GET(opcode), req->arg0,
+			req->arg1, req->arg2);
+	}
+
+	return err;
+}
+
+static int cisp_send_read(struct apple_isp *isp, void *args, u32 insize,
+			  u32 outsize)
+{
+	/* TODO do I need to lock the iova space? */
+	int err = cisp_send(isp, args, insize, outsize, CISP_TIMEOUT);
+	if (err)
+		return err;
+
+	memcpy(args, isp->cmd_virt, outsize);
+	return 0;
+}
+
+int isp_cmd_start(struct apple_isp *isp, u32 mode)
+{
+	struct cmd_start args = {
+		.opcode = CISP_OPCODE(CISP_CMD_START),
+		.mode = mode,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_stop(struct apple_isp *isp, u32 mode)
+{
+	struct cmd_stop args = {
+		.opcode = CISP_OPCODE(CISP_CMD_STOP),
+		.mode = mode,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_power_down(struct apple_isp *isp)
+{
+	struct cmd_power_down args = {
+		.opcode = CISP_OPCODE(CISP_CMD_POWER_DOWN),
+	};
+	return CISP_POST_INOUT(isp, args);
+}
+
+int isp_cmd_suspend(struct apple_isp *isp)
+{
+	struct cmd_suspend args = {
+		.opcode = CISP_OPCODE(CISP_CMD_SUSPEND),
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_print_enable(struct apple_isp *isp, u32 enable)
+{
+	struct cmd_print_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_PRINT_ENABLE),
+		.enable = enable,
+	};
+	return CISP_SEND_INOUT(isp, args);
+}
+
+int isp_cmd_trace_enable(struct apple_isp *isp, u32 enable)
+{
+	struct cmd_trace_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_TRACE_ENABLE),
+		.enable = enable,
+	};
+	return CISP_SEND_INOUT(isp, args);
+}
+
+int isp_cmd_config_get(struct apple_isp *isp, struct cmd_config_get *args)
+{
+	args->opcode = CISP_OPCODE(CISP_CMD_CONFIG_GET);
+	return CISP_SEND_OUT(isp, args);
+}
+
+int isp_cmd_set_isp_pmu_base(struct apple_isp *isp, u64 pmu_base)
+{
+	struct cmd_set_isp_pmu_base args = {
+		.opcode = CISP_OPCODE(CISP_CMD_SET_ISP_PMU_BASE),
+		.pmu_base = pmu_base,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_set_dsid_clr_req_base2(struct apple_isp *isp, u64 dsid_clr_base0,
+				   u64 dsid_clr_base1, u64 dsid_clr_base2,
+				   u64 dsid_clr_base3, u32 dsid_clr_range0,
+				   u32 dsid_clr_range1, u32 dsid_clr_range2,
+				   u32 dsid_clr_range3)
+{
+	struct cmd_set_dsid_clr_req_base2 args = {
+		.opcode = CISP_OPCODE(CISP_CMD_SET_DSID_CLR_REG_BASE2),
+		.dsid_clr_base0 = dsid_clr_base0,
+		.dsid_clr_base1 = dsid_clr_base1,
+		.dsid_clr_base2 = dsid_clr_base2,
+		.dsid_clr_base3 = dsid_clr_base3,
+		.dsid_clr_range0 = dsid_clr_range0,
+		.dsid_clr_range1 = dsid_clr_range1,
+		.dsid_clr_range2 = dsid_clr_range2,
+		.dsid_clr_range3 = dsid_clr_range3,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_set_dsid_clr_req_base(struct apple_isp *isp, u64 dsid_clr_base,
+				  u32 dsid_clr_range)
+{
+	struct cmd_set_dsid_clr_req_base args = {
+		.opcode = CISP_OPCODE(CISP_CMD_SET_DSID_CLR_REG_BASE),
+		.dsid_clr_base = dsid_clr_base,
+		.dsid_clr_range = dsid_clr_range,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_pmp_ctrl_set(struct apple_isp *isp, u64 clock_scratch,
+			 u64 clock_base, u8 clock_bit, u8 clock_size,
+			 u64 bandwidth_scratch, u64 bandwidth_base,
+			 u8 bandwidth_bit, u8 bandwidth_size)
+{
+	struct cmd_pmp_ctrl_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_PMP_CTRL_SET),
+		.clock_scratch = clock_scratch,
+		.clock_base = clock_base,
+		.clock_bit = clock_bit,
+		.clock_size = clock_size,
+		.clock_pad = 0,
+		.bandwidth_scratch = bandwidth_scratch,
+		.bandwidth_base = bandwidth_base,
+		.bandwidth_bit = bandwidth_bit,
+		.bandwidth_size = bandwidth_size,
+		.bandwidth_pad = 0,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_fid_enter(struct apple_isp *isp)
+{
+	struct cmd_fid_enter args = {
+		.opcode = CISP_OPCODE(CISP_CMD_FID_ENTER),
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_fid_exit(struct apple_isp *isp)
+{
+	struct cmd_fid_exit args = {
+		.opcode = CISP_OPCODE(CISP_CMD_FID_EXIT),
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_start(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_start args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_START),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_stop(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_stop args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_STOP),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_flicker_sensor_set(struct apple_isp *isp, u32 mode)
+{
+	struct cmd_flicker_sensor_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_FLICKER_SENSOR_SET),
+		.mode = mode,
+	};
+	return CISP_SEND_INOUT(isp, args);
+}
+
+int isp_cmd_ch_info_get(struct apple_isp *isp, u32 chan,
+			struct cmd_ch_info *args)
+{
+	args->opcode = CISP_OPCODE(CISP_CMD_CH_INFO_GET);
+	args->chan = chan;
+	return CISP_SEND_OUT(isp, args);
+}
+
+int isp_cmd_ch_camera_config_get(struct apple_isp *isp, u32 chan, u32 preset,
+				 struct cmd_ch_camera_config *args)
+{
+	args->opcode = CISP_OPCODE(CISP_CMD_CH_CAMERA_CONFIG_GET);
+	args->preset = preset;
+	args->chan = chan;
+	return CISP_SEND_OUT(isp, args);
+}
+
+int isp_cmd_ch_camera_config_current_get(struct apple_isp *isp, u32 chan,
+					 struct cmd_ch_camera_config *args)
+{
+	args->opcode = CISP_OPCODE(CISP_CMD_CH_CAMERA_CONFIG_CURRENT_GET);
+	args->chan = chan;
+	return CISP_SEND_OUT(isp, args);
+}
+
+int isp_cmd_ch_camera_config_select(struct apple_isp *isp, u32 chan, u32 preset)
+{
+	struct cmd_ch_camera_config_select args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_CAMERA_CONFIG_SELECT),
+		.chan = chan,
+		.preset = preset,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_buffer_return(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_buffer_return args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_BUFFER_RETURN),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_set_file_load(struct apple_isp *isp, u32 chan, u64 addr,
+			     u32 size)
+{
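+	/* Firmware 13.5 and later take the 64-bit layout of this command (cmd_ch_set_file_load64) */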
+	if (isp->fw_compat >= ISP_FIRMWARE_V_13_5) {
+		struct cmd_ch_set_file_load64 args = {
+			.opcode = CISP_OPCODE(CISP_CMD_CH_SET_FILE_LOAD),
+			.chan = chan,
+			.addr = addr,
+			.size = size,
+		};
+		return CISP_SEND_IN(isp, args);
+	} else {
+		struct cmd_ch_set_file_load args = {
+			.opcode = CISP_OPCODE(CISP_CMD_CH_SET_FILE_LOAD),
+			.chan = chan,
+			.addr = addr,
+			.size = size,
+		};
+		return CISP_SEND_IN(isp, args);
+	}
+}
+
+int isp_cmd_ch_sbs_enable(struct apple_isp *isp, u32 chan, u32 enable)
+{
+	struct cmd_ch_sbs_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_SBS_ENABLE),
+		.chan = chan,
+		.enable = enable,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_crop_set(struct apple_isp *isp, u32 chan, u32 x1, u32 y1, u32 x2,
+			u32 y2)
+{
+	struct cmd_ch_crop_set args = {
+		.opcode = CISP_OPCODE(isp->hw->scl1 ? CISP_CMD_CH_CROP_SCL1_SET
+				      : CISP_CMD_CH_CROP_SET),
+		.chan = chan,
+		.x1 = x1,
+		.y1 = y1,
+		.x2 = x2,
+		.y2 = y2,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_output_config_set(struct apple_isp *isp, u32 chan, u32 width,
+				 u32 height, u32 strides[3], u32 colorspace, u32 format)
+{
+	struct cmd_ch_output_config_set args = {
+		.opcode = CISP_OPCODE(isp->hw->scl1 ? CISP_CMD_CH_OUTPUT_CONFIG_SCL1_SET
+				      : CISP_CMD_CH_OUTPUT_CONFIG_SET),
+		.chan = chan,
+		.width = width,
+		.height = height,
+		.colorspace = colorspace,
+		.format = format,
+		.padding_rows = 0,
+		.unk_h0 = height,
+		.compress = 0,
+		.unk_w2 = width,
+	};
+	memcpy(args.strides, strides, sizeof(args.strides));
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_preview_stream_set(struct apple_isp *isp, u32 chan, u32 stream)
+{
+	struct cmd_ch_preview_stream_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_PREVIEW_STREAM_SET),
+		.chan = chan,
+		.stream = stream,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_als_disable(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_als_disable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_ALS_DISABLE),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_cnr_start(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_cnr_start args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_CNR_START),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_mbnr_enable(struct apple_isp *isp, u32 chan, u32 use_case,
+			   u32 mode, u32 enable_chroma)
+{
+	struct cmd_ch_mbnr_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_MBNR_ENABLE),
+		.chan = chan,
+		.use_case = use_case,
+		.mode = mode,
+		.enable_chroma = enable_chroma,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_sif_pixel_format_set(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_sif_pixel_format_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_SIF_PIXEL_FORMAT_SET),
+		.chan = chan,
+		.format = 3,
+		.type = 1,
+		.compress = 0,
+		.unk_10 = 0,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_buffer_recycle_mode_set(struct apple_isp *isp, u32 chan,
+				       u32 mode)
+{
+	struct cmd_ch_buffer_recycle_mode_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_BUFFER_RECYCLE_MODE_SET),
+		.chan = chan,
+		.mode = mode,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_buffer_recycle_start(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_buffer_recycle_start args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_BUFFER_RECYCLE_START),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_buffer_pool_config_set(struct apple_isp *isp, u32 chan, u16 type)
+{
+	struct cmd_ch_buffer_pool_config_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_BUFFER_POOL_CONFIG_SET),
+		.chan = chan,
+		.type = type,
+		.count = ISP_MAX_BUFFERS,
+		.meta_size0 = isp->hw->meta_size,
+		.meta_size1 = isp->hw->meta_size,
+		.unk0 = 0,
+		.unk1 = 0,
+		.unk2 = 0,
+		.data_blocks = 1,
+		.compress = 0,
+	};
+	return CISP_SEND_INOUT(isp, args);
+}
+
+int isp_cmd_ch_buffer_pool_return(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_ch_buffer_pool_return args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_BUFFER_POOL_RETURN),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_temporal_filter_start(struct apple_isp *isp, u32 chan, u32 arg)
+{
+	struct cmd_apple_ch_temporal_filter_start args = {
+		.opcode = CISP_OPCODE(CISP_CMD_APPLE_CH_TEMPORAL_FILTER_START),
+		.chan = chan,
+		.unk_c = 1,
+		.unk_10 = arg,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_temporal_filter_stop(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_apple_ch_temporal_filter_stop args = {
+		.opcode = CISP_OPCODE(CISP_CMD_APPLE_CH_TEMPORAL_FILTER_STOP),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_motion_history_start(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_apple_ch_motion_history_start args = {
+		.opcode = CISP_OPCODE(CISP_CMD_APPLE_CH_MOTION_HISTORY_START),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_motion_history_stop(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_apple_ch_motion_history_stop args = {
+		.opcode = CISP_OPCODE(CISP_CMD_APPLE_CH_MOTION_HISTORY_STOP),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_temporal_filter_enable(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_apple_ch_temporal_filter_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_APPLE_CH_TEMPORAL_FILTER_ENABLE),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_temporal_filter_disable(struct apple_isp *isp, u32 chan)
+{
+	struct cmd_apple_ch_temporal_filter_disable args = {
+		.opcode =
+			CISP_OPCODE(CISP_CMD_APPLE_CH_TEMPORAL_FILTER_DISABLE),
+		.chan = chan,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_ae_stability_set(struct apple_isp *isp, u32 chan, u32 stability)
+{
+	struct cmd_ch_ae_stability_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_AE_STABILITY_SET),
+		.chan = chan,
+		.stability = stability,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_ae_stability_to_stable_set(struct apple_isp *isp, u32 chan,
+					  u32 stability)
+{
+	struct cmd_ch_ae_stability_to_stable_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_AE_STABILITY_TO_STABLE_SET),
+		.chan = chan,
+		.stability = stability,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_ae_frame_rate_max_get(struct apple_isp *isp, u32 chan,
+				     struct cmd_ch_ae_frame_rate_max_get *args)
+{
+	args->opcode = CISP_OPCODE(CISP_CMD_CH_AE_FRAME_RATE_MAX_GET);
+	args->chan = chan;
+	return CISP_SEND_OUT(isp, args);
+}
+
+int isp_cmd_ch_ae_frame_rate_max_set(struct apple_isp *isp, u32 chan,
+				     u32 framerate)
+{
+	struct cmd_ch_ae_frame_rate_max_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_AE_FRAME_RATE_MAX_SET),
+		.chan = chan,
+		.framerate = framerate,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_ae_frame_rate_min_set(struct apple_isp *isp, u32 chan,
+				     u32 framerate)
+{
+	struct cmd_ch_ae_frame_rate_min_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_AE_FRAME_RATE_MIN_SET),
+		.chan = chan,
+		.framerate = framerate,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_ae_fd_scene_metering_config_set(struct apple_isp *isp,
+						     u32 chan)
+{
+	struct cmd_apple_ch_ae_fd_scene_metering_config_set args = {
+		.opcode = CISP_OPCODE(
+			CISP_CMD_APPLE_CH_AE_FD_SCENE_METERING_CONFIG_SET),
+		.chan = chan,
+		.unk_c = 0xb8,
+		.unk_10 = 0x2000200,
+		.unk_14 = 0x280800,
+		.unk_18 = 0xe10028,
+		.unk_1c = 0xa0399,
+		.unk_20 = 0x3cc02cc,
+	};
+	return CISP_SEND_INOUT(isp, args);
+}
+
+int isp_cmd_apple_ch_ae_metering_mode_set(struct apple_isp *isp, u32 chan,
+					  u32 mode)
+{
+	struct cmd_apple_ch_ae_metering_mode_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_APPLE_CH_AE_METERING_MODE_SET),
+		.chan = chan,
+		.mode = mode,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_apple_ch_ae_flicker_freq_update_current_set(struct apple_isp *isp,
+							u32 chan, u32 freq)
+{
+	struct cmd_apple_ch_ae_flicker_freq_update_current_set args = {
+		.opcode = CISP_OPCODE(
+			CISP_CMD_APPLE_CH_AE_FLICKER_FREQ_UPDATE_CURRENT_SET),
+		.chan = chan,
+		.freq = freq,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_semantic_video_enable(struct apple_isp *isp, u32 chan,
+				     u32 enable)
+{
+	struct cmd_ch_semantic_video_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_SEMANTIC_VIDEO_ENABLE),
+		.chan = chan,
+		.enable = enable,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_semantic_awb_enable(struct apple_isp *isp, u32 chan, u32 enable)
+{
+	struct cmd_ch_semantic_awb_enable args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_SEMANTIC_AWB_ENABLE),
+		.chan = chan,
+		.enable = enable,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_lpdp_hs_receiver_tuning_set(struct apple_isp *isp, u32 chan, u32 unk1, u32 unk2)
+{
+	struct cmd_ch_lpdp_hs_receiver_tuning_set args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_LPDP_HS_RECEIVER_TUNING_SET),
+		.chan = chan,
+		.unk1 = unk1,
+		.unk2 = unk2,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_property_write(struct apple_isp *isp, u32 chan, u32 prop, u32 val)
+{
+	struct cmd_ch_property_write args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_PROPERTY_WRITE),
+		.chan = chan,
+		.prop = prop,
+		.val = val,
+	};
+	return CISP_SEND_IN(isp, args);
+}
+
+int isp_cmd_ch_property_read(struct apple_isp *isp, u32 chan, u32 prop, u32 *val)
+{
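+	/* Reuses the write command layout; the value is read back from .val */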
+	struct cmd_ch_property_write args = {
+		.opcode = CISP_OPCODE(CISP_CMD_CH_PROPERTY_READ),
+		.chan = chan,
+		.prop = prop,
+		.val = 0xdeadbeef,
+	};
+	int ret = CISP_SEND_OUT(isp, &args);
+
+	*val = args.val;
+
+	return ret;
+}
diff --git a/drivers/media/platform/apple/isp/isp-cmd.h b/drivers/media/platform/apple/isp/isp-cmd.h
new file mode 100644
index 00000000000000..5a3c8cd9177e48
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-cmd.h
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_CMD_H__
+#define __ISP_CMD_H__
+
+#include "isp-drv.h"
+
+#define CISP_CMD_START					     0x0000
+#define CISP_CMD_STOP					     0x0001
+#define CISP_CMD_CONFIG_GET				     0x0003
+#define CISP_CMD_PRINT_ENABLE				     0x0004
+#define CISP_CMD_BUILDINFO				     0x0006
+#define CISP_CMD_GET_BES_PARAM				     0x000f
+#define CISP_CMD_POWER_DOWN				     0x0010
+#define CISP_CMD_SET_ISP_PMU_BASE			     0x0011
+#define CISP_CMD_PMP_CTRL_SET				     0x001c
+#define CISP_CMD_TRACE_ENABLE				     0x001d
+#define CISP_CMD_SUSPEND				     0x0021
+#define CISP_CMD_FID_ENTER				     0x0022
+#define CISP_CMD_FID_EXIT				     0x0023
+#define CISP_CMD_FLICKER_SENSOR_SET			     0x0024
+#define CISP_CMD_CH_START				     0x0100
+#define CISP_CMD_CH_STOP				     0x0101
+#define CISP_CMD_CH_BUFFER_RETURN			     0x0104
+#define CISP_CMD_CH_CAMERA_CONFIG_CURRENT_GET		     0x0105
+#define CISP_CMD_CH_CAMERA_CONFIG_GET			     0x0106
+#define CISP_CMD_CH_CAMERA_CONFIG_SELECT		     0x0107
+#define CISP_CMD_CH_INFO_GET				     0x010d
+#define CISP_CMD_CH_BUFFER_RECYCLE_MODE_SET		     0x010e
+#define CISP_CMD_CH_BUFFER_RECYCLE_START		     0x010f
+#define CISP_CMD_CH_BUFFER_RECYCLE_STOP			     0x0110
+#define CISP_CMD_CH_SET_FILE_LOAD			     0x0111
+#define CISP_CMD_CH_SIF_PIXEL_FORMAT_SET		     0x0115
+#define CISP_CMD_CH_BUFFER_POOL_CONFIG_GET		     0x0116
+#define CISP_CMD_CH_BUFFER_POOL_CONFIG_SET		     0x0117
+#define CISP_CMD_CH_CAMERA_MIPI_FREQUENCY_GET		     0x011a
+#define CISP_CMD_CH_CAMERA_PIX_FREQUENCY_GET		     0x011f
+#define CISP_CMD_CH_PROPERTY_WRITE			     0x0122
+#define CISP_CMD_CH_PROPERTY_READ			     0x0123
+#define CISP_CMD_CH_LOCAL_RAW_BUFFER_ENABLE		     0x0125
+#define CISP_CMD_CH_META_DATA_ENABLE			     0x0126
+#define CISP_CMD_CH_CAMERA_MIPI_FREQUENCY_TOTAL_GET	     0x0133
+#define CISP_CMD_CH_SBS_ENABLE				     0x013b
+#define CISP_CMD_CH_LSC_POLYNOMIAL_COEFF_GET		     0x0142
+#define CISP_CMD_CH_SET_META_DATA_REQUIRED		     0x014f
+#define CISP_CMD_CH_BUFFER_POOL_RETURN			     0x015b
+#define CISP_CMD_CH_CAMERA_AGILE_FREQ_ARRAY_CURRENT_GET	     0x015e
+#define CISP_CMD_CH_AE_START				     0x0200
+#define CISP_CMD_CH_AE_STOP				     0x0201
+#define CISP_CMD_CH_AE_FRAME_RATE_MAX_GET		     0x0207
+#define CISP_CMD_CH_AE_FRAME_RATE_MAX_SET		     0x0208
+#define CISP_CMD_CH_AE_FRAME_RATE_MIN_GET		     0x0209
+#define CISP_CMD_CH_AE_FRAME_RATE_MIN_SET		     0x020a
+#define CISP_CMD_CH_AE_STABILITY_SET			     0x021a
+#define CISP_CMD_CH_AE_STABILITY_TO_STABLE_SET		     0x0229
+#define CISP_CMD_CH_SENSOR_NVM_GET			     0x0501
+#define CISP_CMD_CH_SENSOR_PERMODULE_LSC_INFO_GET	     0x0507
+#define CISP_CMD_CH_SENSOR_PERMODULE_LSC_GRID_GET	     0x0511
+#define CISP_CMD_CH_LPDP_HS_RECEIVER_TUNING_SET		     0x051b
+#define CISP_CMD_CH_FOCUS_LIMITS_GET			     0x0701
+#define CISP_CMD_CH_CROP_GET				     0x0800
+#define CISP_CMD_CH_CROP_SET				     0x0801
+#define CISP_CMD_CH_SCALER_CROP_SET			     0x080a
+#define CISP_CMD_CH_CROP_SCL1_GET			     0x080b
+#define CISP_CMD_CH_CROP_SCL1_SET			     0x080c
+#define CISP_CMD_CH_SCALER_CROP_SCL1_SET		     0x080d
+#define CISP_CMD_CH_ALS_ENABLE				     0x0a1c
+#define CISP_CMD_CH_ALS_DISABLE				     0x0a1d
+#define CISP_CMD_CH_CNR_START				     0x0a2f
+#define CISP_CMD_CH_MBNR_ENABLE				     0x0a3a
+#define CISP_CMD_CH_OUTPUT_CONFIG_SET			     0x0b01
+#define CISP_CMD_CH_OUTPUT_CONFIG_SCL1_SET		     0x0b09
+#define CISP_CMD_CH_PREVIEW_STREAM_SET			     0x0b0d
+#define CISP_CMD_CH_SEMANTIC_VIDEO_ENABLE		     0x0b17
+#define CISP_CMD_CH_SEMANTIC_AWB_ENABLE			     0x0b18
+#define CISP_CMD_CH_FACE_DETECTION_START		     0x0d00
+#define CISP_CMD_CH_FACE_DETECTION_STOP			     0x0d01
+#define CISP_CMD_CH_FACE_DETECTION_CONFIG_GET		     0x0d02
+#define CISP_CMD_CH_FACE_DETECTION_CONFIG_SET		     0x0d03
+#define CISP_CMD_CH_FACE_DETECTION_DISABLE		     0x0d04
+#define CISP_CMD_CH_FACE_DETECTION_ENABLE		     0x0d05
+#define CISP_CMD_CH_FID_START				     0x3000
+#define CISP_CMD_CH_FID_STOP				     0x3001
+#define CISP_CMD_IPC_ENDPOINT_SET2			     0x300c
+#define CISP_CMD_IPC_ENDPOINT_UNSET2			     0x300d
+#define CISP_CMD_SET_DSID_CLR_REG_BASE2			     0x3204
+#define CISP_CMD_SET_DSID_CLR_REG_BASE			     0x3205
+#define CISP_CMD_APPLE_CH_AE_METERING_MODE_SET		     0x8206
+#define CISP_CMD_APPLE_CH_AE_FD_SCENE_METERING_CONFIG_SET    0x820e
+#define CISP_CMD_APPLE_CH_AE_FLICKER_FREQ_UPDATE_CURRENT_SET 0x8212
+#define CISP_CMD_APPLE_CH_TEMPORAL_FILTER_START		     0xc100
+#define CISP_CMD_APPLE_CH_TEMPORAL_FILTER_STOP		     0xc101
+#define CISP_CMD_APPLE_CH_MOTION_HISTORY_START		     0xc102
+#define CISP_CMD_APPLE_CH_MOTION_HISTORY_STOP		     0xc103
+#define CISP_CMD_APPLE_CH_TEMPORAL_FILTER_ENABLE	     0xc113
+#define CISP_CMD_APPLE_CH_TEMPORAL_FILTER_DISABLE	     0xc114
+
+#define CISP_POOL_TYPE_META				     0x0
+#define CISP_POOL_TYPE_RENDERED				     0x1
+#define CISP_POOL_TYPE_FD				     0x2
+#define CISP_POOL_TYPE_RAW				     0x3
+#define CISP_POOL_TYPE_STAT				     0x4
+#define CISP_POOL_TYPE_RAW_AUX				     0x5
+#define CISP_POOL_TYPE_YCC				     0x6
+#define CISP_POOL_TYPE_CAPTURE_FULL_RES			     0x7
+#define CISP_POOL_TYPE_META_CAPTURE			     0x8
+#define CISP_POOL_TYPE_RENDERED_SCL1			     0x9
+#define CISP_POOL_TYPE_STAT_PIXELOUTPUT			     0x11
+#define CISP_POOL_TYPE_FSCL				     0x12
+#define CISP_POOL_TYPE_CAPTURE_FULL_RES_YCC		     0x13
+#define CISP_POOL_TYPE_RENDERED_RAW			     0x14
+#define CISP_POOL_TYPE_CAPTURE_PDC_RAW			     0x16
+#define CISP_POOL_TYPE_FPC_DATA				     0x17
+#define CISP_POOL_TYPE_AICAM_SEG			     0x19
+#define CISP_POOL_TYPE_SPD				     0x1a
+#define CISP_POOL_TYPE_META_DEPTH			     0x1c
+#define CISP_POOL_TYPE_JASPER_DEPTH			     0x1d
+#define CISP_POOL_TYPE_RAW_SIFR				     0x1f
+#define CISP_POOL_TYPE_FEP_THUMBNAIL_DYNAMIC_POOL_RAW	     0x21
+
+#define CISP_COLORSPACE_REC709				     0x1
+#define CISP_OUTPUT_FORMAT_YUV_2PLANE			     0x0
+#define CISP_OUTPUT_FORMAT_YUV_1PLANE			     0x1
+#define CISP_OUTPUT_FORMAT_RGB				     0x2
+#define CISP_BUFFER_RECYCLE_MODE_EMPTY_ONLY		     0x1
+
+struct cmd_start {
+	u64 opcode;
+	u32 mode;
+} __packed;
+static_assert(sizeof(struct cmd_start) == 0xc);
+
+struct cmd_stop {
+	u64 opcode;
+	u32 mode;
+} __packed;
+static_assert(sizeof(struct cmd_stop) == 0xc);
+
+struct cmd_power_down {
+	u64 opcode;
+} __packed;
+static_assert(sizeof(struct cmd_power_down) == 0x8);
+
+struct cmd_suspend {
+	u64 opcode;
+} __packed;
+static_assert(sizeof(struct cmd_suspend) == 0x8);
+
+struct cmd_print_enable {
+	u64 opcode;
+	u32 enable;
+} __packed;
+static_assert(sizeof(struct cmd_print_enable) == 0xc);
+
+struct cmd_trace_enable {
+	u64 opcode;
+	u32 enable;
+} __packed;
+static_assert(sizeof(struct cmd_trace_enable) == 0xc);
+
+struct cmd_config_get {
+	u64 opcode;
+	u32 timestamp_freq;
+	u32 num_channels;
+	u32 unk_10;
+	u32 unk_14;
+	u32 unk_18;
+} __packed;
+static_assert(sizeof(struct cmd_config_get) == 0x1c);
+
+struct cmd_set_isp_pmu_base {
+	u64 opcode;
+	u64 pmu_base;
+} __packed;
+static_assert(sizeof(struct cmd_set_isp_pmu_base) == 0x10);
+
+struct cmd_set_dsid_clr_req_base2 {
+	u64 opcode;
+	u64 dsid_clr_base0;
+	u64 dsid_clr_base1;
+	u64 dsid_clr_base2;
+	u64 dsid_clr_base3;
+	u32 dsid_clr_range0;
+	u32 dsid_clr_range1;
+	u32 dsid_clr_range2;
+	u32 dsid_clr_range3;
+} __packed;
+static_assert(sizeof(struct cmd_set_dsid_clr_req_base2) == 0x38);
+
+struct cmd_set_dsid_clr_req_base {
+	u64 opcode;
+	u64 dsid_clr_base;
+	u32 dsid_clr_range;
+} __packed;
+static_assert(sizeof(struct cmd_set_dsid_clr_req_base) == 0x14);
+
+struct cmd_pmp_ctrl_set {
+	u64 opcode;
+	u64 clock_scratch;
+	u64 clock_base;
+	u8 clock_bit;
+	u8 clock_size;
+	u16 clock_pad;
+	u64 bandwidth_scratch;
+	u64 bandwidth_base;
+	u8 bandwidth_bit;
+	u8 bandwidth_size;
+	u16 bandwidth_pad;
+} __packed;
+static_assert(sizeof(struct cmd_pmp_ctrl_set) == 0x30);
+
+struct cmd_fid_enter {
+	u64 opcode;
+} __packed;
+static_assert(sizeof(struct cmd_fid_enter) == 0x8);
+
+struct cmd_fid_exit {
+	u64 opcode;
+} __packed;
+static_assert(sizeof(struct cmd_fid_exit) == 0x8);
+
+struct cmd_ipc_endpoint_set2 {
+	u64 opcode;
+	u32 unk;
+	u64 addr1;
+	u32 size1;
+	u64 addr2;
+	u32 size2;
+	u64 regs;
+	u32 unk2;
+} __packed;
+static_assert(sizeof(struct cmd_ipc_endpoint_set2) == 0x30);
+
+struct cmd_flicker_sensor_set {
+	u64 opcode;
+	u32 mode;
+} __packed;
+static_assert(sizeof(struct cmd_flicker_sensor_set) == 0xc);
+
+int isp_cmd_start(struct apple_isp *isp, u32 mode);
+int isp_cmd_stop(struct apple_isp *isp, u32 mode);
+int isp_cmd_power_down(struct apple_isp *isp);
+int isp_cmd_suspend(struct apple_isp *isp);
+int isp_cmd_print_enable(struct apple_isp *isp, u32 enable);
+int isp_cmd_trace_enable(struct apple_isp *isp, u32 enable);
+int isp_cmd_config_get(struct apple_isp *isp, struct cmd_config_get *args);
+int isp_cmd_set_isp_pmu_base(struct apple_isp *isp, u64 pmu_base);
+int isp_cmd_set_dsid_clr_req_base(struct apple_isp *isp, u64 dsid_clr_base,
+				  u32 dsid_clr_range);
+int isp_cmd_set_dsid_clr_req_base2(struct apple_isp *isp, u64 dsid_clr_base0,
+				   u64 dsid_clr_base1, u64 dsid_clr_base2,
+				   u64 dsid_clr_base3, u32 dsid_clr_range0,
+				   u32 dsid_clr_range1, u32 dsid_clr_range2,
+				   u32 dsid_clr_range3);
+int isp_cmd_pmp_ctrl_set(struct apple_isp *isp, u64 clock_scratch,
+			 u64 clock_base, u8 clock_bit, u8 clock_size,
+			 u64 bandwidth_scratch, u64 bandwidth_base,
+			 u8 bandwidth_bit, u8 bandwidth_size);
+int isp_cmd_fid_enter(struct apple_isp *isp);
+int isp_cmd_fid_exit(struct apple_isp *isp);
+int isp_cmd_flicker_sensor_set(struct apple_isp *isp, u32 mode);
+
+struct cmd_ch_start {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_start) == 0xc);
+
+struct cmd_ch_stop {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_stop) == 0xc);
+
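+/*
+ * Channel info reported by the firmware. The paired hex values in the
+ * comments below are presumably example readings from two different camera
+ * modules; fields whose meaning is not yet understood are kept as unk_<off>.
+ */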
+struct cmd_ch_info {
+	u64 opcode;
+	u32 chan;
+	u32 unk_c;  // 0x7da0001, 0x7db0001
+	u32 unk_10; // 0x300ac, 0x5006d
+	u32 unk_14; // 0x40007, 0x10007
+	u32 unk_18; // 0x5, 0x2
+	u32 unk_1c; // 0x1, 0x1
+	u32 version;
+	u32 unk_24; // 0x7, 0x9
+	u32 unk_28; // 0x1, 0x1410
+	u32 unk_2c; // 0x7, 0x2
+	u32 pad_30[7];
+	u32 unk_4c; // 0x10000, 0x50000
+	u32 unk_50; // 0x1, 0x1
+	u32 unk_54; // 0x0, 0x0
+	u32 unk_58; // 0x4, 0x4
+	u32 unk_5c; // 0x10, 0x20
+	u32 num_presets;
+	u32 unk_64; // 0x0, 0x0
+	u32 unk_68; // 0x44c0, 0x4680
+	u32 unk_6c; // 0x40, 0x40
+	u32 unk_70; // 0x1, 0x1
+	u32 unk_74; // 0x2, 0x2
+	u32 unk_78; // 0x4000, 0x4000
+	u32 unk_7c; // 0x40, 0x40
+	u32 unk_80; // 0x1, 0x1
+	u32 pad_84[2];
+	u32 unk_8c; // 0x36, 0x36
+	u32 pad_90[2];
+	u32 timestamp_freq;
+	u16 pad_9c;
+	char module_sn[20];
+	u16 pad_b0;
+	u32 unk_b4; // 0x8, 0x8
+	u32 pad_b8[2];
+	u32 unk_c0; // 0x4, 0x1
+	u32 unk_c4; // 0x0, 0x0
+	u32 unk_c8; // 0x0, 0x100
+	u32 pad_cc[4];
+	u32 unk_dc; // 0xff0000, 0xff0000
+	u32 unk_e0; // 0xc00, 0xc00
+	u32 unk_e4; // 0x0, 0x0
+	u32 unk_e8; // 0x1c, 0x1c
+	u32 unk_ec; // 0x640, 0x680
+	u32 unk_f0; // 0x4, 0x4
+	u32 unk_f4; // 0x4, 0x4
+	u32 pad_f8[6];
+	u32 unk_110; // 0x0, 0x7800000
+	u32 unk_114; // 0x0, 0x780
+} __packed;
+static_assert(sizeof(struct cmd_ch_info) == 0x118);
+
+struct cmd_ch_camera_config {
+	u64 opcode;
+	u32 chan;
+	u32 preset;
+	u16 in_width;
+	u16 in_height;
+	u16 out_width;
+	u16 out_height;
+	u32 unk_28;
+	u32 unk_2c;
+	u32 unk_30[16];
+	u32 sensor_clk;
+	u32 unk_64[4];
+	u32 timestamp_freq;
+	u32 unk_78[2];
+	u32 unk_80[16];
+	u32 in_width2; // repeated in u32??
+	u32 in_height2;
+	u32 unk_c8[3];
+	u32 out_width2;
+	u32 out_height2;
+} __packed;
+static_assert(sizeof(struct cmd_ch_camera_config) == 0xdc);
+
+struct cmd_ch_camera_config_select {
+	u64 opcode;
+	u32 chan;
+	u32 preset;
+} __packed;
+static_assert(sizeof(struct cmd_ch_camera_config_select) == 0x10);
+
+struct cmd_ch_set_file_load {
+	u64 opcode;
+	u32 chan;
+	u32 addr;
+	u32 size;
+} __packed;
+static_assert(sizeof(struct cmd_ch_set_file_load) == 0x14);
+
+struct cmd_ch_set_file_load64 {
+	u64 opcode;
+	u32 chan;
+	u64 addr;
+	u32 size;
+} __packed;
+static_assert(sizeof(struct cmd_ch_set_file_load64) == 0x18);
+
+struct cmd_ch_buffer_return {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_buffer_return) == 0xc);
+
+struct cmd_ch_sbs_enable {
+	u64 opcode;
+	u32 chan;
+	u32 enable;
+} __packed;
+static_assert(sizeof(struct cmd_ch_sbs_enable) == 0x10);
+
+struct cmd_ch_crop_set {
+	u64 opcode;
+	u32 chan;
+	u32 x1;
+	u32 y1;
+	u32 x2;
+	u32 y2;
+} __packed;
+static_assert(sizeof(struct cmd_ch_crop_set) == 0x1c);
+
+struct cmd_ch_output_config_set {
+	u64 opcode;
+	u32 chan;
+	u32 width;
+	u32 height;
+	u32 colorspace;
+	u32 format;
+	u32 strides[3];
+	u32 padding_rows;
+	u32 unk_h0;
+	u32 compress;
+	u32 unk_w2;
+} __packed;
+static_assert(sizeof(struct cmd_ch_output_config_set) == 0x38);
+
+struct cmd_ch_preview_stream_set {
+	u64 opcode;
+	u32 chan;
+	u32 stream;
+} __packed;
+static_assert(sizeof(struct cmd_ch_preview_stream_set) == 0x10);
+
+struct cmd_ch_als_disable {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_als_disable) == 0xc);
+
+struct cmd_ch_cnr_start {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_cnr_start) == 0xc);
+
+struct cmd_ch_mbnr_enable {
+	u64 opcode;
+	u32 chan;
+	u32 use_case;
+	u32 mode;
+	u32 enable_chroma;
+} __packed;
+static_assert(sizeof(struct cmd_ch_mbnr_enable) == 0x18);
+
+struct cmd_ch_sif_pixel_format_set {
+	u64 opcode;
+	u32 chan;
+	u8 format;
+	u8 type;
+	u16 compress;
+	u32 unk_10;
+} __packed;
+static_assert(sizeof(struct cmd_ch_sif_pixel_format_set) == 0x14);
+
+struct cmd_ch_lpdp_hs_receiver_tuning_set {
+	u64 opcode;
+	u32 chan;
+	u32 unk1;
+	u32 unk2;
+} __packed;
+static_assert(sizeof(struct cmd_ch_lpdp_hs_receiver_tuning_set) == 0x14);
+
+struct cmd_ch_property_write {
+	u64 opcode;
+	u32 chan;
+	u32 prop;
+	u32 val;
+	u32 unk1;
+	u32 unk2;
+} __packed;
+static_assert(sizeof(struct cmd_ch_property_write) == 0x1c);
+
+int isp_cmd_ch_start(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_stop(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_info_get(struct apple_isp *isp, u32 chan,
+			struct cmd_ch_info *args);
+int isp_cmd_ch_camera_config_get(struct apple_isp *isp, u32 chan, u32 preset,
+				 struct cmd_ch_camera_config *args);
+int isp_cmd_ch_camera_config_current_get(struct apple_isp *isp, u32 chan,
+					 struct cmd_ch_camera_config *args);
+int isp_cmd_ch_camera_config_select(struct apple_isp *isp, u32 chan,
+				    u32 preset);
+int isp_cmd_ch_set_file_load(struct apple_isp *isp, u32 chan, u64 addr,
+			     u32 size);
+int isp_cmd_ch_buffer_return(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_sbs_enable(struct apple_isp *isp, u32 chan, u32 enable);
+int isp_cmd_ch_crop_set(struct apple_isp *isp, u32 chan, u32 x1, u32 y1, u32 x2,
+			u32 y2);
+int isp_cmd_ch_output_config_set(struct apple_isp *isp, u32 chan, u32 width,
+				 u32 height, u32 strides[3], u32 colorspace, u32 format);
+int isp_cmd_ch_preview_stream_set(struct apple_isp *isp, u32 chan, u32 stream);
+int isp_cmd_ch_als_disable(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_cnr_start(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_mbnr_enable(struct apple_isp *isp, u32 chan, u32 use_case,
+			   u32 mode, u32 enable_chroma);
+int isp_cmd_ch_sif_pixel_format_set(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_lpdp_hs_receiver_tuning_set(struct apple_isp *isp, u32 chan, u32 unk1, u32 unk2);
+
+int isp_cmd_ch_property_read(struct apple_isp *isp, u32 chan, u32 prop, u32 *val);
+int isp_cmd_ch_property_write(struct apple_isp *isp, u32 chan, u32 prop, u32 val);
+
+enum isp_mbnr_mode {
+	ISP_MBNR_MODE_DISABLE = 0,
+	ISP_MBNR_MODE_ENABLE = 1,
+	ISP_MBNR_MODE_BYPASS = 2,
+};
+
+struct cmd_ch_buffer_recycle_mode_set {
+	u64 opcode;
+	u32 chan;
+	u32 mode;
+} __packed;
+static_assert(sizeof(struct cmd_ch_buffer_recycle_mode_set) == 0x10);
+
+struct cmd_ch_buffer_recycle_start {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_buffer_recycle_start) == 0xc);
+
+struct cmd_ch_buffer_pool_config_set {
+	u64 opcode;
+	u32 chan;
+	u16 type;
+	u16 count;
+	u32 meta_size0;
+	u32 meta_size1;
+	u64 unk0;
+	u64 unk1;
+	u64 unk2;
+	u32 zero[0x19];
+	u32 data_blocks;
+	u32 compress;
+} __packed;
+static_assert(sizeof(struct cmd_ch_buffer_pool_config_set) == 0x9c);
+
+struct cmd_ch_buffer_pool_return {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_ch_buffer_pool_return) == 0xc);
+
+int isp_cmd_ch_buffer_recycle_mode_set(struct apple_isp *isp, u32 chan,
+				       u32 mode);
+int isp_cmd_ch_buffer_recycle_start(struct apple_isp *isp, u32 chan);
+int isp_cmd_ch_buffer_pool_config_set(struct apple_isp *isp, u32 chan,
+				      u16 type);
+int isp_cmd_ch_buffer_pool_config_get(struct apple_isp *isp, u32 chan,
+				      u16 type);
+int isp_cmd_ch_buffer_pool_return(struct apple_isp *isp, u32 chan);
+
+struct cmd_apple_ch_temporal_filter_start {
+	u64 opcode;
+	u32 chan;
+	u32 unk_c;
+	u32 unk_10;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_temporal_filter_start) == 0x14);
+
+struct cmd_apple_ch_temporal_filter_stop {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_temporal_filter_stop) == 0xc);
+
+struct cmd_apple_ch_motion_history_start {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_motion_history_start) == 0xc);
+
+struct cmd_apple_ch_motion_history_stop {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_motion_history_stop) == 0xc);
+
+struct cmd_apple_ch_temporal_filter_enable {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_temporal_filter_enable) == 0xc);
+
+struct cmd_apple_ch_temporal_filter_disable {
+	u64 opcode;
+	u32 chan;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_temporal_filter_disable) == 0xc);
+
+int isp_cmd_apple_ch_temporal_filter_start(struct apple_isp *isp, u32 chan, u32 arg);
+int isp_cmd_apple_ch_temporal_filter_stop(struct apple_isp *isp, u32 chan);
+int isp_cmd_apple_ch_motion_history_start(struct apple_isp *isp, u32 chan);
+int isp_cmd_apple_ch_motion_history_stop(struct apple_isp *isp, u32 chan);
+int isp_cmd_apple_ch_temporal_filter_enable(struct apple_isp *isp, u32 chan);
+int isp_cmd_apple_ch_temporal_filter_disable(struct apple_isp *isp, u32 chan);
+
+struct cmd_ch_ae_stability_set {
+	u64 opcode;
+	u32 chan;
+	u32 stability;
+} __packed;
+static_assert(sizeof(struct cmd_ch_ae_stability_set) == 0x10);
+
+struct cmd_ch_ae_stability_to_stable_set {
+	u64 opcode;
+	u32 chan;
+	u32 stability;
+} __packed;
+static_assert(sizeof(struct cmd_ch_ae_stability_to_stable_set) == 0x10);
+
+struct cmd_ch_ae_frame_rate_max_get {
+	u64 opcode;
+	u32 chan;
+	u32 framerate;
+} __packed;
+static_assert(sizeof(struct cmd_ch_ae_frame_rate_max_get) == 0x10);
+
+struct cmd_ch_ae_frame_rate_max_set {
+	u64 opcode;
+	u32 chan;
+	u32 framerate;
+} __packed;
+static_assert(sizeof(struct cmd_ch_ae_frame_rate_max_set) == 0x10);
+
+struct cmd_ch_ae_frame_rate_min_set {
+	u64 opcode;
+	u32 chan;
+	u32 framerate;
+} __packed;
+static_assert(sizeof(struct cmd_ch_ae_frame_rate_min_set) == 0x10);
+
+struct cmd_apple_ch_ae_fd_scene_metering_config_set {
+	u64 opcode;
+	u32 chan;
+	u32 unk_c;
+	u32 unk_10;
+	u32 unk_14;
+	u32 unk_18;
+	u32 unk_1c;
+	u32 unk_20;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_ae_fd_scene_metering_config_set) ==
+	      0x24);
+
+struct cmd_apple_ch_ae_metering_mode_set {
+	u64 opcode;
+	u32 chan;
+	u32 mode;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_ae_metering_mode_set) == 0x10);
+
+struct cmd_apple_ch_ae_flicker_freq_update_current_set {
+	u64 opcode;
+	u32 chan;
+	u32 freq;
+} __packed;
+static_assert(sizeof(struct cmd_apple_ch_ae_flicker_freq_update_current_set) ==
+	      0x10);
+
+int isp_cmd_ch_ae_stability_set(struct apple_isp *isp, u32 chan, u32 stability);
+int isp_cmd_ch_ae_stability_to_stable_set(struct apple_isp *isp, u32 chan,
+					  u32 stability);
+int isp_cmd_ch_ae_frame_rate_max_get(struct apple_isp *isp, u32 chan,
+				     struct cmd_ch_ae_frame_rate_max_get *args);
+int isp_cmd_ch_ae_frame_rate_max_set(struct apple_isp *isp, u32 chan,
+				     u32 framerate);
+int isp_cmd_ch_ae_frame_rate_min_set(struct apple_isp *isp, u32 chan,
+				     u32 framerate);
+int isp_cmd_apple_ch_ae_fd_scene_metering_config_set(struct apple_isp *isp,
+						     u32 chan);
+int isp_cmd_apple_ch_ae_metering_mode_set(struct apple_isp *isp, u32 chan,
+					  u32 mode);
+int isp_cmd_apple_ch_ae_flicker_freq_update_current_set(struct apple_isp *isp,
+							u32 chan, u32 freq);
+
+struct cmd_ch_semantic_video_enable {
+	u64 opcode;
+	u32 chan;
+	u32 enable;
+} __packed;
+static_assert(sizeof(struct cmd_ch_semantic_video_enable) == 0x10);
+
+struct cmd_ch_semantic_awb_enable {
+	u64 opcode;
+	u32 chan;
+	u32 enable;
+} __packed;
+static_assert(sizeof(struct cmd_ch_semantic_awb_enable) == 0x10);
+
+int isp_cmd_ch_semantic_video_enable(struct apple_isp *isp, u32 chan,
+				     u32 enable);
+int isp_cmd_ch_semantic_awb_enable(struct apple_isp *isp, u32 chan, u32 enable);
+
+#endif /* __ISP_CMD_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-drv.c b/drivers/media/platform/apple/isp/isp-drv.c
new file mode 100644
index 00000000000000..848f7abd535a7f
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-drv.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple Image Signal Processor driver
+ *
+ * Copyright (C) 2023 The Asahi Linux Contributors
+ */
+
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+#include "isp-cam.h"
+#include "isp-fw.h"
+#include "isp-iommu.h"
+#include "isp-v4l2.h"
+
+static void apple_isp_detach_genpd(struct apple_isp *isp)
+{
+	if (isp->pd_count <= 1)
+		return;
+
+	for (int i = isp->pd_count - 1; i >= 0; i--) {
+		if (isp->pd_link[i])
+			device_link_del(isp->pd_link[i]);
+		if (!IS_ERR_OR_NULL(isp->pd_dev[i]))
+			dev_pm_domain_detach(isp->pd_dev[i], true);
+	}
+}
+
+static int apple_isp_attach_genpd(struct apple_isp *isp)
+{
+	struct device *dev = isp->dev;
+
+	isp->pd_count = of_count_phandle_with_args(
+		dev->of_node, "power-domains", "#power-domain-cells");
+	if (isp->pd_count <= 1)
+		return 0;
+
+	isp->pd_dev = devm_kcalloc(dev, isp->pd_count, sizeof(*isp->pd_dev),
+				   GFP_KERNEL);
+	if (!isp->pd_dev)
+		return -ENOMEM;
+
+	isp->pd_link = devm_kcalloc(dev, isp->pd_count, sizeof(*isp->pd_link),
+				    GFP_KERNEL);
+	if (!isp->pd_link)
+		return -ENOMEM;
+
+	for (int i = 0; i < isp->pd_count; i++) {
+		int flags = DL_FLAG_STATELESS;
+
+		/* Primary power domain uses RPM integration */
+		if (i == 0)
+			flags |= DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE;
+
+		isp->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+		if (IS_ERR(isp->pd_dev[i])) {
+			apple_isp_detach_genpd(isp);
+			return PTR_ERR(isp->pd_dev[i]);
+		}
+
+		isp->pd_link[i] =
+			device_link_add(dev, isp->pd_dev[i], flags);
+
+		if (!isp->pd_link[i]) {
+			apple_isp_detach_genpd(isp);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
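+/*
+ * The firmware heap is described by the "heap" memory-region; its end
+ * becomes fw.heap_top, and the DART address space above it (bounded by
+ * "apple,dart-vm-size") is handed to a drm_mm allocator for runtime
+ * surface allocations.
+ */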
+static int apple_isp_init_iommu(struct apple_isp *isp)
+{
+	struct device *dev = isp->dev;
+	phys_addr_t heap_base;
+	size_t heap_size;
+	u64 vm_size;
+	int err;
+	int idx;
+	int size;
+	struct device_node *mem_node;
+	const __be32 *maps, *end;
+
+	isp->domain = iommu_get_domain_for_dev(isp->dev);
+	if (!isp->domain)
+		return -ENODEV;
+	isp->shift = __ffs(isp->domain->pgsize_bitmap);
+
+	idx = of_property_match_string(dev->of_node, "memory-region-names",
+				       "heap");
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+	if (!mem_node) {
+		dev_err(dev, "No memory-region found for heap\n");
+		return -ENODEV;
+	}
+
+	maps = of_get_property(mem_node, "iommu-addresses", &size);
+	if (!maps || !size) {
+		dev_err(dev, "No valid iommu-addresses found for heap\n");
+		of_node_put(mem_node);
+		return -ENODEV;
+	}
+
+	end = maps + size / sizeof(__be32);
+
+	while (maps < end) {
+		maps++;
+		maps = of_translate_dma_region(dev->of_node, maps, &heap_base,
+					       &heap_size);
+	}
+	of_node_put(mem_node);
+
+	isp->fw.heap_top = heap_base + heap_size;
+
+	err = of_property_read_u64(dev->of_node, "apple,dart-vm-size",
+				   &vm_size);
+	if (err) {
+		dev_err(dev, "failed to read 'apple,dart-vm-size': %d\n", err);
+		return err;
+	}
+
+	// FIXME: refactor this, maybe use regular iova stuff?
+	drm_mm_init(&isp->iovad, isp->fw.heap_top,
+		    vm_size - (heap_base & 0xffffffff));
+
+	return 0;
+}
+
+static void apple_isp_free_iommu(struct apple_isp *isp)
+{
+	drm_mm_takedown(&isp->iovad);
+}
+
+static int isp_of_read_coord(struct device *dev, struct device_node *np,
+			     const char *prop, struct coord *val)
+{
+	u32 xy[2];
+	int ret;
+
+	ret = of_property_read_u32_array(np, prop, xy, 2);
+	if (ret) {
+		dev_err(dev, "failed to read '%s' property\n", prop);
+		return ret;
+	}
+
+	val->x = xy[0];
+	val->y = xy[1];
+	return 0;
+}
+
+static int apple_isp_init_presets(struct apple_isp *isp)
+{
+	struct device *dev = isp->dev;
+	struct isp_preset *preset;
+	int err = 0;
+
+	struct device_node *np __free(device_node) =
+		of_get_child_by_name(dev->of_node, "sensor-presets");
+	if (!np) {
+		dev_err(dev, "failed to get DT node 'sensor-presets'\n");
+		return -EINVAL;
+	}
+
+	isp->num_presets = of_get_child_count(np);
+	if (!isp->num_presets) {
+		dev_err(dev, "no sensor presets found\n");
+		return -EINVAL;
+	}
+
+	isp->presets = devm_kcalloc(dev, isp->num_presets,
+				    sizeof(*isp->presets), GFP_KERNEL);
+	if (!isp->presets)
+		return -ENOMEM;
+
+	preset = isp->presets;
+	for_each_child_of_node_scoped(np, child) {
+		u32 xywh[4];
+
+		err = of_property_read_u32(child, "apple,config-index",
+					   &preset->index);
+		if (err) {
+			dev_err(dev, "no apple,config-index property\n");
+			return err;
+		}
+
+		err = isp_of_read_coord(dev, child, "apple,input-size",
+					&preset->input_dim);
+		if (err)
+			return err;
+		err = isp_of_read_coord(dev, child, "apple,output-size",
+					&preset->output_dim);
+		if (err)
+			return err;
+
+		err = of_property_read_u32_array(child, "apple,crop", xywh, 4);
+		if (err) {
+			dev_err(dev, "failed to read 'apple,crop' property\n");
+			return err;
+		}
+		preset->crop_offset.x = xywh[0];
+		preset->crop_offset.y = xywh[1];
+		preset->crop_size.x = xywh[2];
+		preset->crop_size.y = xywh[3];
+
+		preset++;
+	}
+
+	return 0;
+}
+
+static const char *isp_fw2str(enum isp_firmware_version version)
+{
+	switch (version) {
+	case ISP_FIRMWARE_V_12_3:
+		return "12.3";
+	case ISP_FIRMWARE_V_12_4:
+		return "12.4";
+	case ISP_FIRMWARE_V_13_5:
+		return "13.5";
+	default:
+		return "unknown";
+	}
+}
+
+#define ISP_FW_VERSION_MIN_LEN	3
+#define ISP_FW_VERSION_MAX_LEN	5
+
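+/*
+ * The firmware version properties are u32 arrays with three to five cells;
+ * only the known three-cell versions map to an isp_firmware_version,
+ * everything else is logged and treated as unknown.
+ */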
+static enum isp_firmware_version isp_read_fw_version(struct device *dev,
+						     const char *name)
+{
+	u32 ver[ISP_FW_VERSION_MAX_LEN];
+	int len = of_property_read_variable_u32_array(dev->of_node, name, ver,
+						      ISP_FW_VERSION_MIN_LEN,
+						      ISP_FW_VERSION_MAX_LEN);
+
+	switch (len) {
+	case 3:
+		if (ver[0] == 12 && ver[1] == 3 && ver[2] <= 1)
+			return ISP_FIRMWARE_V_12_3;
+		else if (ver[0] == 12 && ver[1] == 4 && ver[2] == 0)
+			return ISP_FIRMWARE_V_12_4;
+		else if (ver[0] == 13 && ver[1] == 5 && ver[2] == 0)
+			return ISP_FIRMWARE_V_13_5;
+
+		dev_warn(dev, "unknown %s: %d.%d.%d\n", name, ver[0], ver[1], ver[2]);
+		break;
+	case 4:
+		dev_warn(dev, "unknown %s: %d.%d.%d.%d\n", name, ver[0], ver[1],
+			 ver[2], ver[3]);
+		break;
+	case 5:
+		dev_warn(dev, "unknown %s: %d.%d.%d.%d.%d\n", name, ver[0],
+			 ver[1], ver[2], ver[3], ver[4]);
+		break;
+	default:
+		dev_warn(dev, "could not parse %s: %d\n", name, len);
+		break;
+	}
+
+	return ISP_FIRMWARE_V_UNKNOWN;
+}
+
+static enum isp_firmware_version isp_check_firmware_version(struct device *dev)
+{
+	enum isp_firmware_version version, compat;
+
+	/* firmware version is just informative */
+	version = isp_read_fw_version(dev, "apple,firmware-version");
+	compat = isp_read_fw_version(dev, "apple,firmware-compat");
+
+	dev_info(dev, "ISP firmware-compat: %s (FW: %s)\n", isp_fw2str(compat),
+		 isp_fw2str(version));
+
+	return compat;
+}
+
+static int apple_isp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct apple_isp *isp;
+	int err;
+
+	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(42));
+	if (err)
+		return err;
+
+	isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+	if (!isp)
+		return -ENOMEM;
+
+	isp->dev = dev;
+	isp->hw = of_device_get_match_data(dev);
+	platform_set_drvdata(pdev, isp);
+	dev_set_drvdata(dev, isp);
+
+	/*
+	 * Differences between firmware versions are rather minor so try to
+	 * work with unknown firmware.
+	 */
+	isp->fw_compat = isp_check_firmware_version(dev);
+
+	err = of_property_read_u32(dev->of_node, "apple,platform-id",
+				   &isp->platform_id);
+	if (err) {
+		dev_err(dev, "failed to get 'apple,platform-id' property: %d\n",
+			err);
+		return err;
+	}
+
+	err = of_property_read_u32(dev->of_node, "apple,temporal-filter",
+				   &isp->temporal_filter);
+	if (err)
+		isp->temporal_filter = 0;
+
+	err = apple_isp_init_presets(isp);
+	if (err) {
+		dev_err(dev, "failed to initialize presets\n");
+		return err;
+	}
+
+	err = apple_isp_attach_genpd(isp);
+	if (err) {
+		dev_err(dev, "failed to attach power domains\n");
+		return err;
+	}
+
+	isp->coproc = devm_platform_ioremap_resource_byname(pdev, "coproc");
+	if (IS_ERR(isp->coproc)) {
+		err = PTR_ERR(isp->coproc);
+		goto detach_genpd;
+	}
+
+	isp->mbox = devm_platform_ioremap_resource_byname(pdev, "mbox");
+	if (IS_ERR(isp->mbox)) {
+		err = PTR_ERR(isp->mbox);
+		goto detach_genpd;
+	}
+
+	isp->gpio = devm_platform_ioremap_resource_byname(pdev, "gpio");
+	if (IS_ERR(isp->gpio)) {
+		err = PTR_ERR(isp->gpio);
+		goto detach_genpd;
+	}
+
+	isp->mbox2 = devm_platform_ioremap_resource_byname(pdev, "mbox2");
+	if (IS_ERR(isp->mbox2)) {
+		err = PTR_ERR(isp->mbox2);
+		goto detach_genpd;
+	}
+
+	isp->irq = platform_get_irq(pdev, 0);
+	if (isp->irq < 0) {
+		err = isp->irq;
+		goto detach_genpd;
+	}
+	if (!isp->irq) {
+		err = -ENODEV;
+		goto detach_genpd;
+	}
+
+	mutex_init(&isp->iovad_lock);
+	mutex_init(&isp->video_lock);
+	spin_lock_init(&isp->buf_lock);
+	init_waitqueue_head(&isp->wait);
+	INIT_LIST_HEAD(&isp->gc);
+	INIT_LIST_HEAD(&isp->bufs_pending);
+	INIT_LIST_HEAD(&isp->bufs_submitted);
+	isp->wq = alloc_workqueue("apple-isp-wq", WQ_UNBOUND, 0);
+	if (!isp->wq) {
+		dev_err(dev, "failed to create workqueue\n");
+		err = -ENOMEM;
+		goto detach_genpd;
+	}
+
+	err = apple_isp_init_iommu(isp);
+	if (err) {
+		dev_err(dev, "failed to init iommu: %d\n", err);
+		goto destroy_wq;
+	}
+
+	err = apple_isp_alloc_firmware_surface(isp);
+	if (err) {
+		dev_err(dev, "failed to alloc firmware surface: %d\n", err);
+		goto free_iommu;
+	}
+
+	pm_runtime_enable(dev);
+
+	err = apple_isp_detect_camera(isp);
+	if (err) {
+		dev_err(dev, "failed to detect camera: %d\n", err);
+		goto free_surface;
+	}
+
+	err = apple_isp_setup_video(isp);
+	if (err) {
+		dev_err(dev, "failed to register video device: %d\n", err);
+		goto free_surface;
+	}
+
+	dev_info(dev, "apple-isp probe!\n");
+
+	return 0;
+
+free_surface:
+	pm_runtime_disable(dev);
+	apple_isp_free_firmware_surface(isp);
+free_iommu:
+	apple_isp_free_iommu(isp);
+destroy_wq:
+	destroy_workqueue(isp->wq);
+detach_genpd:
+	apple_isp_detach_genpd(isp);
+	return err;
+}
+
+static void apple_isp_remove(struct platform_device *pdev)
+{
+	struct apple_isp *isp = platform_get_drvdata(pdev);
+
+	apple_isp_remove_video(isp);
+	pm_runtime_disable(isp->dev);
+	apple_isp_free_firmware_surface(isp);
+	apple_isp_free_iommu(isp);
+	destroy_workqueue(isp->wq);
+	apple_isp_detach_genpd(isp);
+}
+
+static const struct apple_isp_hw apple_isp_hw_t8103 = {
+	.gen = ISP_GEN_T8103,
+	.pmu_base = 0x23b704000,
+
+	.dsid_count = 4,
+	.dsid_clr_base0 = 0x200014000,
+	.dsid_clr_base1 = 0x200054000,
+	.dsid_clr_base2 = 0x200094000,
+	.dsid_clr_base3 = 0x2000d4000,
+	.dsid_clr_range0 = 0x1000,
+	.dsid_clr_range1 = 0x1000,
+	.dsid_clr_range2 = 0x1000,
+	.dsid_clr_range3 = 0x1000,
+
+	.clock_scratch = 0x23b738010,
+	.clock_base = 0x23bc3c000,
+	.clock_bit = 0x1,
+	.clock_size = 0x4,
+	.bandwidth_scratch = 0x23b73800c,
+	.bandwidth_base = 0x23bc3c000,
+	.bandwidth_bit = 0x0,
+	.bandwidth_size = 0x4,
+
+	.scl1 = false,
+	.lpdp = false,
+	.meta_size = ISP_META_SIZE_T8103,
+};
+
+static const struct apple_isp_hw apple_isp_hw_t6000 = {
+	.gen = ISP_GEN_T8103,
+	.pmu_base = 0x28e584000,
+
+	.dsid_count = 1,
+	.dsid_clr_base0 = 0x200014000,
+	.dsid_clr_base1 = 0x200054000,
+	.dsid_clr_base2 = 0x200094000,
+	.dsid_clr_base3 = 0x2000d4000,
+	.dsid_clr_range0 = 0x1000,
+	.dsid_clr_range1 = 0x1000,
+	.dsid_clr_range2 = 0x1000,
+	.dsid_clr_range3 = 0x1000,
+
+	.clock_scratch = 0x28e3d0868,
+	.clock_base = 0x0,
+	.clock_bit = 0x0,
+	.clock_size = 0x8,
+	.bandwidth_scratch = 0x28e3d0980,
+	.bandwidth_base = 0x0,
+	.bandwidth_bit = 0x0,
+	.bandwidth_size = 0x8,
+
+	.scl1 = false,
+	.lpdp = false,
+	.meta_size = ISP_META_SIZE_T8103,
+};
+
+static const struct apple_isp_hw apple_isp_hw_t8112 = {
+	.gen = ISP_GEN_T8112,
+	.pmu_base = 0x23b704000,
+
+	.dsid_count = 1,
+	.dsid_clr_base0 = 0x200f14000,
+	.dsid_clr_range0 = 0x1000,
+
+	.clock_scratch = 0x23b3d0560,
+	.clock_base = 0x0,
+	.clock_bit = 0x0,
+	.clock_size = 0x8,
+	.bandwidth_scratch = 0x23b3d05d0,
+	.bandwidth_base = 0x0,
+	.bandwidth_bit = 0x0,
+	.bandwidth_size = 0x8,
+
+	.scl1 = false,
+	.lpdp = false,
+	.meta_size = ISP_META_SIZE_T8112,
+};
+
+static const struct apple_isp_hw apple_isp_hw_t6020 = {
+	.gen = ISP_GEN_T8112,
+	.pmu_base = 0x290284000,
+
+	.dsid_count = 1,
+	.dsid_clr_base0 = 0x200f14000,
+	.dsid_clr_range0 = 0x1000,
+
+	.clock_scratch = 0x28e3d10a8,
+	.clock_base = 0x0,
+	.clock_bit = 0x0,
+	.clock_size = 0x8,
+	.bandwidth_scratch = 0x28e3d1200,
+	.bandwidth_base = 0x0,
+	.bandwidth_bit = 0x0,
+	.bandwidth_size = 0x8,
+
+	.scl1 = true,
+	.lpdp = true,
+	.meta_size = ISP_META_SIZE_T8112,
+};
+
+static const struct of_device_id apple_isp_of_match[] = {
+	{ .compatible = "apple,t8103-isp", .data = &apple_isp_hw_t8103 },
+	{ .compatible = "apple,t8112-isp", .data = &apple_isp_hw_t8112 },
+	{ .compatible = "apple,t6000-isp", .data = &apple_isp_hw_t6000 },
+	{ .compatible = "apple,t6020-isp", .data = &apple_isp_hw_t6020 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, apple_isp_of_match);
+
+static __maybe_unused int apple_isp_runtime_suspend(struct device *dev)
+{
+	/* RPM sleep is called when the V4L2 file handle is closed */
+	return 0;
+}
+
+static __maybe_unused int apple_isp_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static __maybe_unused int apple_isp_suspend(struct device *dev)
+{
+	struct apple_isp *isp = dev_get_drvdata(dev);
+
+	/*
+	 * We must restore V4L2 context on system resume. If we were
+	 * streaming before, we (essentially) stop streaming and start
+	 * streaming again.
+	 */
+	apple_isp_video_suspend(isp);
+
+	return 0;
+}
+
+static __maybe_unused int apple_isp_resume(struct device *dev)
+{
+	struct apple_isp *isp = dev_get_drvdata(dev);
+
+	apple_isp_video_resume(isp);
+
+	return 0;
+}
+
+static const struct dev_pm_ops apple_isp_pm_ops = {
+	SYSTEM_SLEEP_PM_OPS(apple_isp_suspend, apple_isp_resume)
+	RUNTIME_PM_OPS(apple_isp_runtime_suspend, apple_isp_runtime_resume, NULL)
+};
+
+static struct platform_driver apple_isp_driver = {
+	.driver	= {
+		.name		= "apple-isp",
+		.of_match_table	= apple_isp_of_match,
+		.pm		= pm_ptr(&apple_isp_pm_ops),
+	},
+	.probe	= apple_isp_probe,
+	.remove	= apple_isp_remove,
+};
+module_platform_driver(apple_isp_driver);
+
+MODULE_AUTHOR("Eileen Yoon <eyn@gmx.com>");
+MODULE_DESCRIPTION("Apple ISP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/apple/isp/isp-drv.h b/drivers/media/platform/apple/isp/isp-drv.h
new file mode 100644
index 00000000000000..96a1d0b39f860d
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-drv.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_DRV_H__
+#define __ISP_DRV_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_mm.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+/* #define APPLE_ISP_DEBUG */
+#define APPLE_ISP_DEVICE_NAME "apple-isp"
+#define APPLE_ISP_CARD_NAME "FaceTime HD Camera"
+
+#define ISP_MAX_CHANNELS      6
+#define ISP_IPC_MESSAGE_SIZE  64
+#define ISP_IPC_FLAG_ACK      0x1
+#define ISP_META_SIZE_T8103   0x4640
+#define ISP_META_SIZE_T8112   0x4840
+
+/* Used to limit the number of user space buffers to the buffer pool config */
+#define ISP_MAX_BUFFERS 16
+
+enum isp_generation {
+	ISP_GEN_T8103,
+	ISP_GEN_T8112,
+};
+
+enum isp_firmware_version {
+	ISP_FIRMWARE_V_UNKNOWN,
+	ISP_FIRMWARE_V_12_3,
+	ISP_FIRMWARE_V_12_4,
+	ISP_FIRMWARE_V_13_5,
+};
+
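+/*
+ * A shared-memory surface mapped through the ISP DART, optionally vmap'ed
+ * for CPU access and tracked on the gc list for bulk teardown.
+ */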
+struct isp_surf {
+	struct drm_mm_node *mm;
+	struct list_head head;
+	u64 size;
+	u64 type;
+	u32 num_pages;
+	struct page **pages;
+	struct sg_table sgt;
+	dma_addr_t iova;
+	void *virt;
+	refcount_t refcount;
+	bool gc;
+	bool submitted;
+};
+
+struct isp_message {
+	u64 arg0;
+	u64 arg1;
+	u64 arg2;
+	u64 arg3;
+	u64 arg4;
+	u64 arg5;
+	u64 arg6;
+	u64 arg7;
+} __packed;
+static_assert(sizeof(struct isp_message) == ISP_IPC_MESSAGE_SIZE);
+
+struct isp_channel {
+	char *name;
+	u32 type;
+	u32 src;
+	u32 num;
+	u64 size;
+	dma_addr_t iova;
+	void *virt;
+	u32 doorbell;
+	u32 cursor;
+	struct mutex lock;
+	struct isp_message req;
+	struct isp_message rsp;
+	const struct isp_chan_ops *ops;
+};
+
+struct coord {
+	u32 x;
+	u32 y;
+};
+
+struct isp_preset {
+	u32 index;
+	struct coord input_dim;
+	struct coord output_dim;
+	struct coord crop_offset;
+	struct coord crop_size;
+};
+
+struct apple_isp_hw {
+	enum isp_generation gen;
+	u64 pmu_base;
+
+	int dsid_count;
+	u64 dsid_clr_base0;
+	u64 dsid_clr_base1;
+	u64 dsid_clr_base2;
+	u64 dsid_clr_base3;
+	u32 dsid_clr_range0;
+	u32 dsid_clr_range1;
+	u32 dsid_clr_range2;
+	u32 dsid_clr_range3;
+
+	u64 clock_scratch;
+	u64 clock_base;
+	u8 clock_bit;
+	u8 clock_size;
+	u64 bandwidth_scratch;
+	u64 bandwidth_base;
+	u8 bandwidth_bit;
+	u8 bandwidth_size;
+
+	u32 meta_size;
+	bool scl1;
+	bool lpdp;
+};
+
+enum isp_sensor_id {
+	ISP_IMX248_1820_01,
+	ISP_IMX248_1822_02,
+	ISP_IMX343_5221_02,
+	ISP_IMX354_9251_02,
+	ISP_IMX356_4820_01,
+	ISP_IMX356_4820_02,
+	ISP_IMX364_8720_01,
+	ISP_IMX364_8723_01,
+	ISP_IMX372_3820_01,
+	ISP_IMX372_3820_02,
+	ISP_IMX372_3820_11,
+	ISP_IMX372_3820_12,
+	ISP_IMX405_9720_01,
+	ISP_IMX405_9721_01,
+	ISP_IMX405_9723_01,
+	ISP_IMX414_2520_01,
+	ISP_IMX503_7820_01,
+	ISP_IMX503_7820_02,
+	ISP_IMX505_3921_01,
+	ISP_IMX514_2820_01,
+	ISP_IMX514_2820_02,
+	ISP_IMX514_2820_03,
+	ISP_IMX514_2820_04,
+	ISP_IMX558_1921_01,
+	ISP_IMX558_1922_02,
+	ISP_IMX603_7920_01,
+	ISP_IMX603_7920_02,
+	ISP_IMX603_7921_01,
+	ISP_IMX613_4920_01,
+	ISP_IMX613_4920_02,
+	ISP_IMX614_2921_01,
+	ISP_IMX614_2921_02,
+	ISP_IMX614_2922_02,
+	ISP_IMX633_3622_01,
+	ISP_IMX703_7721_01,
+	ISP_IMX703_7722_01,
+	ISP_IMX713_4721_01,
+	ISP_IMX713_4722_01,
+	ISP_IMX714_2022_01,
+	ISP_IMX772_3721_01,
+	ISP_IMX772_3721_11,
+	ISP_IMX772_3722_01,
+	ISP_IMX772_3723_01,
+	ISP_IMX814_2123_01,
+	ISP_IMX853_7622_01,
+	ISP_IMX913_7523_01,
+	ISP_VD56G0_6221_01,
+	ISP_VD56G0_6222_01,
+};
+
+struct isp_format {
+	enum isp_sensor_id id;
+	u32 version;
+	struct isp_preset *preset;
+	unsigned int num_planes;
+	u32 strides[VB2_MAX_PLANES];
+	size_t plane_size[VB2_MAX_PLANES];
+	size_t total_size;
+};
+
+struct apple_isp {
+	struct device *dev;
+	const struct apple_isp_hw *hw;
+	enum isp_firmware_version fw_compat;
+	u32 platform_id;
+	u32 temporal_filter;
+	struct isp_preset *presets;
+	int num_presets;
+
+	int num_channels;
+	struct isp_format fmts[ISP_MAX_CHANNELS];
+	unsigned int current_ch;
+
+	struct video_device vdev;
+	struct media_device mdev;
+	struct v4l2_device v4l2_dev;
+	struct vb2_queue vbq;
+	struct mutex video_lock;
+	unsigned int sequence;
+	bool multiplanar;
+
+	int pd_count;
+	struct device **pd_dev;
+	struct device_link **pd_link;
+	bool pds_active;
+
+	int irq;
+
+	void __iomem *coproc;
+	void __iomem *mbox;
+	void __iomem *gpio;
+	void __iomem *mbox2;
+
+	struct iommu_domain *domain;
+	unsigned long shift;
+	struct drm_mm iovad; /* TODO iova.c can't allocate bottom-up */
+	struct mutex iovad_lock;
+
+	struct isp_firmware {
+		u64 heap_top;
+	} fw;
+
+	struct isp_surf *ipc_surf;
+	struct isp_surf *extra_surf;
+	struct isp_surf *data_surf;
+	struct isp_surf *log_surf;
+	struct isp_surf *bt_surf;
+	struct isp_surf *meta_surfs[ISP_MAX_BUFFERS];
+	struct list_head gc;
+	struct workqueue_struct *wq;
+
+	int num_ipc_chans;
+	struct isp_channel **ipc_chans;
+	struct isp_channel *chan_tm; /* TERMINAL */
+	struct isp_channel *chan_io; /* IO */
+	struct isp_channel *chan_dg; /* DEBUG */
+	struct isp_channel *chan_bh; /* BUF_H2T */
+	struct isp_channel *chan_bt; /* BUF_T2H */
+	struct isp_channel *chan_sm; /* SHAREDMALLOC */
+	struct isp_channel *chan_it; /* IO_T2H */
+
+	wait_queue_head_t wait;
+	dma_addr_t cmd_iova;
+	void *cmd_virt;
+
+	unsigned long state;
+	spinlock_t buf_lock;
+	struct list_head bufs_pending;
+	struct list_head bufs_submitted;
+};
+
+struct isp_chan_ops {
+	int (*handle)(struct apple_isp *isp, struct isp_channel *chan);
+};
+
+struct isp_buffer {
+	struct vb2_v4l2_buffer vb;
+	struct list_head link;
+	struct isp_surf surfs[VB2_MAX_PLANES];
+};
+
+#define to_isp_buffer(x) container_of((x), struct isp_buffer, vb)
+
+enum {
+	ISP_STATE_STREAMING,
+	ISP_STATE_LOGGING,
+	ISP_STATE_SLEEPING,
+};
+
+#ifdef APPLE_ISP_DEBUG
+#define isp_dbg(isp, fmt, ...) \
+	dev_info((isp)->dev, "[%s] " fmt, __func__, ##__VA_ARGS__)
+#else
+#define isp_dbg(isp, fmt, ...) \
+	dev_dbg((isp)->dev, "[%s] " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+#define isp_err(isp, fmt, ...) \
+	dev_err((isp)->dev, "[%s] " fmt, __func__, ##__VA_ARGS__)
+
+#define isp_get_format(isp, ch)	    (&(isp)->fmts[(ch)])
+#define isp_get_current_format(isp) (isp_get_format(isp, (isp)->current_ch))
+
+#endif /* __ISP_DRV_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-fw.c b/drivers/media/platform/apple/isp/isp-fw.c
new file mode 100644
index 00000000000000..a39f5fb4445fa7
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-fw.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#include "isp-fw.h"
+
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
+#include "isp-cmd.h"
+#include "isp-iommu.h"
+#include "isp-ipc.h"
+#include "isp-regs.h"
+#include "isp-v4l2.h"
+
+#define ISP_FIRMWARE_MDELAY    1
+#define ISP_FIRMWARE_MAX_TRIES 1000
+
+#define ISP_FIRMWARE_IPC_SIZE  0x1c000
+#define ISP_FIRMWARE_DATA_SIZE 0x28000
+
+#define ISP_COPROC_IN_WFI      0x3
+
+static inline u32 isp_coproc_read32(struct apple_isp *isp, u32 reg)
+{
+	return readl(isp->coproc + reg);
+}
+
+static inline void isp_coproc_write32(struct apple_isp *isp, u32 reg, u32 val)
+{
+	writel(val, isp->coproc + reg);
+}
+
+static inline u32 isp_gpio_read32(struct apple_isp *isp, u32 reg)
+{
+	return readl(isp->gpio + reg);
+}
+
+static inline void isp_gpio_write32(struct apple_isp *isp, u32 reg, u32 val)
+{
+	writel(val, isp->gpio + reg);
+}
+
+static int apple_isp_power_up_domains(struct apple_isp *isp)
+{
+	int ret;
+
+	if (isp->pds_active)
+		return 0;
+
+	for (int i = 1; i < isp->pd_count; i++) {
+		ret = pm_runtime_get_sync(isp->pd_dev[i]);
+		if (ret < 0) {
+			dev_err(isp->dev,
+				"Failed to power up power domain %d: %d\n", i, ret);
+			while (--i >= 1)
+				pm_runtime_put_sync(isp->pd_dev[i]);
+			return ret;
+		}
+	}
+
+	isp->pds_active = true;
+
+	return 0;
+}
+
+static void apple_isp_power_down_domains(struct apple_isp *isp)
+{
+	int ret;
+
+	if (!isp->pds_active)
+		return;
+
+	for (int i = isp->pd_count - 1; i >= 1; i--) {
+		ret = pm_runtime_put_sync(isp->pd_dev[i]);
+		if (ret < 0)
+			dev_err(isp->dev,
+				"Failed to power down power domain %d: %d\n", i, ret);
+	}
+
+	isp->pds_active = false;
+}
+
+void *apple_isp_translate(struct apple_isp *isp, struct isp_surf *surf,
+			  dma_addr_t iova, size_t size)
+{
+	dma_addr_t end = iova + size;
+	if (!surf) {
+		dev_err(isp->dev,
+			"Failed to translate IPC iova 0x%llx (0x%zx): No surface\n",
+			(long long)iova, size);
+		return NULL;
+	}
+
+	if (end < iova || iova < surf->iova ||
+	    end > (surf->iova + surf->size)) {
+		dev_err(isp->dev,
+			"Failed to translate IPC iova 0x%llx (0x%zx): Out of bounds\n",
+			(long long)iova, size);
+		return NULL;
+	}
+
+	if (!surf->virt) {
+		dev_err(isp->dev,
+			"Failed to translate IPC iova 0x%llx (0x%zx): No VMap\n",
+			(long long)iova, size);
+		return NULL;
+	}
+
+	return surf->virt + (iova - surf->iova);
+}
+
+struct isp_firmware_bootargs {
+	u32 pad_0[2];
+	u64 ipc_iova;
+	u64 shared_base;
+	u64 shared_size;
+	u64 extra_iova;
+	u64 extra_size;
+	u32 platform_id;
+	u32 pad_40;
+	u64 logbuf_addr;
+	u64 logbuf_size;
+	u64 logbuf_entsize;
+	u32 ipc_size;
+	u32 pad_60[5];
+	u32 unk5;
+	u32 pad_7c[13];
+	u32 pad_b0;
+	u32 unk7;
+	u32 pad_b8[5];
+	u32 unk_iova1;
+	u32 pad_c0[47];
+	u32 unk9;
+} __packed;
+static_assert(sizeof(struct isp_firmware_bootargs) == 0x180);
+
+struct isp_chan_desc {
+	char name[64];
+	u32 type;
+	u32 src;
+	u32 num;
+	u32 pad;
+	u64 iova;
+	u32 padding[0x2a];
+} __packed;
+static_assert(sizeof(struct isp_chan_desc) == 0x100);
+
+static const struct isp_chan_ops tm_ops = {
+	.handle = ipc_tm_handle,
+};
+
+static const struct isp_chan_ops sm_ops = {
+	.handle = ipc_sm_handle,
+};
+
+static const struct isp_chan_ops bt_ops = {
+	.handle = ipc_bt_handle,
+};
+
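+/*
+ * The hard IRQ handler only acknowledges the mailbox interrupt; the actual
+ * IPC work (SHAREDMALLOC, TERMINAL and BUF_T2H channels) runs in the
+ * threaded handler, which also wakes any waiters blocked on commands.
+ */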
+static irqreturn_t apple_isp_isr(int irq, void *dev)
+{
+	struct apple_isp *isp = dev;
+
+	isp_mbox2_write32(isp, ISP_MBOX2_IRQ_ACK,
+			 isp_mbox_read32(isp, ISP_MBOX_IRQ_INTERRUPT));
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t apple_isp_isr_thread(int irq, void *dev)
+{
+	struct apple_isp *isp = dev;
+
+	wake_up_all(&isp->wait);
+
+	ipc_chan_handle(isp, isp->chan_sm);
+	wake_up_all(&isp->wait); /* Some commands depend on sm */
+
+	ipc_chan_handle(isp, isp->chan_tm);
+
+	ipc_chan_handle(isp, isp->chan_bt);
+	wake_up_all(&isp->wait);
+
+	return IRQ_HANDLED;
+}
+
+static void isp_disable_irq(struct apple_isp *isp)
+{
+	isp_mbox_write32(isp, ISP_MBOX_IRQ_ENABLE, 0x0);
+	free_irq(isp->irq, isp);
+	isp_gpio_write32(isp, ISP_GPIO_1, 0xfeedbabe); /* real funny */
+}
+
+static int isp_enable_irq(struct apple_isp *isp)
+{
+	int err;
+
+	err = request_threaded_irq(isp->irq, apple_isp_isr,
+				   apple_isp_isr_thread, 0, "apple-isp", isp);
+	if (err < 0) {
+		isp_err(isp, "failed to request IRQ#%u (%d)\n", isp->irq, err);
+		return err;
+	}
+
+	isp_dbg(isp, "about to enable interrupts...\n");
+
+	isp_mbox_write32(isp, ISP_MBOX_IRQ_ENABLE, 0xf);
+
+	return 0;
+}
+
+static int isp_reset_coproc(struct apple_isp *isp)
+{
+	int retries;
+	u32 status;
+	u32 val;
+
+	isp_coproc_write32(isp, ISP_COPROC_EDPRCR, 0x2);
+
+	isp_coproc_write32(isp, ISP_COPROC_FABRIC_0, 0xff00ff);
+	isp_coproc_write32(isp, ISP_COPROC_FABRIC_1, 0xff00ff);
+	isp_coproc_write32(isp, ISP_COPROC_FABRIC_2, 0xff00ff);
+	isp_coproc_write32(isp, ISP_COPROC_FABRIC_3, 0xff00ff);
+
+	isp_coproc_write32(isp, ISP_COPROC_IRQ_MASK_0, 0xffffffff);
+	isp_coproc_write32(isp, ISP_COPROC_IRQ_MASK_1, 0xffffffff);
+	isp_coproc_write32(isp, ISP_COPROC_IRQ_MASK_2, 0xffffffff);
+	isp_coproc_write32(isp, ISP_COPROC_IRQ_MASK_3, 0xffffffff);
+	isp_coproc_write32(isp, ISP_COPROC_IRQ_MASK_4, 0xffffffff);
+	isp_coproc_write32(isp, ISP_COPROC_IRQ_MASK_5, 0xffffffff);
+
+	for (retries = 0; retries < 128; retries++) {
+		val = isp_coproc_read32(isp, 0x818);
+		if (val == 0)
+			break;
+	}
+
+	for (retries = 0; retries < 128; retries++) {
+		val = isp_coproc_read32(isp, 0x81c);
+		if (val == 0)
+			break;
+	}
+
+	for (retries = 0; retries < ISP_FIRMWARE_MAX_TRIES; retries++) {
+		status = isp_coproc_read32(isp, ISP_COPROC_STATUS);
+		if (status & ISP_COPROC_IN_WFI) {
+			isp_dbg(isp, "%d: coproc in WFI (status: 0x%x)\n",
+				retries, status);
+			break;
+		}
+		mdelay(ISP_FIRMWARE_MDELAY);
+	}
+	if (retries >= ISP_FIRMWARE_MAX_TRIES) {
+		isp_err(isp, "coproc NOT in WFI (status: 0x%x)\n", status);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void isp_firmware_shutdown_stage1(struct apple_isp *isp)
+{
+	isp_coproc_write32(isp, ISP_COPROC_CONTROL, 0x0);
+
+	apple_isp_power_down_domains(isp);
+}
+
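+/*
+ * Boot stage 1: power up the auxiliary domains, enable the ISP clock,
+ * quiesce the coprocessor and kick it via ISP_COPROC_CONTROL, then poll
+ * ISP_GPIO_7 for the 0x8042006 magic the firmware writes once it is ready
+ * to receive bootargs.
+ */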
+static int isp_firmware_boot_stage1(struct apple_isp *isp)
+{
+	int err, retries;
+	// u32 val;
+
+	err = apple_isp_power_up_domains(isp);
+	if (err < 0)
+		return err;
+
+	isp_gpio_write32(isp, ISP_GPIO_CLOCK_EN, 0x1);
+
+#if 0
+	/* This doesn't work well with system sleep */
+	val = isp_gpio_read32(isp, ISP_GPIO_1);
+	if (val == 0xfeedbabe) {
+		err = isp_reset_coproc(isp);
+		if (err < 0)
+			return err;
+	}
+#endif
+
+	err = isp_reset_coproc(isp);
+	if (err < 0)
+		return err;
+
+	isp_gpio_write32(isp, ISP_GPIO_0, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_1, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_2, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_3, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_4, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_5, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_6, 0x0);
+	isp_gpio_write32(isp, ISP_GPIO_7, 0x0);
+
+	isp_mbox_write32(isp, ISP_MBOX_IRQ_ENABLE, 0x0);
+
+	isp_coproc_write32(isp, ISP_COPROC_CONTROL, 0x0);
+	isp_coproc_write32(isp, ISP_COPROC_CONTROL, 0x10);
+
+	/* Wait for ISP_GPIO_7 to 0x0 -> 0x8042006 */
+	for (retries = 0; retries < ISP_FIRMWARE_MAX_TRIES; retries++) {
+		u32 val = isp_gpio_read32(isp, ISP_GPIO_7);
+		if (val == 0x8042006) {
+			isp_dbg(isp,
+				"got first magic number (0x%x) from firmware\n",
+				val);
+			break;
+		}
+		mdelay(ISP_FIRMWARE_MDELAY);
+	}
+	if (retries >= ISP_FIRMWARE_MAX_TRIES) {
+		isp_err(isp,
+			"never received first magic number from firmware\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int apple_isp_alloc_firmware_surface(struct apple_isp *isp)
+{
+	/* These are static, so let's do it once and for all */
+	isp->ipc_surf = isp_alloc_surface_vmap(isp, ISP_FIRMWARE_IPC_SIZE);
+	if (!isp->ipc_surf) {
+		isp_err(isp, "failed to alloc shared surface for ipc\n");
+		return -ENOMEM;
+	}
+	dev_info(isp->dev, "IPC surface iova: 0x%llx\n",
+		 (long long)isp->ipc_surf->iova);
+
+	isp->data_surf = isp_alloc_surface_vmap(isp, ISP_FIRMWARE_DATA_SIZE);
+	if (!isp->data_surf) {
+		isp_err(isp, "failed to alloc shared surface for data files\n");
+		isp_free_surface(isp, isp->ipc_surf);
+		return -ENOMEM;
+	}
+	dev_info(isp->dev, "Data surface iova: 0x%llx\n",
+		 (long long)isp->data_surf->iova);
+
+	return 0;
+}
+
+void apple_isp_free_firmware_surface(struct apple_isp *isp)
+{
+	isp_free_surface(isp, isp->data_surf);
+	isp_free_surface(isp, isp->ipc_surf);
+}
+
+static void isp_firmware_shutdown_stage2(struct apple_isp *isp)
+{
+	isp_free_surface(isp, isp->extra_surf);
+}
+
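+/*
+ * Boot stage 2: the firmware reports the IPC channel count, bootargs offset
+ * and extra-heap size via GPIO_0/1/3. Fill in the bootargs inside the IPC
+ * surface, hand its iova back through GPIO_0/1 and wait for the 0x8042006
+ * handshake on GPIO_7 again.
+ */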
+static int isp_firmware_boot_stage2(struct apple_isp *isp)
+{
+	struct isp_firmware_bootargs args;
+	dma_addr_t args_iova;
+	void *args_virt;
+	int err, retries;
+
+	u32 num_ipc_chans = isp_gpio_read32(isp, ISP_GPIO_0);
+	u32 args_offset = isp_gpio_read32(isp, ISP_GPIO_1);
+	u32 extra_size = isp_gpio_read32(isp, ISP_GPIO_3);
+	isp->num_ipc_chans = num_ipc_chans;
+
+	if (!isp->num_ipc_chans) {
+		dev_err(isp->dev, "No IPC channels found\n");
+		return -ENODEV;
+	}
+
+	if (isp->num_ipc_chans != 7)
+		dev_warn(isp->dev, "unexpected channel count (%d)\n",
+			 num_ipc_chans);
+
+	isp->extra_surf = isp_alloc_surface_vmap(isp, extra_size);
+	if (!isp->extra_surf) {
+		isp_err(isp, "failed to alloc surface for extra heap\n");
+		return -ENOMEM;
+	}
+
+	args_iova = isp->ipc_surf->iova + args_offset + 0x40;
+	args_virt = isp->ipc_surf->virt + args_offset + 0x40;
+	isp->cmd_iova = args_iova + sizeof(args) + 0x40;
+	isp->cmd_virt = args_virt + sizeof(args) + 0x40;
+
+	memset(&args, 0, sizeof(args));
+	args.ipc_iova = isp->ipc_surf->iova;
+	args.ipc_size = isp->ipc_surf->size;
+	args.shared_base = isp->fw.heap_top & 0xffffffff;
+	args.shared_size = 0x10000000UL - args.shared_base;
+	args.extra_iova = isp->extra_surf->iova;
+	args.extra_size = isp->extra_surf->size;
+	args.platform_id = isp->platform_id;
+	args.unk5 = 0x40;
+	args.unk7 = 0x1; // 0?
+	args.unk_iova1 = args_iova + sizeof(args) - 0xc;
+	args.unk9 = 0x3;
+	memcpy(args_virt, &args, sizeof(args));
+
+	isp_gpio_write32(isp, ISP_GPIO_0, args_iova);
+	isp_gpio_write32(isp, ISP_GPIO_1, args_iova >> 32);
+	dma_wmb();
+
+	/* Wait for ISP_GPIO_7 to 0xf7fbdff9 -> 0x8042006 */
+	isp_gpio_write32(isp, ISP_GPIO_7, 0xf7fbdff9);
+
+	for (retries = 0; retries < ISP_FIRMWARE_MAX_TRIES; retries++) {
+		u32 val = isp_gpio_read32(isp, ISP_GPIO_7);
+		if (val == 0x8042006) {
+			isp_dbg(isp,
+				"got second magic number (0x%x) from firmware\n",
+				val);
+			break;
+		}
+		mdelay(ISP_FIRMWARE_MDELAY);
+	}
+	if (retries >= ISP_FIRMWARE_MAX_TRIES) {
+		isp_err(isp,
+			"never received second magic number from firmware\n");
+		err = -ENODEV;
+		goto free_extra;
+	}
+
+	return 0;
+
+free_extra:
+	isp_free_surface(isp, isp->extra_surf);
+	return err;
+}
+
+static inline struct isp_channel *isp_get_chan_index(struct apple_isp *isp,
+						     const char *name)
+{
+	for (int i = 0; i < isp->num_ipc_chans; i++) {
+		if (!strcasecmp(isp->ipc_chans[i]->name, name))
+			return isp->ipc_chans[i];
+	}
+	return NULL;
+}
+
+static void isp_free_channel_info(struct apple_isp *isp)
+{
+	for (int i = 0; i < isp->num_ipc_chans; i++) {
+		struct isp_channel *chan = isp->ipc_chans[i];
+		if (!chan)
+			continue;
+		kfree(chan->name);
+		kfree(chan);
+		isp->ipc_chans[i] = NULL;
+	}
+	kfree(isp->ipc_chans);
+	isp->ipc_chans = NULL;
+}
+
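+/*
+ * The firmware publishes a table of isp_chan_desc entries at the iova
+ * reported via GPIO_0/1. Parse it into isp_channel objects and resolve the
+ * channels the driver relies on by name.
+ */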
+static int isp_fill_channel_info(struct apple_isp *isp)
+{
+	u64 table_iova = isp_gpio_read32(isp, ISP_GPIO_0) |
+			 ((u64)isp_gpio_read32(isp, ISP_GPIO_1)) << 32;
+	void *table_virt = apple_isp_ipc_translate(
+		isp, table_iova,
+		sizeof(struct isp_chan_desc) * isp->num_ipc_chans);
+
+	if (!table_virt) {
+		dev_err(isp->dev, "Failed to find channel table\n");
+		return -EIO;
+	}
+
+	isp->ipc_chans = kcalloc(isp->num_ipc_chans,
+				 sizeof(struct isp_channel *), GFP_KERNEL);
+	if (!isp->ipc_chans)
+		return -ENOMEM;
+
+	for (int i = 0; i < isp->num_ipc_chans; i++) {
+		struct isp_chan_desc desc;
+		void *desc_virt = table_virt + (i * sizeof(desc));
+		struct isp_channel *chan =
+			kzalloc(sizeof(struct isp_channel), GFP_KERNEL);
+		if (!chan)
+			goto out;
+		isp->ipc_chans[i] = chan;
+
+		memcpy(&desc, desc_virt, sizeof(desc));
+		chan->name = kstrdup(desc.name, GFP_KERNEL);
+		chan->type = desc.type;
+		chan->src = desc.src;
+		chan->doorbell = 1 << chan->src;
+		chan->num = desc.num;
+		chan->size = desc.num * ISP_IPC_MESSAGE_SIZE;
+		chan->iova = desc.iova;
+		chan->virt =
+			apple_isp_ipc_translate(isp, desc.iova, chan->size);
+		chan->cursor = 0;
+		mutex_init(&chan->lock);
+
+		if (!chan->virt) {
+			dev_err(isp->dev, "Failed to find channel buffer\n");
+			goto out;
+		}
+
+		if ((chan->type != ISP_IPC_CHAN_TYPE_COMMAND) &&
+		    (chan->type != ISP_IPC_CHAN_TYPE_REPLY) &&
+		    (chan->type != ISP_IPC_CHAN_TYPE_REPORT)) {
+			isp_err(isp, "invalid ipc chan type (%d)\n",
+				chan->type);
+			goto out;
+		}
+
+		isp_dbg(isp, "chan: %s type: %d src: %d num: %d iova: 0x%llx\n",
+			chan->name, chan->type, chan->src, chan->num,
+			chan->iova);
+	}
+
+	isp->chan_tm = isp_get_chan_index(isp, "TERMINAL");
+	isp->chan_io = isp_get_chan_index(isp, "IO");
+	isp->chan_dg = isp_get_chan_index(isp, "DEBUG");
+	isp->chan_bh = isp_get_chan_index(isp, "BUF_H2T");
+	isp->chan_bt = isp_get_chan_index(isp, "BUF_T2H");
+	isp->chan_sm = isp_get_chan_index(isp, "SHAREDMALLOC");
+	isp->chan_it = isp_get_chan_index(isp, "IO_T2H");
+
+	if (!isp->chan_tm || !isp->chan_io || !isp->chan_dg || !isp->chan_bh ||
+	    !isp->chan_bt || !isp->chan_sm || !isp->chan_it) {
+		isp_err(isp, "did not find all of the required ipc chans\n");
+		goto out;
+	}
+
+	isp->chan_tm->ops = &tm_ops;
+	isp->chan_sm->ops = &sm_ops;
+	isp->chan_bt->ops = &bt_ops;
+
+	return 0;
+out:
+	isp_free_channel_info(isp);
+	return -ENOMEM;
+}
+
+static void isp_firmware_shutdown_stage3(struct apple_isp *isp)
+{
+	isp_free_channel_info(isp);
+}
+
+static int isp_firmware_boot_stage3(struct apple_isp *isp)
+{
+	int err, retries;
+
+	err = isp_fill_channel_info(isp);
+	if (err < 0)
+		return err;
+
+	/* Mask the command channels to prepare for submission */
+	for (int i = 0; i < isp->num_ipc_chans; i++) {
+		struct isp_channel *chan = isp->ipc_chans[i];
+		if (chan->type != ISP_IPC_CHAN_TYPE_COMMAND)
+			continue;
+		for (int j = 0; j < chan->num; j++) {
+			struct isp_message msg;
+			void *msg_virt = chan->virt + (j * sizeof(msg));
+
+			memset(&msg, 0, sizeof(msg));
+			msg.arg0 = ISP_IPC_FLAG_ACK;
+			memcpy(msg_virt, &msg, sizeof(msg));
+		}
+	}
+	dma_wmb();
+
+	/* Wait for ISP_GPIO_3 to 0x8042006 -> 0x0 */
+	isp_gpio_write32(isp, ISP_GPIO_3, 0x8042006);
+
+	for (retries = 0; retries < ISP_FIRMWARE_MAX_TRIES; retries++) {
+		u32 val = isp_gpio_read32(isp, ISP_GPIO_3);
+		if (val == 0x0) {
+			isp_dbg(isp,
+				"got third magic number (0x%x) from firmware\n",
+				val);
+			break;
+		}
+		mdelay(ISP_FIRMWARE_MDELAY);
+	}
+	if (retries >= ISP_FIRMWARE_MAX_TRIES) {
+		isp_err(isp,
+			"never received third magic number from firmware\n");
+		isp_free_channel_info(isp);
+		return -ENODEV;
+	}
+
+	isp_dbg(isp, "firmware booted!\n");
+
+	return 0;
+}
+
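+/*
+ * Two shutdown sequences appear to work: CISP_CMD_STOP followed by
+ * CISP_CMD_POWER_DOWN, or CISP_CMD_SUSPEND alone. The suspend variant is
+ * used here; the stop/power-down path is kept under #if 0 for reference.
+ * Either way the firmware acknowledges by writing 0x8042006 to ISP_GPIO_0.
+ */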
+static int isp_stop_command_processor(struct apple_isp *isp)
+{
+	int retries;
+
+#if 0
+	int res = isp_cmd_stop(isp, 0);
+	if (res) {
+		isp_err(isp, "isp_cmd_stop() failed\n");
+		return res;
+	}
+
+	/* Wait for ISP_GPIO_0 to 0xf7fbdff9 -> 0x8042006 */
+	isp_gpio_write32(isp, ISP_GPIO_0, 0xf7fbdff9);
+
+	isp_cmd_power_down(isp);
+#else
+	isp_gpio_write32(isp, ISP_GPIO_0, 0xf7fbdff9);
+
+	int res = isp_cmd_suspend(isp);
+	if (res) {
+		isp_err(isp, "isp_cmd_suspend() failed\n");
+		return res;
+	}
+#endif
+
+	for (retries = 0; retries < ISP_FIRMWARE_MAX_TRIES; retries++) {
+		u32 val = isp_gpio_read32(isp, ISP_GPIO_0);
+		if (val == 0x8042006) {
+			isp_dbg(isp, "got magic number (0x%x) from firmware\n",
+				val);
+			break;
+		}
+		mdelay(ISP_FIRMWARE_MDELAY);
+	}
+	if (retries >= ISP_FIRMWARE_MAX_TRIES) {
+		isp_err(isp, "never received magic number from firmware\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int isp_start_command_processor(struct apple_isp *isp)
+{
+	int err;
+
+	err = isp_cmd_print_enable(isp, 1);
+	if (err)
+		return err;
+
+	err = isp_cmd_set_isp_pmu_base(isp, isp->hw->pmu_base);
+	if (err)
+		return err;
+
+	if (isp->hw->dsid_count == 1) {
+		err = isp_cmd_set_dsid_clr_req_base(
+			isp, isp->hw->dsid_clr_base0, isp->hw->dsid_clr_range0);
+		if (err)
+			return err;
+	} else {
+		err = isp_cmd_set_dsid_clr_req_base2(
+			isp, isp->hw->dsid_clr_base0, isp->hw->dsid_clr_base1,
+			isp->hw->dsid_clr_base2, isp->hw->dsid_clr_base3,
+			isp->hw->dsid_clr_range0, isp->hw->dsid_clr_range1,
+			isp->hw->dsid_clr_range2, isp->hw->dsid_clr_range3);
+		if (err)
+			return err;
+	}
+
+	err = isp_cmd_pmp_ctrl_set(
+		isp, isp->hw->clock_scratch, isp->hw->clock_base,
+		isp->hw->clock_bit, isp->hw->clock_size,
+		isp->hw->bandwidth_scratch, isp->hw->bandwidth_base,
+		isp->hw->bandwidth_bit, isp->hw->bandwidth_size);
+	if (err)
+		return err;
+
+	err = isp_cmd_start(isp, 0);
+	if (err)
+		return err;
+
+	/* Now we can access CISP_CMD_CH_* commands */
+
+	return 0;
+}
+
+static void isp_collect_gc_surface(struct apple_isp *isp)
+{
+	struct isp_surf *tmp, *surf;
+
+	isp->log_surf = NULL;
+	isp->bt_surf = NULL;
+
+	list_for_each_entry_safe_reverse(surf, tmp, &isp->gc, head) {
+		isp_dbg(isp, "freeing iova: 0x%llx size: 0x%llx virt: %pS\n",
+			surf->iova, surf->size, (void *)surf->virt);
+		isp_free_surface(isp, surf);
+	}
+		isp_dbg(isp, "freeing iova: 0x%llx size: 0x%llx virt: %p\n",
+
+static int isp_firmware_boot(struct apple_isp *isp)
+{
+	int err;
+
+	err = isp_firmware_boot_stage1(isp);
+	if (err < 0) {
+		isp_err(isp, "failed firmware boot stage 1: %d\n", err);
+		goto garbage_collect;
+	}
+
+	err = isp_firmware_boot_stage2(isp);
+	if (err < 0) {
+		isp_err(isp, "failed firmware boot stage 2: %d\n", err);
+		goto shutdown_stage1;
+	}
+
+	err = isp_firmware_boot_stage3(isp);
+	if (err < 0) {
+		isp_err(isp, "failed firmware boot stage 3: %d\n", err);
+		goto shutdown_stage2;
+	}
+
+	err = isp_enable_irq(isp);
+	if (err < 0) {
+		isp_err(isp, "failed to enable interrupts: %d\n", err);
+		goto shutdown_stage3;
+	}
+
+	err = isp_start_command_processor(isp);
+	if (err < 0) {
+		isp_err(isp, "failed to start command processor: %d\n", err);
+		goto disable_irqs;
+	}
+
+	flush_workqueue(isp->wq);
+
+	return 0;
+
+disable_irqs:
+	isp_disable_irq(isp);
+shutdown_stage3:
+	isp_firmware_shutdown_stage3(isp);
+shutdown_stage2:
+	isp_firmware_shutdown_stage2(isp);
+shutdown_stage1:
+	isp_firmware_shutdown_stage1(isp);
+garbage_collect:
+	isp_collect_gc_surface(isp);
+	return err;
+}
+
+static void isp_firmware_shutdown(struct apple_isp *isp)
+{
+	flush_workqueue(isp->wq);
+	isp_stop_command_processor(isp);
+	isp_disable_irq(isp);
+	isp_firmware_shutdown_stage3(isp);
+	isp_firmware_shutdown_stage2(isp);
+	isp_firmware_shutdown_stage1(isp);
+	isp_collect_gc_surface(isp);
+}
+
+int apple_isp_firmware_boot(struct apple_isp *isp)
+{
+	int err;
+
+	/* Needs to be power cycled for IOMMU to behave correctly */
+	err = pm_runtime_resume_and_get(isp->dev);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to enable power: %d\n", err);
+		return err;
+	}
+
+	err = isp_firmware_boot(isp);
+	if (err) {
+		dev_err(isp->dev, "failed to boot firmware: %d\n", err);
+		pm_runtime_put_sync(isp->dev);
+		return err;
+	}
+
+	return 0;
+}
+
+void apple_isp_firmware_shutdown(struct apple_isp *isp)
+{
+	isp_firmware_shutdown(isp);
+	pm_runtime_put_sync(isp->dev);
+}
diff --git a/drivers/media/platform/apple/isp/isp-fw.h b/drivers/media/platform/apple/isp/isp-fw.h
new file mode 100644
index 00000000000000..974216f0989f91
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-fw.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_FW_H__
+#define __ISP_FW_H__
+
+#include "isp-drv.h"
+
+int apple_isp_alloc_firmware_surface(struct apple_isp *isp);
+void apple_isp_free_firmware_surface(struct apple_isp *isp);
+
+int apple_isp_firmware_boot(struct apple_isp *isp);
+void apple_isp_firmware_shutdown(struct apple_isp *isp);
+
+void *apple_isp_translate(struct apple_isp *isp, struct isp_surf *surf,
+			  dma_addr_t iova, size_t size);
+
+static inline void *apple_isp_ipc_translate(struct apple_isp *isp,
+					    dma_addr_t iova, size_t size)
+{
+	return apple_isp_translate(isp, isp->ipc_surf, iova, size);
+}
+
+#endif /* __ISP_FW_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-iommu.c b/drivers/media/platform/apple/isp/isp-iommu.c
new file mode 100644
index 00000000000000..1ddd089d77355a
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-iommu.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
+
+#include "isp-iommu.h"
+
+static void isp_surf_free_pages(struct isp_surf *surf)
+{
+	for (u32 i = 0; i < surf->num_pages && surf->pages[i] != NULL; i++) {
+		__free_page(surf->pages[i]);
+	}
+	kvfree(surf->pages);
+}
+
+static int isp_surf_alloc_pages(struct isp_surf *surf)
+{
+	surf->pages = kvmalloc_array(surf->num_pages, sizeof(*surf->pages),
+				     GFP_KERNEL);
+	if (!surf->pages)
+		return -ENOMEM;
+
+	for (u32 i = 0; i < surf->num_pages; i++) {
+		surf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (surf->pages[i] == NULL)
+			goto free_pages;
+	}
+
+	return 0;
+
+free_pages:
+	isp_surf_free_pages(surf);
+	return -ENOMEM;
+}
+
+int isp_surf_vmap(struct apple_isp *isp, struct isp_surf *surf)
+{
+	surf->virt = vmap(surf->pages, surf->num_pages, VM_MAP,
+			  pgprot_writecombine(PAGE_KERNEL));
+	if (surf->virt == NULL) {
+		dev_err(isp->dev, "failed to vmap size 0x%llx\n", surf->size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void isp_surf_vunmap(struct apple_isp *isp, struct isp_surf *surf)
+{
+	if (surf->virt)
+		vunmap(surf->virt);
+	surf->virt = NULL;
+}
+
+static void isp_surf_unreserve_iova(struct apple_isp *isp,
+				    struct isp_surf *surf)
+{
+	if (surf->mm) {
+		mutex_lock(&isp->iovad_lock);
+		drm_mm_remove_node(surf->mm);
+		mutex_unlock(&isp->iovad_lock);
+		kfree(surf->mm);
+	}
+	surf->mm = NULL;
+}
+
+static int isp_surf_reserve_iova(struct apple_isp *isp, struct isp_surf *surf)
+{
+	int err;
+
+	surf->mm = kzalloc(sizeof(*surf->mm), GFP_KERNEL);
+	if (!surf->mm)
+		return -ENOMEM;
+
+	mutex_lock(&isp->iovad_lock);
+	err = drm_mm_insert_node_generic(&isp->iovad, surf->mm,
+					 ALIGN(surf->size, 1UL << isp->shift),
+					 1UL << isp->shift, 0, 0);
+	mutex_unlock(&isp->iovad_lock);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to reserve 0x%llx of iova space\n",
+			surf->size);
+		goto mm_free;
+	}
+
+	surf->iova = surf->mm->start;
+
+	return 0;
+mm_free:
+	kfree(surf->mm);
+	surf->mm = NULL;
+	return err;
+}
+
+static void isp_surf_iommu_unmap(struct apple_isp *isp, struct isp_surf *surf)
+{
+	iommu_unmap(isp->domain, surf->iova, surf->size);
+	sg_free_table(&surf->sgt);
+}
+
+static int isp_surf_iommu_map(struct apple_isp *isp, struct isp_surf *surf)
+{
+	ssize_t size;
+	int err;
+
+	err = sg_alloc_table_from_pages(&surf->sgt, surf->pages,
+					surf->num_pages, 0, surf->size,
+					GFP_KERNEL);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to alloc sgt from pages\n");
+		return err;
+	}
+
+	size = iommu_map_sgtable(isp->domain, surf->iova, &surf->sgt,
+				 IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+	if (size < 0 || size < surf->size) {
+		dev_err(isp->dev, "failed to iommu_map sgt to iova 0x%llx\n",
+			surf->iova);
+		sg_free_table(&surf->sgt);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static void __isp_surf_init(struct apple_isp *isp, struct isp_surf *surf,
+			    u64 size, bool gc)
+{
+	surf->mm = NULL;
+	surf->virt = NULL;
+	surf->size = ALIGN(size, 1UL << isp->shift);
+	surf->num_pages = surf->size >> isp->shift;
+	surf->gc = gc;
+}
+
+struct isp_surf *__isp_alloc_surface(struct apple_isp *isp, u64 size, bool gc)
+{
+	int err;
+
+	struct isp_surf *surf = kzalloc(sizeof(struct isp_surf), GFP_KERNEL);
+	if (!surf)
+		return NULL;
+
+	__isp_surf_init(isp, surf, size, gc);
+
+	err = isp_surf_alloc_pages(surf);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to allocate %d pages\n",
+			surf->num_pages);
+		goto free_surf;
+	}
+
+	err = isp_surf_reserve_iova(isp, surf);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to reserve 0x%llx of iova space\n",
+			surf->size);
+		goto free_pages;
+	}
+
+	err = isp_surf_iommu_map(isp, surf);
+	if (err < 0) {
+		dev_err(isp->dev,
+			"failed to iommu_map size 0x%llx to iova 0x%llx\n",
+			surf->size, surf->iova);
+		goto unreserve_iova;
+	}
+
+	refcount_set(&surf->refcount, 1);
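+	/* Firmware-requested (gc) surfaces are collected only after fw shutdown */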
+	if (surf->gc)
+		list_add_tail(&surf->head, &isp->gc);
+
+	return surf;
+
+unreserve_iova:
+	isp_surf_unreserve_iova(isp, surf);
+free_pages:
+	isp_surf_free_pages(surf);
+free_surf:
+	kfree(surf);
+	return NULL;
+}
+
+struct isp_surf *isp_alloc_surface_vmap(struct apple_isp *isp, u64 size)
+{
+	int err;
+
+	struct isp_surf *surf = __isp_alloc_surface(isp, size, false);
+	if (!surf)
+		return NULL;
+
+	err = isp_surf_vmap(isp, surf);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to vmap iova 0x%llx - 0x%llx\n",
+			surf->iova, surf->iova + surf->size);
+		isp_free_surface(isp, surf);
+		return NULL;
+	}
+
+	return surf;
+}
+
+void isp_free_surface(struct apple_isp *isp, struct isp_surf *surf)
+{
+	if (refcount_dec_and_test(&surf->refcount)) {
+		isp_surf_vunmap(isp, surf);
+		isp_surf_iommu_unmap(isp, surf);
+		isp_surf_unreserve_iova(isp, surf);
+		isp_surf_free_pages(surf);
+		if (surf->gc)
+			list_del(&surf->head);
+		kfree(surf);
+	}
+}
+
+int apple_isp_iommu_map_sgt(struct apple_isp *isp, struct isp_surf *surf,
+			    struct sg_table *sgt, u64 size)
+{
+	int err;
+	ssize_t mapped;
+
+	// TODO userptr sends unaligned sizes
+	surf->mm = NULL;
+	surf->size = size;
+
+	err = isp_surf_reserve_iova(isp, surf);
+	if (err < 0) {
+		dev_err(isp->dev, "failed to reserve 0x%llx of iova space\n",
+			surf->size);
+		return err;
+	}
+
+	mapped = iommu_map_sgtable(isp->domain, surf->iova, sgt,
+				   IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+	if (mapped < 0 || mapped < surf->size) {
+		dev_err(isp->dev, "failed to iommu_map sgt to iova 0x%llx\n",
+			surf->iova);
+		isp_surf_unreserve_iova(isp, surf);
+		return -ENXIO;
+	}
+	surf->size = mapped;
+
+	return 0;
+}
+
+void apple_isp_iommu_unmap_sgt(struct apple_isp *isp, struct isp_surf *surf)
+{
+	iommu_unmap(isp->domain, surf->iova, surf->size);
+	isp_surf_unreserve_iova(isp, surf);
+}
diff --git a/drivers/media/platform/apple/isp/isp-iommu.h b/drivers/media/platform/apple/isp/isp-iommu.h
new file mode 100644
index 00000000000000..b99a182e284b72
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-iommu.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_IOMMU_H__
+#define __ISP_IOMMU_H__
+
+#include "isp-drv.h"
+
+struct isp_surf *__isp_alloc_surface(struct apple_isp *isp, u64 size, bool gc);
+#define isp_alloc_surface(isp, size)	(__isp_alloc_surface(isp, size, false))
+#define isp_alloc_surface_gc(isp, size) (__isp_alloc_surface(isp, size, true))
+struct isp_surf *isp_alloc_surface_vmap(struct apple_isp *isp, u64 size);
+int isp_surf_vmap(struct apple_isp *isp, struct isp_surf *surf);
+void isp_free_surface(struct apple_isp *isp, struct isp_surf *surf);
+
+int apple_isp_iommu_map_sgt(struct apple_isp *isp, struct isp_surf *surf,
+			    struct sg_table *sgt, u64 size);
+void apple_isp_iommu_unmap_sgt(struct apple_isp *isp, struct isp_surf *surf);
+
+#endif /* __ISP_IOMMU_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-ipc.c b/drivers/media/platform/apple/isp/isp-ipc.c
new file mode 100644
index 00000000000000..7300eb60892116
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-ipc.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#include "isp-iommu.h"
+#include "isp-ipc.h"
+#include "isp-regs.h"
+#include "isp-fw.h"
+
+#define ISP_IPC_FLAG_TERMINAL_ACK	0x3
+#define ISP_IPC_BUFEXC_STAT_META_OFFSET 0x10
+
+struct isp_sm_deferred_work {
+	struct work_struct work;
+	struct apple_isp *isp;
+	struct isp_surf *surf;
+};
+
+struct isp_bufexc_stat {
+	u64 unk_0; // 2
+	u64 unk_8; // 2
+
+	u64 meta_iova;
+	u64 pad_20[3];
+	u64 meta_size; // 0x4640
+	u64 unk_38;
+
+	u32 unk_40; // 1
+	u32 unk_44;
+	u64 unk_48;
+
+	u64 iova0;
+	u64 iova1;
+	u64 iova2;
+	u64 iova3;
+	u32 pad_70[4];
+
+	u32 unk_80; // 2
+	u32 unk_84; // 1
+	u32 unk_88; // 0x10 || 0x13
+	u32 unk_8c;
+	u32 pad_90[96];
+
+	u32 unk_210; // 0x28
+	u32 unk_214;
+	u32 index;
+	u16 bes_width; // 1296, 0x510
+	u16 bes_height; // 736, 0x2e0
+
+	u32 unk_220; // 0x0 || 0x1
+	u32 pad_224[3];
+	u32 unk_230; // 0xf7ed38
+	u32 unk_234; // 3
+	u32 pad_238[2];
+	u32 pad_240[16];
+} __packed;
+static_assert(sizeof(struct isp_bufexc_stat) == ISP_IPC_BUFEXC_STAT_SIZE);
+
+static inline void *chan_msg_virt(struct isp_channel *chan, u32 index)
+{
+	return chan->virt + (index * ISP_IPC_MESSAGE_SIZE);
+}
+
+static inline void chan_read_msg_index(struct apple_isp *isp,
+				       struct isp_channel *chan,
+				       struct isp_message *msg, u32 index)
+{
+	memcpy(msg, chan_msg_virt(chan, index), sizeof(*msg));
+}
+
+static inline void chan_read_msg(struct apple_isp *isp,
+				 struct isp_channel *chan,
+				 struct isp_message *msg)
+{
+	chan_read_msg_index(isp, chan, msg, chan->cursor);
+}
+
+static inline void chan_write_msg_index(struct apple_isp *isp,
+					struct isp_channel *chan,
+					struct isp_message *msg, u32 index)
+{
+	u64 *p0 = chan_msg_virt(chan, index);
+	memcpy(p0 + 1, &msg->arg1, sizeof(*msg) - 8);
+
+	/* Make sure we write arg0 last, since that indicates message validity. */
+
+	dma_wmb();
+	*p0 = msg->arg0;
+	dma_wmb();
+}
+
+static inline void chan_write_msg(struct apple_isp *isp,
+				  struct isp_channel *chan,
+				  struct isp_message *msg)
+{
+	chan_write_msg_index(isp, chan, msg, chan->cursor);
+}
+
+static inline void chan_update_cursor(struct isp_channel *chan)
+{
+	if (chan->cursor >= (chan->num - 1)) {
+		chan->cursor = 0;
+	} else {
+		chan->cursor += 1;
+	}
+}
+
+static int chan_handle_once(struct apple_isp *isp, struct isp_channel *chan)
+{
+	int err;
+
+	lockdep_assert_held(&chan->lock);
+
+	err = chan->ops->handle(isp, chan);
+	if (err < 0) {
+		dev_err(isp->dev, "%s: handler failed: %d\n", chan->name, err);
+		return err;
+	}
+
+	chan_write_msg(isp, chan, &chan->rsp);
+
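+	/* Ring the channel doorbell so the firmware picks up the response */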
+	isp_mbox2_write32(isp, ISP_MBOX2_IRQ_DOORBELL, chan->doorbell);
+
+	chan_update_cursor(chan);
+
+	return 0;
+}
+
+static inline bool chan_rx_done(struct apple_isp *isp, struct isp_channel *chan)
+{
+	if (((chan->req.arg0 & 0xf) == ISP_IPC_FLAG_ACK) ||
+	    ((chan->req.arg0 & 0xf) == ISP_IPC_FLAG_TERMINAL_ACK)) {
+		return true;
+	}
+	return false;
+}
+
+int ipc_chan_handle(struct apple_isp *isp, struct isp_channel *chan)
+{
+	int err = 0;
+
+	mutex_lock(&chan->lock);
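+	/* Handle requests until the cursor reaches an already-acked slot */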
+	while (1) {
+		chan_read_msg(isp, chan, &chan->req);
+		if (chan_rx_done(isp, chan)) {
+			err = 0;
+			break;
+		}
+		err = chan_handle_once(isp, chan);
+		if (err < 0) {
+			break;
+		}
+	}
+	mutex_unlock(&chan->lock);
+
+	return err;
+}
+
+static inline bool chan_tx_done(struct apple_isp *isp, struct isp_channel *chan)
+{
+	dma_rmb();
+
+	chan_read_msg(isp, chan, &chan->rsp);
+	if ((chan->rsp.arg0) == (chan->req.arg0 | ISP_IPC_FLAG_ACK)) {
+		chan_update_cursor(chan);
+		return true;
+	}
+	return false;
+}
+
+int ipc_chan_send(struct apple_isp *isp, struct isp_channel *chan,
+		  unsigned long timeout)
+{
+	long t;
+
+	chan_write_msg(isp, chan, &chan->req);
+	dma_wmb();
+
+	isp_mbox2_write32(isp, ISP_MBOX2_IRQ_DOORBELL, chan->doorbell);
+
+	if (!timeout)
+		return 0;
+
+	t = wait_event_timeout(isp->wait, chan_tx_done(isp, chan), timeout);
+	if (t == 0) {
+		dev_err(isp->dev,
+			"%s: timed out on request [0x%llx, 0x%llx, 0x%llx]\n",
+			chan->name, chan->req.arg0, chan->req.arg1,
+			chan->req.arg2);
+		return -ETIME;
+	}
+
+	isp_dbg(isp, "%s: request success (%ld)\n", chan->name, t);
+
+	return 0;
+}
+
+int ipc_tm_handle(struct apple_isp *isp, struct isp_channel *chan)
+{
+	struct isp_message *rsp = &chan->rsp;
+
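+	/* Terminal messages carry firmware log text (arg0: iova, arg1: length) */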
+#ifdef APPLE_ISP_DEBUG
+	struct isp_message *req = &chan->req;
+	char buf[512];
+	dma_addr_t iova = req->arg0 & ~ISP_IPC_FLAG_TERMINAL_ACK;
+	u32 size = req->arg1;
+	if (iova && size && size < sizeof(buf) &&
+	    isp->log_surf) {
+		void *p = apple_isp_translate(isp, isp->log_surf, iova, size);
+		if (p) {
+			size = min_t(u32, size, 512);
+			memcpy(buf, p, size);
+			isp_dbg(isp, "ISPASC: %.*s", size, buf);
+		}
+	}
+#endif
+
+	rsp->arg0 = ISP_IPC_FLAG_ACK;
+	rsp->arg1 = 0x0;
+	rsp->arg2 = 0x0;
+
+	return 0;
+}
+
+int ipc_sm_handle(struct apple_isp *isp, struct isp_channel *chan)
+{
+	struct isp_message *req = &chan->req, *rsp = &chan->rsp;
+	int err;
+
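+	/* arg0 == 0: firmware requests a surface of arg1 bytes with type arg2 */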
+	if (req->arg0 == 0x0) {
+		struct isp_surf *surf;
+
+		surf = isp_alloc_surface_gc(isp, req->arg1);
+		if (!surf) {
+			isp_err(isp, "failed to alloc requested size 0x%llx\n",
+				req->arg1);
+			return -ENOMEM;
+		}
+		surf->type = req->arg2;
+
+		rsp->arg0 = surf->iova | ISP_IPC_FLAG_ACK;
+		rsp->arg1 = 0x0;
+		rsp->arg2 = 0x0; /* macOS uses this to index surfaces */
+
+		switch (surf->type) {
+		case 0x4c4f47: /* "LOG" */
+			isp->log_surf = surf;
+			break;
+		case 0x4d495343: /* "MISC" */
+			/* Hacky... maybe there's a better way to identify this surface? */
+			if (surf->size == 0xc000)
+				isp->bt_surf = surf;
+			break;
+		default:
+			/* skip vmap */
+			return 0;
+		}
+
+		err = isp_surf_vmap(isp, surf);
+		if (err < 0) {
+			isp_err(isp, "failed to vmap iova=0x%llx size=0x%llx\n",
+				surf->iova, surf->size);
+		}
+	} else {
+		/* This should be the shared surface free request, but
+		 * 1) The fw doesn't request to free all of what it requested
+		 * 2) The fw continues to access the surface after
+		 * So we link it to the gc, which runs after fw shutdown
+		 */
+		rsp->arg0 = req->arg0 | ISP_IPC_FLAG_ACK;
+		rsp->arg1 = 0x0;
+		rsp->arg2 = 0x0;
+	}
+
+	return 0;
+}
diff --git a/drivers/media/platform/apple/isp/isp-ipc.h b/drivers/media/platform/apple/isp/isp-ipc.h
new file mode 100644
index 00000000000000..0c1d681835c72f
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-ipc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_IPC_H__
+#define __ISP_IPC_H__
+
+#include "isp-drv.h"
+
+#define ISP_IPC_CHAN_TYPE_COMMAND   0
+#define ISP_IPC_CHAN_TYPE_REPLY	    1
+#define ISP_IPC_CHAN_TYPE_REPORT    2
+
+#define ISP_IPC_BUFEXC_STAT_SIZE    0x280
+#define ISP_IPC_BUFEXC_FLAG_RENDER  0x10000000
+#define ISP_IPC_BUFEXC_FLAG_COMMAND 0x30000000
+#define ISP_IPC_BUFEXC_FLAG_ACK	    0x80000000
+
+int ipc_chan_handle(struct apple_isp *isp, struct isp_channel *chan);
+int ipc_chan_send(struct apple_isp *isp, struct isp_channel *chan,
+		  unsigned long timeout);
+
+int ipc_tm_handle(struct apple_isp *isp, struct isp_channel *chan);
+int ipc_sm_handle(struct apple_isp *isp, struct isp_channel *chan);
+
+#endif /* __ISP_IPC_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-regs.h b/drivers/media/platform/apple/isp/isp-regs.h
new file mode 100644
index 00000000000000..7357fa10fa5483
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-regs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_REGS_H__
+#define __ISP_REGS_H__
+
+#include "isp-drv.h"
+
+#define ISP_COPROC_FABRIC_0    0x738
+#define ISP_COPROC_FABRIC_1    0x798
+#define ISP_COPROC_FABRIC_2    0x7f8
+#define ISP_COPROC_FABRIC_3    0x858
+
+#define ISP_COPROC_RVBAR       0x1050000
+#define ISP_COPROC_EDPRCR      0x1010310
+#define ISP_COPROC_CONTROL     0x1400044
+#define ISP_COPROC_STATUS      0x1400048
+
+#define ISP_COPROC_IRQ_MASK_0  0x1400a00
+#define ISP_COPROC_IRQ_MASK_1  0x1400a04
+#define ISP_COPROC_IRQ_MASK_2  0x1400a08
+#define ISP_COPROC_IRQ_MASK_3  0x1400a0c
+#define ISP_COPROC_IRQ_MASK_4  0x1400a10
+#define ISP_COPROC_IRQ_MASK_5  0x1400a14
+
+#define ISP_MBOX_IRQ_INTERRUPT 0x00
+#define ISP_MBOX_IRQ_ENABLE    0x04
+#define ISP_MBOX2_IRQ_DOORBELL 0x00
+#define ISP_MBOX2_IRQ_ACK      0x0c
+
+#define ISP_GPIO_0	       0x00
+#define ISP_GPIO_1	       0x04
+#define ISP_GPIO_2	       0x08
+#define ISP_GPIO_3	       0x0c
+#define ISP_GPIO_4	       0x10
+#define ISP_GPIO_5	       0x14
+#define ISP_GPIO_6	       0x18
+#define ISP_GPIO_7	       0x1c
+#define ISP_GPIO_CLOCK_EN      0x20
+
+static inline u32 isp_mbox_read32(struct apple_isp *isp, u32 reg)
+{
+	return readl(isp->mbox + reg);
+}
+
+static inline void isp_mbox_write32(struct apple_isp *isp, u32 reg, u32 val)
+{
+	writel(val, isp->mbox + reg);
+}
+
+static inline void isp_mbox2_write32(struct apple_isp *isp, u32 reg, u32 val)
+{
+	writel(val, isp->mbox2 + reg);
+}
+
+#endif /* __ISP_REGS_H__ */
diff --git a/drivers/media/platform/apple/isp/isp-v4l2.c b/drivers/media/platform/apple/isp/isp-v4l2.c
new file mode 100644
index 00000000000000..0561653ea7becd
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-v4l2.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#include <linux/module.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "isp-cam.h"
+#include "isp-cmd.h"
+#include "isp-iommu.h"
+#include "isp-ipc.h"
+#include "isp-fw.h"
+#include "isp-v4l2.h"
+
+#define ISP_MIN_FRAMES 2
+#define ISP_MAX_PLANES 4
+#define ISP_MAX_PIX_FORMATS 2
+#define ISP_BUFFER_TIMEOUT msecs_to_jiffies(1500)
+#define ISP_STRIDE_ALIGNMENT 64
+
+static bool multiplanar = false;
+module_param(multiplanar, bool, 0644);
+MODULE_PARM_DESC(multiplanar, "Enable multiplanar API");
+
+struct isp_buflist_buffer {
+	u64 iovas[ISP_MAX_PLANES];
+	u32 flags[ISP_MAX_PLANES];
+	u32 num_planes;
+	u32 pool_type;
+	u32 tag;
+	u32 pad;
+} __packed;
+static_assert(sizeof(struct isp_buflist_buffer) == 0x40);
+
+struct isp_buflist {
+	u64 type;
+	u64 num_buffers;
+	struct isp_buflist_buffer buffers[];
+};
+
+int ipc_bt_handle(struct apple_isp *isp, struct isp_channel *chan)
+{
+	struct isp_message *req = &chan->req, *rsp = &chan->rsp;
+	struct isp_buffer *tmp, *buf;
+	struct isp_buflist *bl;
+	u32 count;
+	int err = 0;
+
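+	/* The firmware returns buffers as an isp_buflist at (arg0, arg1) */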
+	/* printk("H2T: 0x%llx 0x%llx 0x%llx\n", (long long)req->arg0,
+	       (long long)req->arg1, (long long)req->arg2); */
+
+	if (req->arg1 < sizeof(struct isp_buflist)) {
+		dev_err(isp->dev, "%s: Bad length 0x%llx\n", chan->name,
+			req->arg1);
+		return -EIO;
+	}
+
+	bl = apple_isp_translate(isp, isp->bt_surf, req->arg0, req->arg1);
+	if (!bl) {
+		dev_err(isp->dev, "%s: failed to translate iova 0x%llx\n",
+			chan->name, req->arg0);
+		return -EIO;
+	}
+
+	count = bl->num_buffers;
+	if (count > (req->arg1 - sizeof(struct isp_buflist)) /
+			    sizeof(struct isp_buflist_buffer)) {
+		dev_err(isp->dev, "%s: Bad length 0x%llx\n", chan->name,
+			req->arg1);
+		return -EIO;
+	}
+
+	spin_lock(&isp->buf_lock);
+	for (int i = 0; i < count; i++) {
+		struct isp_buflist_buffer *bufd = &bl->buffers[i];
+
+		/* printk("Return: 0x%llx (%d)\n", bufd->iovas[0],
+		       bufd->pool_type); */
+
+		if (bufd->pool_type == 0) {
+			for (int j = 0; j < ARRAY_SIZE(isp->meta_surfs); j++) {
+				struct isp_surf *meta = isp->meta_surfs[j];
+				if ((u32)bufd->iovas[0] == (u32)meta->iova) {
+					WARN_ON(!meta->submitted);
+					meta->submitted = false;
+				}
+			}
+		} else {
+			list_for_each_entry_safe_reverse(
+				buf, tmp, &isp->bufs_submitted, link) {
+				if ((u32)buf->surfs[0].iova ==
+				    (u32)bufd->iovas[0]) {
+					enum vb2_buffer_state state =
+						VB2_BUF_STATE_ERROR;
+
+					buf->vb.vb2_buf.timestamp =
+						ktime_get_ns();
+					buf->vb.sequence = isp->sequence++;
+					buf->vb.field = V4L2_FIELD_NONE;
+					if (req->arg2 ==
+					    ISP_IPC_BUFEXC_FLAG_RENDER)
+						state = VB2_BUF_STATE_DONE;
+					vb2_buffer_done(&buf->vb.vb2_buf,
+							state);
+					list_del(&buf->link);
+				}
+			}
+		}
+	}
+	spin_unlock(&isp->buf_lock);
+
+	rsp->arg0 = req->arg0 | ISP_IPC_FLAG_ACK;
+	rsp->arg1 = 0x0;
+	rsp->arg2 = ISP_IPC_BUFEXC_FLAG_ACK;
+
+	return err;
+}
+
+static int isp_submit_buffers(struct apple_isp *isp)
+{
+	struct isp_format *fmt = isp_get_current_format(isp);
+	struct isp_channel *chan = isp->chan_bh;
+	struct isp_message *req = &chan->req;
+	struct isp_buffer *buf, *tmp;
+	unsigned long flags;
+	size_t offset;
+	int err;
+
+	struct isp_buflist *bl = isp->cmd_virt;
+	struct isp_buflist_buffer *bufd = &bl->buffers[0];
+
+	bl->type = 1;
+	bl->num_buffers = 0;
+
+	spin_lock_irqsave(&isp->buf_lock, flags);
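+	/* First re-queue any metadata surfaces the firmware has handed back */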
+	for (int i = 0; i < ARRAY_SIZE(isp->meta_surfs); i++) {
+		struct isp_surf *meta = isp->meta_surfs[i];
+
+		if (meta->submitted)
+			continue;
+
+		/* printk("Submit: 0x%llx .. 0x%llx (meta)\n", meta->iova,
+		       meta->iova + meta->size); */
+
+		memset(bufd, 0, sizeof(*bufd));
+		bufd->num_planes = 1;
+		bufd->pool_type = 0;
+		bufd->iovas[0] = meta->iova;
+		bufd->flags[0] = 0x40000000;
+		bufd++;
+		bl->num_buffers++;
+
+		meta->submitted = true;
+	}
+
+	while ((buf = list_first_entry_or_null(&isp->bufs_pending,
+					       struct isp_buffer, link))) {
+		memset(bufd, 0, sizeof(*bufd));
+
+		bufd->num_planes = fmt->num_planes;
+		bufd->pool_type = isp->hw->scl1 ? CISP_POOL_TYPE_RENDERED_SCL1 :
+						  CISP_POOL_TYPE_RENDERED;
+		offset = 0;
+		for (int j = 0; j < fmt->num_planes; j++) {
+			bufd->iovas[j] = buf->surfs[0].iova + offset;
+			bufd->flags[j] = 0x40000000;
+			offset += fmt->plane_size[j];
+		}
+
+		/* printk("Submit: 0x%llx .. 0x%llx (render)\n",
+		       buf->surfs[0].iova,
+		       buf->surfs[0].iova + buf->surfs[0].size); */
+		bufd++;
+		bl->num_buffers++;
+
+		/*
+		 * Queue the buffer as submitted and release the lock for now.
+		 * We need to do this before actually submitting to avoid a
+		 * race with the buffer return codepath.
+		 */
+		list_move_tail(&buf->link, &isp->bufs_submitted);
+	}
+
+	spin_unlock_irqrestore(&isp->buf_lock, flags);
+
+	req->arg0 = isp->cmd_iova;
+	req->arg1 = max_t(u64, ISP_IPC_BUFEXC_STAT_SIZE,
+			  ((uintptr_t)bufd - (uintptr_t)bl));
+	req->arg2 = ISP_IPC_BUFEXC_FLAG_COMMAND;
+
+	err = ipc_chan_send(isp, chan, ISP_BUFFER_TIMEOUT);
+	if (err) {
+		/* If we fail, consider the buffers not submitted. */
+		dev_err(isp->dev,
+			"%s: failed to send bufs: [0x%llx, 0x%llx, 0x%llx]\n",
+			chan->name, req->arg0, req->arg1, req->arg2);
+
+		/*
+		 * Try to find the buffer in the list, and if it's
+		 * still there, move it back to the pending list.
+		 */
+		spin_lock_irqsave(&isp->buf_lock, flags);
+
+		bufd = &bl->buffers[0];
+		for (int i = 0; i < bl->num_buffers; i++, bufd++) {
+			list_for_each_entry_safe_reverse(
+				buf, tmp, &isp->bufs_submitted, link) {
+				if (bufd->iovas[0] == buf->surfs[0].iova) {
+					list_move_tail(&buf->link,
+						       &isp->bufs_pending);
+				}
+			}
+			for (int j = 0; j < ARRAY_SIZE(isp->meta_surfs); j++) {
+				struct isp_surf *meta = isp->meta_surfs[j];
+				if (bufd->iovas[0] == meta->iova) {
+					meta->submitted = false;
+				}
+			}
+		}
+
+		spin_unlock_irqrestore(&isp->buf_lock, flags);
+	}
+
+	return err;
+}
+
+/*
+ * Videobuf2 section
+ */
+static int isp_vb2_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+			       unsigned int *num_planes, unsigned int sizes[],
+			       struct device *alloc_devs[])
+{
+	struct apple_isp *isp = vb2_get_drv_priv(vq);
+	struct isp_format *fmt = isp_get_current_format(isp);
+
+	/* This is not strictly necessary but makes it easy to enforce that
+	 * at most 16 buffers are submitted at once. ISP on t6001 (FW 12.3)
+	 * times out if more buffers are submitted than set in the buffer pool
+	 * config before streaming is started.
+	 */
+	*nbuffers = min_t(unsigned int, *nbuffers, ISP_MAX_BUFFERS);
+
+	if (*num_planes) {
+		if (sizes[0] < fmt->total_size)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	*num_planes = 1;
+	sizes[0] = fmt->total_size;
+
+	return 0;
+}
+
+static void __isp_vb2_buf_cleanup(struct vb2_buffer *vb, unsigned int i)
+{
+	struct apple_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+	struct isp_buffer *buf =
+		container_of(vb, struct isp_buffer, vb.vb2_buf);
+
+	while (i--)
+		apple_isp_iommu_unmap_sgt(isp, &buf->surfs[i]);
+}
+
+static void isp_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+	__isp_vb2_buf_cleanup(vb, vb->num_planes);
+}
+
+static int isp_vb2_buf_init(struct vb2_buffer *vb)
+{
+	struct apple_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+	struct isp_buffer *buf =
+		container_of(vb, struct isp_buffer, vb.vb2_buf);
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < vb->num_planes; i++) {
+		struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, i);
+		err = apple_isp_iommu_map_sgt(isp, &buf->surfs[i], sgt,
+					      vb2_plane_size(vb, i));
+		if (err)
+			goto cleanup;
+	}
+
+	return 0;
+
+cleanup:
+	__isp_vb2_buf_cleanup(vb, i);
+	return err;
+}
+
+static int isp_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+	struct apple_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+	struct isp_format *fmt = isp_get_current_format(isp);
+
+	if (vb2_plane_size(vb, 0) < fmt->total_size)
+		return -EINVAL;
+
+	vb2_set_plane_payload(vb, 0, fmt->total_size);
+
+	return 0;
+}
+
+static void isp_vb2_release_buffers(struct apple_isp *isp,
+				    enum vb2_buffer_state state)
+{
+	struct isp_buffer *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&isp->buf_lock, flags);
+	list_for_each_entry(buf, &isp->bufs_submitted, link)
+		vb2_buffer_done(&buf->vb.vb2_buf, state);
+	INIT_LIST_HEAD(&isp->bufs_submitted);
+	list_for_each_entry(buf, &isp->bufs_pending, link)
+		vb2_buffer_done(&buf->vb.vb2_buf, state);
+	INIT_LIST_HEAD(&isp->bufs_pending);
+	spin_unlock_irqrestore(&isp->buf_lock, flags);
+}
+
+static void isp_vb2_buf_queue(struct vb2_buffer *vb)
+{
+	struct apple_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
+	struct isp_buffer *buf =
+		container_of(vb, struct isp_buffer, vb.vb2_buf);
+	unsigned long flags;
+	bool empty;
+
+	spin_lock_irqsave(&isp->buf_lock, flags);
+	empty = list_empty(&isp->bufs_pending) &&
+		list_empty(&isp->bufs_submitted);
+	list_add_tail(&buf->link, &isp->bufs_pending);
+	spin_unlock_irqrestore(&isp->buf_lock, flags);
+
+	if (test_bit(ISP_STATE_STREAMING, &isp->state) && !empty)
+		isp_submit_buffers(isp);
+}
+
+static int apple_isp_start_streaming(struct apple_isp *isp)
+{
+	int err;
+
+	err = apple_isp_start_camera(isp);
+	if (err) {
+		dev_err(isp->dev, "failed to start camera: %d\n", err);
+		goto release_buffers;
+	}
+
+	err = isp_submit_buffers(isp);
+	if (err) {
+		dev_err(isp->dev, "failed to send initial batch: %d\n", err);
+		goto stop_camera;
+	}
+
+	err = apple_isp_start_capture(isp);
+	if (err) {
+		dev_err(isp->dev, "failed to start capture: %d\n", err);
+		goto stop_camera;
+	}
+
+	set_bit(ISP_STATE_STREAMING, &isp->state);
+
+	return 0;
+
+stop_camera:
+	apple_isp_stop_camera(isp);
+release_buffers:
+	isp_vb2_release_buffers(isp, VB2_BUF_STATE_QUEUED);
+	return err;
+}
+
+static void apple_isp_stop_streaming(struct apple_isp *isp)
+{
+	clear_bit(ISP_STATE_STREAMING, &isp->state);
+	apple_isp_stop_capture(isp);
+	apple_isp_stop_camera(isp);
+}
+
+static int isp_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct apple_isp *isp = vb2_get_drv_priv(q);
+
+	isp->sequence = 0;
+
+	return apple_isp_start_streaming(isp);
+}
+
+static void isp_vb2_stop_streaming(struct vb2_queue *q)
+{
+	struct apple_isp *isp = vb2_get_drv_priv(q);
+
+	apple_isp_stop_streaming(isp);
+	isp_vb2_release_buffers(isp, VB2_BUF_STATE_ERROR);
+}
+
+int apple_isp_video_suspend(struct apple_isp *isp)
+{
+	/* Swap into STATE_SLEEPING as isp_vb2_buf_queue() submits on
+	 * STATE_STREAMING.
+	 */
+	if (test_bit(ISP_STATE_STREAMING, &isp->state)) {
+		/* Signal buffers to be recycled for clean shutdown */
+		isp_vb2_release_buffers(isp, VB2_BUF_STATE_QUEUED);
+		apple_isp_stop_streaming(isp);
+		set_bit(ISP_STATE_SLEEPING, &isp->state);
+	}
+
+	return 0;
+}
+
+int apple_isp_video_resume(struct apple_isp *isp)
+{
+	if (test_bit(ISP_STATE_SLEEPING, &isp->state)) {
+		clear_bit(ISP_STATE_SLEEPING, &isp->state);
+		apple_isp_start_streaming(isp);
+	}
+
+	return 0;
+}
+
+static const struct vb2_ops isp_vb2_ops = {
+	.queue_setup = isp_vb2_queue_setup,
+	.buf_init = isp_vb2_buf_init,
+	.buf_cleanup = isp_vb2_buf_cleanup,
+	.buf_prepare = isp_vb2_buf_prepare,
+	.buf_queue = isp_vb2_buf_queue,
+	.start_streaming = isp_vb2_start_streaming,
+	.stop_streaming = isp_vb2_stop_streaming,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+};
+
+static int isp_set_preset(struct apple_isp *isp, struct isp_format *fmt,
+			  struct isp_preset *preset)
+{
+	int i;
+	size_t total_size;
+
+	fmt->preset = preset;
+
+	/* Assume NV12 output; all presets are expected to use it. */
+	fmt->num_planes = 2;
+	fmt->strides[0] = ALIGN(preset->output_dim.x, ISP_STRIDE_ALIGNMENT);
+	/* UV subsampled interleaved */
+	fmt->strides[1] = ALIGN(preset->output_dim.x, ISP_STRIDE_ALIGNMENT);
+	fmt->plane_size[0] = fmt->strides[0] * preset->output_dim.y;
+	fmt->plane_size[1] = fmt->strides[1] * preset->output_dim.y / 2;
+
+	total_size = 0;
+	for (i = 0; i < fmt->num_planes; i++)
+		total_size += fmt->plane_size[i];
+	fmt->total_size = total_size;
+
+	return 0;
+}
+
+static struct isp_preset *isp_select_preset(struct apple_isp *isp, u32 width,
+					    u32 height)
+{
+	struct isp_preset *preset, *best = &isp->presets[0];
+	int i, score, best_score = INT_MAX;
+
+	/* Default if no dimensions */
+	if (width == 0 || height == 0)
+		return &isp->presets[0];
+
+	for (i = 0; i < isp->num_presets; i++) {
+		preset = &isp->presets[i];
+		score = abs((int)preset->output_dim.x - (int)width) +
+			abs((int)preset->output_dim.y - (int)height);
+		if (score < best_score) {
+			best = preset;
+			best_score = score;
+		}
+	}
+
+	return best;
+}
+
+/*
+ * V4L2 ioctl section
+ */
+static int isp_vidioc_querycap(struct file *file, void *priv,
+			       struct v4l2_capability *cap)
+{
+	strscpy(cap->card, APPLE_ISP_CARD_NAME, sizeof(cap->card));
+	strscpy(cap->driver, APPLE_ISP_DEVICE_NAME, sizeof(cap->driver));
+
+	return 0;
+}
+
+static int isp_vidioc_enum_format(struct file *file, void *fh,
+				  struct v4l2_fmtdesc *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+
+	if (f->index >= ISP_MAX_PIX_FORMATS)
+		return -EINVAL;
+
+	switch (f->index) {
+	case 0:
+		f->pixelformat = V4L2_PIX_FMT_NV12;
+		break;
+	case 1:
+		if (!isp->multiplanar)
+			return -EINVAL;
+		f->pixelformat = V4L2_PIX_FMT_NV12M;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int isp_vidioc_enum_framesizes(struct file *file, void *fh,
+				      struct v4l2_frmsizeenum *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+
+	if (f->index >= isp->num_presets)
+		return -EINVAL;
+
+	if ((f->pixel_format != V4L2_PIX_FMT_NV12) &&
+	    (f->pixel_format != V4L2_PIX_FMT_NV12M))
+		return -EINVAL;
+
+	f->discrete.width = isp->presets[f->index].output_dim.x;
+	f->discrete.height = isp->presets[f->index].output_dim.y;
+	f->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+
+	return 0;
+}
+
+static int isp_vidioc_enum_frameintervals(struct file *filp, void *priv,
+					  struct v4l2_frmivalenum *interval)
+{
+	if (interval->index != 0)
+		return -EINVAL;
+
+	interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+	interval->discrete.numerator = 1;
+	interval->discrete.denominator = 30;
+	return 0;
+}
+
+static inline void isp_get_sp_pix_format(struct apple_isp *isp,
+					 struct v4l2_format *f,
+					 struct isp_format *fmt)
+{
+	f->fmt.pix.width = fmt->preset->output_dim.x;
+	f->fmt.pix.height = fmt->preset->output_dim.y;
+	f->fmt.pix.bytesperline = fmt->strides[0];
+	f->fmt.pix.sizeimage = fmt->total_size;
+
+	f->fmt.pix.field = V4L2_FIELD_NONE;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+	f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
+	f->fmt.pix.xfer_func = V4L2_XFER_FUNC_709;
+}
+
+static inline void isp_get_mp_pix_format(struct apple_isp *isp,
+					 struct v4l2_format *f,
+					 struct isp_format *fmt)
+{
+	f->fmt.pix_mp.width = fmt->preset->output_dim.x;
+	f->fmt.pix_mp.height = fmt->preset->output_dim.y;
+	f->fmt.pix_mp.num_planes = fmt->num_planes;
+	for (int i = 0; i < fmt->num_planes; i++) {
+		f->fmt.pix_mp.plane_fmt[i].sizeimage = fmt->plane_size[i];
+		f->fmt.pix_mp.plane_fmt[i].bytesperline = fmt->strides[i];
+	}
+
+	f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+	f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+	f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+	f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_709;
+	f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_709;
+}
+
+static int isp_vidioc_get_format(struct file *file, void *fh,
+				 struct v4l2_format *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+	struct isp_format *fmt = isp_get_current_format(isp);
+
+	isp_get_sp_pix_format(isp, f, fmt);
+
+	return 0;
+}
+
+static int isp_vidioc_set_format(struct file *file, void *fh,
+				 struct v4l2_format *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+	struct isp_format *fmt = isp_get_current_format(isp);
+	struct isp_preset *preset;
+	int err;
+
+	preset = isp_select_preset(isp, f->fmt.pix.width, f->fmt.pix.height);
+	err = isp_set_preset(isp, fmt, preset);
+	if (err)
+		return err;
+
+	isp_get_sp_pix_format(isp, f, fmt);
+
+	isp->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	return 0;
+}
+
+static int isp_vidioc_try_format(struct file *file, void *fh,
+				 struct v4l2_format *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+	struct isp_format fmt = *isp_get_current_format(isp);
+	struct isp_preset *preset;
+	int err;
+
+	preset = isp_select_preset(isp, f->fmt.pix.width, f->fmt.pix.height);
+	err = isp_set_preset(isp, &fmt, preset);
+	if (err)
+		return err;
+
+	isp_get_sp_pix_format(isp, f, &fmt);
+
+	return 0;
+}
+
+static int isp_vidioc_get_format_mplane(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+	struct isp_format *fmt = isp_get_current_format(isp);
+
+	if (!isp->multiplanar)
+		return -ENOTTY;
+
+	isp_get_mp_pix_format(isp, f, fmt);
+
+	return 0;
+}
+
+static int isp_vidioc_set_format_mplane(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+	struct isp_format *fmt = isp_get_current_format(isp);
+	struct isp_preset *preset;
+	int err;
+
+	if (!isp->multiplanar)
+		return -ENOTTY;
+
+	preset = isp_select_preset(isp, f->fmt.pix_mp.width,
+				   f->fmt.pix_mp.height);
+	err = isp_set_preset(isp, fmt, preset);
+	if (err)
+		return err;
+
+	isp_get_mp_pix_format(isp, f, fmt);
+
+	isp->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+	return 0;
+}
+
+static int isp_vidioc_try_format_mplane(struct file *file, void *fh,
+					struct v4l2_format *f)
+{
+	struct apple_isp *isp = video_drvdata(file);
+	struct isp_format fmt = *isp_get_current_format(isp);
+	struct isp_preset *preset;
+	int err;
+
+	if (!isp->multiplanar)
+		return -ENOTTY;
+
+	preset = isp_select_preset(isp, f->fmt.pix_mp.width,
+				   f->fmt.pix_mp.height);
+	err = isp_set_preset(isp, &fmt, preset);
+	if (err)
+		return err;
+
+	isp_get_mp_pix_format(isp, f, &fmt);
+
+	return 0;
+}
+
+static int isp_vidioc_enum_input(struct file *file, void *fh,
+				 struct v4l2_input *inp)
+{
+	if (inp->index)
+		return -EINVAL;
+
+	strscpy(inp->name, APPLE_ISP_DEVICE_NAME, sizeof(inp->name));
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+	return 0;
+}
+
+static int isp_vidioc_get_input(struct file *file, void *fh, unsigned int *i)
+{
+	*i = 0;
+
+	return 0;
+}
+
+static int isp_vidioc_set_input(struct file *file, void *fh, unsigned int i)
+{
+	if (i)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int isp_vidioc_get_param(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct apple_isp *isp = video_drvdata(file);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    (!isp->multiplanar ||
+	     a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
+		return -EINVAL;
+
+	a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+	a->parm.capture.readbuffers = ISP_MIN_FRAMES;
+	a->parm.capture.timeperframe.numerator = ISP_FRAME_RATE_NUM;
+	a->parm.capture.timeperframe.denominator = ISP_FRAME_RATE_DEN;
+
+	return 0;
+}
+
+static int isp_vidioc_set_param(struct file *file, void *fh,
+				struct v4l2_streamparm *a)
+{
+	struct apple_isp *isp = video_drvdata(file);
+
+	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    (!isp->multiplanar ||
+	     a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
+		return -EINVAL;
+
+	/* Setting the frame rate is not supported; report the fixed rate. */
+	a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+	a->parm.capture.readbuffers = ISP_MIN_FRAMES;
+	a->parm.capture.timeperframe.numerator = ISP_FRAME_RATE_NUM;
+	a->parm.capture.timeperframe.denominator = ISP_FRAME_RATE_DEN;
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops isp_v4l2_ioctl_ops = {
+	.vidioc_querycap = isp_vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap = isp_vidioc_enum_format,
+	.vidioc_g_fmt_vid_cap = isp_vidioc_get_format,
+	.vidioc_s_fmt_vid_cap = isp_vidioc_set_format,
+	.vidioc_try_fmt_vid_cap = isp_vidioc_try_format,
+	.vidioc_g_fmt_vid_cap_mplane = isp_vidioc_get_format_mplane,
+	.vidioc_s_fmt_vid_cap_mplane = isp_vidioc_set_format_mplane,
+	.vidioc_try_fmt_vid_cap_mplane = isp_vidioc_try_format_mplane,
+
+	.vidioc_enum_framesizes = isp_vidioc_enum_framesizes,
+	.vidioc_enum_frameintervals = isp_vidioc_enum_frameintervals,
+	.vidioc_enum_input = isp_vidioc_enum_input,
+	.vidioc_g_input = isp_vidioc_get_input,
+	.vidioc_s_input = isp_vidioc_set_input,
+	.vidioc_g_parm = isp_vidioc_get_param,
+	.vidioc_s_parm = isp_vidioc_set_param,
+
+	.vidioc_reqbufs = vb2_ioctl_reqbufs,
+	.vidioc_querybuf = vb2_ioctl_querybuf,
+	.vidioc_create_bufs = vb2_ioctl_create_bufs,
+	.vidioc_qbuf = vb2_ioctl_qbuf,
+	.vidioc_expbuf = vb2_ioctl_expbuf,
+	.vidioc_dqbuf = vb2_ioctl_dqbuf,
+	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+	.vidioc_streamon = vb2_ioctl_streamon,
+	.vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations isp_v4l2_fops = {
+	.owner = THIS_MODULE,
+	.open = v4l2_fh_open,
+	.release = vb2_fop_release,
+	.read = vb2_fop_read,
+	.poll = vb2_fop_poll,
+	.mmap = vb2_fop_mmap,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+static const struct media_device_ops isp_media_device_ops = {
+	.link_notify = v4l2_pipeline_link_notify,
+};
+
+int apple_isp_setup_video(struct apple_isp *isp)
+{
+	struct video_device *vdev = &isp->vdev;
+	struct vb2_queue *vbq = &isp->vbq;
+	struct isp_format *fmt = isp_get_current_format(isp);
+	int err;
+
+	err = isp_set_preset(isp, fmt, &isp->presets[0]);
+	if (err) {
+		dev_err(isp->dev, "failed to set default preset: %d\n", err);
+		return err;
+	}
+
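+	/* Metadata surfaces are cycled to the firmware along with render buffers */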
+	for (int i = 0; i < ARRAY_SIZE(isp->meta_surfs); i++) {
+		isp->meta_surfs[i] =
+			isp_alloc_surface_vmap(isp, isp->hw->meta_size);
+		if (!isp->meta_surfs[i]) {
+			isp_err(isp, "failed to alloc meta surface\n");
+			err = -ENOMEM;
+			goto surf_cleanup;
+		}
+	}
+
+	media_device_init(&isp->mdev);
+	isp->v4l2_dev.mdev = &isp->mdev;
+	isp->mdev.ops = &isp_media_device_ops;
+	isp->mdev.dev = isp->dev;
+	strscpy(isp->mdev.model, APPLE_ISP_DEVICE_NAME,
+		sizeof(isp->mdev.model));
+
+	err = media_device_register(&isp->mdev);
+	if (err) {
+		dev_err(isp->dev, "failed to register media device: %d\n", err);
+		goto media_cleanup;
+	}
+
+	isp->multiplanar = multiplanar;
+
+	err = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+	if (err) {
+		dev_err(isp->dev, "failed to register v4l2 device: %d\n", err);
+		goto media_unregister;
+	}
+
+	vbq->drv_priv = isp;
+	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	vbq->io_modes = VB2_MMAP;
+	vbq->dev = isp->dev;
+	vbq->ops = &isp_vb2_ops;
+	vbq->mem_ops = &vb2_dma_sg_memops;
+	vbq->buf_struct_size = sizeof(struct isp_buffer);
+	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	vbq->min_queued_buffers = ISP_MIN_FRAMES;
+	vbq->lock = &isp->video_lock;
+
+	err = vb2_queue_init(vbq);
+	if (err) {
+		dev_err(isp->dev, "failed to init vb2 queue: %d\n", err);
+		goto v4l2_unregister;
+	}
+
+	vdev->queue = vbq;
+	vdev->fops = &isp_v4l2_fops;
+	vdev->ioctl_ops = &isp_v4l2_ioctl_ops;
+	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	if (isp->multiplanar)
+		vdev->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+	vdev->v4l2_dev = &isp->v4l2_dev;
+	vdev->vfl_type = VFL_TYPE_VIDEO;
+	vdev->vfl_dir = VFL_DIR_RX;
+	vdev->release = video_device_release_empty;
+	vdev->lock = &isp->video_lock;
+	strscpy(vdev->name, APPLE_ISP_DEVICE_NAME, sizeof(vdev->name));
+	video_set_drvdata(vdev, isp);
+
+	err = video_register_device(vdev, VFL_TYPE_VIDEO, 0);
+	if (err) {
+		dev_err(isp->dev, "failed to register video device: %d\n", err);
+		goto v4l2_unregister;
+	}
+
+	return 0;
+
+v4l2_unregister:
+	v4l2_device_unregister(&isp->v4l2_dev);
+media_unregister:
+	media_device_unregister(&isp->mdev);
+media_cleanup:
+	media_device_cleanup(&isp->mdev);
+surf_cleanup:
+	for (int i = 0; i < ARRAY_SIZE(isp->meta_surfs); i++) {
+		if (isp->meta_surfs[i])
+			isp_free_surface(isp, isp->meta_surfs[i]);
+		isp->meta_surfs[i] = NULL;
+	}
+
+	return err;
+}
+
+void apple_isp_remove_video(struct apple_isp *isp)
+{
+	vb2_video_unregister_device(&isp->vdev);
+	v4l2_device_unregister(&isp->v4l2_dev);
+	media_device_unregister(&isp->mdev);
+	media_device_cleanup(&isp->mdev);
+	for (int i = 0; i < ARRAY_SIZE(isp->meta_surfs); i++) {
+		if (isp->meta_surfs[i])
+			isp_free_surface(isp, isp->meta_surfs[i]);
+		isp->meta_surfs[i] = NULL;
+	}
+}
diff --git a/drivers/media/platform/apple/isp/isp-v4l2.h b/drivers/media/platform/apple/isp/isp-v4l2.h
new file mode 100644
index 00000000000000..4d47deeb83b055
--- /dev/null
+++ b/drivers/media/platform/apple/isp/isp-v4l2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2023 Eileen Yoon <eyn@gmx.com> */
+
+#ifndef __ISP_V4L2_H__
+#define __ISP_V4L2_H__
+
+#include "isp-drv.h"
+
+int apple_isp_setup_video(struct apple_isp *isp);
+void apple_isp_remove_video(struct apple_isp *isp);
+int ipc_bt_handle(struct apple_isp *isp, struct isp_channel *chan);
+
+int apple_isp_video_suspend(struct apple_isp *isp);
+int apple_isp_video_resume(struct apple_isp *isp);
+
+#endif /* __ISP_V4L2_H__ */
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6b0682af6e32b4..3d38df85fbb7ef 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -64,6 +64,21 @@ config MFD_ACT8945A
 	  linear regulators, along with a complete ActivePath battery
 	  charger.
 
+config MFD_APPLE_SPMI_PMU
+	tristate "Apple SPMI PMUs"
+	depends on SPMI
+	depends on ARCH_APPLE || COMPILE_TEST
+	default ARCH_APPLE
+	select MFD_SIMPLE_MFD_SPMI
+	help
+	  Say yes here to enable support for Apple PMUs attached via the
+	  SPMI bus. These can be found on Apple devices such as Apple
+	  Silicon Macs.
+
+	  This driver itself only attaches to the core device, and relies
+	  on subsystem drivers for individual device functions. You must
+	  enable those for it to be useful.
+
 config MFD_SUN4I_GPADC
 	tristate "Allwinner sunxi platforms' GPADC MFD driver"
 	select MFD_CORE
@@ -1348,6 +1363,19 @@ config MFD_SIMPLE_MFD_I2C
 	  sub-devices represented by child nodes in Device Tree will be
 	  subsequently registered.
 
+config MFD_SIMPLE_MFD_SPMI
+	tristate
+	depends on SPMI
+	select MFD_CORE
+	select REGMAP_SPMI
+	help
+	  This driver creates a single register map with the intention for it
+	  to be shared by all sub-devices.
+
+	  Once the register map has been successfully initialised, any
+	  sub-devices represented by child nodes in Device Tree will be
+	  subsequently registered.
+
 config MFD_SL28CPLD
 	tristate "Kontron sl28cpld Board Management Controller"
 	depends on I2C
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 9220eaf7cf1255..f579150859b804 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -279,6 +279,7 @@ obj-$(CONFIG_MFD_QCOM_PM8008)	+= qcom-pm8008.o
 
 obj-$(CONFIG_SGI_MFD_IOC3)	+= ioc3.o
 obj-$(CONFIG_MFD_SIMPLE_MFD_I2C)	+= simple-mfd-i2c.o
+obj-$(CONFIG_MFD_SIMPLE_MFD_SPMI)	+= simple-mfd-spmi.o
 obj-$(CONFIG_MFD_SMPRO)		+= smpro-core.o
 
 obj-$(CONFIG_MFD_INTEL_M10_BMC_CORE)   += intel-m10-bmc-core.o
diff --git a/drivers/mfd/simple-mfd-spmi.c b/drivers/mfd/simple-mfd-spmi.c
new file mode 100644
index 00000000000000..8737fc22b932a5
--- /dev/null
+++ b/drivers/mfd/simple-mfd-spmi.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Simple MFD - SPMI
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/of_platform.h>
+
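+/* Extended SPMI register space: 16-bit addresses, 8-bit registers */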
+static const struct regmap_config spmi_regmap_config = {
+	.reg_bits	= 16,
+	.val_bits	= 8,
+	.max_register	= 0xffff,
+};
+
+static int simple_spmi_probe(struct spmi_device *sdev)
+{
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return devm_of_platform_populate(&sdev->dev);
+}
+
+static const struct of_device_id simple_spmi_id_table[] = {
+	{ .compatible = "apple,spmi-pmu" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, simple_spmi_id_table);
+
+static struct spmi_driver pmic_spmi_driver = {
+	.probe = simple_spmi_probe,
+	.driver = {
+		.name = "simple-mfd-spmi",
+		.of_match_table = simple_spmi_id_table,
+	},
+};
+module_spmi_driver(pmic_spmi_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Simple MFD - SPMI driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5241528f8b90ff..26b9b6c9bdfb0e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2277,10 +2277,11 @@ void mmc_rescan(struct work_struct *work)
 	 * while initializing the legacy SD interface. Therefore, let's start
 	 * with UHS-II for now.
 	 */
-	if (!mmc_attach_sd_uhs2(host)) {
-		mmc_release_host(host);
-		goto out;
-	}
+	if ((host->caps2 & MMC_CAP2_SD_UHS2) &&
+	    !mmc_attach_sd_uhs2(host)) {
+		mmc_release_host(host);
+		goto out;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
 		unsigned int freq = freqs[i];
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1f0bd723f01124..769c0d2230f3ce 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -27,6 +27,7 @@
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
+#include <linux/of.h>
 
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -2124,6 +2125,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
 	struct sdhci_host *host;
 	int ret, bar = first_bar + slotno;
 	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
+	u32 cd_debounce_delay_ms;
 
 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
@@ -2190,6 +2192,10 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
 	if (host->mmc->caps & MMC_CAP_CD_WAKE)
 		device_init_wakeup(&pdev->dev, true);
 
+	if (device_property_read_u32(&pdev->dev, "cd-debounce-delay-ms",
+				     &cd_debounce_delay_ms))
+		cd_debounce_delay_ms = 200;
+
 	if (slot->cd_idx >= 0) {
 		struct gpiod_lookup_table *cd_gpio_lookup_table;
 
@@ -2208,7 +2214,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
 			ret = mmc_gpiod_request_cd(host->mmc, NULL,
 						   slot->cd_idx,
 						   slot->cd_override_level,
-						   0);
+						   cd_debounce_delay_ms * 1000);
 		if (ret == -EPROBE_DEFER)
 			goto remove;
 
@@ -2216,6 +2222,16 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
 			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
 			slot->cd_idx = -1;
 		}
+	} else if (is_of_node(pdev->dev.fwnode)) {
+		/* Allow all OF systems to use a CD GPIO if provided */
+
+		ret = mmc_gpiod_request_cd(host->mmc, "cd", 0,
+					   slot->cd_override_level,
+					   cd_debounce_delay_ms * 1000);
+		if (ret == -EPROBE_DEFER)
+			goto remove;
+		else if (ret == 0)
+			slot->cd_idx = 0;
 	}
 
 	if (chip->fixes && chip->fixes->add_host)
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 4c2ae71770f782..366c61b6bd7a17 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -1966,7 +1966,8 @@ static const struct sdhci_ops sdhci_gl9755_ops = {
 
 const struct sdhci_pci_fixes sdhci_gl9755 = {
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
-	.quirks2	= SDHCI_QUIRK2_BROKEN_DDR50,
+	/* Disable non-working UHS-II mode on Apple Silicon devices */
+	.quirks2	= SDHCI_QUIRK2_BROKEN_DDR50 | SDHCI_QUIRK2_BROKEN_UHS2,
 	.probe_slot	= gli_probe_slot_gl9755,
 	.add_host	= sdhci_pci_uhs2_add_host,
 	.remove_host	= sdhci_pci_uhs2_remove_host,
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
index c53b64d50c0de5..76e9156cc0aef7 100644
--- a/drivers/mmc/host/sdhci-uhs2.c
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -1162,7 +1162,8 @@ static void __sdhci_uhs2_add_host_v4(struct sdhci_host *host, u32 caps1)
 	mmc = host->mmc;
 
 	/* Support UHS2 */
-	if (caps1 & SDHCI_SUPPORT_UHS2)
+	if ((caps1 & SDHCI_SUPPORT_UHS2) &&
+	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_UHS2))
 		mmc->caps2 |= MMC_CAP2_SD_UHS2;
 
 	max_current_caps2 = sdhci_readl(host, SDHCI_MAX_CURRENT_1);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index cd0e35a805427c..ba0fd121a1997e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -537,6 +537,8 @@ struct sdhci_host {
 /* Issue CMD and DATA reset together */
 #define SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER	(1<<19)
 
+#define SDHCI_QUIRK2_BROKEN_UHS2			(1<<27)
+
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 	phys_addr_t mapbase;	/* physical address base */
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 80f015cf6e54f6..c0f62ae4c8047f 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -31,6 +31,19 @@ config MUX_ADGS1408
 	  To compile the driver as a module, choose M here: the module will
 	  be called mux-adgs1408.
 
+config MUX_APPLE_DPXBAR
+	tristate "Apple Silicon Display Crossbar"
+	depends on ARCH_APPLE
+	help
+	  Apple Silicon Display Crossbar multiplexer.
+
+	  This driver adds support for the display crossbar used to route
+	  display controller streams to the three different modes
+	  (DP AltMode, USB4 Tunnel #0/#1) of the Type-C ports.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called mux-apple-display-crossbar.
+
 config MUX_GPIO
 	tristate "GPIO-controlled Multiplexer"
 	depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
index 6e9fa47daf5663..7b5b3325068010 100644
--- a/drivers/mux/Makefile
+++ b/drivers/mux/Makefile
@@ -8,9 +8,11 @@ mux-adg792a-objs		:= adg792a.o
 mux-adgs1408-objs		:= adgs1408.o
 mux-gpio-objs			:= gpio.o
 mux-mmio-objs			:= mmio.o
+mux-apple-display-crossbar-objs	:= apple-display-crossbar.o
 
 obj-$(CONFIG_MULTIPLEXER)	+= mux-core.o
 obj-$(CONFIG_MUX_ADG792A)	+= mux-adg792a.o
 obj-$(CONFIG_MUX_ADGS1408)	+= mux-adgs1408.o
+obj-$(CONFIG_MUX_APPLE_DPXBAR)	+= mux-apple-display-crossbar.o
 obj-$(CONFIG_MUX_GPIO)		+= mux-gpio.o
 obj-$(CONFIG_MUX_MMIO)		+= mux-mmio.o
diff --git a/drivers/mux/apple-display-crossbar.c b/drivers/mux/apple-display-crossbar.c
new file mode 100644
index 00000000000000..9b17371d92c3ba
--- /dev/null
+++ b/drivers/mux/apple-display-crossbar.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple Silicon Display Crossbar multiplexer driver
+ *
+ * Copyright (C) Asahi Linux Contributors
+ *
+ * Author: Sven Peter <sven@svenpeter.dev>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+/*
+ * The T602x register interface is clearly different, so most of the names below are
+ * probably wrong.
+ */
+
+#define T602X_FIFO_WR_DPTX_CLK_EN 0x000
+#define T602X_FIFO_WR_N_CLK_EN 0x004
+#define T602X_FIFO_WR_UNK_EN 0x008
+#define T602X_REG_00C 0x00c
+#define T602X_REG_014 0x014
+#define T602X_REG_018 0x018
+#define T602X_REG_01C 0x01c
+#define T602X_FIFO_RD_PCLK2_EN 0x024
+#define T602X_FIFO_RD_N_CLK_EN 0x028
+#define T602X_FIFO_RD_UNK_EN 0x02c
+#define T602X_REG_030 0x030
+#define T602X_REG_034 0x034
+
+#define T602X_REG_804_STAT 0x804 // status of 0x004
+#define T602X_REG_810_STAT 0x810 // status of 0x014
+#define T602X_REG_81C_STAT 0x81c // status of 0x024
+
+/*
+ * T8013, T600x, T8112 dp crossbar registers.
+ */
+
+#define FIFO_WR_DPTX_CLK_EN 0x000
+#define FIFO_WR_N_CLK_EN 0x004
+#define FIFO_WR_UNK_EN 0x008
+#define FIFO_RD_PCLK1_EN 0x020
+#define FIFO_RD_PCLK2_EN 0x024
+#define FIFO_RD_N_CLK_EN 0x028
+#define FIFO_RD_UNK_EN 0x02c
+
+#define OUT_PCLK1_EN 0x040
+#define OUT_PCLK2_EN 0x044
+#define OUT_N_CLK_EN 0x048
+#define OUT_UNK_EN 0x04c
+
+#define CROSSBAR_DISPEXT_EN 0x050
+#define CROSSBAR_MUX_CTRL 0x060
+#define CROSSBAR_MUX_CTRL_DPPHY_SELECT0 GENMASK(23, 20)
+#define CROSSBAR_MUX_CTRL_DPIN1_SELECT0 GENMASK(19, 16)
+#define CROSSBAR_MUX_CTRL_DPIN0_SELECT0 GENMASK(15, 12)
+#define CROSSBAR_MUX_CTRL_DPPHY_SELECT1 GENMASK(11, 8)
+#define CROSSBAR_MUX_CTRL_DPIN1_SELECT1 GENMASK(7, 4)
+#define CROSSBAR_MUX_CTRL_DPIN0_SELECT1 GENMASK(3, 0)
+#define CROSSBAR_ATC_EN 0x070
+
+#define FIFO_WR_DPTX_CLK_EN_STAT 0x800
+#define FIFO_WR_N_CLK_EN_STAT 0x804
+#define FIFO_RD_PCLK1_EN_STAT 0x820
+#define FIFO_RD_PCLK2_EN_STAT 0x824
+#define FIFO_RD_N_CLK_EN_STAT 0x828
+
+#define OUT_PCLK1_EN_STAT 0x840
+#define OUT_PCLK2_EN_STAT 0x844
+#define OUT_N_CLK_EN_STAT 0x848
+
+#define UNK_TUNABLE 0xc00
+
+#define ATC_DPIN0 BIT(0)
+#define ATC_DPIN1 BIT(4)
+#define ATC_DPPHY BIT(8)
+
+enum { MUX_DPPHY = 0, MUX_DPIN0 = 1, MUX_DPIN1 = 2, MUX_MAX = 3 };
+static const char *apple_dpxbar_names[MUX_MAX] = { "dpphy", "dpin0", "dpin1" };
+
+struct apple_dpxbar_hw {
+	unsigned int n_ufp;
+	u32 tunable;
+	const struct mux_control_ops *ops;
+};
+
+struct apple_dpxbar {
+	struct device *dev;
+	void __iomem *regs;
+	int selected_dispext[MUX_MAX];
+	spinlock_t lock;
+};
+
+static inline void dpxbar_mask32(struct apple_dpxbar *xbar, u32 reg, u32 mask,
+				 u32 set)
+{
+	u32 value = readl(xbar->regs + reg);
+	value &= ~mask;
+	value |= set;
+	writel(value, xbar->regs + reg);
+}
+
+static inline void dpxbar_set32(struct apple_dpxbar *xbar, u32 reg, u32 set)
+{
+	dpxbar_mask32(xbar, reg, 0, set);
+}
+
+static inline void dpxbar_clear32(struct apple_dpxbar *xbar, u32 reg, u32 clear)
+{
+	dpxbar_mask32(xbar, reg, clear, 0);
+}
+
+static int apple_dpxbar_set_t602x(struct mux_control *mux, int state)
+{
+	struct apple_dpxbar *dpxbar = mux_chip_priv(mux->chip);
+	unsigned int index = mux_control_get_index(mux);
+	unsigned long flags;
+	unsigned int mux_state;
+	unsigned int dispext_bit;
+	unsigned int dispext_bit_en;
+	bool enable;
+	int ret = 0;
+
+	if (state == MUX_IDLE_DISCONNECT) {
+		/*
+		 * Technically this will select dispext0,0 in the mux control
+		 * register. Practically that doesn't matter since everything
+		 * else is disabled.
+		 */
+		mux_state = 0;
+		enable = false;
+	} else if (state >= 0 && state < 9) {
+		dispext_bit = 1 << state;
+		dispext_bit_en = 1 << (2 * state);
+		mux_state = state;
+		enable = true;
+	} else {
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dpxbar->lock, flags);
+
+	/* ensure the selected dispext isn't already used in this crossbar */
+	if (enable) {
+		for (int i = 0; i < MUX_MAX; ++i) {
+			if (i == index)
+				continue;
+			if (dpxbar->selected_dispext[i] == state) {
+				spin_unlock_irqrestore(&dpxbar->lock, flags);
+				return -EBUSY;
+			}
+		}
+	}
+
+	if (dpxbar->selected_dispext[index] >= 0) {
+		u32 prev_dispext_bit = 1 << dpxbar->selected_dispext[index];
+		u32 prev_dispext_bit_en = 1 << (2 * dpxbar->selected_dispext[index]);
+
+		dpxbar_clear32(dpxbar, T602X_FIFO_RD_UNK_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, T602X_FIFO_WR_DPTX_CLK_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, T602X_REG_00C, prev_dispext_bit_en);
+
+		dpxbar_clear32(dpxbar, T602X_REG_01C, 0x100);
+
+		dpxbar_clear32(dpxbar, T602X_FIFO_WR_UNK_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, T602X_REG_018, prev_dispext_bit_en);
+
+		dpxbar_clear32(dpxbar, T602X_FIFO_RD_N_CLK_EN, 0x100);
+
+		dpxbar_set32(dpxbar, T602X_FIFO_WR_N_CLK_EN, prev_dispext_bit);
+		dpxbar_set32(dpxbar, T602X_REG_014, 0x4);
+
+		dpxbar_set32(dpxbar, FIFO_RD_PCLK1_EN, 0x100);
+
+		dpxbar->selected_dispext[index] = -1;
+	}
+
+	if (enable) {
+		dpxbar_set32(dpxbar, T602X_REG_030, state << 20);
+		dpxbar_set32(dpxbar, T602X_REG_030, state << 8);
+		udelay(10);
+
+		dpxbar_clear32(dpxbar, T602X_FIFO_WR_N_CLK_EN, dispext_bit);
+		dpxbar_clear32(dpxbar, T602X_REG_014, 0x4);
+
+		dpxbar_clear32(dpxbar, T602X_FIFO_RD_PCLK2_EN, 0x100);
+
+		dpxbar_set32(dpxbar, T602X_FIFO_WR_UNK_EN, dispext_bit);
+		dpxbar_set32(dpxbar, T602X_REG_018, dispext_bit_en);
+
+		dpxbar_set32(dpxbar, T602X_FIFO_RD_N_CLK_EN, 0x100);
+		dpxbar_set32(dpxbar, T602X_FIFO_WR_DPTX_CLK_EN, dispext_bit);
+		dpxbar_set32(dpxbar, T602X_REG_00C, dispext_bit);
+
+		dpxbar_set32(dpxbar, T602X_REG_01C, 0x100);
+		dpxbar_set32(dpxbar, T602X_REG_034, 0x100);
+
+		dpxbar_set32(dpxbar, T602X_FIFO_RD_UNK_EN, dispext_bit);
+
+		dpxbar->selected_dispext[index] = state;
+	}
+
+	spin_unlock_irqrestore(&dpxbar->lock, flags);
+
+	if (enable)
+		dev_info(dpxbar->dev, "Switched %s to dispext%u,%u\n",
+			 apple_dpxbar_names[index], mux_state >> 1,
+			 mux_state & 1);
+	else
+		dev_info(dpxbar->dev, "Switched %s to disconnected state\n",
+			 apple_dpxbar_names[index]);
+
+	return ret;
+}
+
+static int apple_dpxbar_set(struct mux_control *mux, int state)
+{
+	struct apple_dpxbar *dpxbar = mux_chip_priv(mux->chip);
+	unsigned int index = mux_control_get_index(mux);
+	unsigned long flags;
+	unsigned int mux_state;
+	unsigned int dispext_bit;
+	unsigned int dispext_bit_en;
+	unsigned int atc_bit;
+	bool enable;
+	int ret = 0;
+	u32 mux_mask, mux_set;
+
+	if (state == MUX_IDLE_DISCONNECT) {
+		/*
+		 * Technically this will select dispext0,0 in the mux control
+		 * register. Practically that doesn't matter since everything
+		 * else is disabled.
+		 */
+		mux_state = 0;
+		enable = false;
+	} else if (state >= 0 && state < 9) {
+		dispext_bit = 1 << state;
+		dispext_bit_en = 1 << (2 * state);
+		mux_state = state;
+		enable = true;
+	} else {
+		return -EINVAL;
+	}
+
+	switch (index) {
+	case MUX_DPPHY:
+		mux_mask = CROSSBAR_MUX_CTRL_DPPHY_SELECT0 |
+			   CROSSBAR_MUX_CTRL_DPPHY_SELECT1;
+		mux_set =
+			FIELD_PREP(CROSSBAR_MUX_CTRL_DPPHY_SELECT0, mux_state) |
+			FIELD_PREP(CROSSBAR_MUX_CTRL_DPPHY_SELECT1, mux_state);
+		atc_bit = ATC_DPPHY;
+		break;
+	case MUX_DPIN0:
+		mux_mask = CROSSBAR_MUX_CTRL_DPIN0_SELECT0 |
+			   CROSSBAR_MUX_CTRL_DPIN0_SELECT1;
+		mux_set =
+			FIELD_PREP(CROSSBAR_MUX_CTRL_DPIN0_SELECT0, mux_state) |
+			FIELD_PREP(CROSSBAR_MUX_CTRL_DPIN0_SELECT1, mux_state);
+		atc_bit = ATC_DPIN0;
+		break;
+	case MUX_DPIN1:
+		mux_mask = CROSSBAR_MUX_CTRL_DPIN1_SELECT0 |
+			   CROSSBAR_MUX_CTRL_DPIN1_SELECT1;
+		mux_set =
+			FIELD_PREP(CROSSBAR_MUX_CTRL_DPIN1_SELECT0, mux_state) |
+			FIELD_PREP(CROSSBAR_MUX_CTRL_DPIN1_SELECT1, mux_state);
+		atc_bit = ATC_DPIN1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dpxbar->lock, flags);
+
+	/* ensure the selected dispext isn't already used in this crossbar */
+	if (enable) {
+		for (int i = 0; i < MUX_MAX; ++i) {
+			if (i == index)
+				continue;
+			if (dpxbar->selected_dispext[i] == state) {
+				spin_unlock_irqrestore(&dpxbar->lock, flags);
+				return -EBUSY;
+			}
+		}
+	}
+
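+	/* Disable the ATC output of the crossbar while it is reconfigured */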
+	dpxbar_set32(dpxbar, OUT_N_CLK_EN, atc_bit);
+	dpxbar_clear32(dpxbar, OUT_UNK_EN, atc_bit);
+	dpxbar_clear32(dpxbar, OUT_PCLK1_EN, atc_bit);
+	dpxbar_clear32(dpxbar, CROSSBAR_ATC_EN, atc_bit);
+
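+	/* Tear down the previously routed dispext, if any */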
+	if (dpxbar->selected_dispext[index] >= 0) {
+		u32 prev_dispext_bit = 1 << dpxbar->selected_dispext[index];
+		u32 prev_dispext_bit_en = 1 << (2 * dpxbar->selected_dispext[index]);
+
+		dpxbar_set32(dpxbar, FIFO_WR_N_CLK_EN, prev_dispext_bit);
+		dpxbar_set32(dpxbar, FIFO_RD_N_CLK_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, FIFO_WR_UNK_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, FIFO_RD_UNK_EN, prev_dispext_bit_en);
+		dpxbar_clear32(dpxbar, FIFO_WR_DPTX_CLK_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, FIFO_RD_PCLK1_EN, prev_dispext_bit);
+		dpxbar_clear32(dpxbar, CROSSBAR_DISPEXT_EN, prev_dispext_bit);
+
+		dpxbar->selected_dispext[index] = -1;
+	}
+
+	dpxbar_mask32(dpxbar, CROSSBAR_MUX_CTRL, mux_mask, mux_set);
+
+	if (enable) {
+		dpxbar_clear32(dpxbar, FIFO_WR_N_CLK_EN, dispext_bit);
+		dpxbar_clear32(dpxbar, FIFO_RD_N_CLK_EN, dispext_bit);
+		dpxbar_clear32(dpxbar, OUT_N_CLK_EN, atc_bit);
+		dpxbar_set32(dpxbar, FIFO_WR_UNK_EN, dispext_bit);
+		dpxbar_set32(dpxbar, FIFO_RD_UNK_EN, dispext_bit_en);
+		dpxbar_set32(dpxbar, OUT_UNK_EN, atc_bit);
+		dpxbar_set32(dpxbar, FIFO_WR_DPTX_CLK_EN, dispext_bit);
+		dpxbar_set32(dpxbar, FIFO_RD_PCLK1_EN, dispext_bit);
+		dpxbar_set32(dpxbar, OUT_PCLK1_EN, atc_bit);
+		dpxbar_set32(dpxbar, CROSSBAR_ATC_EN, atc_bit);
+		dpxbar_set32(dpxbar, CROSSBAR_DISPEXT_EN, dispext_bit);
+
+		/*
+		 * Work around some HW quirk:
+		 * Without toggling the RD_PCLK enable here the connection
+		 * doesn't come up. Testing has shown that a delay of about
+		 * 5 usec is required which is doubled here to be on the
+		 * safe side.
+		 */
+		dpxbar_clear32(dpxbar, FIFO_RD_PCLK1_EN, dispext_bit);
+		udelay(10);
+		dpxbar_set32(dpxbar, FIFO_RD_PCLK1_EN, dispext_bit);
+
+		dpxbar->selected_dispext[index] = state;
+	}
+
+	spin_unlock_irqrestore(&dpxbar->lock, flags);
+
+	if (enable)
+		dev_info(dpxbar->dev, "Switched %s to dispext%u,%u\n",
+			 apple_dpxbar_names[index], mux_state >> 1,
+			 mux_state & 1);
+	else
+		dev_info(dpxbar->dev, "Switched %s to disconnected state\n",
+			 apple_dpxbar_names[index]);
+
+	return ret;
+}
+
+static const struct mux_control_ops apple_dpxbar_ops = {
+	.set = apple_dpxbar_set,
+};
+
+static const struct mux_control_ops apple_dpxbar_t602x_ops = {
+	.set = apple_dpxbar_set_t602x,
+};
+
+static int apple_dpxbar_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mux_chip *mux_chip;
+	struct apple_dpxbar *dpxbar;
+	const struct apple_dpxbar_hw *hw;
+	int ret;
+
+	hw = of_device_get_match_data(dev);
+	mux_chip = devm_mux_chip_alloc(dev, MUX_MAX, sizeof(*dpxbar));
+	if (IS_ERR(mux_chip))
+		return PTR_ERR(mux_chip);
+
+	dpxbar = mux_chip_priv(mux_chip);
+	mux_chip->ops = hw->ops;
+	spin_lock_init(&dpxbar->lock);
+
+	dpxbar->dev = dev;
+	dpxbar->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(dpxbar->regs))
+		return PTR_ERR(dpxbar->regs);
+
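+	/*
+	 * t6020 does not use the UNK_TUNABLE register; every other supported
+	 * SoC has its per-SoC value (see the *_hw structs below) written to
+	 * it once at probe time.
+	 */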
+	if (!of_device_is_compatible(dev->of_node, "apple,t6020-display-crossbar")) {
+		readl(dpxbar->regs + UNK_TUNABLE);
+		writel(hw->tunable, dpxbar->regs + UNK_TUNABLE);
+		readl(dpxbar->regs + UNK_TUNABLE);
+	}
+
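+	/* All muxes start out disconnected until a consumer selects a dispext */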
+	for (unsigned int i = 0; i < MUX_MAX; ++i) {
+		mux_chip->mux[i].states = hw->n_ufp;
+		mux_chip->mux[i].idle_state = MUX_IDLE_DISCONNECT;
+		dpxbar->selected_dispext[i] = -1;
+	}
+
+	ret = devm_mux_chip_register(dev, mux_chip);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct apple_dpxbar_hw apple_dpxbar_hw_t8103 = {
+	.n_ufp = 2,
+	.tunable = 0,
+	.ops = &apple_dpxbar_ops,
+};
+
+static const struct apple_dpxbar_hw apple_dpxbar_hw_t8112 = {
+	.n_ufp = 4,
+	.tunable = 0xff001865,
+	.ops = &apple_dpxbar_ops,
+};
+
+static const struct apple_dpxbar_hw apple_dpxbar_hw_t6000 = {
+	.n_ufp = 9,
+	.tunable = 5,
+	.ops = &apple_dpxbar_ops,
+};
+
+static const struct apple_dpxbar_hw apple_dpxbar_hw_t6020 = {
+	.n_ufp = 9,
+	.ops = &apple_dpxbar_t602x_ops,
+};
+
+static const struct of_device_id apple_dpxbar_ids[] = {
+	{
+		.compatible = "apple,t8103-display-crossbar",
+		.data = &apple_dpxbar_hw_t8103,
+	},
+	{
+		.compatible = "apple,t8112-display-crossbar",
+		.data = &apple_dpxbar_hw_t8112,
+	},
+	{
+		.compatible = "apple,t6000-display-crossbar",
+		.data = &apple_dpxbar_hw_t6000,
+	},
+	{
+		.compatible = "apple,t6020-display-crossbar",
+		.data = &apple_dpxbar_hw_t6020,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, apple_dpxbar_ids);
+
+static struct platform_driver apple_dpxbar_driver = {
+	.driver = {
+		.name = "apple-display-crossbar",
+		.of_match_table	= apple_dpxbar_ids,
+	},
+	.probe = apple_dpxbar_probe,
+};
+module_platform_driver(apple_dpxbar_driver);
+
+MODULE_DESCRIPTION("Apple Silicon display crossbar multiplexer driver");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
index 8c7eb009d9fc0f..bc73ebccc2aaca 100644
--- a/drivers/net/phy/ax88796b_rust.rs
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -19,7 +19,7 @@ kernel::module_phy_driver! {
         DeviceId::new_with_driver::<PhyAX88796B>()
     ],
     name: "rust_asix_phy",
-    author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+    authors: ["FUJITA Tomonori <fujita.tomonori@gmail.com>"],
     description: "Rust Asix PHYs driver",
     license: "GPL",
 }
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
index 1ab065798175b4..520daeb4208958 100644
--- a/drivers/net/phy/qt2025.rs
+++ b/drivers/net/phy/qt2025.rs
@@ -26,7 +26,7 @@ kernel::module_phy_driver! {
         phy::DeviceId::new_with_driver::<PhyQT2025>(),
     ],
     name: "qt2025_phy",
-    author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+    authors: ["FUJITA Tomonori <fujita.tomonori@gmail.com>"],
     description: "AMCC QT2025 PHY driver",
     license: "GPL",
     firmware: ["qt2025-2.0.3.3.fw"],
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index e5ca0f51182271..6fd805023500be 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -25,7 +25,11 @@ brcmfmac-objs += \
 		btcoex.o \
 		vendor.o \
 		pno.o \
-		xtlv.o
+		join_param.o \
+		scan_param.o \
+		xtlv.o \
+		interface_create.o
+
 brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
 		bcdc.o \
 		fwsignal.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index fe31051a9e11b1..5efd7f6d757a4c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -107,6 +107,7 @@ struct brcmf_bus_ops {
 	void (*debugfs_create)(struct device *dev);
 	int (*reset)(struct device *dev);
 	void (*remove)(struct device *dev);
+	void (*d2h_mb_rx)(struct device *dev, u32 data);
 };
 
 
@@ -286,6 +287,15 @@ static inline void brcmf_bus_remove(struct brcmf_bus *bus)
 	bus->ops->remove(bus->dev);
 }
 
+static inline
+void brcmf_bus_d2h_mb_rx(struct brcmf_bus *bus, u32 data)
+{
+	if (!bus->ops->d2h_mb_rx)
+		return;
+
+	bus->ops->d2h_mb_rx(bus->dev, data);
+}
+
 /*
  * interface functions from common layer
  */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 4b70845e1a2643..c2c1564e163fde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,7 +32,11 @@
 #include "vendor.h"
 #include "bus.h"
 #include "common.h"
+#include "feature.h"
 #include "fwvid.h"
+#include "xtlv.h"
+#include "ratespec.h"
+#include "interface_create.h"
 
 #define BRCMF_SCAN_IE_LEN_MAX		2048
 
@@ -64,6 +68,8 @@
 #define RSN_CAP_MFPR_MASK		BIT(6)
 #define RSN_CAP_MFPC_MASK		BIT(7)
 #define RSN_PMKID_COUNT_LEN		2
+#define DPP_AKM_SUITE_TYPE		2
+#define WLAN_AKM_SUITE_DPP		SUITE(WLAN_OUI_WFA, DPP_AKM_SUITE_TYPE)
 
 #define VNDR_IE_CMD_LEN			4	/* length of the set command
 						 * string :"add", "del" (+ NUL)
@@ -77,10 +83,6 @@
 #define	DOT11_MGMT_HDR_LEN		24	/* d11 management header len */
 #define	DOT11_BCN_PRB_FIXED_LEN		12	/* beacon/probe fixed length */
 
-#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS	320
-#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS	400
-#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS	20
-
 #define BRCMF_SCAN_CHANNEL_TIME		40
 #define BRCMF_SCAN_UNASSOC_TIME		40
 #define BRCMF_SCAN_PASSIVE_TIME		120
@@ -99,9 +101,6 @@
 #define PKT_TOKEN_IDX			15
 #define IDLE_TOKEN_IDX			12
 
-#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
-	(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
-
 #define BRCMF_MAX_CHANSPEC_LIST \
 	(BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1)
 
@@ -125,6 +124,13 @@ struct cca_msrmnt_query {
 	u32 time_req;
 };
 
+/* algo bit vector */
+#define KEY_ALGO_MASK(_algo)	(1 << (_algo))
+
+/* start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+#define WL_WSEC_INFO_BSS_ALGOS (WL_WSEC_INFO_BSS_BASE + 6)
+
 static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
 {
 	if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
@@ -179,6 +185,15 @@ static struct ieee80211_rate __wl_rates[] = {
 	.max_power		= 30,				\
 }
 
+#define CHAN6G(_channel) {					\
+	.band			= NL80211_BAND_6GHZ,		\
+	.center_freq		= ((_channel == 2) ? 5935 : 5950 + (5 * (_channel))),	\
+	.hw_value		= (_channel),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+
 static struct ieee80211_channel __wl_2ghz_channels[] = {
 	CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
 	CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
@@ -195,6 +210,23 @@ static struct ieee80211_channel __wl_5ghz_channels[] = {
 	CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
 };
 
+static struct ieee80211_channel __wl_6ghz_channels[] = {
+	CHAN6G(1),	CHAN6G(2),   CHAN6G(5),	  CHAN6G(9),   CHAN6G(13),
+	CHAN6G(17),	CHAN6G(21),  CHAN6G(25),  CHAN6G(29),  CHAN6G(33),
+	CHAN6G(37),	CHAN6G(41),  CHAN6G(45),  CHAN6G(49),  CHAN6G(53),
+	CHAN6G(57),	CHAN6G(61),  CHAN6G(65),  CHAN6G(69),  CHAN6G(73),
+	CHAN6G(77),	CHAN6G(81),  CHAN6G(85),  CHAN6G(89),  CHAN6G(93),
+	CHAN6G(97),	CHAN6G(101), CHAN6G(105), CHAN6G(109), CHAN6G(113),
+	CHAN6G(117),	CHAN6G(121), CHAN6G(125), CHAN6G(129), CHAN6G(133),
+	CHAN6G(137),	CHAN6G(141), CHAN6G(145), CHAN6G(149), CHAN6G(153),
+	CHAN6G(157),	CHAN6G(161), CHAN6G(165), CHAN6G(169), CHAN6G(173),
+	CHAN6G(177),	CHAN6G(181), CHAN6G(185), CHAN6G(189), CHAN6G(193),
+	CHAN6G(197),	CHAN6G(201), CHAN6G(205), CHAN6G(209), CHAN6G(213),
+	CHAN6G(217),	CHAN6G(221), CHAN6G(225), CHAN6G(229), CHAN6G(233),
+};
+
+static struct ieee80211_sband_iftype_data sdata[NUM_NL80211_BANDS];
+
 /* Band templates duplicated per wiphy. The channel info
  * above is added to the band during setup.
  */
@@ -210,6 +242,12 @@ static const struct ieee80211_supported_band __wl_band_5ghz = {
 	.n_bitrates = wl_a_rates_size,
 };
 
+static const struct ieee80211_supported_band __wl_band_6ghz = {
+	.band = NL80211_BAND_6GHZ,
+	.bitrates = wl_a_rates,
+	.n_bitrates = wl_a_rates_size,
+};
+
 /* This is to override regulatory domains defined in cfg80211 module (reg.c)
  * By default world regulatory domain defined in reg.c puts the flags
  * NL80211_RRF_NO_IR for 5GHz channels (for * 36..48 and 149..165).
@@ -218,35 +256,43 @@ static const struct ieee80211_supported_band __wl_band_5ghz = {
  * domain are to be done here.
  */
 static const struct ieee80211_regdomain brcmf_regdom = {
-	.n_reg_rules = 4,
+	.n_reg_rules = 5,
 	.alpha2 =  "99",
 	.reg_rules = {
 		/* IEEE 802.11b/g, channels 1..11 */
-		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		REG_RULE(2412 - 10, 2472 + 10, 40, 6, 20, 0),
 		/* If any */
 		/* IEEE 802.11 channel 14 - Only JP enables
 		 * this and for 802.11b only
 		 */
-		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		REG_RULE(2484 - 10, 2484 + 10, 20, 6, 20, 0),
 		/* IEEE 802.11a, channel 36..64 */
-		REG_RULE(5150-10, 5350+10, 160, 6, 20, 0),
+		REG_RULE(5150 - 10, 5350 + 10, 160, 6, 20, 0),
 		/* IEEE 802.11a, channel 100..165 */
-		REG_RULE(5470-10, 5850+10, 160, 6, 20, 0), }
+		REG_RULE(5470 - 10, 5850 + 10, 160, 6, 20, 0),
+		/* IEEE 802.11ax, 6E */
+		REG_RULE(5935 - 10, 7115 + 10, 160, 6, 20, 0), }
 };
 
 /* Note: brcmf_cipher_suites is an array of int defining which cipher suites
  * are supported. A pointer to this array and the number of entries is passed
  * on to upper layers. AES_CMAC defines whether or not the driver supports MFP.
- * So the cipher suite AES_CMAC has to be the last one in the array, and when
- * device does not support MFP then the number of suites will be decreased by 1
+ * The MFP suites must be kept as the last four entries in the array; when
+ * the device does not support MFP the number of suites is decreased by 4.
  */
 static const u32 brcmf_cipher_suites[] = {
 	WLAN_CIPHER_SUITE_WEP40,
 	WLAN_CIPHER_SUITE_WEP104,
 	WLAN_CIPHER_SUITE_TKIP,
 	WLAN_CIPHER_SUITE_CCMP,
-	/* Keep as last entry: */
-	WLAN_CIPHER_SUITE_AES_CMAC
+	WLAN_CIPHER_SUITE_CCMP_256,
+	WLAN_CIPHER_SUITE_GCMP,
+	WLAN_CIPHER_SUITE_GCMP_256,
+	/* Keep as last 4 entries: */
+	WLAN_CIPHER_SUITE_AES_CMAC,
+	WLAN_CIPHER_SUITE_BIP_CMAC_256,
+	WLAN_CIPHER_SUITE_BIP_GMAC_128,
+	WLAN_CIPHER_SUITE_BIP_GMAC_256
 };
 
 /* Vendor specific ie. id = 221, oui and type defines exact ie */
@@ -268,48 +314,6 @@ struct parsed_vndr_ies {
 	struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
 };
 
-#define WL_INTERFACE_CREATE_VER_1		1
-#define WL_INTERFACE_CREATE_VER_2		2
-#define WL_INTERFACE_CREATE_VER_3		3
-#define WL_INTERFACE_CREATE_VER_MAX		WL_INTERFACE_CREATE_VER_3
-
-#define WL_INTERFACE_MAC_DONT_USE	0x0
-#define WL_INTERFACE_MAC_USE		0x2
-
-#define WL_INTERFACE_CREATE_STA		0x0
-#define WL_INTERFACE_CREATE_AP		0x1
-
-struct wl_interface_create_v1 {
-	u16	ver;			/* structure version */
-	u32	flags;			/* flags for operation */
-	u8	mac_addr[ETH_ALEN];	/* MAC address */
-	u32	wlc_index;		/* optional for wlc index */
-};
-
-struct wl_interface_create_v2 {
-	u16	ver;			/* structure version */
-	u8	pad1[2];
-	u32	flags;			/* flags for operation */
-	u8	mac_addr[ETH_ALEN];	/* MAC address */
-	u8	iftype;			/* type of interface created */
-	u8	pad2;
-	u32	wlc_index;		/* optional for wlc index */
-};
-
-struct wl_interface_create_v3 {
-	u16 ver;			/* structure version */
-	u16 len;			/* length of structure + data */
-	u16 fixed_len;			/* length of structure */
-	u8 iftype;			/* type of interface created */
-	u8 wlc_index;			/* optional for wlc index */
-	u32 flags;			/* flags for operation */
-	u8 mac_addr[ETH_ALEN];		/* MAC address */
-	u8 bssid[ETH_ALEN];		/* optional for BSSID */
-	u8 if_index;			/* interface index request */
-	u8 pad[3];
-	u8 data[];			/* Optional for specific data */
-};
-
 static u8 nl80211_band_to_fwil(enum nl80211_band band)
 {
 	switch (band) {
@@ -317,6 +321,8 @@ static u8 nl80211_band_to_fwil(enum nl80211_band band)
 		return WLC_BAND_2G;
 	case NL80211_BAND_5GHZ:
 		return WLC_BAND_5G;
+	case NL80211_BAND_6GHZ:
+		return WLC_BAND_6G;
 	default:
 		WARN_ON(1);
 		break;
@@ -324,8 +330,25 @@ static u8 nl80211_band_to_fwil(enum nl80211_band band)
 	return 0;
 }
 
-static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
-			       struct cfg80211_chan_def *ch)
+static int nl80211_band_to_chanspec_band(enum nl80211_band band)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		return BRCMU_CHAN_BAND_2G;
+	case NL80211_BAND_5GHZ:
+		return BRCMU_CHAN_BAND_5G;
+	case NL80211_BAND_6GHZ:
+		return BRCMU_CHAN_BAND_6G;
+	case NL80211_BAND_60GHZ:
+	default:
+		WARN_ON_ONCE(1);
+		/* Choose a safe default */
+		return BRCMU_CHAN_BAND_2G;
+	}
+}
+
+u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
+			struct cfg80211_chan_def *ch)
 {
 	struct brcmu_chan ch_inf;
 	s32 primary_offset;
@@ -383,17 +406,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
 	default:
 		WARN_ON_ONCE(1);
 	}
-	switch (ch->chan->band) {
-	case NL80211_BAND_2GHZ:
-		ch_inf.band = BRCMU_CHAN_BAND_2G;
-		break;
-	case NL80211_BAND_5GHZ:
-		ch_inf.band = BRCMU_CHAN_BAND_5G;
-		break;
-	case NL80211_BAND_60GHZ:
-	default:
-		WARN_ON_ONCE(1);
-	}
+	ch_inf.band = nl80211_band_to_chanspec_band(ch->chan->band);
 	d11inf->encchspec(&ch_inf);
 
 	brcmf_dbg(TRACE, "chanspec: 0x%x\n", ch_inf.chspec);
@@ -405,6 +418,7 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
 {
 	struct brcmu_chan ch_inf;
 
+	ch_inf.band = nl80211_band_to_chanspec_band(ch->band);
 	ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq);
 	ch_inf.bw = BRCMU_CHAN_BW_20;
 	d11inf->encchspec(&ch_inf);
@@ -582,231 +596,6 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
 						ADDR_INDIRECT);
 }
 
-static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
-{
-	int bsscfgidx;
-
-	for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) {
-		/* bsscfgidx 1 is reserved for legacy P2P */
-		if (bsscfgidx == 1)
-			continue;
-		if (!drvr->iflist[bsscfgidx])
-			return bsscfgidx;
-	}
-
-	return -ENOMEM;
-}
-
-static void brcmf_set_vif_sta_macaddr(struct brcmf_if *ifp, u8 *mac_addr)
-{
-	u8 mac_idx = ifp->drvr->sta_mac_idx;
-
-	/* set difference MAC address with locally administered bit */
-	memcpy(mac_addr, ifp->mac_addr, ETH_ALEN);
-	mac_addr[0] |= 0x02;
-	mac_addr[3] ^= mac_idx ? 0xC0 : 0xA0;
-	mac_idx++;
-	mac_idx = mac_idx % 2;
-	ifp->drvr->sta_mac_idx = mac_idx;
-}
-
-static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
-{
-	struct wl_interface_create_v1 iface_v1;
-	struct wl_interface_create_v2 iface_v2;
-	struct wl_interface_create_v3 iface_v3;
-	u32 iface_create_ver;
-	int err;
-
-	/* interface_create version 1 */
-	memset(&iface_v1, 0, sizeof(iface_v1));
-	iface_v1.ver = WL_INTERFACE_CREATE_VER_1;
-	iface_v1.flags = WL_INTERFACE_CREATE_STA |
-			 WL_INTERFACE_MAC_USE;
-	if (!is_zero_ether_addr(macaddr))
-		memcpy(iface_v1.mac_addr, macaddr, ETH_ALEN);
-	else
-		brcmf_set_vif_sta_macaddr(ifp, iface_v1.mac_addr);
-
-	err = brcmf_fil_iovar_data_get(ifp, "interface_create",
-				       &iface_v1,
-				       sizeof(iface_v1));
-	if (err) {
-		brcmf_info("failed to create interface(v1), err=%d\n",
-			   err);
-	} else {
-		brcmf_dbg(INFO, "interface created(v1)\n");
-		return 0;
-	}
-
-	/* interface_create version 2 */
-	memset(&iface_v2, 0, sizeof(iface_v2));
-	iface_v2.ver = WL_INTERFACE_CREATE_VER_2;
-	iface_v2.flags = WL_INTERFACE_MAC_USE;
-	iface_v2.iftype = WL_INTERFACE_CREATE_STA;
-	if (!is_zero_ether_addr(macaddr))
-		memcpy(iface_v2.mac_addr, macaddr, ETH_ALEN);
-	else
-		brcmf_set_vif_sta_macaddr(ifp, iface_v2.mac_addr);
-
-	err = brcmf_fil_iovar_data_get(ifp, "interface_create",
-				       &iface_v2,
-				       sizeof(iface_v2));
-	if (err) {
-		brcmf_info("failed to create interface(v2), err=%d\n",
-			   err);
-	} else {
-		brcmf_dbg(INFO, "interface created(v2)\n");
-		return 0;
-	}
-
-	/* interface_create version 3+ */
-	/* get supported version from firmware side */
-	iface_create_ver = 0;
-	err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
-					 &iface_create_ver);
-	if (err) {
-		brcmf_err("fail to get supported version, err=%d\n", err);
-		return -EOPNOTSUPP;
-	}
-
-	switch (iface_create_ver) {
-	case WL_INTERFACE_CREATE_VER_3:
-		memset(&iface_v3, 0, sizeof(iface_v3));
-		iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
-		iface_v3.flags = WL_INTERFACE_MAC_USE;
-		iface_v3.iftype = WL_INTERFACE_CREATE_STA;
-		if (!is_zero_ether_addr(macaddr))
-			memcpy(iface_v3.mac_addr, macaddr, ETH_ALEN);
-		else
-			brcmf_set_vif_sta_macaddr(ifp, iface_v3.mac_addr);
-
-		err = brcmf_fil_iovar_data_get(ifp, "interface_create",
-					       &iface_v3,
-					       sizeof(iface_v3));
-
-		if (!err)
-			brcmf_dbg(INFO, "interface created(v3)\n");
-		break;
-	default:
-		brcmf_err("not support interface create(v%d)\n",
-			  iface_create_ver);
-		err = -EOPNOTSUPP;
-		break;
-	}
-
-	if (err) {
-		brcmf_info("station interface creation failed (%d)\n",
-			   err);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
-{
-	struct wl_interface_create_v1 iface_v1;
-	struct wl_interface_create_v2 iface_v2;
-	struct wl_interface_create_v3 iface_v3;
-	u32 iface_create_ver;
-	struct brcmf_pub *drvr = ifp->drvr;
-	struct brcmf_mbss_ssid_le mbss_ssid_le;
-	int bsscfgidx;
-	int err;
-
-	/* interface_create version 1 */
-	memset(&iface_v1, 0, sizeof(iface_v1));
-	iface_v1.ver = WL_INTERFACE_CREATE_VER_1;
-	iface_v1.flags = WL_INTERFACE_CREATE_AP |
-			 WL_INTERFACE_MAC_USE;
-
-	brcmf_set_vif_sta_macaddr(ifp, iface_v1.mac_addr);
-
-	err = brcmf_fil_iovar_data_get(ifp, "interface_create",
-				       &iface_v1,
-				       sizeof(iface_v1));
-	if (err) {
-		brcmf_info("failed to create interface(v1), err=%d\n",
-			   err);
-	} else {
-		brcmf_dbg(INFO, "interface created(v1)\n");
-		return 0;
-	}
-
-	/* interface_create version 2 */
-	memset(&iface_v2, 0, sizeof(iface_v2));
-	iface_v2.ver = WL_INTERFACE_CREATE_VER_2;
-	iface_v2.flags = WL_INTERFACE_MAC_USE;
-	iface_v2.iftype = WL_INTERFACE_CREATE_AP;
-
-	brcmf_set_vif_sta_macaddr(ifp, iface_v2.mac_addr);
-
-	err = brcmf_fil_iovar_data_get(ifp, "interface_create",
-				       &iface_v2,
-				       sizeof(iface_v2));
-	if (err) {
-		brcmf_info("failed to create interface(v2), err=%d\n",
-			   err);
-	} else {
-		brcmf_dbg(INFO, "interface created(v2)\n");
-		return 0;
-	}
-
-	/* interface_create version 3+ */
-	/* get supported version from firmware side */
-	iface_create_ver = 0;
-	err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
-					 &iface_create_ver);
-	if (err) {
-		brcmf_err("fail to get supported version, err=%d\n", err);
-		return -EOPNOTSUPP;
-	}
-
-	switch (iface_create_ver) {
-	case WL_INTERFACE_CREATE_VER_3:
-		memset(&iface_v3, 0, sizeof(iface_v3));
-		iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
-		iface_v3.flags = WL_INTERFACE_MAC_USE;
-		iface_v3.iftype = WL_INTERFACE_CREATE_AP;
-		brcmf_set_vif_sta_macaddr(ifp, iface_v3.mac_addr);
-
-		err = brcmf_fil_iovar_data_get(ifp, "interface_create",
-					       &iface_v3,
-					       sizeof(iface_v3));
-
-		if (!err)
-			brcmf_dbg(INFO, "interface created(v3)\n");
-		break;
-	default:
-		brcmf_err("not support interface create(v%d)\n",
-			  iface_create_ver);
-		err = -EOPNOTSUPP;
-		break;
-	}
-
-	if (err) {
-		brcmf_info("Does not support interface_create (%d)\n",
-			   err);
-		memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
-		bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
-		if (bsscfgidx < 0)
-			return bsscfgidx;
-
-		mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
-		mbss_ssid_le.SSID_len = cpu_to_le32(5);
-		sprintf(mbss_ssid_le.SSID, "ssid%d", bsscfgidx);
-
-		err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le,
-						sizeof(mbss_ssid_le));
-
-		if (err < 0)
-			bphy_err(drvr, "setting ssid failed %d\n", err);
-	}
-
-	return err;
-}
-
 /**
  * brcmf_apsta_add_vif() - create a new AP or STA virtual interface
  *
@@ -1044,134 +833,13 @@ void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
 	}
 }
 
-static void brcmf_scan_params_v2_to_v1(struct brcmf_scan_params_v2_le *params_v2_le,
-				       struct brcmf_scan_params_le *params_le)
-{
-	size_t params_size;
-	u32 ch;
-	int n_channels, n_ssids;
-
-	memcpy(&params_le->ssid_le, &params_v2_le->ssid_le,
-	       sizeof(params_le->ssid_le));
-	memcpy(&params_le->bssid, &params_v2_le->bssid,
-	       sizeof(params_le->bssid));
-
-	params_le->bss_type = params_v2_le->bss_type;
-	params_le->scan_type = le32_to_cpu(params_v2_le->scan_type);
-	params_le->nprobes = params_v2_le->nprobes;
-	params_le->active_time = params_v2_le->active_time;
-	params_le->passive_time = params_v2_le->passive_time;
-	params_le->home_time = params_v2_le->home_time;
-	params_le->channel_num = params_v2_le->channel_num;
-
-	ch = le32_to_cpu(params_v2_le->channel_num);
-	n_channels = ch & BRCMF_SCAN_PARAMS_COUNT_MASK;
-	n_ssids = ch >> BRCMF_SCAN_PARAMS_NSSID_SHIFT;
-
-	params_size = sizeof(u16) * n_channels;
-	if (n_ssids > 0) {
-		params_size = roundup(params_size, sizeof(u32));
-		params_size += sizeof(struct brcmf_ssid_le) * n_ssids;
-	}
-
-	memcpy(&params_le->channel_list[0],
-	       &params_v2_le->channel_list[0], params_size);
-}
-
-static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
-			     struct brcmf_scan_params_v2_le *params_le,
-			     struct cfg80211_scan_request *request)
-{
-	u32 n_ssids;
-	u32 n_channels;
-	s32 i;
-	s32 offset;
-	u16 chanspec;
-	char *ptr;
-	int length;
-	struct brcmf_ssid_le ssid_le;
-
-	eth_broadcast_addr(params_le->bssid);
-
-	length = BRCMF_SCAN_PARAMS_V2_FIXED_SIZE;
 
-	params_le->version = cpu_to_le16(BRCMF_SCAN_PARAMS_VERSION_V2);
-	params_le->bss_type = DOT11_BSSTYPE_ANY;
-	params_le->scan_type = cpu_to_le32(BRCMF_SCANTYPE_ACTIVE);
-	params_le->channel_num = 0;
-	params_le->nprobes = cpu_to_le32(-1);
-	params_le->active_time = cpu_to_le32(-1);
-	params_le->passive_time = cpu_to_le32(-1);
-	params_le->home_time = cpu_to_le32(-1);
-	memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
-
-	/* Scan abort */
-	if (!request) {
-		length += sizeof(u16);
-		params_le->channel_num = cpu_to_le32(1);
-		params_le->channel_list[0] = cpu_to_le16(-1);
-		params_le->length = cpu_to_le16(length);
-		return;
-	}
-
-	n_ssids = request->n_ssids;
-	n_channels = request->n_channels;
-
-	/* Copy channel array if applicable */
-	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
-		  n_channels);
-	if (n_channels > 0) {
-		length += roundup(sizeof(u16) * n_channels, sizeof(u32));
-		for (i = 0; i < n_channels; i++) {
-			chanspec = channel_to_chanspec(&cfg->d11inf,
-						       request->channels[i]);
-			brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n",
-				  request->channels[i]->hw_value, chanspec);
-			params_le->channel_list[i] = cpu_to_le16(chanspec);
-		}
-	} else {
-		brcmf_dbg(SCAN, "Scanning all channels\n");
-	}
-
-	/* Copy ssid array if applicable */
-	brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
-	if (n_ssids > 0) {
-		offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) +
-				n_channels * sizeof(u16);
-		offset = roundup(offset, sizeof(u32));
-		length += sizeof(ssid_le) * n_ssids;
-		ptr = (char *)params_le + offset;
-		for (i = 0; i < n_ssids; i++) {
-			memset(&ssid_le, 0, sizeof(ssid_le));
-			ssid_le.SSID_len =
-					cpu_to_le32(request->ssids[i].ssid_len);
-			memcpy(ssid_le.SSID, request->ssids[i].ssid,
-			       request->ssids[i].ssid_len);
-			if (!ssid_le.SSID_len)
-				brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
-			else
-				brcmf_dbg(SCAN, "%d: scan for  %.32s size=%d\n",
-					  i, ssid_le.SSID, ssid_le.SSID_len);
-			memcpy(ptr, &ssid_le, sizeof(ssid_le));
-			ptr += sizeof(ssid_le);
-		}
-	} else {
-		brcmf_dbg(SCAN, "Performing passive scan\n");
-		params_le->scan_type = cpu_to_le32(BRCMF_SCANTYPE_PASSIVE);
-	}
-	params_le->length = cpu_to_le16(length);
-	/* Adding mask to channel numbers */
-	params_le->channel_num =
-		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
-			(n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
-}
 
 s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
 				struct brcmf_if *ifp, bool aborted,
 				bool fw_abort)
 {
 	struct brcmf_pub *drvr = cfg->pub;
-	struct brcmf_scan_params_v2_le params_v2_le;
 	struct cfg80211_scan_request *scan_request;
 	u64 reqid;
 	u32 bucket;
@@ -1187,25 +855,16 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
 	timer_delete_sync(&cfg->escan_timeout);
 
 	if (fw_abort) {
+		u32 len;
+		void *data = drvr->scan_param_handler.get_struct_for_request(cfg, &len, NULL);
+		if (!data) {
+			bphy_err(drvr, "Scan abort failed to prepare abort struct\n");
+			return 0;
+		}
 		/* Do a scan abort to stop the driver's scan engine */
 		brcmf_dbg(SCAN, "ABORT scan in firmware\n");
-
-		brcmf_escan_prep(cfg, &params_v2_le, NULL);
-
-		/* E-Scan (or anyother type) can be aborted by SCAN */
-		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_V2)) {
-			err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
-						     &params_v2_le,
-						     sizeof(params_v2_le));
-		} else {
-			struct brcmf_scan_params_le params_le;
-
-			brcmf_scan_params_v2_to_v1(&params_v2_le, &params_le);
-			err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
-						     &params_le,
-						     sizeof(params_le));
-		}
-
+		err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, data, len);
+		kfree(data);
 		if (err)
 			bphy_err(drvr, "Scan abort failed\n");
 	}
@@ -1429,19 +1088,24 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
 		struct cfg80211_scan_request *request)
 {
 	struct brcmf_pub *drvr = cfg->pub;
-	s32 params_size = BRCMF_SCAN_PARAMS_V2_FIXED_SIZE +
-			  offsetof(struct brcmf_escan_params_le, params_v2_le);
+	u32 struct_size = 0;
+	void *prepped_params = NULL;
+	u32 params_size = 0;
 	struct brcmf_escan_params_le *params;
 	s32 err = 0;
 
 	brcmf_dbg(SCAN, "E-SCAN START\n");
 
-	if (request != NULL) {
-		/* Allocate space for populating ssids in struct */
-		params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
-
-		/* Allocate space for populating ssids in struct */
-		params_size += sizeof(struct brcmf_ssid_le) * request->n_ssids;
+	prepped_params = drvr->scan_param_handler.get_struct_for_request(cfg, &struct_size, request);
+	if (!prepped_params) {
+		err = -EINVAL;
+		goto exit;
+	}
+	params_size = struct_size +
+		      offsetof(struct brcmf_escan_params_le, params_v4_le);
+	if (!params_size) {
+		err = -EINVAL;
+		goto exit;
 	}
 
 	params = kzalloc(params_size, GFP_KERNEL);
@@ -1449,27 +1113,14 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
 		err = -ENOMEM;
 		goto exit;
 	}
-	BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
-	brcmf_escan_prep(cfg, &params->params_v2_le, request);
-
-	params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION_V2);
-
-	if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_V2)) {
-		struct brcmf_escan_params_le *params_v1;
-
-		params_size -= BRCMF_SCAN_PARAMS_V2_FIXED_SIZE;
-		params_size += BRCMF_SCAN_PARAMS_FIXED_SIZE;
-		params_v1 = kzalloc(params_size, GFP_KERNEL);
-		if (!params_v1) {
-			err = -ENOMEM;
-			goto exit_params;
-		}
-		params_v1->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
-		brcmf_scan_params_v2_to_v1(&params->params_v2_le, &params_v1->params_le);
-		kfree(params);
-		params = params_v1;
-	}
+	/* Copy into the largest member of the parameter union */
+	unsafe_memcpy(
+		&params->params_v4_le, prepped_params, struct_size,
+		/* the destination union of flex-array structs was allocated above to be at least struct_size bytes */);
 
+	/* We can now free the original prepped parameters */
+	kfree(prepped_params);
+	params->version = cpu_to_le32(drvr->scan_param_handler.version);
 	params->action = cpu_to_le16(WL_ESCAN_ACTION_START);
 	params->sync_id = cpu_to_le16(0x1234);
 
@@ -1481,7 +1132,6 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
 			bphy_err(drvr, "error (%d)\n", err);
 	}
 
-exit_params:
 	kfree(params);
 exit:
 	return err;
@@ -1764,21 +1414,19 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
 	brcmf_dbg(TRACE, "Exit\n");
 }
 
-static s32
-brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
-		      struct cfg80211_ibss_params *params)
+static s32 brcmf_cfg80211_join_ibss(struct wiphy *wiphy,
+				    struct net_device *ndev,
+				    struct cfg80211_ibss_params *params)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
 	struct brcmf_pub *drvr = cfg->pub;
-	struct brcmf_join_params join_params;
-	size_t join_params_size = 0;
-	s32 err = 0;
+	void *join_params;
+	u32 join_params_size = 0;
 	s32 wsec = 0;
 	s32 bcnprd;
-	u16 chanspec;
-	u32 ssid_len;
+	s32 err = 0;
 
 	brcmf_dbg(TRACE, "Enter\n");
 	if (!check_vif_up(ifp->vif))
@@ -1852,58 +1500,39 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
 		goto done;
 	}
 
-	/* Configure required join parameter */
-	memset(&join_params, 0, sizeof(struct brcmf_join_params));
-
-	/* SSID */
-	ssid_len = min_t(u32, params->ssid_len, IEEE80211_MAX_SSID_LEN);
-	memcpy(join_params.ssid_le.SSID, params->ssid, ssid_len);
-	join_params.ssid_le.SSID_len = cpu_to_le32(ssid_len);
-	join_params_size = sizeof(join_params.ssid_le);
-
-	/* BSSID */
 	if (params->bssid) {
-		memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
-		join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE;
 		memcpy(profile->bssid, params->bssid, ETH_ALEN);
 	} else {
-		eth_broadcast_addr(join_params.params_le.bssid);
 		eth_zero_addr(profile->bssid);
 	}
 
-	/* Channel */
+	cfg->ibss_starter = false;
+	cfg->channel = 0;
 	if (params->chandef.chan) {
-		u32 target_channel;
+		u16 chanspec;
+		cfg->channel = ieee80211_frequency_to_channel(
+			params->chandef.chan->center_freq);
+		/* adding chanspec */
+		chanspec = chandef_to_chanspec(&cfg->d11inf, &params->chandef);
 
-		cfg->channel =
-			ieee80211_frequency_to_channel(
-				params->chandef.chan->center_freq);
-		if (params->channel_fixed) {
-			/* adding chanspec */
-			chanspec = chandef_to_chanspec(&cfg->d11inf,
-						       &params->chandef);
-			join_params.params_le.chanspec_list[0] =
-				cpu_to_le16(chanspec);
-			join_params.params_le.chanspec_num = cpu_to_le32(1);
-			join_params_size += sizeof(join_params.params_le);
-		}
-
-		/* set channel for starter */
-		target_channel = cfg->channel;
-		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL,
-					    target_channel);
+		/* set chanspec */
+		err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
 		if (err) {
-			bphy_err(drvr, "WLC_SET_CHANNEL failed (%d)\n", err);
+			bphy_err(drvr, "Setting chanspec failed (%d)\n", err);
 			goto done;
 		}
-	} else
-		cfg->channel = 0;
-
-	cfg->ibss_starter = false;
-
+	}
 
-	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
-				     &join_params, join_params_size);
+	join_params = drvr->join_param_handler.get_struct_for_ibss(
+		cfg, &join_params_size, params);
+	if (!join_params) {
+		bphy_err(drvr, "Converting join params failed\n");
+		err = -ENOMEM;
+		goto done;
+	}
+	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, join_params,
+				     join_params_size);
+	/* Free params no matter what */
+	kfree(join_params);
 	if (err) {
 		bphy_err(drvr, "WLC_SET_SSID failed (%d)\n", err);
 		goto done;
@@ -1947,15 +1576,20 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
 	struct brcmf_cfg80211_security *sec;
 	s32 val = 0;
 	s32 err = 0;
-
-	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+	if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
 		val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
-	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
-		val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
-	else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+	} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
+		if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE)
+			val = WPA3_AUTH_SAE_PSK;
+		else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_OWE)
+			val = WPA3_AUTH_OWE;
+		else
+			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+	} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3) {
 		val = WPA3_AUTH_SAE_PSK;
-	else
+	} else {
 		val = WPA_AUTH_DISABLED;
+	}
 	brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
 	err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val);
 	if (err) {
@@ -2006,6 +1640,48 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
 	return err;
 }
 
+static s32 brcmf_set_wsec_info_algos(struct brcmf_if *ifp, u32 algos, u32 mask)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	s32 err = 0;
+	struct brcmf_wsec_info *wsec_info;
+	struct brcmf_xtlv *wsec_info_tlv;
+	u16 tlv_data_len;
+	u8 tlv_data[8];
+	u32 param_len;
+	u8 *buf;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
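+	/* wsec_info takes a version header followed by xtlv-encoded
+	 * attributes; a single WL_WSEC_INFO_BSS_ALGOS tlv carries the key
+	 * algorithm bit vector and the mask of bits being updated.
+	 */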
+	buf = kzalloc(sizeof(struct brcmf_wsec_info) + sizeof(tlv_data),
+		      GFP_KERNEL);
+	if (!buf) {
+		bphy_err(drvr, "unable to allocate.\n");
+		return -ENOMEM;
+	}
+	wsec_info = (struct brcmf_wsec_info *)buf;
+	wsec_info->version = BRCMF_WSEC_INFO_VER;
+	wsec_info_tlv =
+		(struct brcmf_xtlv *)(buf +
+				      offsetof(struct brcmf_wsec_info, tlvs));
+	wsec_info->num_tlvs++;
+	tlv_data_len = sizeof(tlv_data);
+	memcpy(tlv_data, &algos, sizeof(algos));
+	memcpy(tlv_data + sizeof(algos), &mask, sizeof(mask));
+	brcmf_xtlv_pack_header(wsec_info_tlv, WL_WSEC_INFO_BSS_ALGOS,
+			       tlv_data_len, tlv_data, 0);
+
+	param_len = offsetof(struct brcmf_wsec_info, tlvs) +
+		    offsetof(struct brcmf_wsec_info_tlv, data) + tlv_data_len;
+
+	err = brcmf_fil_bsscfg_data_set(ifp, "wsec_info", buf, param_len);
+	if (err)
+		brcmf_err("set wsec_info_error:%d\n", err);
+
+	kfree(buf);
+	return err;
+}
+
 static s32
 brcmf_set_wsec_mode(struct net_device *ndev,
 		    struct cfg80211_connect_params *sme)
@@ -2018,6 +1694,8 @@ brcmf_set_wsec_mode(struct net_device *ndev,
 	s32 gval = 0;
 	s32 wsec;
 	s32 err = 0;
+	u32 algos = 0;
+	u32 mask = 0;
 
 	if (sme->crypto.n_ciphers_pairwise) {
 		switch (sme->crypto.ciphers_pairwise[0]) {
@@ -2034,6 +1712,15 @@ brcmf_set_wsec_mode(struct net_device *ndev,
 		case WLAN_CIPHER_SUITE_AES_CMAC:
 			pval = AES_ENABLED;
 			break;
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GCMP)) {
+				brcmf_err("This chip does not support GCMP\n");
+				return -EOPNOTSUPP;
+			}
+			pval = AES_ENABLED;
+			algos = KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256);
+			mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+			break;
 		default:
 			bphy_err(drvr, "invalid cipher pairwise (%d)\n",
 				 sme->crypto.ciphers_pairwise[0]);
@@ -2055,6 +1742,15 @@ brcmf_set_wsec_mode(struct net_device *ndev,
 		case WLAN_CIPHER_SUITE_AES_CMAC:
 			gval = AES_ENABLED;
 			break;
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GCMP)) {
+				brcmf_err("This chip does not support GCMP\n");
+				return -EOPNOTSUPP;
+			}
+			gval = AES_ENABLED;
+			algos = KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256);
+			mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+			break;
 		default:
 			bphy_err(drvr, "invalid cipher group (%d)\n",
 				 sme->crypto.cipher_group);
@@ -2063,6 +1759,7 @@ brcmf_set_wsec_mode(struct net_device *ndev,
 	}
 
 	brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
+	brcmf_dbg(CONN, "algos (0x%x) mask (0x%x)\n", algos, mask);
 	/* In case of privacy, but no security and WPS then simulate */
 	/* setting AES. WPS-2.0 allows no security                   */
 	if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
@@ -2075,6 +1772,15 @@ brcmf_set_wsec_mode(struct net_device *ndev,
 		bphy_err(drvr, "error (%d)\n", err);
 		return err;
 	}
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GCMP)) {
+		brcmf_dbg(CONN, "set_wsec_info algos (0x%x) mask (0x%x)\n",
+			  algos, mask);
+		err = brcmf_set_wsec_info_algos(ifp, algos, mask);
+		if (err) {
+			brcmf_err("set wsec_info error (%d)\n", err);
+			return err;
+		}
+	}
 
 	sec = &profile->sec;
 	sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
@@ -2098,9 +1804,13 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 	u16 rsn_cap;
 	u32 mfp;
 	u16 count;
+	s32 okc_enable;
+	u16 pmkid_count;
+	const u8 *group_mgmt_cs = NULL;
 
 	profile->use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
 	profile->is_ft = false;
+	profile->is_okc = false;
 
 	if (!sme->crypto.n_akm_suites)
 		return 0;
@@ -2117,13 +1827,15 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 			val = WPA_AUTH_UNSPECIFIED;
 			if (sme->want_1x)
 				profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+			else
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
 			break;
 		case WLAN_AKM_SUITE_PSK:
 			val = WPA_AUTH_PSK;
 			break;
 		default:
-			bphy_err(drvr, "invalid akm suite (%d)\n",
-				 sme->crypto.akm_suites[0]);
+			bphy_err(drvr, "invalid akm suite (0x%x)\n",
+				 sme->crypto.akm_suites[0]);
 			return -EINVAL;
 		}
 	} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -2132,11 +1844,15 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 			val = WPA2_AUTH_UNSPECIFIED;
 			if (sme->want_1x)
 				profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+			else
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
 			break;
 		case WLAN_AKM_SUITE_8021X_SHA256:
 			val = WPA2_AUTH_1X_SHA256;
 			if (sme->want_1x)
 				profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+			else
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
 			break;
 		case WLAN_AKM_SUITE_PSK_SHA256:
 			val = WPA2_AUTH_PSK_SHA256;
@@ -2149,14 +1865,35 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 			profile->is_ft = true;
 			if (sme->want_1x)
 				profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+			else
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
 			break;
 		case WLAN_AKM_SUITE_FT_PSK:
 			val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
 			profile->is_ft = true;
+			if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_FWSUP))
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_PSK;
+			else
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
+			break;
+		case WLAN_AKM_SUITE_DPP:
+			val = WFA_AUTH_DPP;
+			profile->use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
+			break;
+		case WLAN_AKM_SUITE_OWE:
+			val = WPA3_AUTH_OWE;
+			profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
+			break;
+		case WLAN_AKM_SUITE_8021X_SUITE_B_192:
+			val = WPA3_AUTH_1X_SUITE_B_SHA384;
+			if (sme->want_1x)
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+			else
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_ROAM;
 			break;
 		default:
-			bphy_err(drvr, "invalid akm suite (%d)\n",
-				 sme->crypto.akm_suites[0]);
+			bphy_err(drvr, "invalid akm suite (0x%x)\n",
+				 sme->crypto.akm_suites[0]);
 			return -EINVAL;
 		}
 	} else if (val & WPA3_AUTH_SAE_PSK) {
@@ -2177,15 +1914,34 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 			}
 			break;
 		default:
-			bphy_err(drvr, "invalid akm suite (%d)\n",
-				 sme->crypto.akm_suites[0]);
+			bphy_err(drvr, "invalid akm suite (0x%x)\n",
+				 sme->crypto.akm_suites[0]);
 			return -EINVAL;
 		}
 	}
-
-	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X)
+	if ((profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X) ||
+	    (profile->use_fwsup == BRCMF_PROFILE_FWSUP_ROAM)) {
 		brcmf_dbg(INFO, "using 1X offload\n");
-
+		err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "okc_enable",
+					       &okc_enable);
+		if (err) {
+			bphy_err(drvr, "get okc_enable failed (%d)\n", err);
+		} else {
+			brcmf_dbg(INFO, "get okc_enable (%d)\n", okc_enable);
+			profile->is_okc = okc_enable;
+		}
+	} else if (profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE &&
+		   (val == WPA3_AUTH_SAE_PSK)) {
+		brcmf_dbg(INFO, "not using SAE offload\n");
+		err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "okc_enable",
+					       &okc_enable);
+		if (err) {
+			bphy_err(drvr, "get okc_enable failed (%d)\n", err);
+		} else {
+			brcmf_dbg(INFO, "get okc_enable (%d)\n", okc_enable);
+			profile->is_okc = okc_enable;
+		}
+	}
 	if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
 		goto skip_mfp_config;
 	/* The MFP mode (1 or 2) needs to be determined, parse IEs. The
@@ -2218,14 +1974,47 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 		mfp = BRCMF_MFP_REQUIRED;
 	else if (rsn_cap & RSN_CAP_MFPC_MASK)
 		mfp = BRCMF_MFP_CAPABLE;
+	/* In the DPP case, very low throughput is observed if MFPC is set
+	 * in firmware. Firmware needs to ensure that MFPC is not set when
+	 * MFPR was requested by fmac. Since this behaviour is specific to
+	 * DPP, fmac needs to set wpa_auth prior to mfp, so that firmware
+	 * can use this information to prevent MFPC from being set in the
+	 * DPP case.
+	 */
+	if (val == WFA_AUTH_DPP) {
+		brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
+		err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth",
+					       val);
+		if (err) {
+			bphy_err(drvr, "could not set wpa_auth (%d)\n", err);
+			return err;
+		}
+	}
+
 	brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp);
+	offset += RSN_CAP_LEN;
+	if (mfp && (ie_len - offset >= RSN_PMKID_COUNT_LEN)) {
+		pmkid_count = ie[offset] + (ie[offset + 1] << 8);
+		offset += RSN_PMKID_COUNT_LEN + (pmkid_count * WLAN_PMKID_LEN);
+		if (ie_len - offset >= WPA_IE_MIN_OUI_LEN) {
+			group_mgmt_cs = &ie[offset];
+			if (memcmp(group_mgmt_cs, RSN_OUI, TLV_OUI_LEN) == 0) {
+				brcmf_fil_bsscfg_data_set(ifp, "bip",
+							  (void *)group_mgmt_cs,
+							  WPA_IE_MIN_OUI_LEN);
+			}
+		}
+	}
 
 skip_mfp_config:
-	brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
-	err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
-	if (err) {
-		bphy_err(drvr, "could not set wpa_auth (%d)\n", err);
-		return err;
+	if (val != WFA_AUTH_DPP) {
+		brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
+		err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth",
+					       val);
+		if (err) {
+			bphy_err(drvr, "could not set wpa_auth (%d)\n", err);
+			return err;
+		}
 	}
 
 	return err;
@@ -2358,52 +2147,51 @@ static void brcmf_set_join_pref(struct brcmf_if *ifp,
 
 static s32
 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
-		       struct cfg80211_connect_params *sme)
+		       struct cfg80211_connect_params *params)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
-	struct ieee80211_channel *chan = sme->channel;
+	struct ieee80211_channel *chan = params->channel;
 	struct brcmf_pub *drvr = ifp->drvr;
-	struct brcmf_join_params join_params;
-	size_t join_params_size;
+	void *join_params;
+	u32 join_params_size;
+	void *fallback_join_params;
+	u32 fallback_join_params_size;
 	const struct brcmf_tlv *rsn_ie;
 	const struct brcmf_vs_tlv *wpa_ie;
 	const void *ie;
 	u32 ie_len;
-	struct brcmf_ext_join_params_le *ext_join_params;
-	u16 chanspec;
 	s32 err = 0;
-	u32 ssid_len;
 
 	brcmf_dbg(TRACE, "Enter\n");
 	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
-	if (!sme->ssid) {
+	if (!params->ssid) {
 		bphy_err(drvr, "Invalid ssid\n");
 		return -EOPNOTSUPP;
 	}
 
-	if (sme->channel_hint)
-		chan = sme->channel_hint;
+	if (params->channel_hint)
+		chan = params->channel_hint;
 
-	if (sme->bssid_hint)
-		sme->bssid = sme->bssid_hint;
+	if (params->bssid_hint)
+		params->bssid = params->bssid_hint;
 
 	if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
 		/* A normal (non P2P) connection request setup. */
 		ie = NULL;
 		ie_len = 0;
 		/* find the WPA_IE */
-		wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len);
+		wpa_ie = brcmf_find_wpaie((u8 *)params->ie, params->ie_len);
 		if (wpa_ie) {
 			ie = wpa_ie;
 			ie_len = wpa_ie->len + TLV_HDR_LEN;
 		} else {
 			/* find the RSN_IE */
-			rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
-						  sme->ie_len,
+			rsn_ie = brcmf_parse_tlvs((const u8 *)params->ie,
+						  params->ie_len,
 						  WLAN_EID_RSN);
 			if (rsn_ie) {
 				ie = rsn_ie;
@@ -2414,7 +2202,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
 	}
 
 	err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
-				    sme->ie, sme->ie_len);
+				    params->ie, params->ie_len);
 	if (err)
 		bphy_err(drvr, "Set Assoc REQ IE Failed\n");
 	else
@@ -2425,166 +2213,129 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
 	if (chan) {
 		cfg->channel =
 			ieee80211_frequency_to_channel(chan->center_freq);
-		chanspec = channel_to_chanspec(&cfg->d11inf, chan);
-		brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
-			  cfg->channel, chan->center_freq, chanspec);
+		brcmf_dbg(CONN, "channel=%d, center_req=%d\n",
+			  cfg->channel, chan->center_freq);
 	} else {
 		cfg->channel = 0;
-		chanspec = 0;
 	}
 
-	brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
+	brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", params->ie, params->ie_len);
 
-	err = brcmf_set_wpa_version(ndev, sme);
+	err = brcmf_set_wpa_version(ndev, params);
 	if (err) {
 		bphy_err(drvr, "wl_set_wpa_version failed (%d)\n", err);
 		goto done;
 	}
 
-	sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
-	err = brcmf_set_auth_type(ndev, sme);
+	params->auth_type = brcmf_war_auth_type(ifp, params->auth_type);
+	err = brcmf_set_auth_type(ndev, params);
 	if (err) {
 		bphy_err(drvr, "wl_set_auth_type failed (%d)\n", err);
 		goto done;
 	}
 
-	err = brcmf_set_wsec_mode(ndev, sme);
+	err = brcmf_set_wsec_mode(ndev, params);
 	if (err) {
 		bphy_err(drvr, "wl_set_set_cipher failed (%d)\n", err);
 		goto done;
 	}
 
-	err = brcmf_set_key_mgmt(ndev, sme);
+	err = brcmf_set_key_mgmt(ndev, params);
 	if (err) {
 		bphy_err(drvr, "wl_set_key_mgmt failed (%d)\n", err);
 		goto done;
 	}
 
-	err = brcmf_set_sharedkey(ndev, sme);
-	if (err) {
-		bphy_err(drvr, "brcmf_set_sharedkey failed (%d)\n", err);
-		goto done;
-	}
-
-	if (sme->crypto.psk &&
-	    profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) {
-		if (WARN_ON(profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE)) {
-			err = -EINVAL;
-			goto done;
-		}
-		brcmf_dbg(INFO, "using PSK offload\n");
-		profile->use_fwsup = BRCMF_PROFILE_FWSUP_PSK;
-	}
-
-	if (profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE) {
-		/* enable firmware supplicant for this interface */
-		err = brcmf_fil_iovar_int_set(ifp, "sup_wpa", 1);
-		if (err < 0) {
-			bphy_err(drvr, "failed to enable fw supplicant\n");
-			goto done;
-		}
-	}
-
-	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK)
-		err = brcmf_set_pmk(ifp, sme->crypto.psk,
-				    BRCMF_WSEC_MAX_PSK_LEN);
-	else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) {
-		/* clean up user-space RSNE */
-		err = brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0);
-		if (err) {
-			bphy_err(drvr, "failed to clean up user-space RSNE\n");
-			goto done;
-		}
-		err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
-		if (!err && sme->crypto.psk)
-			err = brcmf_set_pmk(ifp, sme->crypto.psk,
-					    BRCMF_WSEC_MAX_PSK_LEN);
-	}
-	if (err)
-		goto done;
-
-	/* Join with specific BSSID and cached SSID
-	 * If SSID is zero join based on BSSID only
-	 */
-	join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) +
-		offsetof(struct brcmf_assoc_params_le, chanspec_list);
-	if (cfg->channel)
-		join_params_size += sizeof(u16);
-	ext_join_params = kzalloc(sizeof(*ext_join_params), GFP_KERNEL);
-	if (ext_join_params == NULL) {
-		err = -ENOMEM;
+	err = brcmf_set_sharedkey(ndev, params);
+	if (err) {
+		bphy_err(drvr, "brcmf_set_sharedkey failed (%d)\n", err);
 		goto done;
 	}
-	ssid_len = min_t(u32, sme->ssid_len, IEEE80211_MAX_SSID_LEN);
-	ext_join_params->ssid_le.SSID_len = cpu_to_le32(ssid_len);
-	memcpy(&ext_join_params->ssid_le.SSID, sme->ssid, ssid_len);
-	if (ssid_len < IEEE80211_MAX_SSID_LEN)
-		brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n",
-			  ext_join_params->ssid_le.SSID, ssid_len);
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_FWSUP)) {
+		if (params->crypto.psk) {
+			if ((profile->use_fwsup != BRCMF_PROFILE_FWSUP_SAE) &&
+			    (profile->use_fwsup != BRCMF_PROFILE_FWSUP_PSK)) {
+				if (WARN_ON(profile->use_fwsup !=
+					    BRCMF_PROFILE_FWSUP_NONE)) {
+					err = -EINVAL;
+					goto done;
+				}
+				brcmf_dbg(INFO, "using PSK offload\n");
+				profile->use_fwsup = BRCMF_PROFILE_FWSUP_PSK;
+			}
+		}
 
-	/* Set up join scan parameters */
-	ext_join_params->scan_le.scan_type = -1;
-	ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+		if (profile->use_fwsup != BRCMF_PROFILE_FWSUP_NONE) {
+			/* enable firmware supplicant for this interface */
+			err = brcmf_fil_iovar_int_set(ifp, "sup_wpa", 1);
+			if (err < 0) {
+				bphy_err(drvr,
+					 "failed to enable fw supplicant\n");
+				goto done;
+			}
+		} else {
+			err = brcmf_fil_iovar_int_set(ifp, "sup_wpa", 0);
+		}
 
-	if (sme->bssid)
-		memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
-	else
-		eth_broadcast_addr(ext_join_params->assoc_le.bssid);
+		if ((profile->use_fwsup == BRCMF_PROFILE_FWSUP_PSK) &&
+		    params->crypto.psk)
+			err = brcmf_set_pmk(ifp, params->crypto.psk,
+					    BRCMF_WSEC_MAX_PSK_LEN);
+		else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) {
+			/* clean up user-space RSNE */
+			err = brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0);
+			if (err) {
+				bphy_err(drvr,
+					 "failed to clean up user-space RSNE\n");
+				goto done;
+			}
+			err = brcmf_fwvid_set_sae_password(ifp, &params->crypto);
+			if (!err && params->crypto.psk)
+				err = brcmf_set_pmk(ifp, params->crypto.psk,
+						    BRCMF_WSEC_MAX_PSK_LEN);
+		}
+		if (err)
+			goto done;
+	}
+	brcmf_set_join_pref(ifp, &params->bss_select);
+	if (params->ssid_len < IEEE80211_MAX_SSID_LEN)
+		brcmf_dbg(CONN, "SSID \"%.*s\", len (%zu)\n",
+			  (int)params->ssid_len, params->ssid, params->ssid_len);
+	join_params = drvr->join_param_handler.get_struct_for_connect(
+		cfg, &join_params_size, params);
 
-	if (cfg->channel) {
-		ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
+	if (join_params) {
+		err = brcmf_fil_bsscfg_data_set(ifp, "join", join_params,
+						join_params_size);
 
-		ext_join_params->assoc_le.chanspec_list[0] =
-			cpu_to_le16(chanspec);
-		/* Increase dwell time to receive probe response or detect
-		 * beacon from target AP at a noisy air only during connect
-		 * command.
-		 */
-		ext_join_params->scan_le.active_time =
-			cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
-		ext_join_params->scan_le.passive_time =
-			cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
-		/* To sync with presence period of VSDB GO send probe request
-		 * more frequently. Probe request will be stopped when it gets
-		 * probe response from target AP/GO.
-		 */
-		ext_join_params->scan_le.nprobes =
-			cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
-				    BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
-	} else {
-		ext_join_params->scan_le.active_time = cpu_to_le32(-1);
-		ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
-		ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
+		/* We only free the join parameters if we were successful.
+		 * Otherwise they are used to extract the fallback, below */
+		if (!err) {
+			kfree(join_params);
+			/* This is it. join command worked, we are done */
+			goto done;
+		}
+		/* For versions >= 1, this should have worked, so report the error */
+		if (drvr->join_param_handler.version >= 1) {
+			bphy_err(drvr, "Failed to use join iovar to join: %d\n",
+				 err);
+		}
 	}
 
-	brcmf_set_join_pref(ifp, &sme->bss_select);
-
-	err  = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
-					 join_params_size);
-	kfree(ext_join_params);
-	if (!err)
-		/* This is it. join command worked, we are done */
+	/* Fall back to WLC_SET_SSID, which only needs the join_params portion of the structure */
+	fallback_join_params = drvr->join_param_handler.get_join_from_ext_join(
+		join_params, &fallback_join_params_size);
+	if (!fallback_join_params) {
+		bphy_err(drvr, "Unable to generate fallback join params\n");
+		kfree(join_params);
+		if (!err)
+			err = -ENOMEM;
+		goto done;
-
-	/* join command failed, fallback to set ssid */
-	memset(&join_params, 0, sizeof(join_params));
-	join_params_size = sizeof(join_params.ssid_le);
-
-	memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid_len);
-	join_params.ssid_le.SSID_len = cpu_to_le32(ssid_len);
-
-	if (sme->bssid)
-		memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
-	else
-		eth_broadcast_addr(join_params.params_le.bssid);
-
-	if (cfg->channel) {
-		join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
-		join_params.params_le.chanspec_num = cpu_to_le32(1);
-		join_params_size += sizeof(join_params.params_le);
 	}
 	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
-				     &join_params, join_params_size);
+				     fallback_join_params,
+				     fallback_join_params_size);
+
+	kfree(join_params);
+	kfree(fallback_join_params);
 	if (err)
 		bphy_err(drvr, "BRCMF_C_SET_SSID failed (%d)\n", err);
 
@@ -2789,6 +2540,8 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
 	s32 val;
 	s32 wsec;
 	s32 err;
+	u32 algos = 0;
+	u32 mask = 0;
 	u8 keybuf[8];
 	bool ext_key;
 
@@ -2872,6 +2625,30 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
 		val = AES_ENABLED;
 		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
 		break;
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GCMP)) {
+			brcmf_err("the lower layer does not support GCMP\n");
+			err = -EOPNOTSUPP;
+			goto done;
+		}
+		key->algo = CRYPTO_ALGO_AES_GCM256;
+		val = AES_ENABLED;
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_GCMP_256\n");
+		algos = KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256);
+		mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+		break;
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+		if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GCMP)) {
+			brcmf_err("the lower layer does not support GCMP\n");
+			err = -EOPNOTSUPP;
+			goto done;
+		}
+		key->algo = CRYPTO_ALGO_BIP_GMAC256;
+		val = AES_ENABLED;
+		algos = KEY_ALGO_MASK(CRYPTO_ALGO_BIP_GMAC256);
+		mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+		brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_BIP_GMAC_256\n");
+		break;
 	default:
 		bphy_err(drvr, "Invalid cipher (0x%x)\n", params->cipher);
 		err = -EINVAL;
@@ -2893,6 +2670,17 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
 		bphy_err(drvr, "set wsec error (%d)\n", err);
 		goto done;
 	}
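+	/* For GCMP-capable firmware, also pass the selected key algorithms to
+	 * the firmware through the wsec_info iovar (algos/mask pair).
+	 */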
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GCMP)) {
+		brcmf_dbg(CONN,
+			  "set_wsec_info algos (0x%x) mask (0x%x)\n",
+			  algos, mask);
+		err = brcmf_set_wsec_info_algos(ifp, algos, mask);
+		if (err) {
+			brcmf_err("set wsec_info error (%d)\n", err);
+			return err;
+		}
+	}
 
 done:
 	brcmf_dbg(TRACE, "Exit\n");
@@ -3113,6 +2901,70 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
 	return 0;
 }
 
+static void brcmf_convert_ratespec_to_rateinfo(u32 ratespec,
+					       struct rate_info *rateinfo)
+{
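+	/* A rate spec packs bandwidth, PHY type (HT/VHT/HE/EHT), MCS, NSS and
+	 * guard-interval information into a single 32-bit word; translate it
+	 * into cfg80211's struct rate_info.
+	 */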
+	/* First extract the bandwidth info */
+	switch (ratespec & BRCMF_RSPEC_BW_MASK) {
+	case BRCMF_RSPEC_BW_20MHZ:
+		rateinfo->bw = RATE_INFO_BW_20;
+		break;
+	case BRCMF_RSPEC_BW_40MHZ:
+		rateinfo->bw = RATE_INFO_BW_40;
+		break;
+	case BRCMF_RSPEC_BW_80MHZ:
+		rateinfo->bw = RATE_INFO_BW_80;
+		break;
+	case BRCMF_RSPEC_BW_160MHZ:
+		rateinfo->bw = RATE_INFO_BW_160;
+		break;
+	case BRCMF_RSPEC_BW_320MHZ:
+		rateinfo->bw = RATE_INFO_BW_320;
+		break;
+	default:
+		/* Fill in nothing */
+		break;
+	}
+	if (BRCMF_RSPEC_ISHT(ratespec)) {
+		rateinfo->flags |= RATE_INFO_FLAGS_MCS;
+		rateinfo->mcs = ratespec & BRCMF_RSPEC_HT_MCS_MASK;
+	} else if (BRCMF_RSPEC_ISVHT(ratespec)) {
+		rateinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+		rateinfo->mcs = ratespec & BRCMF_RSPEC_VHT_MCS_MASK;
+		rateinfo->nss = (ratespec & BRCMF_RSPEC_VHT_NSS_MASK) >>
+				BRCMF_RSPEC_VHT_NSS_SHIFT;
+	} else if (BRCMF_RSPEC_ISHE(ratespec)) {
+		u32 ltf_gi = BRCMF_RSPEC_HE_LTF_GI(ratespec);
+
+		rateinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+		rateinfo->mcs = ratespec & BRCMF_RSPEC_HE_MCS_MASK;
+		rateinfo->nss = (ratespec & BRCMF_RSPEC_HE_NSS_MASK) >>
+				BRCMF_RSPEC_HE_NSS_SHIFT;
+		rateinfo->he_dcm = BRCMF_RSPEC_HE_DCM(ratespec);
+		if (HE_IS_GI_0_8us(ltf_gi)) {
+			rateinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+		} else if (HE_IS_GI_1_6us(ltf_gi)) {
+			rateinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+		} else if (HE_IS_GI_3_2us(ltf_gi)) {
+			rateinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+		}
+	} else if (BRCMF_RSPEC_ISEHT(ratespec)) {
+		u32 ltf_gi = BRCMF_RSPEC_EHT_LTF_GI(ratespec);
+
+		rateinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+		rateinfo->mcs = ratespec & BRCMF_RSPEC_EHT_MCS_MASK;
+		rateinfo->nss = (ratespec & BRCMF_RSPEC_EHT_NSS_MASK) >>
+				BRCMF_RSPEC_EHT_NSS_SHIFT;
+		if (EHT_IS_GI_0_8us(ltf_gi)) {
+			rateinfo->eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
+		} else if (EHT_IS_GI_1_6us(ltf_gi)) {
+			rateinfo->eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
+		} else if (EHT_IS_GI_3_2us(ltf_gi)) {
+			rateinfo->eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
+		}
+	}
+}
+
 static s32
 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
 			   const u8 *mac, struct station_info *sinfo)
@@ -3130,6 +2982,8 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
 	s32 count_rssi = 0;
 	int rssi;
 	u32 i;
+	u16 struct_ver;
+	u16 info_len;
 
 	brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
 	if (!check_vif_up(ifp->vif))
@@ -3153,7 +3007,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
 			goto done;
 		}
 	}
-	brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver));
+	info_len = le16_to_cpu(sta_info_le.len);
+	struct_ver = le16_to_cpu(sta_info_le.ver);
+	brcmf_dbg(TRACE, "version %d\n", struct_ver);
 	sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
 	sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
 	sta_flags = le32_to_cpu(sta_info_le.flags);
@@ -3187,12 +3043,13 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
 			sinfo->rxrate.legacy =
 				le32_to_cpu(sta_info_le.rx_rate) / 100;
 		}
-		if (le16_to_cpu(sta_info_le.ver) >= 4) {
+		if (struct_ver >= 4) {
 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
 			sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes);
 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
 			sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
 		}
+
 		for (i = 0; i < BRCMF_ANT_MAX; i++) {
 			if (sta_info_le.rssi[i] == 0 ||
 			    sta_info_le.rx_lastpkt_rssi[i] == 0)
@@ -3231,6 +3088,25 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
 			}
 		}
 	}
+	/* Some version 7 structs have ratespecs from the last packet. */
+	if (struct_ver >= 7) {
+		if (info_len >= sizeof(sta_info_le)) {
+			brcmf_convert_ratespec_to_rateinfo(
+				le32_to_cpu(sta_info_le.v7.tx_rspec),
+				&sinfo->txrate);
+			brcmf_convert_ratespec_to_rateinfo(
+				le32_to_cpu(sta_info_le.v7.rx_rspec),
+				&sinfo->rxrate);
+		} else {
+			/* We didn't get the fields we were expecting;
+			 * fall back to the nrate iovar.
+			 */
+			u32 nrate = 0;
+
+			err = brcmf_fil_iovar_int_get(ifp, "nrate", &nrate);
+			if (!err) {
+				brcmf_convert_ratespec_to_rateinfo(
+					nrate, &sinfo->txrate);
+			}
+		}
+	}
 done:
 	brcmf_dbg(TRACE, "Exit\n");
 	return err;
@@ -3331,6 +3207,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
 	struct cfg80211_bss *bss;
 	enum nl80211_band band;
 	struct brcmu_chan ch;
+	u16 chanspec;
 	u16 channel;
 	u32 freq;
 	u16 notify_capability;
@@ -3344,20 +3221,41 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
 		return -EINVAL;
 	}
 
+	chanspec = le16_to_cpu(bi->chanspec);
 	if (!bi->ctl_ch) {
-		ch.chspec = le16_to_cpu(bi->chanspec);
+		ch.chspec = chanspec;
 		cfg->d11inf.decchspec(&ch);
 		bi->ctl_ch = ch.control_ch_num;
 	}
 	channel = bi->ctl_ch;
 
-	if (channel <= CH_MAX_2G_CHANNEL)
-		band = NL80211_BAND_2GHZ;
-	else
+	if (CHSPEC_IS6G(chanspec))
+		band = NL80211_BAND_6GHZ;
+	else if (CHSPEC_IS5G(chanspec))
 		band = NL80211_BAND_5GHZ;
+	else
+		band = NL80211_BAND_2GHZ;
 
 	freq = ieee80211_channel_to_frequency(channel, band);
+	if (!freq) {
+		brcmf_err("Invalid frequency %d returned for channel %d, band %d. chanspec was %04x\n",
+			  freq, channel, band, bi->chanspec);
+
+		/* Ignore this BSS rather than trying to continue with it;
+		 * otherwise we would cause an oops because the frequency is 0.
+		 * This mainly happens for a new frequency band we have not
+		 * seen before, and returning an error would make the whole
+		 * scan fail.  It seems better to report the error, skip this
+		 * BSS, and move on.
+		 */
+		return 0;
+	}
 	bss_data.chan = ieee80211_get_channel(wiphy, freq);
+	if (!bss_data.chan) {
+		brcmf_err("Could not convert frequency into channel for channel %d, band %d, chanspec was %04x\n",
+			  channel, band, bi->chanspec);
+		return 0;
+	}
 	bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime());
 
 	notify_capability = le16_to_cpu(bi->capability);
@@ -3406,8 +3304,9 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
 
 	bss_list = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
 	if (bss_list->count != 0 &&
-	    bss_list->version != BRCMF_BSS_INFO_VERSION) {
-		bphy_err(drvr, "Version %d != WL_BSS_INFO_VERSION\n",
+	    (bss_list->version < BRCMF_BSS_INFO_MIN_VERSION ||
+	    bss_list->version > BRCMF_BSS_INFO_MAX_VERSION)) {
+		bphy_err(drvr, "BSS info version %d unsupported\n",
 			 bss_list->version);
 		return -EOPNOTSUPP;
 	}
@@ -3445,7 +3344,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
 	buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
 	if (buf == NULL) {
 		err = -ENOMEM;
-		goto CleanUp;
+		goto cleanup;
 	}
 
 	*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
@@ -3454,7 +3353,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
 				     buf, WL_BSS_INFO_MAX);
 	if (err) {
 		bphy_err(drvr, "WLC_GET_BSS_INFO failed: %d\n", err);
-		goto CleanUp;
+		goto cleanup;
 	}
 
 	bi = (struct brcmf_bss_info_le *)(buf + 4);
@@ -3464,10 +3363,18 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
 
 	if (ch.band == BRCMU_CHAN_BAND_2G)
 		band = wiphy->bands[NL80211_BAND_2GHZ];
-	else
+	else if (ch.band == BRCMU_CHAN_BAND_5G)
 		band = wiphy->bands[NL80211_BAND_5GHZ];
+	else
+		band = wiphy->bands[NL80211_BAND_6GHZ];
 
 	freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
+	if (freq == 0) {
+		brcmf_err("Invalid frequency %d returned for channel %d, band %d. chanspec was %04x\n",
+			  freq, ch.control_ch_num, ch.band, bi->chanspec);
+		goto cleanup;
+	}
+
 	cfg->channel = freq;
 	notify_channel = ieee80211_get_channel(wiphy, freq);
 
@@ -3490,12 +3397,12 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
 
 	if (!bss) {
 		err = -ENOMEM;
-		goto CleanUp;
+		goto cleanup;
 	}
 
 	cfg80211_put_bss(wiphy, bss);
 
-CleanUp:
+cleanup:
 
 	kfree(buf);
 
@@ -3747,17 +3654,11 @@ brcmf_alloc_internal_escan_request(struct wiphy *wiphy, u32 n_netinfo) {
 }
 
 static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
-					 u8 *ssid, u8 ssid_len, u8 channel)
+					 u8 *ssid, u8 ssid_len, u8 channel, enum nl80211_band band)
 {
 	struct ieee80211_channel *chan;
-	enum nl80211_band band;
 	int freq, i;
 
-	if (channel <= CH_MAX_2G_CHANNEL)
-		band = NL80211_BAND_2GHZ;
-	else
-		band = NL80211_BAND_5GHZ;
-
 	freq = ieee80211_channel_to_frequency(channel, band);
 	if (!freq)
 		return -EINVAL;
@@ -3813,53 +3714,30 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp, u32 fwmap,
 	return 0;
 }
 
-static struct brcmf_pno_net_info_le *
-brcmf_get_netinfo_array(struct brcmf_pno_scanresults_le *pfn_v1)
-{
-	struct brcmf_pno_scanresults_v2_le *pfn_v2;
-	struct brcmf_pno_net_info_le *netinfo;
-
-	switch (pfn_v1->version) {
-	default:
-		WARN_ON(1);
-		fallthrough;
-	case cpu_to_le32(1):
-		netinfo = (struct brcmf_pno_net_info_le *)(pfn_v1 + 1);
-		break;
-	case cpu_to_le32(2):
-		pfn_v2 = (struct brcmf_pno_scanresults_v2_le *)pfn_v1;
-		netinfo = (struct brcmf_pno_net_info_le *)(pfn_v2 + 1);
-		break;
-	}
-
-	return netinfo;
-}
-
 /* PFN result doesn't have all the info which are required by the supplicant
  * (For e.g IEs) Do a target Escan so that sched scan results are reported
  * via wl_inform_single_bss in the required format. Escan does require the
  * scan request in the form of cfg80211_scan_request. For timebeing, create
  * cfg80211_scan_request one out of the received PNO event.
  */
-static s32
-brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
-				const struct brcmf_event_msg *e, void *data)
+static s32 brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
+					   const struct brcmf_event_msg *e,
+					   void *data)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	struct brcmf_cfg80211_info *cfg = drvr->config;
-	struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
 	struct cfg80211_scan_request *request = NULL;
 	struct wiphy *wiphy = cfg_to_wiphy(cfg);
 	int i, err = 0;
-	struct brcmf_pno_scanresults_le *pfn_result;
 	u32 bucket_map;
 	u32 result_count;
 	u32 status;
-	u32 datalen;
+	u32 min_data_len;
 
 	brcmf_dbg(SCAN, "Enter\n");
+	min_data_len = drvr->pno_handler.get_min_data_len();
 
-	if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
+	if (e->datalen < min_data_len) {
 		brcmf_dbg(SCAN, "Event data to small. Ignore\n");
 		return 0;
 	}
@@ -3869,9 +3747,8 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
 		return 0;
 	}
 
-	pfn_result = (struct brcmf_pno_scanresults_le *)data;
-	result_count = le32_to_cpu(pfn_result->count);
-	status = le32_to_cpu(pfn_result->status);
+	result_count = drvr->pno_handler.get_result_count(data);
+	status = drvr->pno_handler.get_result_status(data);
 
 	/* PFN event is limited to fit 512 bytes so we may get
 	 * multiple NET_FOUND events. For now place a warning here.
@@ -3882,38 +3759,33 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
 		bphy_err(drvr, "FALSE PNO Event. (pfn_count == 0)\n");
 		goto out_err;
 	}
-
-	netinfo_start = brcmf_get_netinfo_array(pfn_result);
-	datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
-	if (datalen < result_count * sizeof(*netinfo)) {
-		bphy_err(drvr, "insufficient event data\n");
+	err = drvr->pno_handler.validate_pfn_results(data, e->datalen);
+	if (err) {
+		bphy_err(drvr, "Invalid escan results (%d)\n", err);
 		goto out_err;
 	}
-
-	request = brcmf_alloc_internal_escan_request(wiphy,
-						     result_count);
+	request = brcmf_alloc_internal_escan_request(wiphy, result_count);
 	if (!request) {
 		err = -ENOMEM;
 		goto out_err;
 	}
-
 	bucket_map = 0;
 	for (i = 0; i < result_count; i++) {
-		netinfo = &netinfo_start[i];
-
-		if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
-			netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
-		brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
-			  netinfo->SSID, netinfo->channel);
-		bucket_map |= brcmf_pno_get_bucket_map(cfg->pno, netinfo);
-		err = brcmf_internal_escan_add_info(request,
-						    netinfo->SSID,
-						    netinfo->SSID_len,
-						    netinfo->channel);
+		u8 channel;
+		enum nl80211_band band;
+		u8 ssid[IEEE80211_MAX_SSID_LEN];
+		u8 ssid_len;
+
+		drvr->pno_handler.get_result_info(data, i, &ssid, &ssid_len,
+						 &channel, &band);
+		brcmf_dbg(SCAN, "SSID:%.32s Channel:%d Band:%d\n", ssid,
+			  channel, band);
+		bucket_map |= drvr->pno_handler.get_bucket_map(data, i, cfg->pno);
+		err = brcmf_internal_escan_add_info(request, ssid, ssid_len,
+						    channel, band);
 		if (err)
 			goto out_err;
 	}
-
 	if (!bucket_map)
 		goto free_req;
 
@@ -4016,48 +3888,50 @@ static s32 brcmf_config_wowl_pattern(struct brcmf_if *ifp, u8 cmd[4],
 	return ret;
 }
 
-static s32
-brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
-		      void *data)
+static s32 brcmf_wowl_nd_results(struct brcmf_if *ifp,
+				 const struct brcmf_event_msg *e, void *data)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	struct brcmf_cfg80211_info *cfg = drvr->config;
-	struct brcmf_pno_scanresults_le *pfn_result;
-	struct brcmf_pno_net_info_le *netinfo;
+	u32 min_data_len;
+	u8 channel;
+	enum nl80211_band band;
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+	u8 ssid_len;
+	u32 result_count;
 
 	brcmf_dbg(SCAN, "Enter\n");
 
-	if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
+	min_data_len = drvr->pno_handler.get_min_data_len();
+
+	if (e->datalen < min_data_len) {
 		brcmf_dbg(SCAN, "Event data to small. Ignore\n");
 		return 0;
 	}
 
-	pfn_result = (struct brcmf_pno_scanresults_le *)data;
 
 	if (e->event_code == BRCMF_E_PFN_NET_LOST) {
 		brcmf_dbg(SCAN, "PFN NET LOST event. Ignore\n");
 		return 0;
 	}
 
-	if (le32_to_cpu(pfn_result->count) < 1) {
+	result_count = drvr->pno_handler.get_result_count(data);
+	if (result_count < 1) {
 		bphy_err(drvr, "Invalid result count, expected 1 (%d)\n",
-			 le32_to_cpu(pfn_result->count));
+			 result_count);
 		return -EINVAL;
 	}
 
-	netinfo = brcmf_get_netinfo_array(pfn_result);
-	if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
-		netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
-	memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
-	cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
+	drvr->pno_handler.get_result_info(data, 0, &ssid, &ssid_len, &channel,
+					 &band);
+	memcpy(cfg->wowl.nd->ssid.ssid, ssid, ssid_len);
+	cfg->wowl.nd->ssid.ssid_len = ssid_len;
 	cfg->wowl.nd->n_channels = 1;
 	cfg->wowl.nd->channels[0] =
-		ieee80211_channel_to_frequency(netinfo->channel,
-			netinfo->channel <= CH_MAX_2G_CHANNEL ?
-					NL80211_BAND_2GHZ : NL80211_BAND_5GHZ);
+		ieee80211_channel_to_frequency(channel, band);
+
 	cfg->wowl.nd_info->n_matches = 1;
 	cfg->wowl.nd_info->matches[0] = cfg->wowl.nd;
-
 	/* Inform (the resume task) that the net detect information was recvd */
 	cfg->wowl.nd_data_completed = true;
 	wake_up(&cfg->wowl.nd_data_wait);
@@ -5131,6 +5005,25 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
 		  settings->inactivity_timeout);
 	dev_role = ifp->vif->wdev.iftype;
 	mbss = ifp->vif->mbss;
+	/* Bring the firmware into the correct state for AP mode */
+	if (dev_role == NL80211_IFTYPE_AP) {
+		brcmf_dbg(TRACE, "set AP mode\n");
+		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
+		if (err < 0) {
+			bphy_err(drvr, "setting AP mode failed %d\n",
+				err);
+			goto exit;
+		}
+
+		bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx);
+		bss_enable.enable = cpu_to_le32(WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE);
+		err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+							sizeof(bss_enable));
+		if (err < 0) {
+			bphy_err(drvr, "AP role set error, %d\n", err);
+			goto exit;
+		}
+	}
 
 	/* store current 11d setting */
 	if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
@@ -5716,6 +5609,9 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
 	case BRCMU_CHAN_BAND_5G:
 		band = NL80211_BAND_5GHZ;
 		break;
+	case BRCMU_CHAN_BAND_6G:
+		band = NL80211_BAND_6GHZ;
+		break;
 	}
 
 	switch (ch.bw) {
@@ -5737,9 +5633,19 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
 	}
 
 	freq = ieee80211_channel_to_frequency(ch.control_ch_num, band);
+	if (freq == 0) {
+		brcmf_err("Invalid frequency %d returned for channel %d, band %d. chanspec was %04x\n",
+			  freq, ch.control_ch_num, ch.band, chanspec);
+		return -EINVAL;
+	}
 	chandef->chan = ieee80211_get_channel(wiphy, freq);
 	chandef->width = width;
 	chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band);
+	if (chandef->center_freq1 == 0) {
+		brcmf_err("Invalid frequency %d returned for channel %d, band %d. chanspec was %04x\n",
+			  freq, ch.chnum, ch.band, chanspec);
+		return -EINVAL;
+	}
 	chandef->center_freq2 = 0;
 
 	return 0;
@@ -5904,17 +5810,29 @@ static int brcmf_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
 				  const struct cfg80211_pmk_conf *conf)
 {
 	struct brcmf_if *ifp;
-
+	struct brcmf_pub *drvr;
+	int ret;
+
 	brcmf_dbg(TRACE, "enter\n");
 
 	/* expect using firmware supplicant for 1X */
 	ifp = netdev_priv(dev);
-	if (WARN_ON(ifp->vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_1X))
+	drvr = ifp->drvr;
+	if (WARN_ON((ifp->vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_1X) &&
+		    (ifp->vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_ROAM) &&
+		    (ifp->vif->profile.is_ft != true) &&
+		    (ifp->vif->profile.is_okc != true)))
 		return -EINVAL;
 
 	if (conf->pmk_len > BRCMF_WSEC_MAX_PSK_LEN)
 		return -ERANGE;
 
+	if (ifp->vif->profile.is_okc) {
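+		/* For OKC the PMK is additionally passed to the firmware
+		 * through the okc_info_pmk iovar.
+		 */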
+		ret = brcmf_fil_iovar_data_set(ifp, "okc_info_pmk", conf->pmk,
+					       conf->pmk_len);
+		if (ret < 0)
+			bphy_err(drvr, "okc_info_pmk iovar failed: ret=%d\n",
+				 ret);
+	}
 	return brcmf_set_pmk(ifp, conf->pmk, conf->pmk_len);
 }
 
@@ -6351,6 +6269,46 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
 	return err;
 }
 
+static bool brcmf_has_pmkid(const u8 *parse, u32 len)
+{
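+	/* Return true when the RSN IE in the buffer carries at least one
+	 * PMKID.  RSN IE layout: version, group cipher, pairwise cipher list,
+	 * AKM list, RSN capabilities, then an optional PMKID count and list.
+	 */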
+	const struct brcmf_tlv *rsn_ie;
+	const u8 *ie;
+	u32 ie_len;
+	u32 offset;
+	u16 count;
+
+	rsn_ie = brcmf_parse_tlvs(parse, len, WLAN_EID_RSN);
+	if (!rsn_ie)
+		goto done;
+	ie = (const u8 *)rsn_ie;
+	ie_len = rsn_ie->len + TLV_HDR_LEN;
+	/* Skip group data cipher suite */
+	offset = TLV_HDR_LEN + WPA_IE_VERSION_LEN + WPA_IE_MIN_OUI_LEN;
+	if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len)
+		goto done;
+	/* Skip pairwise cipher suite(s) */
+	count = ie[offset] + (ie[offset + 1] << 8);
+	offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN);
+	if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len)
+		goto done;
+	/* Skip auth key management suite(s) */
+	count = ie[offset] + (ie[offset + 1] << 8);
+	offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN);
+	if (offset + RSN_CAP_LEN >= ie_len)
+		goto done;
+	/* Skip rsn capabilities */
+	offset += RSN_CAP_LEN;
+	if (offset + RSN_PMKID_COUNT_LEN > ie_len)
+		goto done;
+	/* Extract PMKID count */
+	count = ie[offset] + (ie[offset + 1] << 8);
+	if (count)
+		return true;
+
+done:
+	return false;
+}
+
 static s32
 brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
 		       struct net_device *ndev,
@@ -6395,10 +6353,17 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
 
 	if (ch.band == BRCMU_CHAN_BAND_2G)
 		band = wiphy->bands[NL80211_BAND_2GHZ];
-	else
+	else if (ch.band == BRCMU_CHAN_BAND_5G)
 		band = wiphy->bands[NL80211_BAND_5GHZ];
+	else
+		band = wiphy->bands[NL80211_BAND_6GHZ];
 
 	freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
+	if (freq == 0) {
+		brcmf_err("Invalid frequency %d returned for channel %d, band %d. chanspec was %04x\n",
+			  freq, ch.control_ch_num, ch.band, bi->chanspec);
+		goto done;
+	}
 	notify_channel = ieee80211_get_channel(wiphy, freq);
 
 done:
@@ -6414,11 +6379,16 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
 	cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
 	brcmf_dbg(CONN, "Report roaming result\n");
 
-	if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X && profile->is_ft) {
-		cfg80211_port_authorized(ndev, profile->bssid, NULL, 0, GFP_KERNEL);
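+	/* The firmware supplicant handled the handshake; report the port as
+	 * authorized when a cached PMKID was used or FT/OKC roaming occurred.
+	 */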
+	if (((profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X ||
+	    profile->use_fwsup == BRCMF_PROFILE_FWSUP_ROAM) &&
+	    (brcmf_has_pmkid(roam_info.req_ie, roam_info.req_ie_len) ||
+	     profile->is_ft || profile->is_okc))) {
+		cfg80211_port_authorized(ndev, profile->bssid, NULL, 0,
+					 GFP_KERNEL);
 		brcmf_dbg(CONN, "Report port authorized\n");
 	}
 
+	clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
 	set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
 	brcmf_dbg(TRACE, "Exit\n");
 	return err;
@@ -6875,8 +6845,6 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
 	if (err)
 		bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err);
 
-	roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
-	roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
 	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
 				     (void *)roam_delta, sizeof(roam_delta));
 	if (err)
@@ -6969,15 +6937,34 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
 		goto fail_pbuf;
 	}
 
+	/* Changing regulatory domain may change power limits upwards.
+	 * To ensure that we correctly set the new band info, copy the original
+	 * info first.
+	 */
 	band = wiphy->bands[NL80211_BAND_2GHZ];
-	if (band)
+	if (band) {
+		memcpy(band->channels, &__wl_2ghz_channels,
+		       sizeof(__wl_2ghz_channels));
+		band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
 		for (i = 0; i < band->n_channels; i++)
 			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+	}
 	band = wiphy->bands[NL80211_BAND_5GHZ];
-	if (band)
+	if (band) {
+		memcpy(band->channels, &__wl_5ghz_channels,
+		       sizeof(__wl_5ghz_channels));
+		band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
 		for (i = 0; i < band->n_channels; i++)
 			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
-
+	}
+	band = wiphy->bands[NL80211_BAND_6GHZ];
+	if (band) {
+		memcpy(band->channels, &__wl_6ghz_channels,
+		       sizeof(__wl_6ghz_channels));
+		band->n_channels = ARRAY_SIZE(__wl_6ghz_channels);
+		for (i = 0; i < band->n_channels; i++)
+			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+	}
 	total = le32_to_cpu(list->count);
 	if (total > BRCMF_MAX_CHANSPEC_LIST) {
 		bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
@@ -6994,6 +6981,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
 			band = wiphy->bands[NL80211_BAND_2GHZ];
 		} else if (ch.band == BRCMU_CHAN_BAND_5G) {
 			band = wiphy->bands[NL80211_BAND_5GHZ];
+		} else if (ch.band == BRCMU_CHAN_BAND_6G) {
+			band = wiphy->bands[NL80211_BAND_6GHZ];
 		} else {
 			bphy_err(drvr, "Invalid channel Spec. 0x%x.\n",
 				 ch.chspec);
@@ -7015,6 +7004,7 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
 				break;
 			}
 		}
+
 		if (!channel) {
 			/* It seems firmware supports some channel we never
 			 * considered. Something new in IEEE standard?
@@ -7087,17 +7077,25 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 	struct brcmu_chan ch;
 	u32 num_chan;
 	int i, j;
+	s32 updown;
 
 	/* verify support for bw_cap command */
-	val = WLC_BAND_5G;
+	val = WLC_BAND_2G;
 	err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
-
+	brcmf_dbg(INFO, "Check bw_cap support:%d\n", err);
 	if (!err) {
+		/* Setting the bw_cap is DOWN restricted. */
+		updown = 0;
+		brcmf_fil_cmd_data_set(ifp, BRCMF_C_DOWN, &updown, sizeof(s32));
 		/* only set 2G bandwidth using bw_cap command */
 		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
 		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
 		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
 					       sizeof(band_bwcap));
+		brcmf_dbg(INFO, "set bw_cap support:%d\n", err);
+		brcmf_c_set_joinpref_default(ifp);
+		updown = 1;
+		brcmf_fil_cmd_data_set(ifp, BRCMF_C_UP, &updown, sizeof(s32));
 	} else {
 		brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
 		val = WLC_N_BW_40ALL;
@@ -7159,7 +7157,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 	return err;
 }
 
-static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
+static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[4], bool has_6g)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	u32 band, mimo_bwcap;
@@ -7167,17 +7165,29 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
 
 	band = WLC_BAND_2G;
 	err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
-	if (!err) {
-		bw_cap[NL80211_BAND_2GHZ] = band;
-		band = WLC_BAND_5G;
-		err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
-		if (!err) {
-			bw_cap[NL80211_BAND_5GHZ] = band;
-			return;
-		}
-		WARN_ON(1);
+	if (err)
+		goto fallback;
+	bw_cap[NL80211_BAND_2GHZ] = band;
+	band = WLC_BAND_5G;
+	err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
+	if (err)
+		goto fallback;
+	bw_cap[NL80211_BAND_5GHZ] = band;
+	if (!has_6g)
 		return;
-	}
+	band = WLC_BAND_6G;
+	err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band);
+	/* Prior to the introduction of 6G support, this function only fell
+	 * back when the 2G or 5G query failed.  As mimo_bw_cap does not
+	 * carry 6G bandwidth info anyway, keep that behavior.
+	 */
+	if (err)
+		return;
+	bw_cap[NL80211_BAND_6GHZ] = band;
+	return;
+fallback:
 	brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n");
 	err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap);
 	if (err)
@@ -7201,8 +7211,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
 }
 
 static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
-				u32 bw_cap[2], u32 nchain)
+				u32 bw_cap[4], u32 nrxchain)
 {
+	/* HT is not supported in the 6 GHz band */
+	if (band->band == NL80211_BAND_6GHZ)
+		return;
 	band->ht_cap.ht_supported = true;
 	if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
 		band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
@@ -7212,32 +7225,49 @@ static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
 	band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
 	band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
 	band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
-	memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
+	memset(band->ht_cap.mcs.rx_mask, 0xff, nrxchain);
 	band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 }
 
-static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
+static __le16 brcmf_get_mcs_map(u32 nstreams,
+				enum ieee80211_vht_mcs_support supp)
 {
 	u16 mcs_map;
 	int i;
 
-	for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
+	for (i = 0, mcs_map = 0xFFFF; i < nstreams; i++)
 		mcs_map = (mcs_map << 2) | supp;
 
 	return cpu_to_le16(mcs_map);
 }
 
 static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
-				 u32 bw_cap[2], u32 nchain, u32 txstreams,
-				 u32 txbf_bfe_cap, u32 txbf_bfr_cap)
+				 u32 bw_cap[4], u32 txstreams, u32 rxstreams,
+				 u32 txbf_bfe_cap, u32 txbf_bfr_cap,
+				 u32 ldpc_cap, u32 stbc_rx, u32 stbc_tx)
 {
 	__le16 mcs_map;
 
-	/* not allowed in 2.4G band */
-	if (band->band == NL80211_BAND_2GHZ)
+	/* not allowed in 2.4G or 6G band */
+	if (band->band == NL80211_BAND_2GHZ || band->band == NL80211_BAND_6GHZ)
 		return;
 
 	band->vht_cap.vht_supported = true;
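+	/* Highest supported rate: ~433 Mbit/s per spatial stream
+	 * (VHT MCS 9, 80 MHz, short GI).
+	 */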
+	band->vht_cap.vht_mcs.tx_highest = cpu_to_le16(433 * txstreams);
+	band->vht_cap.vht_mcs.rx_highest = cpu_to_le16(433 * rxstreams);
+
+	band->vht_cap.cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+			     IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+
+	if (ldpc_cap)
+		band->vht_cap.cap |= IEEE80211_VHT_CAP_RXLDPC;
+	if (stbc_tx)
+		band->vht_cap.cap |= IEEE80211_VHT_CAP_TXSTBC;
+
+	if (stbc_rx)
+		band->vht_cap.cap |=
+			(stbc_rx << IEEE80211_VHT_CAP_RXSTBC_SHIFT);
+
 	/* 80MHz is mandatory */
 	band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
 	if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
@@ -7245,8 +7275,10 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
 		band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
 	}
 	/* all support 256-QAM */
-	mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
+	mcs_map = brcmf_get_mcs_map(rxstreams, IEEE80211_VHT_MCS_SUPPORT_0_9);
 	band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
+	mcs_map = brcmf_get_mcs_map(txstreams, IEEE80211_VHT_MCS_SUPPORT_0_9);
+
 	band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
 
 	/* Beamforming support information */
@@ -7262,11 +7294,129 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
 	if ((txbf_bfe_cap || txbf_bfr_cap) && (txstreams > 1)) {
 		band->vht_cap.cap |=
 			(2 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
-		band->vht_cap.cap |= ((txstreams - 1) <<
-				IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT);
+		band->vht_cap.cap |=
+			((txstreams - 1)
+			 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT);
 		band->vht_cap.cap |=
 			IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB;
 	}
+	/* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
+	band->vht_cap.cap |=
+		(7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
+}
+
+static void brcmf_update_he_cap(struct ieee80211_supported_band *band,
+				struct ieee80211_sband_iftype_data *data)
+{
+	int idx = 1;
+	struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_he_cap_elem *he_cap_elem;
+	struct ieee80211_he_mcs_nss_supp *he_mcs;
+	struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+
+	if (!data) {
+		brcmf_err("no iftype data available\n");
+		return;
+	}
+
+	he_cap = &data->he_cap;
+	he_cap_elem = &he_cap->he_cap_elem;
+	he_mcs = &he_cap->he_mcs_nss_supp;
+	he_6ghz_capa = &data->he_6ghz_capa;
+
+	data->types_mask = BIT(NL80211_IFTYPE_STATION);
+	he_cap->has_he = true;
+
+	/* HE MAC Capabilities Information */
+	he_cap_elem->mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE |
+				       IEEE80211_HE_MAC_CAP0_TWT_REQ |
+				       IEEE80211_HE_MAC_CAP0_TWT_RES;
+
+	he_cap_elem->mac_cap_info[1] =
+		IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US |
+		IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+
+	he_cap_elem->mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR |
+				       IEEE80211_HE_MAC_CAP2_BCAST_TWT;
+
+	he_cap_elem->mac_cap_info[3] =
+		IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+		IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 |
+		IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED;
+
+	he_cap_elem->mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+
+	/* HE PHY Capabilities Information */
+	he_cap_elem->phy_cap_info[0] =
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+	he_cap_elem->phy_cap_info[1] =
+		IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+
+	he_cap_elem->phy_cap_info[2] =
+		IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+		IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+		IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+
+	he_cap_elem->phy_cap_info[3] =
+		IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+		IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 |
+		IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
+		IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+
+	he_cap_elem->phy_cap_info[4] =
+		IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+		IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK |
+		IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 |
+		IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8;
+
+	he_cap_elem->phy_cap_info[5] =
+		IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+		IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK |
+		IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2;
+
+	he_cap_elem->phy_cap_info[6] =
+		IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+		IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+		IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+		IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
+		IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+		IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+		IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+
+	he_cap_elem->phy_cap_info[7] =
+		IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+		IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+
+	he_cap_elem->phy_cap_info[8] =
+		IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+		IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+		IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+		IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+
+	he_cap_elem->phy_cap_info[9] =
+		IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+		IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+		IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+		IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+
+	/* HE Supported MCS and NSS Set */
+	he_mcs->rx_mcs_80 = cpu_to_le16(0xfffa);
+	he_mcs->tx_mcs_80 = cpu_to_le16(0xfffa);
+	he_mcs->rx_mcs_160 = cpu_to_le16(0xfffa);
+	he_mcs->tx_mcs_160 = cpu_to_le16(0xfffa);
+	/* HE 6 GHz band capabilities */
+	if (band->band == NL80211_BAND_6GHZ) {
+		u16 capa = 0;
+
+		capa = FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+				  IEEE80211_HT_MPDU_DENSITY_8) |
+		       FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+				  IEEE80211_VHT_MAX_AMPDU_1024K) |
+		       FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN,
+				  IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454);
+		he_6ghz_capa->capa = cpu_to_le16(capa);
+	}
+	band->n_iftype_data = idx;
+	band->iftype_data = data;
 }
 
 static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
@@ -7276,26 +7426,49 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
 	struct wiphy *wiphy = cfg_to_wiphy(cfg);
 	u32 nmode;
 	u32 vhtmode = 0;
-	u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
+	/* Indexed by enum nl80211_band: 2 GHz, 5 GHz, 60 GHz, 6 GHz */
+	u32 bw_cap[4] = { 0, 0, 0, 0 };
 	u32 rxchain;
-	u32 nchain;
+	u32 txchain;
+	u32 nrxchain;
+	u32 ntxchain;
 	int err;
 	s32 i;
 	struct ieee80211_supported_band *band;
 	u32 txstreams = 0;
+	u32 rxstreams = 0;
 	u32 txbf_bfe_cap = 0;
 	u32 txbf_bfr_cap = 0;
+	u8 he_enable = 0;
+	struct brcmf_he_defcap he_cap;
+	u32 ldpc_cap = 0;
+	u32 stbc_rx = 0;
+	u32 stbc_tx = 0;
 
 	(void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
+	(void)brcmf_fil_iovar_int_get(ifp, "ldpc_cap", &ldpc_cap);
+	(void)brcmf_fil_iovar_int_get(ifp, "stbc_rx", &stbc_rx);
+	(void)brcmf_fil_iovar_int_get(ifp, "stbc_tx", &stbc_tx);
+	err = brcmf_fil_xtlv_int8_get(ifp, "he", BRCMF_HE_CMD_ENABLE,
+				      &he_enable);
+	if (!err && he_enable) {
+		brcmf_fil_xtlv_data_get(ifp, "he", BRCMF_HE_CMD_DEFCAP, &he_cap,
+					sizeof(he_cap));
+		brcmf_dbg_hex_dump(BRCMF_INFO_ON(), he_cap.mac_cap, 6,
+				   "default HE mac cap\n");
+		brcmf_dbg_hex_dump(BRCMF_INFO_ON(), he_cap.phy_cap, 11,
+				   "default HE phy cap\n");
+	}
 	err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
 	if (err) {
 		bphy_err(drvr, "nmode error (%d)\n", err);
-	} else {
-		brcmf_get_bwcap(ifp, bw_cap);
 	}
-	brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
+	brcmf_get_bwcap(ifp, bw_cap, he_enable != 0);
+	brcmf_dbg(INFO,
+		  "nmode=%d, vhtmode=%d, bw_cap=(%d, %d, %d), he_enable=%d\n",
 		  nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ],
-		  bw_cap[NL80211_BAND_5GHZ]);
+		  bw_cap[NL80211_BAND_5GHZ], bw_cap[NL80211_BAND_6GHZ],
+		  he_enable);
 
 	err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
 	if (err) {
@@ -7305,12 +7478,31 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
 		else
 			bphy_err(drvr, "rxchain error (%d)\n", err);
 
-		nchain = 1;
+		nrxchain = 1;
+		rxchain = 1;
 	} else {
-		for (nchain = 0; rxchain; nchain++)
+		for (nrxchain = 0; rxchain; nrxchain++)
 			rxchain = rxchain & (rxchain - 1);
 	}
-	brcmf_dbg(INFO, "nchain=%d\n", nchain);
+	brcmf_dbg(INFO, "nrxchain=%d\n", nrxchain);
+	err = brcmf_fil_iovar_int_get(ifp, "txchain", &txchain);
+	if (err) {
+		/* txchain unsupported by firmware of older chips */
+		if (err == -EBADE)
+			bphy_info_once(drvr, "txchain unsupported\n");
+		else
+			bphy_err(drvr, "txchain error (%d)\n", err);
+
+		ntxchain = 1;
+		txchain = 1;
+	} else {
+		for (ntxchain = 0; txchain; ntxchain++)
+			txchain = txchain & (txchain - 1);
+	}
+	brcmf_dbg(INFO, "ntxchain=%d\n", ntxchain);
+
+	wiphy->available_antennas_rx = nrxchain;
+	wiphy->available_antennas_tx = ntxchain;
 
 	err = brcmf_construct_chaninfo(cfg, bw_cap);
 	if (err) {
@@ -7319,6 +7511,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
 	}
 
 	if (vhtmode) {
+		(void)brcmf_fil_iovar_int_get(ifp, "rxstreams", &rxstreams);
 		(void)brcmf_fil_iovar_int_get(ifp, "txstreams", &txstreams);
 		(void)brcmf_fil_iovar_int_get(ifp, "txbf_bfe_cap",
 					      &txbf_bfe_cap);
@@ -7332,10 +7525,13 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
 			continue;
 
 		if (nmode)
-			brcmf_update_ht_cap(band, bw_cap, nchain);
+			brcmf_update_ht_cap(band, bw_cap, nrxchain);
 		if (vhtmode)
-			brcmf_update_vht_cap(band, bw_cap, nchain, txstreams,
-					     txbf_bfe_cap, txbf_bfr_cap);
+			brcmf_update_vht_cap(band, bw_cap, txstreams, rxstreams,
+					     txbf_bfe_cap, txbf_bfr_cap,
+					     ldpc_cap, stbc_rx, stbc_tx);
+		if (he_enable)
+			brcmf_update_he_cap(band, &sdata[band->band]);
 	}
 
 	return 0;
@@ -7589,7 +7785,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 	struct ieee80211_supported_band *band;
 	u16 max_interfaces = 0;
 	bool gscan;
-	__le32 bandlist[3];
+	__le32 bandlist[16];
 	u32 n_bands;
 	int err, i;
 
@@ -7653,6 +7849,18 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 			wiphy_ext_feature_set(wiphy,
 					      NL80211_EXT_FEATURE_SAE_OFFLOAD_AP);
 	}
+
+	/* FIXME: our partial SAE offload currently breaks with some APs */
+	if (0 && brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+		wiphy->features |= NL80211_FEATURE_SAE;
+
+	/* High accuracy and low power scans are always supported. */
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN);
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_LOW_POWER_SCAN);
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_LOW_SPAN_SCAN);
+	wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN;
+
 	wiphy->mgmt_stypes = brcmf_txrx_stypes;
 	wiphy->max_remain_on_channel_duration = 5000;
 	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
@@ -7708,12 +7916,27 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 			band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
 			wiphy->bands[NL80211_BAND_5GHZ] = band;
 		}
-	}
+		if (bandlist[i] == cpu_to_le32(WLC_BAND_6G)) {
+			band = kmemdup(&__wl_band_6ghz, sizeof(__wl_band_6ghz),
+				       GFP_KERNEL);
+			if (!band)
+				return -ENOMEM;
+
+			band->channels = kmemdup(&__wl_6ghz_channels,
+						 sizeof(__wl_6ghz_channels),
+						 GFP_KERNEL);
+			if (!band->channels) {
+				kfree(band);
+				return -ENOMEM;
+			}
 
+			band->n_channels = ARRAY_SIZE(__wl_6ghz_channels);
+			wiphy->bands[NL80211_BAND_6GHZ] = band;
+		}
+	}
 	if (wiphy->bands[NL80211_BAND_5GHZ] &&
 	    brcmf_feat_is_enabled(ifp, BRCMF_FEAT_DOT11H))
-		wiphy_ext_feature_set(wiphy,
-				      NL80211_EXT_FEATURE_DFS_OFFLOAD);
+		wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
 
 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 
@@ -8212,9 +8435,17 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
 	}
 
 	err = brcmf_translate_country_code(ifp->drvr, req->alpha2, &ccreq);
-	if (err)
-		return;
-
+	if (err) {
+		/* Because we ignore the default country code above,
+		 * we will start out in our custom reg domain, but the chip
+		 * may already be set to the right country.
+		 * As such, we force the bands to be re-set the first
+		 * time we try to set a country for real.
+		 */
+		if (err != -EAGAIN || !cfg->force_band_setup)
+			return;
+	}
+	cfg->force_band_setup = false;
 	err = brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
 	if (err) {
 		bphy_err(drvr, "Firmware rejected country setting\n");
@@ -8243,6 +8474,10 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
 		kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
 		kfree(wiphy->bands[NL80211_BAND_5GHZ]);
 	}
+	if (wiphy->bands[NL80211_BAND_6GHZ]) {
+		kfree(wiphy->bands[NL80211_BAND_6GHZ]->channels);
+		kfree(wiphy->bands[NL80211_BAND_6GHZ]);
+	}
 #if IS_ENABLED(CONFIG_PM)
 	if (wiphy->wowlan != &brcmf_wowlan_support)
 		kfree(wiphy->wowlan);
@@ -8277,6 +8512,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
 	cfg->pub = drvr;
 	init_vif_event(&cfg->vif_event);
 	INIT_LIST_HEAD(&cfg->vif_list);
+	cfg->force_band_setup = true;
 
 	vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION);
 	if (IS_ERR(vif))
@@ -8334,18 +8570,21 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
 	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_DUMP_OBSS))
 		ops->dump_survey = brcmf_cfg80211_dump_survey;
 
-	err = wiphy_register(wiphy);
-	if (err < 0) {
-		bphy_err(drvr, "Could not register wiphy device (%d)\n", err);
-		goto priv_out;
-	}
-
+	/* We have to configure the bands before we register the wiphy device
+	 * because it requires that band capabilities be correct.
+	 */
 	err = brcmf_setup_wiphybands(cfg);
 	if (err) {
 		bphy_err(drvr, "Setting wiphy bands failed (%d)\n", err);
-		goto wiphy_unreg_out;
+		goto priv_out;
 	}
 
+	err = wiphy_register(wiphy);
+	if (err < 0) {
+		bphy_err(drvr, "Could not register wiphy device (%d)\n", err);
+		goto priv_out;
+	}
+
 	/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
 	 * setup 40MHz in 2GHz band and enable OBSS scanning.
 	 */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 2abae8894614b6..94c641e43fd0aa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -8,6 +8,7 @@
 
 /* for brcmu_d11inf */
 #include <brcmu_d11.h>
+#include <brcmu_wifi.h>
 
 #include "core.h"
 #include "fwil_types.h"
@@ -125,7 +126,8 @@ enum brcmf_profile_fwsup {
 	BRCMF_PROFILE_FWSUP_NONE,
 	BRCMF_PROFILE_FWSUP_PSK,
 	BRCMF_PROFILE_FWSUP_1X,
-	BRCMF_PROFILE_FWSUP_SAE
+	BRCMF_PROFILE_FWSUP_SAE,
+	BRCMF_PROFILE_FWSUP_ROAM
 };
 
 /**
@@ -155,6 +157,7 @@ struct brcmf_cfg80211_profile {
 	enum brcmf_profile_fwsup use_fwsup;
 	u16 use_fwauth;
 	bool is_ft;
+	bool is_okc;
 };
 
 /**
@@ -327,6 +330,7 @@ struct brcmf_cfg80211_wowl {
  * @dongle_up: indicate whether dongle up or not.
  * @roam_on: on/off switch for dongle self-roaming.
  * @scan_tried: indicates if first scan attempted.
+ * @force_band_setup: indicates if we should force band setup
  * @dcmd_buf: dcmd buffer.
  * @extra_buf: mainly to grab assoc information.
  * @debugfsdir: debugfs folder for this device.
@@ -357,6 +361,7 @@ struct brcmf_cfg80211_info {
 	bool pwr_save;
 	bool dongle_up;
 	bool scan_tried;
+	bool force_band_setup;
 	u8 *dcmd_buf;
 	u8 *extra_buf;
 	struct dentry *debugfsdir;
@@ -386,6 +391,22 @@ struct brcmf_tlv {
 	u8 data[];
 };
 
+static inline enum nl80211_band fwil_band_to_nl80211(u16 band)
+{
+	switch (band) {
+	case WLC_BAND_2G:
+		return NL80211_BAND_2GHZ;
+	case WLC_BAND_5G:
+		return NL80211_BAND_5GHZ;
+	case WLC_BAND_6G:
+		return NL80211_BAND_6GHZ;
+	default:
+		WARN_ON(1);
+		break;
+	}
+	return 0;
+}
+
 static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
 {
 	return cfg->wiphy;
@@ -453,6 +474,8 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
 s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
 u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
 			struct ieee80211_channel *ch);
+u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
+			struct cfg80211_chan_def *ch);
 bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
 			     unsigned long state);
 void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 2ef92ef25517e8..9d7d69e5f99340 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -162,6 +162,15 @@ struct sbconfig {
 #define	SRCI_SRBSZ_SHIFT	0
 #define SR_BSZ_BASE		14
 
+#define SYSMEM_SRCI_ROMNB_MASK		0x3e0
+#define SYSMEM_SRCI_ROMNB_SHIFT		5
+#define SYSMEM_SRCI_SRNB_MASK		0x1f
+#define SYSMEM_SRCI_SRNB_SHIFT		0
+#define SYSMEM_SRCI_NEW_ROMNB_MASK	0xff000000
+#define SYSMEM_SRCI_NEW_ROMNB_SHIFT	24
+#define SYSMEM_SRCI_NEW_SRNB_MASK	0xff0000
+#define SYSMEM_SRCI_NEW_SRNB_SHIFT	16
+
 struct sbsocramregs {
 	u32 coreinfo;
 	u32 bwalloc;
@@ -436,25 +445,11 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
 {
 	struct brcmf_chip_priv *ci;
 	int count;
-	struct brcmf_core *d11core2 = NULL;
-	struct brcmf_core_priv *d11priv2 = NULL;
 
 	ci = core->chip;
 
-	/* special handle two D11 cores reset */
-	if (core->pub.id == BCMA_CORE_80211) {
-		d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
-		if (d11core2) {
-			brcmf_dbg(INFO, "found two d11 cores, reset both\n");
-			d11priv2 = container_of(d11core2,
-						struct brcmf_core_priv, pub);
-		}
-	}
-
 	/* must disable first to work for arbitrary current core state */
 	brcmf_chip_ai_coredisable(core, prereset, reset);
-	if (d11priv2)
-		brcmf_chip_ai_coredisable(d11priv2, prereset, reset);
 
 	count = 0;
 	while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
@@ -466,30 +461,9 @@ static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
 		usleep_range(40, 60);
 	}
 
-	if (d11priv2) {
-		count = 0;
-		while (ci->ops->read32(ci->ctx,
-				       d11priv2->wrapbase + BCMA_RESET_CTL) &
-				       BCMA_RESET_CTL_RESET) {
-			ci->ops->write32(ci->ctx,
-					 d11priv2->wrapbase + BCMA_RESET_CTL,
-					 0);
-			count++;
-			if (count > 50)
-				break;
-			usleep_range(40, 60);
-		}
-	}
-
 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
 			 postreset | BCMA_IOCTL_CLK);
 	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
-
-	if (d11priv2) {
-		ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
-				 postreset | BCMA_IOCTL_CLK);
-		ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
-	}
 }
 
 char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
@@ -659,6 +633,7 @@ static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
 	u32 memsize = 0;
 	u32 coreinfo;
 	u32 idx;
+	u32 nrb;
 	u32 nb;
 	u32 banksize;
 
@@ -666,10 +641,16 @@ static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
 		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);
 
 	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
-	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
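+	/* Sysmem core rev >= 12 reports the ROM and RAM bank counts in new
+	 * coreinfo fields; skip over the ROM banks when summing RAM banks.
+	 */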
+	if (sysmem->pub.rev >= 12) {
+		nrb = (coreinfo & SYSMEM_SRCI_NEW_ROMNB_MASK) >>
+		      SYSMEM_SRCI_NEW_ROMNB_SHIFT;
+		nb = (coreinfo & SYSMEM_SRCI_NEW_SRNB_MASK) >>
+		     SYSMEM_SRCI_NEW_SRNB_SHIFT;
+	} else {
+		nrb = (coreinfo & SYSMEM_SRCI_ROMNB_MASK) >> SYSMEM_SRCI_ROMNB_SHIFT;
+		nb = (coreinfo & SYSMEM_SRCI_SRNB_MASK) >> SYSMEM_SRCI_SRNB_SHIFT;
+	}
 
 	for (idx = 0; idx < nb; idx++) {
-		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
+		brcmf_chip_socram_banksize(sysmem, idx + nrb, &banksize);
 		memsize += banksize;
 	}
 
@@ -731,6 +712,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
 	case BRCM_CC_4366_CHIP_ID:
 	case BRCM_CC_43664_CHIP_ID:
 	case BRCM_CC_43666_CHIP_ID:
+	case BRCM_CC_4388_CHIP_ID:
 		return 0x200000;
 	case BRCM_CC_4355_CHIP_ID:
 	case BRCM_CC_4359_CHIP_ID:
@@ -1337,14 +1319,15 @@ static inline void
 brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
 {
 	struct brcmf_core *core;
+	int i;
 
 	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);
 
-	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
-	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
-				   D11_BCMA_IOCTL_PHYCLOCKEN,
-			     D11_BCMA_IOCTL_PHYCLOCKEN,
-			     D11_BCMA_IOCTL_PHYCLOCKEN);
+	/* Disable the cores only and let the firmware enable them. */
+	for (i = 0; (core = brcmf_chip_get_d11core(&chip->pub, i)); i++)
+		brcmf_chip_coredisable(core, D11_BCMA_IOCTL_PHYRESET |
+				       D11_BCMA_IOCTL_PHYCLOCKEN,
+				       D11_BCMA_IOCTL_PHYCLOCKEN);
 }
 
 static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index f26e4679e4ff02..81e8b612468d7f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -13,6 +13,7 @@
 #include "core.h"
 #include "bus.h"
 #include "debug.h"
+#include "fweh.h"
 #include "fwil.h"
 #include "fwil_types.h"
 #include "tracepoint.h"
@@ -266,7 +267,6 @@ static int brcmf_c_process_cal_blob(struct brcmf_if *ifp)
 int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
-	struct brcmf_fweh_info *fweh = drvr->fweh;
 	u8 buf[BRCMF_DCMD_SMLEN];
 	struct brcmf_bus *bus;
 	struct brcmf_rev_info_le revinfo;
@@ -412,27 +412,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 
 	brcmf_c_set_joinpref_default(ifp);
 
-	/* Setup event_msgs, enable E_IF */
-	err = brcmf_fil_iovar_data_get(ifp, "event_msgs", fweh->event_mask,
-				       fweh->event_mask_len);
-	if (err) {
-		bphy_err(drvr, "Get event_msgs error (%d)\n", err);
-		goto done;
-	}
-	/*
-	 * BRCMF_E_IF can safely be used to set the appropriate bit
-	 * in the event_mask as the firmware event code is guaranteed
-	 * to match the value of BRCMF_E_IF because it is old cruft
-	 * that all vendors have.
-	 */
-	setbit(fweh->event_mask, BRCMF_E_IF);
-	err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
-				       fweh->event_mask_len);
-	if (err) {
-		bphy_err(drvr, "Set event_msgs error (%d)\n", err);
-		goto done;
-	}
-
 	/* Setup default scan channel time */
 	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
 				    BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 3d63010ae079b4..9029ff0d36ca77 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1222,7 +1222,14 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
 	if (ret < 0)
 		goto fail;
 
-	brcmf_feat_attach(drvr);
+	ret = brcmf_feat_attach(drvr);
+	if (ret)
+		goto fail;
+
+	/* Setup event_msgs, enable E_IF */
+	ret = brcmf_fweh_init_events(ifp);
+	if (ret)
+		goto fail;
 
 	ret = brcmf_proto_init_done(drvr);
 	if (ret < 0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index d53839f855d726..731ea81edde8bc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -97,6 +97,68 @@ struct brcmf_rev_info {
 	u32 nvramrev;
 };
 
+struct brcmf_pno_info;
+enum nl80211_band;
+/**
+ * struct pno_struct_handler - handler for version-specific PFN/PNO structures
+ *
+ * Abstracts the incompatible PFN scan result layouts used by the different
+ * firmware interface versions, so that the event handling code does not
+ * need to know which structure version the firmware reports.
+ *
+ * @version: PFN interface version used by the firmware
+ * @pno_config: configure the PFN scan parameters
+ * @get_min_data_len: minimum event data length of a valid PFN result
+ * @get_result_count: number of results carried in the event data
+ * @get_result_status: status field of the PFN result
+ * @validate_pfn_results: sanity check the results against the event length
+ * @get_bucket_map: bucket map of the result at @idx
+ * @get_result_info: extract SSID, channel and band of one result
+ */
+struct pno_struct_handler {
+	u8 version;
+	int (*pno_config)(struct brcmf_if *ifp, u32 scan_freq, u32 mscan,
+			  u32 bestn);
+	u32 (*get_min_data_len)(void);
+	u32 (*get_result_count)(void *data);
+	u32 (*get_result_status)(void *data);
+	int (*validate_pfn_results)(void *data, u32 event_datalen);
+	u32 (*get_bucket_map)(void *data, int idx, struct brcmf_pno_info *pi);
+	int (*get_result_info)(void *data, int result_idx,
+			       u8 (*ssid)[IEEE80211_MAX_SSID_LEN], u8 *ssid_len,
+			       u8 *channel, enum nl80211_band *band);
+};
+
+struct cfg80211_scan_request;
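+/**
+ * struct scan_param_struct_handler - handler for scan parameter versions
+ *
+ * @version: scan interface version used by the firmware
+ * @get_struct_for_request: build the version-specific scan parameter
+ *	structure for a cfg80211 scan request
+ */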
+struct scan_param_struct_handler {
+	u8 version;
+	void *(*get_struct_for_request)(struct brcmf_cfg80211_info *cfg,
+					u32 *struct_size,
+					struct cfg80211_scan_request *request);
+};
+
+struct cfg80211_ibss_params;
+struct cfg80211_connect_params;
+
+/**
+ * struct join_param_struct_handler - handler for join parameter versions
+ *
+ * There are a number of different, incompatible structures and interface
+ * versions for the join/extended join parameters.  This handler abstracts
+ * away the actual structures used, so that callers do not have to worry
+ * about filling in the right struct for the firmware in use.
+ *
+ * This interface deliberately takes and returns opaque structures.
+ *
+ * @version: interface version the firmware supports/uses
+ * @get_struct_for_ibss: return a join parameter structure for a set of IBSS
+ *	parameters; the structure can be used to join the passed BSS
+ * @get_struct_for_connect: return an extended join parameter structure for a
+ *	set of connect parameters; the structure can be used to join the SSID
+ *	specified in the parameters
+ * @get_join_from_ext_join: when an extended join does not work, we fall back
+ *	to a regular join; this produces a join parameter structure from an
+ *	extended join one
+ */
+struct join_param_struct_handler {
+	u8 version;
+	/* This returns a join_param type struct */
+	void *(*get_struct_for_ibss)(struct brcmf_cfg80211_info *cfg,
+				     u32 *struct_size,
+				     struct cfg80211_ibss_params *params);
+	/* This returns an ext_join_param type struct */
+	void *(*get_struct_for_connect)(struct brcmf_cfg80211_info *cfg,
+					u32 *struct_size,
+					struct cfg80211_connect_params *params);
+	/* This returns the join param portion of an ext_join_param type struct.
+	 * The memory returned is separately allocated from the passed-in struct.
+	 */
+	void *(*get_join_from_ext_join)(void *ext_join_param, u32 *struct_size);
+};
+
 /* Common structure for module and instance linkage */
 struct brcmf_pub {
 	/* Linkage ponters */
@@ -145,6 +207,10 @@ struct brcmf_pub {
 	u8 sta_mac_idx;
 	const struct brcmf_fwvid_ops *vops;
 	void *vdata;
+	u16 cnt_ver;
+	struct pno_struct_handler pno_handler;
+	struct scan_param_struct_handler scan_param_handler;
+	struct join_param_struct_handler join_param_handler;
 };
 
 /* forward declarations */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
index 9bb5f709d41a27..432d93ae8fb854 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
@@ -85,6 +85,7 @@ do {								\
 #define BRCMF_FIL_ON()		(brcmf_msg_level & BRCMF_FIL_VAL)
 #define BRCMF_FWCON_ON()	(brcmf_msg_level & BRCMF_FWCON_VAL)
 #define BRCMF_SCAN_ON()		(brcmf_msg_level & BRCMF_SCAN_VAL)
+#define BRCMF_INFO_ON()		(brcmf_msg_level & BRCMF_INFO_VAL)
 
 #else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
 
@@ -104,6 +105,7 @@ do {								\
 #define BRCMF_FIL_ON()		0
 #define BRCMF_FWCON_ON()	0
 #define BRCMF_SCAN_ON()		0
+#define BRCMF_INFO_ON()		0
 
 #endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 0d9ae197fa1ec3..4575c250202052 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -16,9 +16,24 @@
 #include "fwvid.h"
 #include "feature.h"
 #include "common.h"
+#include "pno.h"
+#include "scan_param.h"
+#include "join_param.h"
 
 #define BRCMF_FW_UNSUPPORTED	23
 
+/* MIN branch version supporting join iovar versioning */
+#define MIN_JOINEXT_V1_FW_MAJOR 17u
+/* Branch/es supporting join iovar versioning prior to
+ * MIN_JOINEXT_V1_FW_MAJOR
+ */
+#define MIN_JOINEXT_V1_BR2_FW_MAJOR      16
+#define MIN_JOINEXT_V1_BR2_FW_MINOR      1
+
+#define MIN_JOINEXT_V1_BR1_FW_MAJOR      14
+#define MIN_JOINEXT_V1_BR1_FW_MINOR_2    2
+#define MIN_JOINEXT_V1_BR1_FW_MINOR_4    4
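+
+/* In short: within branch 14 only minor versions 2 and 4, within branch 16
+ * minor version 1 and later, and every branch from major version 17 on use
+ * join interface version 1 even though they lack the "join_ver" iovar.
+ */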
+
 /*
  * expand feature list to array of feature strings.
  */
@@ -44,6 +59,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
 	{ BRCMF_FEAT_DOT11H, "802.11h" },
 	{ BRCMF_FEAT_SAE, "sae" },
 	{ BRCMF_FEAT_FWAUTH, "idauth" },
+	{ BRCMF_FEAT_GCMP, "gcmp" }
 };
 
 #ifdef DEBUG
@@ -135,7 +151,7 @@ struct brcmf_feat_wlcfeat {
 
 static const struct brcmf_feat_wlcfeat brcmf_feat_wlcfeat_map[] = {
 	{ 12, 0, BIT(BRCMF_FEAT_PMKID_V2) },
-	{ 13, 0, BIT(BRCMF_FEAT_PMKID_V3) },
+	{ 13, 0, BIT(BRCMF_FEAT_PMKID_V3) }
 };
 
 static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
@@ -285,9 +301,12 @@ static int brcmf_feat_fwcap_debugfs_read(struct seq_file *seq, void *data)
 	return 0;
 }
 
-void brcmf_feat_attach(struct brcmf_pub *drvr)
+int brcmf_feat_attach(struct brcmf_pub *drvr)
 {
 	struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
+	struct brcmf_join_version_le join_ver;
+	struct brcmf_scan_version_le scan_ver;
+	struct brcmf_pno_param_v3_le pno_params;
 	struct brcmf_pno_macaddr_le pfn_mac;
 	struct brcmf_gscan_config gscan_cfg;
 	u32 wowl_cap;
@@ -330,6 +349,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable");
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MFP, "mfp");
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_DUMP_OBSS, "dump_obss");
+	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_EVENT_MSGS_EXT, "event_msgs_ext");
 
 	pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
 	err = brcmf_fil_iovar_data_get(ifp, "pfn_macaddr", &pfn_mac,
@@ -338,13 +358,71 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 		ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
 
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
-	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+
+	err = brcmf_fil_iovar_data_get(ifp, "join_ver", &join_ver, sizeof(join_ver));
+	if (!err) {
+		u16 ver = le16_to_cpu(join_ver.join_ver_major);
+
+		err = brcmf_join_param_setup_for_version(drvr, ver);
+	} else {
+		/* Default to version 0, unless it is one of the firmware
+		 * branches that don't have a join_ver iovar but still use
+		 * version 1.
+		 */
+		u8 version = 0;
+		struct brcmf_wlc_version_le ver;
+
+		err = brcmf_fil_iovar_data_get(ifp, "wlc_ver", &ver,
+					       sizeof(ver));
+		if (!err) {
+			u16 major = le16_to_cpu(ver.wlc_ver_major);
+			u16 minor = le16_to_cpu(ver.wlc_ver_minor);
+
+			if (((major == MIN_JOINEXT_V1_BR1_FW_MAJOR) &&
+			     ((minor == MIN_JOINEXT_V1_BR1_FW_MINOR_2) ||
+			      (minor == MIN_JOINEXT_V1_BR1_FW_MINOR_4))) ||
+			    ((major == MIN_JOINEXT_V1_BR2_FW_MAJOR) &&
+			     (minor >= MIN_JOINEXT_V1_BR2_FW_MINOR)) ||
+			    (major >= MIN_JOINEXT_V1_FW_MAJOR)) {
+				version = 1;
+			}
+		}
+		err = brcmf_join_param_setup_for_version(drvr, version);
+	}
+	if (err) {
+		bphy_err(drvr, "Error setting up join structure handler: %d\n",
+			 err);
+		return err;
+	}
+	err = brcmf_fil_iovar_data_get(ifp, "scan_ver", &scan_ver,
+				       sizeof(scan_ver));
+	if (!err) {
+		u16 ver = le16_to_cpu(scan_ver.scan_ver_major);
+
+		err = brcmf_scan_param_setup_for_version(drvr, ver);
+	} else {
+		/* Default to version 1. */
+		err = brcmf_scan_param_setup_for_version(drvr, 1);
+	}
+	if (err) {
+		bphy_err(drvr, "Error setting up scan structure handler: %d\n",
+			 err);
+		return err;
+	}
+	/* See what version of PFN scan is supported */
+	err = brcmf_fil_iovar_data_get(ifp, "pno_set", &pno_params,
+				       sizeof(pno_params));
+	if (!err) {
+		err = brcmf_pno_setup_for_version(
+			drvr, le16_to_cpu(pno_params.version));
+	} else {
+		/* Default to version 2, supported by all chips we support. */
+		err = brcmf_pno_setup_for_version(drvr, 2);
+	}
+	if (err) {
+		bphy_err(drvr, "Error setting up escan structure handler: %d\n",
+			 err);
+		return err;
+	}
 
 	brcmf_feat_wlc_version_overrides(drvr);
 	brcmf_feat_firmware_overrides(drvr);
 
 	brcmf_fwvid_feat_attach(ifp);
-
 	if (drvr->settings->feature_disable) {
 		brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
 			  ifp->drvr->feat_flags,
@@ -364,6 +442,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
 		/* no quirks */
 		break;
 	}
+	return 0;
 }
 
 void brcmf_feat_debugfs_create(struct brcmf_pub *drvr)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 7f4f0b3e4a7b4a..66e533e993e22f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -30,7 +30,11 @@
  * SAE: simultaneous authentication of equals
  * FWAUTH: Firmware authenticator
  * DUMP_OBSS: Firmware has capable to dump obss info to support ACS
- * SCAN_V2: Version 2 scan params
+ * PMKID_V2: Version 2 PMKID
+ * PMKID_V3: Version 3 PMKID
+ * EVENT_MSGS_EXT: Event messages extension
+ * JOIN_V1: Version 1 join struct
+ * GCMP: GCMP Cipher suite support
  */
 #define BRCMF_FEAT_LIST \
 	BRCMF_FEAT_DEF(MBSS) \
@@ -55,9 +59,10 @@
 	BRCMF_FEAT_DEF(SAE) \
 	BRCMF_FEAT_DEF(FWAUTH) \
 	BRCMF_FEAT_DEF(DUMP_OBSS) \
-	BRCMF_FEAT_DEF(SCAN_V2) \
 	BRCMF_FEAT_DEF(PMKID_V2) \
-	BRCMF_FEAT_DEF(PMKID_V3)
+	BRCMF_FEAT_DEF(PMKID_V3) \
+	BRCMF_FEAT_DEF(EVENT_MSGS_EXT) \
+	BRCMF_FEAT_DEF(GCMP)
 
 /*
  * Quirks:
@@ -95,8 +100,10 @@ enum brcmf_feat_quirk {
  * brcmf_feat_attach() - determine features and quirks.
  *
  * @drvr: driver instance.
+ *
+ * Return: 0 in case of success, error code otherwise.
  */
-void brcmf_feat_attach(struct brcmf_pub *drvr);
+int brcmf_feat_attach(struct brcmf_pub *drvr);
 
 /**
  * brcmf_feat_debugfs_create() - create debugfs entries.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index f0b6a7607f1604..4ad83ea9ba165b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -11,8 +11,10 @@
 #include "core.h"
 #include "debug.h"
 #include "tracepoint.h"
+#include "feature.h"
 #include "fweh.h"
 #include "fwil.h"
+#include "fwil_types.h"
 #include "proto.h"
 #include "bus.h"
 #include "fwvid.h"
@@ -423,6 +425,67 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
 	drvr->fweh->evt_handler[evt_handler_idx] = NULL;
 }
 
+/**
+ * brcmf_fweh_init_events() - initialize the firmware event mask.
+ *
+ * @ifp: primary interface object.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int brcmf_fweh_init_events(struct brcmf_if *ifp)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_eventmsgs_ext_le *eventmsgs;
+	size_t size = sizeof(*eventmsgs) + drvr->fweh->event_mask_len;
+	int err;
+
+	eventmsgs = kzalloc(size, GFP_KERNEL);
+	if (!eventmsgs)
+		return -ENOMEM;
+
+	eventmsgs->version = EVENTMSGS_VER;
+	eventmsgs->command = EVENTMSGS_NONE;
+	eventmsgs->len = drvr->fweh->event_mask_len;
+	eventmsgs->maxgetsize = drvr->fweh->event_mask_len;
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_EVENT_MSGS_EXT))
+		err = brcmf_fil_iovar_data_get(ifp, "event_msgs_ext",
+					       eventmsgs, size);
+	else
+		err = brcmf_fil_iovar_data_get(ifp, "event_msgs",
+					       drvr->fweh->event_mask,
+					       drvr->fweh->event_mask_len);
+
+	if (err) {
+		bphy_err(drvr, "Get event_msgs error (%d)\n", err);
+		kfree(eventmsgs);
+		return err;
+	}
+
+	brcmf_dbg(EVENT, "Event mask len: driver=%d fw=%d\n",
+		  drvr->fweh->event_mask_len, eventmsgs->len);
+
+	/* want to handle IF event as well; set it in both the extended and
+	 * the legacy mask so whichever iovar is used below picks it up
+	 */
+	brcmf_dbg(EVENT, "enable event IF\n");
+	setbit(eventmsgs->mask, BRCMF_E_IF);
+	setbit(drvr->fweh->event_mask, BRCMF_E_IF);
+
+	eventmsgs->version = EVENTMSGS_VER;
+	eventmsgs->command = EVENTMSGS_SET_MASK;
+	eventmsgs->len = drvr->fweh->event_mask_len;
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_EVENT_MSGS_EXT))
+		err = brcmf_fil_iovar_data_set(ifp, "event_msgs_ext",
+					       eventmsgs, size);
+	else
+		err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
+					       drvr->fweh->event_mask,
+					       drvr->fweh->event_mask_len);
+
+	if (err)
+		bphy_err(drvr, "Set event_msgs error (%d)\n", err);
+
+	kfree(eventmsgs);
+	return err;
+}
+
 /**
  * brcmf_fweh_activate_events() - enables firmware events registered.
  *
@@ -430,29 +493,43 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
  */
 int brcmf_fweh_activate_events(struct brcmf_if *ifp)
 {
-	struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
-	enum brcmf_fweh_event_code code;
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_eventmsgs_ext_le *eventmsgs;
+	size_t size = sizeof(*eventmsgs) + drvr->fweh->event_mask_len;
 	int i, err;
 
-	memset(fweh->event_mask, 0, fweh->event_mask_len);
-	for (i = 0; i < fweh->num_event_codes; i++) {
-		if (fweh->evt_handler[i]) {
-			brcmf_fweh_map_fwevt_code(fweh, i, &code);
+	eventmsgs = kzalloc(size, GFP_KERNEL);
+	if (!eventmsgs)
+		return -ENOMEM;
+
+	for (i = 0; i < drvr->fweh->num_event_codes; i++) {
+		if (drvr->fweh->evt_handler[i]) {
 			brcmf_dbg(EVENT, "enable event %s\n",
-				  brcmf_fweh_event_name(code));
-			setbit(fweh->event_mask, i);
+				  brcmf_fweh_event_name(i));
+			setbit(eventmsgs->mask, i);
 		}
 	}
 
 	/* want to handle IF event as well */
 	brcmf_dbg(EVENT, "enable event IF\n");
-	setbit(fweh->event_mask, BRCMF_E_IF);
+	setbit(eventmsgs->mask, BRCMF_E_IF);
+
+	eventmsgs->version = EVENTMSGS_VER;
+	eventmsgs->command = EVENTMSGS_SET_MASK;
+	eventmsgs->len = drvr->fweh->event_mask_len;
+
+	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_EVENT_MSGS_EXT))
+		err = brcmf_fil_iovar_data_set(ifp, "event_msgs_ext",
+					       eventmsgs, size);
+	else
+		err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
+					       eventmsgs->mask,
+					       drvr->fweh->event_mask_len);
 
-	err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
-				       fweh->event_mask_len);
 	if (err)
-		bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
+		bphy_err(drvr, "Set event_msgs error (%d)\n", err);
 
+	kfree(eventmsgs);
 	return err;
 }
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index eed439b840109f..a09eb36eed4360 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -352,6 +352,7 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
 				       void *data));
 void brcmf_fweh_unregister(struct brcmf_pub *drvr,
 			   enum brcmf_fweh_event_code code);
+int brcmf_fweh_init_events(struct brcmf_if *ifp);
 int brcmf_fweh_activate_events(struct brcmf_if *ifp);
 void brcmf_fweh_process_event(struct brcmf_pub *drvr,
 			      struct brcmf_event *event_packet,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index e74a23e11830c1..7b8f809cdc412d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -18,7 +18,8 @@
 #define BRCMF_ARP_OL_HOST_AUTO_REPLY	0x00000004
 #define BRCMF_ARP_OL_PEER_AUTO_REPLY	0x00000008
 
-#define	BRCMF_BSS_INFO_VERSION	109 /* curr ver of brcmf_bss_info_le struct */
+#define	BRCMF_BSS_INFO_MIN_VERSION	109 /* min ver of brcmf_bss_info_le struct */
+#define	BRCMF_BSS_INFO_MAX_VERSION	112 /* max ver of brcmf_bss_info_le struct */
 #define BRCMF_BSS_RSSI_ON_CHANNEL	0x0004
 
 #define BRCMF_STA_BRCM			0x00000001	/* Running a Broadcom driver */
@@ -46,12 +47,10 @@
 #define BRCMF_STA_DWDS_CAP		0x01000000	/* DWDS CAP */
 #define BRCMF_STA_DWDS			0x02000000	/* DWDS active */
 
-/* size of brcmf_scan_params not including variable length array */
-#define BRCMF_SCAN_PARAMS_FIXED_SIZE	64
-#define BRCMF_SCAN_PARAMS_V2_FIXED_SIZE	72
-
 /* version of brcmf_scan_params structure */
 #define BRCMF_SCAN_PARAMS_VERSION_V2	2
+#define BRCMF_SCAN_PARAMS_VERSION_V3	3
+#define BRCMF_SCAN_PARAMS_VERSION_V4	4
 
 /* masks for channel and ssid count */
 #define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
@@ -62,16 +61,26 @@
 #define BRCMF_SCANTYPE_ACTIVE		0
 #define BRCMF_SCANTYPE_PASSIVE		1
 
+/* Additional scanning flags */
+#define BRCMF_SCANFLAGS_LOW_PRIO	0x2
+#define BRCMF_SCANFLAGS_LOW_POWER	0x1000
+#define BRCMF_SCANFLAGS_HIGH_ACCURACY	0x2000
+#define BRCMF_SCANFLAGS_LOW_SPAN	0x4000
+
+/* scan ssid_type flags */
+#define BRCMF_SCANSSID_INC_RNR		0x02 /* Include RNR channels */
+
 #define BRCMF_WSEC_MAX_PSK_LEN		32
 #define	BRCMF_WSEC_PASSPHRASE		BIT(0)
 
-#define BRCMF_WSEC_MAX_SAE_PASSWORD_LEN 128
+#define BRCMF_WSEC_MAX_SAE_PASSWORD_LEN	256
 
 /* primary (ie tx) key */
 #define BRCMF_PRIMARY_KEY		(1 << 1)
 #define DOT11_BSSTYPE_ANY		2
 #define BRCMF_ESCAN_REQ_VERSION		1
 #define BRCMF_ESCAN_REQ_VERSION_V2	2
+#define BRCMF_ESCAN_REQ_VERSION_V3	3
 
 #define BRCMF_MAXRATES_IN_SET		16	/* max # of rates in rateset */
 
@@ -320,29 +329,57 @@ struct brcmf_bss_info_le {
 	__le16 beacon_period;	/* units are Kusec */
 	__le16 capability;	/* Capability information */
 	u8 SSID_len;
-	u8 SSID[32];
+	u8 SSID[IEEE80211_MAX_SSID_LEN];
+	u8 bcnflags;		/* additional flags w.r.t. beacon */
 	struct {
 		__le32 count;   /* # rates in this set */
 		u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
 	} rateset;		/* supported rates */
 	__le16 chanspec;	/* chanspec for bss */
 	__le16 atim_window;	/* units are Kusec */
-	u8 dtim_period;	/* DTIM period */
+	u8 dtim_period;		/* DTIM period */
+	u8 accessnet;		/* from beacon interwork IE (if bcnflags) */
 	__le16 RSSI;		/* receive signal strength (in dBm) */
 	s8 phy_noise;		/* noise (in dBm) */
 
 	u8 n_cap;		/* BSS is 802.11N Capable */
+	u8 he_cap;		/* BSS is he capable */
+	u8 load;		/* BSS Load from QBSS load IE if available */
 	/* 802.11N BSS Capabilities (based on HT_CAP_*): */
 	__le32 nbss_cap;
 	u8 ctl_ch;		/* 802.11N BSS control channel number */
-	__le32 reserved32[1];	/* Reserved for expansion of BSS properties */
+	u8 reserved1[3];	/* Reserved for expansion of BSS properties */
+	__le16 vht_rxmcsmap;	/* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+	__le16 vht_txmcsmap;	/* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
 	u8 flags;		/* flags */
-	u8 reserved[3];	/* Reserved for expansion of BSS properties */
+	u8 vht_cap;		/* BSS is vht capable */
+	u8 reserved2[2];	/* Reserved for expansion of BSS properties */
 	u8 basic_mcs[BRCMF_MCSSET_LEN];	/* 802.11N BSS required MCS set */
 
 	__le16 ie_offset;	/* offset at which IEs start, from beginning */
+	u8 reserved3[2];	/* Reserved for expansion of BSS properties */
 	__le32 ie_length;	/* byte length of Information Elements */
 	__le16 SNR;		/* average SNR of during frame reception */
+	__le16 vht_mcsmap;	/* STA's associated vhtmcsmap */
+	__le16 vht_mcsmap_prop;	/* STA's associated prop vhtmcsmap */
+	__le16 vht_txmcsmap_prop;	/* prop VHT tx mcs prop */
+	__le32 he_mcsmap;	/* STA's associated hemcsmap */
+	__le32 he_rxmcsmap;	/* HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+	__le32 he_txmcsmap;	/* HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
+	__le32 timestamp[2];	/* Beacon Timestamp for FAKEAP req */
+	/* V112 fields follow */
+	u8 eht_cap;		/* BSS is EHT capable */
+	u8 reserved4[3];	/* Reserved for expansion of BSS properties */
+	/* By the spec a BSS can have up to 16 streams, so the MCS codes for
+	 * all NSS may not fit in a 32-bit MCS/NSS map.  Since this field only
+	 * reflects the MCS/NSS map common to the peer and our device, it is
+	 * probably OK to keep it 32 bits and cover a limited number of NSS
+	 * (up to 8), given that our device is unlikely to exceed 4 streams.
+	 */
+	__le32 eht_mcsmap;	/* STA's associated EHT mcs code map */
+	/* FIXME: change the following mcs code maps to u32 if all mcs+nss fit */
+	u8 eht_rxmcsmap[6];	/* EHT rx mcs code map */
+	u8 eht_txmcsmap[6];	/* EHT tx mcs code map */
 	/* Add new fields here */
 	/* variable length Information Elements */
 };
@@ -366,23 +403,23 @@ struct brcmf_ssid8_le {
 };
 
 struct brcmf_scan_params_le {
-	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
-	u8 bssid[ETH_ALEN];	/* default: bcast */
-	s8 bss_type;		/* default: any,
+	struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
+	u8 bssid[ETH_ALEN]; 	/* default: bcast */
+	s8 bss_type; 		/* default: any,
 				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
 				 */
-	u8 scan_type;	/* flags, 0 use default */
-	__le32 nprobes;	  /* -1 use default, number of probes per channel */
-	__le32 active_time;	/* -1 use default, dwell time per channel for
+	u8 scan_type; 		/* flags, 0 use default */
+	__le32 nprobes; 	/* -1 use default, number of probes per channel */
+	__le32 active_time; 	/* -1 use default, dwell time per channel for
 				 * active scanning
 				 */
-	__le32 passive_time;	/* -1 use default, dwell time per channel
+	__le32 passive_time; 	/* -1 use default, dwell time per channel
 				 * for passive scanning
 				 */
 	__le32 home_time;	/* -1 use default, dwell time for the
 				 * home channel between channel scans
 				 */
-	__le32 channel_num;	/* count of channels and ssids that follow
+	__le32 channel_num; 	/* count of channels and ssids that follow
 				 *
 				 * low half is count of channels in
 				 * channel_list, 0 means default (use all
@@ -398,56 +435,125 @@ struct brcmf_scan_params_le {
 				 * fixed parameter portion is assumed, otherwise
 				 * ssid in the fixed portion is ignored
 				 */
-	union {
-		__le16 padding;	/* Reserve space for at least 1 entry for abort
-				 * which uses an on stack brcmf_scan_params_le
-				 */
-		DECLARE_FLEX_ARRAY(__le16, channel_list);	/* chanspecs */
-	};
+	__le16 channel_list[]; /* chanspecs */
 };
 
 struct brcmf_scan_params_v2_le {
-	__le16 version;		/* structure version */
-	__le16 length;		/* structure length */
-	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
-	u8 bssid[ETH_ALEN];	/* default: bcast */
-	s8 bss_type;		/* default: any,
-				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
-				 */
-	u8 pad;
-	__le32 scan_type;	/* flags, 0 use default */
-	__le32 nprobes;		/* -1 use default, number of probes per channel */
-	__le32 active_time;	/* -1 use default, dwell time per channel for
-				 * active scanning
-				 */
-	__le32 passive_time;	/* -1 use default, dwell time per channel
-				 * for passive scanning
-				 */
-	__le32 home_time;	/* -1 use default, dwell time for the
-				 * home channel between channel scans
-				 */
-	__le32 channel_num;	/* count of channels and ssids that follow
-				 *
-				 * low half is count of channels in
-				 * channel_list, 0 means default (use all
-				 * available channels)
-				 *
-				 * high half is entries in struct brcmf_ssid
-				 * array that follows channel_list, aligned for
-				 * s32 (4 bytes) meaning an odd channel count
-				 * implies a 2-byte pad between end of
-				 * channel_list and first ssid
-				 *
-				 * if ssid count is zero, single ssid in the
-				 * fixed parameter portion is assumed, otherwise
-				 * ssid in the fixed portion is ignored
-				 */
-	union {
-		__le16 padding;	/* Reserve space for at least 1 entry for abort
-				 * which uses an on stack brcmf_scan_params_v2_le
-				 */
-		DECLARE_FLEX_ARRAY(__le16, channel_list);	/* chanspecs */
-	};
+	__le16 version; /* structure version */
+	__le16 length; /* structure length */
+	struct brcmf_ssid_le ssid_le;  /* default: {0, ""} */
+	u8 bssid[ETH_ALEN];	       /* default: bcast */
+	s8 bss_type; 		       /* default: any,
+					* DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+					*/
+	u8 PAD;
+	__le32 scan_type; 	       /* flags, 0 use default */
+	__le32 nprobes; 	       /* -1 use default, number of probes per channel */
+	__le32 active_time; 	       /* -1 use default, dwell time per channel for
+					* active scanning
+					*/
+	__le32 passive_time;	       /* -1 use default, dwell time per channel
+					* for passive scanning
+					*/
+	__le32 home_time;	       /* -1 use default, dwell time for the
+					* home channel between channel scans
+					*/
+	__le32 channel_num;	       /* count of channels and ssids that follow
+					*
+					* low half is count of channels in
+					* channel_list, 0 means default (use all
+					* available channels)
+					*
+					* high half is entries in struct brcmf_ssid
+					* array that follows channel_list, aligned for
+					* s32 (4 bytes) meaning an odd channel count
+					* implies a 2-byte pad between end of
+					* channel_list and first ssid
+					*
+					* if ssid count is zero, single ssid in the
+					* fixed parameter portion is assumed, otherwise
+					* ssid in the fixed portion is ignored
+					*/
+	__le16 channel_list[]; 		/* chanspecs */
+};
+
+struct brcmf_scan_params_v3_le {
+	__le16 version;		/* structure version */
+	__le16 length;		/* structure length */
+	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
+	u8 bssid[ETH_ALEN];	/* default: bcast */
+	s8 bss_type;		/* default: any,
+				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+				 */
+	u8 ssid_type;		/* short vs regular SSID */
+	__le32 scan_type;	/* flags, 0 use default */
+	__le32 nprobes;		/* -1 use default, number of probes per channel */
+	__le32 active_time;	/* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time;	/* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time;	/* -1 use default, dwell time for the
+				 * home channel between channel scans
+				 */
+	__le32 channel_num;	/* count of channels and ssids that follow
+				 *
+				 * low half is count of channels in
+				 * channel_list, 0 means default (use all
+				 * available channels)
+				 *
+				 * high half is entries in struct brcmf_ssid
+				 * array that follows channel_list, aligned for
+				 * s32 (4 bytes) meaning an odd channel count
+				 * implies a 2-byte pad between end of
+				 * channel_list and first ssid
+				 *
+				 * if ssid count is zero, single ssid in the
+				 * fixed parameter portion is assumed, otherwise
+				 * ssid in the fixed portion is ignored
+				 */
+	__le16 channel_list[];	/* chanspecs */
+};
+
+struct brcmf_scan_params_v4_le {
+	__le16 version;		/* structure version */
+	__le16 length;		/* structure length */
+	struct brcmf_ssid_le ssid_le;	/* default: {0, ""} */
+	u8 bssid[ETH_ALEN];	/* default: bcast */
+	s8 bss_type;		/* default: any,
+				 * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+				 */
+	u8 ssid_type;		/* short vs regular SSID */
+	__le32 scan_type;	/* flags, 0 use default */
+	__le32 scan_type_ext;	/* ext flags, 0 use default */
+	__le32 nprobes;		/* -1 use default, number of probes per channel */
+	__le32 active_time;	/* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time;	/* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time;	/* -1 use default, dwell time for the
+				 * home channel between channel scans
+				 */
+	__le32 channel_num;	/* count of channels and ssids that follow
+				 *
+				 * low half is count of channels in
+				 * channel_list, 0 means default (use all
+				 * available channels)
+				 *
+				 * high half is entries in struct brcmf_ssid
+				 * array that follows channel_list, aligned for
+				 * s32 (4 bytes) meaning an odd channel count
+				 * implies a 2-byte pad between end of
+				 * channel_list and first ssid
+				 *
+				 * if ssid count is zero, single ssid in the
+				 * fixed parameter portion is assumed, otherwise
+				 * ssid in the fixed portion is ignored
+				 */
+	__le16 channel_list[];	/* chanspecs */
+};
 
 struct brcmf_scan_results {
@@ -464,6 +570,8 @@ struct brcmf_escan_params_le {
 	union {
 		struct brcmf_scan_params_le params_le;
 		struct brcmf_scan_params_v2_le params_v2_le;
+		struct brcmf_scan_params_v3_le params_v3_le;
+		struct brcmf_scan_params_v4_le params_v4_le;
 	};
 };
 
@@ -482,11 +590,67 @@ struct brcmf_escan_result_le {
 struct brcmf_assoc_params_le {
 	/* 00:00:00:00:00:00: broadcast scan */
 	u8 bssid[ETH_ALEN];
+	/* 0: use chanspec_num, and the single bssid,
+	 * otherwise count of chanspecs in chanspec_list
+	 * AND paired bssids following chanspec_list
+	 * also, chanspec_num has to be set to zero
+	 * for bssid list to be used
+	 */
+	__le16 bssid_cnt;
+	/* 0: all available channels, otherwise count of chanspecs in
+	 * chanspec_list */
+	__le32 chanspec_num;
+	/* list of chanspecs */
+	__le16 chanspec_list[];
+};
+
+struct brcmf_assoc_params_v1_le {
+	__le16 version;
+	__le16 flags;
+	/* 00:00:00:00:00:00: broadcast scan */
+	u8 bssid[ETH_ALEN];
+	/* 0: use chanspec_num, and the single bssid,
+	 * otherwise count of chanspecs in chanspec_list
+	 * AND paired bssids following chanspec_list
+	 * also, chanspec_num has to be set to zero
+	 * for bssid list to be used
+	 */
+	__le16 bssid_cnt;
 	/* 0: all available channels, otherwise count of chanspecs in
 	 * chanspec_list */
 	__le32 chanspec_num;
 	/* list of chanspecs */
-	__le16 chanspec_list[1];
+	__le16 chanspec_list[];
+};
+
+/* ML assoc and scan params */
+struct brcmf_ml_assoc_scan_params_v1_le {
+	/* whether to follow strictly ordered assoc ? */
+	u8 ml_assoc_mode;
+	/* to identify whether ml scan needs to be triggered */
+	u8 ml_scan_mode;
+	u8 pad[2];
+};
+
+struct brcmf_assoc_params_v2_le {
+	__le16 version;
+	__le16 flags;
+	/* 00:00:00:00:00:00: broadcast scan */
+	u8 bssid[ETH_ALEN];
+	/* 0: use chanspec_num, and the single bssid,
+	 * otherwise count of chanspecs in chanspec_list
+	 * AND paired bssids following chanspec_list
+	 * also, chanspec_num has to be set to zero
+	 * for bssid list to be used
+	 */
+	__le16 bssid_cnt;
+	/* Multilink association and scan params */
+	struct brcmf_ml_assoc_scan_params_v1_le ml_assoc_scan_params;
+	/* 0: all available channels, otherwise count of chanspecs in
+	 * chanspec_list */
+	__le32 chanspec_num;
+	/* list of chanspecs */
+	__le16 chanspec_list[];
 };
 
 /**
@@ -511,9 +675,19 @@ struct brcmf_join_params {
 	struct brcmf_assoc_params_le params_le;
 };
 
+struct brcmf_join_params_v1 {
+	struct brcmf_ssid_le ssid_le;
+	struct brcmf_assoc_params_v1_le params_le;
+};
+
+struct brcmf_join_params_v2 {
+	struct brcmf_ssid_le ssid_le;
+	struct brcmf_assoc_params_v2_le params_le;
+};
+
 /* scan params for extended join */
 struct brcmf_join_scan_params_le {
 	u8 scan_type;		/* 0 use default, active or passive scan */
+	u8 PAD[3];
 	__le32 nprobes;		/* -1 use default, nr of probes per channel */
 	__le32 active_time;	/* -1 use default, dwell time per channel for
 				 * active scanning
@@ -526,6 +700,23 @@ struct brcmf_join_scan_params_le {
 				 */
 };
 
+/* scan params for extended join */
+struct brcmf_join_scan_params_v1_le {
+	u8 scan_type; /* 0 use default, active or passive scan */
+	u8 ml_scan_mode; /* 0 scan ML channels in RNR, 1 scan only provided channels */
+	u8 PAD[2];
+	__le32 nprobes; /* -1 use default, nr of probes per channel */
+	__le32 active_time; /* -1 use default, dwell time per channel for
+				 * active scanning
+				 */
+	__le32 passive_time; /* -1 use default, dwell time per channel
+				 * for passive scanning
+				 */
+	__le32 home_time; /* -1 use default, dwell time for the home
+				 * channel between channel scans
+				 */
+};
+
 /* extended join params */
 struct brcmf_ext_join_params_le {
 	struct brcmf_ssid_le ssid_le;	/* {0, ""}: wildcard scan */
@@ -533,6 +724,24 @@ struct brcmf_ext_join_params_le {
 	struct brcmf_assoc_params_le assoc_le;
 };
 
+/* extended join params */
+struct brcmf_ext_join_params_v1_le {
+	__le16 version;
+	u16 pad;
+	struct brcmf_ssid_le ssid_le;	/* {0, ""}: wildcard scan */
+	struct brcmf_join_scan_params_le scan_le;
+	struct brcmf_assoc_params_v1_le assoc_le;
+};
+
+/* extended join params v2 */
+struct brcmf_ext_join_params_v2_le {
+	__le16 version;
+	u16 pad;
+	struct brcmf_ssid_le ssid_le;	/* {0, ""}: wildcard scan */
+	struct brcmf_join_scan_params_v1_le scan_le;
+	struct brcmf_assoc_params_v2_le assoc_le;
+};
+
 struct brcmf_wsec_key {
 	u32 index;		/* key index */
 	u32 len;		/* key length */
@@ -580,11 +789,15 @@ struct brcmf_wsec_key_le {
  * @key_len: number of octets in key material.
  * @flags: key handling qualifiers.
  * @key: PMK key material.
+ * @opt_len: optional field length
+ * @opt_tlvs: optional fields in TLV format
  */
 struct brcmf_wsec_pmk_le {
 	__le16  key_len;
 	__le16  flags;
 	u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
+	__le16  opt_len;
+	u8   opt_tlvs[];
 };
 
 /**
@@ -611,13 +824,17 @@ struct brcmf_channel_info_le {
 	__le32 scan_channel;
 };
 
+#define BRCMF_MAX_ASSOC_OUI_NUM 6
+#define BRCMF_ASSOC_OUI_LEN 3
 struct brcmf_sta_info_le {
 	__le16 ver;		/* version of this struct */
 	__le16 len;		/* length in bytes of this structure */
 	__le16 cap;		/* sta's advertised capabilities */
+	u16 PAD;
 	__le32 flags;		/* flags defined below */
 	__le32 idle;		/* time since data pkt rx'd from sta */
 	u8 ea[ETH_ALEN];		/* Station address */
+	u16 PAD2;
 	__le32 count;			/* # rates in this set */
 	u8 rates[BRCMF_MAXRATES_IN_SET];	/* rates in 500kbps units */
 						/* w/hi bit set if basic */
@@ -649,6 +866,7 @@ struct brcmf_sta_info_le {
 	__le16 aid;                    /* association ID */
 	__le16 ht_capabilities;        /* advertised ht caps */
 	__le16 vht_flags;              /* converted vht flags */
+	u16 PAD3;
 	__le32 tx_pkts_retry_cnt;      /* # of frames where a retry was
 					 * exhausted.
 					 */
@@ -701,6 +919,13 @@ struct brcmf_sta_info_le {
 			__le32 tx_rspec;	/* Rate of last successful tx frame */
 			__le32 rx_rspec;	/* Rate of last successful rx frame */
 			__le32 wnm_cap;		/* wnm capabilities */
+			__le16 he_flags;	/* converted he flags */
+			u16 PAD;
+			struct {
+				u8 count;
+				u8 oui[BRCMF_MAX_ASSOC_OUI_NUM][BRCMF_ASSOC_OUI_LEN];
+			} vendor_oui;
+			u8 link_bw;
 		} v7;
 	};
 };
@@ -833,6 +1058,30 @@ struct brcmf_wlc_version_le {
 	__le16 wlc_ver_minor;
 };
 
+/**
+ * struct brcmf_join_version_le - join interface version
+ *
+ * @version: version of the structure
+ * @length: length of the entire structure
+ * @join_ver_major: join interface major version number
+ * @pad: reserved
+ */
+struct brcmf_join_version_le {
+	__le16	version;
+	__le16	length;
+
+	/* join interface version numbers */
+	__le16	join_ver_major;
+	u8	pad[2];
+};
+#define BRCMF_JOIN_VERSION_VERSION 1
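+
+/* A sketch of how the version is queried (this is what brcmf_feat_attach()
+ * does, falling back to the wlc_ver-based branch check when the iovar is
+ * not supported):
+ *
+ *	struct brcmf_join_version_le jv;
+ *
+ *	if (!brcmf_fil_iovar_data_get(ifp, "join_ver", &jv, sizeof(jv)))
+ *		ver = le16_to_cpu(jv.join_ver_major);
+ */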
+
+/**
+ * struct brcmf_scan_version_le - scan interface version
+ *
+ * @version: version of the structure
+ * @length: length of the entire structure
+ * @scan_ver_major: scan interface major version number
+ */
+struct brcmf_scan_version_le {
+	__le16	version;
+	__le16	length;
+	__le16	scan_ver_major;
+};
+
+#define BRCMF_SCAN_VERSION_VERSION 1
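+
+/* Queried via the "scan_ver" iovar; brcmf_feat_attach() hands scan_ver_major
+ * to brcmf_scan_param_setup_for_version() and defaults to version 1 when the
+ * iovar is not supported.
+ */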
+
 /**
  * struct brcmf_assoclist_le - request assoc list.
  *
@@ -1009,6 +1258,46 @@ struct brcmf_pno_param_le {
 	__le32 slow_freq;
 };
 
+/**
+ * struct brcmf_pno_param_v3_le - PNO scan configuration parameters
+ *
+ * @version: PNO parameters version.
+ * @length: Length of PNO structure
+ * @scan_freq: scan frequency.
+ * @lost_network_timeout: #sec. to declare discovered network as lost.
+ * @flags: Bit field to control features of PFN such as sort criteria auto
+ *	enable switch and background scan.
+ * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
+ *	criteria.
+ * @bestn: number of best networks in each scan.
+ * @mscan: number of scans recorded.
+ * @repeat: minimum number of scan intervals before scan frequency changes
+ *	in adaptive scan.
+ * @exp: exponent of 2 for maximum scan interval.
+ * @slow_freq: slow scan period.
+ * @min_bound: min bound for scan time randomization
+ * @max_bound: max bound for scan time randomization
+ * @pfn_lp_scan_disable: unused
+ * @pfn_lp_scan_cnt: allow interleaving lp scan with hp scan
+ */
+struct brcmf_pno_param_v3_le {
+	__le16 version;
+	__le16 length;
+	__le32 scan_freq;
+	__le32 lost_network_timeout;
+	__le16 flags;
+	__le16 rssi_margin;
+	u8 bestn;
+	u8 mscan;
+	u8 repeat;
+	u8 exp;
+	__le32 slow_freq;
+	u8 min_bound;
+	u8 max_bound;
+	u8 pfn_lp_scan_disable;
+	u8 pfn_lp_scan_cnt;
+};
+
 /**
  * struct brcmf_pno_config_le - PNO channel configuration.
  *
@@ -1062,6 +1351,28 @@ struct brcmf_pno_net_info_le {
 	__le16	timestamp;
 };
 
+/**
+ * struct brcmf_pno_net_info_v3_le - information per found network.
+ *
+ * @bssid: BSS network identifier.
+ * @chanspec: channel spec.
+ * @SSID_len: length of ssid.
+ * @padding: reserved.
+ * @flags: flags.
+ * @SSID: ssid characters.
+ * @RSSI: receive signal strength (in dBm).
+ * @timestamp: age in seconds.
+ */
+struct brcmf_pno_net_info_v3_le {
+	u8 bssid[ETH_ALEN];
+	__le16 chanspec;
+	u8 SSID_len;
+	u8 padding;
+	__le16 flags;
+	u8 SSID[32];
+	__le16 RSSI;
+	__le16 timestamp;
+};
+
 /**
  * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
  *
@@ -1082,6 +1393,14 @@ struct brcmf_pno_scanresults_v2_le {
 	__le32 scan_ch_bucket;
 };
 
+/* V2 and V3 structs are the same */
+struct brcmf_pno_scanresults_v3_le {
+	__le32 version;
+	__le32 status;
+	__le32 count;
+	__le32 scan_ch_bucket;
+};
+
 /**
  * struct brcmf_pno_macaddr_le - to configure PNO macaddr randomization.
  *
@@ -1236,4 +1555,141 @@ struct brcmf_mkeep_alive_pkt_le {
 	u8   data[];
 } __packed;
 
+enum event_msgs_ext_command {
+	EVENTMSGS_NONE		=	0,
+	EVENTMSGS_SET_BIT	=	1,
+	EVENTMSGS_RESET_BIT	=	2,
+	EVENTMSGS_SET_MASK	=	3
+};
+
+#define EVENTMSGS_VER 1
+
+/**
+ * struct brcmf_eventmsgs_ext_le - new event message mask commands
+ *
+ * @version: EVENTMSGS_VER
+ * @command: one of enum event_msgs_ext_command
+ * @len: for set, the mask size from the application to the firmware.
+ *       for get, the actual firmware mask size.
+ * @maxgetsize: for get, the max size that the application can read from
+ *              the firmware.
+ * @mask: variable-length bitmap of firmware event numbers.
+ */
+struct brcmf_eventmsgs_ext_le {
+	u8	version;
+	u8	command;
+	u8	len;
+	u8	maxgetsize;
+	u8	mask[];
+};
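+
+/* The mask is a variable-length bitmap that directly follows the header; a
+ * user allocates sizeof(struct brcmf_eventmsgs_ext_le) + mask_len bytes and
+ * sets len (and, for a get, maxgetsize) to mask_len, as done in
+ * brcmf_fweh_init_events().
+ */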
+
+/* version of the brcmf_wl_wsec_info structure */
+#define BRCMF_WSEC_INFO_VER 1
+
+/* tlv used to return wl_wsec_info properties */
+struct brcmf_wsec_info_tlv {
+	u16 type;
+	u16 len; /* data length */
+	u8 data[1]; /* data follows */
+};
+
+/* input/output data type for wsec_info iovar */
+struct brcmf_wsec_info {
+	u8 version; /* structure version */
+	u8 pad[2];
+	u8 num_tlvs;
+	struct brcmf_wsec_info_tlv tlvs[1]; /* tlv data follows */
+};
+
+/* HE top level command IDs */
+enum {
+	BRCMF_HE_CMD_ENABLE = 0,
+	BRCMF_HE_CMD_FEATURES = 1,
+	BRCMF_HE_CMD_SR = 2,
+	BRCMF_HE_CMD_TESTBED = 3,
+	BRCMF_HE_CMD_BSR_SUPPORT = 4,
+	BRCMF_HE_CMD_BSSCOLOR = 5,
+	BRCMF_HE_CMD_PARTIAL_BSSCOLOR = 6,
+	BRCMF_HE_CMD_CAP = 7,
+	BRCMF_HE_CMD_OMI = 8,
+	BRCMF_HE_CMD_RANGE_EXT = 9,
+	BRCMF_HE_CMD_RTSDURTHRESH = 10,
+	BRCMF_HE_CMD_PEDURATION = 11,
+	BRCMF_HE_CMD_MUEDCA = 12,
+	BRCMF_HE_CMD_DYNFRAG = 13,
+	BRCMF_HE_CMD_PPET = 14,
+	BRCMF_HE_CMD_HTC = 15,
+	BRCMF_HE_CMD_AXMODE = 16,
+	BRCMF_HE_CMD_FRAGTX = 17,
+	BRCMF_HE_CMD_DEFCAP = 18,
+};
+
+#define BRCMF_HE_VER_1 1
+
+struct brcmf_he_bsscolor {
+	u8 color; /* 1..63, on get returns currently in use color */
+	u8 disabled; /* 0/1, 0 means disabled is false, so coloring is enabled */
+	u8 switch_count; /* 0, immediate programming, 1 .. 255 beacon count down */
+	u8 PAD;
+};
+
+struct brcmf_he_omi {
+	u8 peer[ETH_ALEN]; /* leave it all 0s' for non-AP */
+	u8 rx_nss; /* 0..7 */
+	u8 channel_width; /* 0:20, 1:40, 2:80, 3:160 */
+	u8 ul_mu_disable; /* 0|1 */
+	u8 tx_nsts; /* 0..7 */
+	u8 er_su_disable; /* 0|1 */
+	u8 dl_mumimo_resound; /* 0|1 */
+	u8 ul_mu_data_disable; /* 0|1 */
+	u8 tx_override; /* 0, only used for testbed AP */
+	u8 PAD[2];
+};
+
+struct brcmf_he_edca_v1 {
+	u8 aci_aifsn;
+	u8 ecw_min_max;
+	u8 muedca_timer;
+	u8 PAD;
+};
+
+#define BRCMF_AC_COUNT 4
+struct brcmf_he_muedca_v1 {
+	/* structure control */
+	__le16 version; /* structure version */
+	__le16 length; /* data length (starting after this field) */
+	struct brcmf_he_edca_v1 ac_param_ap[BRCMF_AC_COUNT];
+	struct brcmf_he_edca_v1 ac_param_sta[BRCMF_AC_COUNT];
+};
+
+#define BRCMF_HE_SR_VER_1 1
+
+#define SRC_PSR_DIS 0x01
+#define SRC_NON_SRG_OBSS_PD_SR_DIS 0x02
+#define SRC_NON_SRG_OFFSET_PRESENT 0x04
+#define SRC_SRG_INFORMATION_PRESENT 0x08
+#define SRC_HESIGA_SPATIAL_REUSE_VALUE15_ALLOWED 0x10
+
+#define HE_SR_SRG_INFO_LEN 18
+
+struct brcmf_he_sr_v1 {
+	/* structure control */
+	__le16 version; /* structure version */
+	__le16 length; /* data length (starting after this field) */
+	u8 enabled;
+	u8 src; /* SR control, see above defines. */
+	u8 non_srg_offset; /* Non-SRG Offset */
+	u8 srg[HE_SR_SRG_INFO_LEN]; /* SRG Information */
+};
+
+#define BRCMF_HE_DEFCAP_VER_1 1
+
+struct brcmf_he_defcap {
+	__le16 version; /* structure version */
+	__le16 length; /* data length (starting after this field) */
+	u8 bsscfg_type;
+	u8 bsscfg_subtype;
+	u8 mac_cap[6];
+	u8 phy_cap[11];
+};
+
 #endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/interface_create.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/interface_create.c
new file mode 100644
index 00000000000000..1f40ff8d632c25
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/interface_create.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+
+/* This file handles firmware-side interface creation */
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include <brcmu_utils.h>
+#include <defs.h>
+#include "cfg80211.h"
+#include "debug.h"
+#include "fwil.h"
+#include "proto.h"
+#include "bus.h"
+#include "common.h"
+#include "interface_create.h"
+
+#define BRCMF_INTERFACE_CREATE_VER_1 1
+#define BRCMF_INTERFACE_CREATE_VER_2 2
+#define BRCMF_INTERFACE_CREATE_VER_3 3
+#define BRCMF_INTERFACE_CREATE_VER_MAX BRCMF_INTERFACE_CREATE_VER_3
+
+/* These sets of flags specify whether to use various fields in the
+ * interface create structures.
+ */
+
+/* This is only used with version 0 or 1 */
+#define BRCMF_INTERFACE_CREATE_STA (0 << 0)
+#define BRCMF_INTERFACE_CREATE_AP (1 << 0)
+
+#define BRCMF_INTERFACE_MAC_DONT_USE (0 << 1)
+#define BRCMF_INTERFACE_MAC_USE (1 << 1)
+
+#define BRCMF_INTERFACE_WLC_INDEX_DONT_USE (0 << 2)
+#define BRCMF_INTERFACE_WLC_INDEX_USE (1 << 2)
+
+#define BRCMF_INTERFACE_IF_INDEX_DONT_USE (0 << 3)
+#define BRCMF_INTERFACE_IF_INDEX_USE (1 << 3)
+
+#define BRCMF_INTERFACE_BSSID_DONT_USE (0 << 4)
+#define BRCMF_INTERFACE_BSSID_USE (1 << 4)
+
+/*
+ * From revision >= 2, bit 0 of the flags field is not used for STA or AP
+ * interface creation; the "iftype" field identifies the interface type.
+ */
+enum brcmf_interface_type {
+	BRCMF_INTERFACE_TYPE_STA = 0,
+	BRCMF_INTERFACE_TYPE_AP = 1,
+	/* The missing number here is deliberate */
+	BRCMF_INTERFACE_TYPE_NAN = 3,
+	BRCMF_INTERFACE_TYPE_P2P_GO = 4,
+	BRCMF_INTERFACE_TYPE_P2P_GC = 5,
+	BRCMF_INTERFACE_TYPE_P2P_DISC = 6,
+	BRCMF_INTERFACE_TYPE_IBSS = 7,
+	BRCMF_INTERFACE_TYPE_MESH = 8
+};
+
+/* Other driver sources treat these structures as host endian.  However, the
+ * firmware treats them as little endian, so we do as well.
+ */
+
+struct brcmf_interface_create_v1 {
+	__le16 ver; /* structure version */
+	u8 pad1[2];
+	__le32 flags; /* flags for operation */
+	u8 mac_addr[ETH_ALEN]; /* MAC address */
+	u8 pad2[2];
+	__le32 wlc_index; /* optional for wlc index */
+};
+
+struct brcmf_interface_create_v2 {
+	__le16 ver; /* structure version */
+	u8 pad1[2];
+	__le32 flags; /* flags for operation */
+	u8 mac_addr[ETH_ALEN]; /* MAC address */
+	u8 iftype; /* type of interface created */
+	u8 pad2;
+	__le32 wlc_index; /* optional for wlc index */
+};
+
+struct brcmf_interface_create_v3 {
+	__le16 ver; /* structure version */
+	__le16 len; /* length of structure + data */
+	__le16 fixed_len; /* length of structure */
+	u8 iftype; /* type of interface created */
+	u8 wlc_index; /* optional for wlc index */
+	__le32 flags; /* flags for operation */
+	u8 mac_addr[ETH_ALEN]; /* MAC address */
+	u8 bssid[ETH_ALEN]; /* optional for BSSID */
+	u8 if_index; /* interface index request */
+	u8 pad[3];
+	u8 data[]; /* Optional for specific data */
+};
+
+static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
+{
+	int bsscfgidx;
+
+	for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) {
+		/* bsscfgidx 1 is reserved for legacy P2P */
+		if (bsscfgidx == 1)
+			continue;
+		if (!drvr->iflist[bsscfgidx])
+			return bsscfgidx;
+	}
+
+	return -ENOMEM;
+}
+
+static void brcmf_set_vif_sta_macaddr(struct brcmf_if *ifp, u8 *mac_addr)
+{
+	u8 mac_idx = ifp->drvr->sta_mac_idx;
+
+	/* derive a distinct MAC address with the locally administered bit */
+	memcpy(mac_addr, ifp->mac_addr, ETH_ALEN);
+	mac_addr[0] |= 0x02;
+	mac_addr[3] ^= mac_idx ? 0xC0 : 0xA0;
+	mac_idx++;
+	mac_idx = mac_idx % 2;
+	ifp->drvr->sta_mac_idx = mac_idx;
+}
+
+static int brcmf_cfg80211_request_if_internal(struct brcmf_if *ifp, u32 version,
+					      enum brcmf_interface_type if_type,
+					      u8 *macaddr)
+{
+	switch (version) {
+	case BRCMF_INTERFACE_CREATE_VER_1: {
+		struct brcmf_interface_create_v1 iface_v1 = {};
+		u32 flags = if_type;
+
+		iface_v1.ver = cpu_to_le16(BRCMF_INTERFACE_CREATE_VER_1);
+		if (macaddr) {
+			flags |= BRCMF_INTERFACE_MAC_USE;
+			if (!is_zero_ether_addr(macaddr))
+				memcpy(iface_v1.mac_addr, macaddr, ETH_ALEN);
+			else
+				brcmf_set_vif_sta_macaddr(ifp,
+							  iface_v1.mac_addr);
+		}
+		iface_v1.flags = cpu_to_le32(flags);
+		return brcmf_fil_iovar_data_get(ifp, "interface_create",
+						&iface_v1, sizeof(iface_v1));
+	}
+	case BRCMF_INTERFACE_CREATE_VER_2: {
+		struct brcmf_interface_create_v2 iface_v2 = {};
+		u32 flags = 0;
+
+		iface_v2.ver = cpu_to_le16(BRCMF_INTERFACE_CREATE_VER_2);
+		iface_v2.iftype = if_type;
+		if (macaddr) {
+			flags = BRCMF_INTERFACE_MAC_USE;
+			if (!is_zero_ether_addr(macaddr))
+				memcpy(iface_v2.mac_addr, macaddr, ETH_ALEN);
+			else
+				brcmf_set_vif_sta_macaddr(ifp,
+							  iface_v2.mac_addr);
+		}
+		iface_v2.flags = cpu_to_le32(flags);
+		return brcmf_fil_iovar_data_get(ifp, "interface_create",
+						&iface_v2, sizeof(iface_v2));
+	}
+	case BRCMF_INTERFACE_CREATE_VER_3: {
+		struct brcmf_interface_create_v3 iface_v3 = {};
+		u32 flags = 0;
+
+		iface_v3.ver = cpu_to_le16(BRCMF_INTERFACE_CREATE_VER_3);
+		iface_v3.iftype = if_type;
+		iface_v3.len = cpu_to_le16(sizeof(iface_v3));
+		iface_v3.fixed_len = cpu_to_le16(sizeof(iface_v3));
+		if (macaddr) {
+			flags = BRCMF_INTERFACE_MAC_USE;
+			if (!is_zero_ether_addr(macaddr))
+				memcpy(iface_v3.mac_addr, macaddr, ETH_ALEN);
+			else
+				brcmf_set_vif_sta_macaddr(ifp,
+							  iface_v3.mac_addr);
+		}
+		iface_v3.flags = cpu_to_le32(flags);
+		return brcmf_fil_iovar_data_get(ifp, "interface_create",
+						&iface_v3, sizeof(iface_v3));
+	}
+	default:
+		bphy_err(ifp->drvr, "Unknown interface create version:%d\n",
+			 version);
+		return -EINVAL;
+	}
+}
+
+static int brcmf_cfg80211_request_if(struct brcmf_if *ifp,
+				     enum brcmf_interface_type if_type,
+				     u8 *macaddr)
+{
+	s32 err;
+	u32 iface_create_ver;
+
+	/* Ask the firmware which interface_create version it supports */
+	iface_create_ver = 0;
+	err = brcmf_fil_bsscfg_int_query(ifp, "interface_create",
+					 &iface_create_ver);
+	if (!err) {
+		err = brcmf_cfg80211_request_if_internal(ifp, iface_create_ver,
+							 if_type, macaddr);
+		if (!err) {
+			brcmf_info("interface created (version %d)\n",
+				   iface_create_ver);
+		} else {
+			bphy_err(ifp->drvr,
+				 "failed to create interface (version %d):%d\n",
+				 iface_create_ver, err);
+		}
+		return err;
+	}
+	/* The query failed; the firmware expects either version 1 or 2 */
+	err = brcmf_cfg80211_request_if_internal(ifp,
+						  BRCMF_INTERFACE_CREATE_VER_2,
+						  if_type, macaddr);
+	if (!err) {
+		brcmf_info("interface created (version 2)\n");
+		return 0;
+	}
+	err = brcmf_cfg80211_request_if_internal(ifp,
+						  BRCMF_INTERFACE_CREATE_VER_1,
+						  if_type, macaddr);
+	if (!err) {
+		brcmf_info("interface created (version 1)\n");
+		return 0;
+	}
+	bphy_err(ifp->drvr,
+		 "interface creation failed, tried query, v2, v1: %d\n", err);
+	return -EINVAL;
+}
+
+int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr)
+{
+	return brcmf_cfg80211_request_if(ifp, BRCMF_INTERFACE_TYPE_STA,
+					 macaddr);
+}
+
+int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
+{
+	int err;
+
+	err = brcmf_cfg80211_request_if(ifp, BRCMF_INTERFACE_TYPE_AP, NULL);
+	if (err) {
+		struct brcmf_mbss_ssid_le mbss_ssid_le;
+		int bsscfgidx;
+
+		brcmf_info("Does not support interface_create (%d)\n", err);
+		memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
+		bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
+		if (bsscfgidx < 0)
+			return bsscfgidx;
+
+		mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx);
+		mbss_ssid_le.SSID_len = cpu_to_le32(5);
+		sprintf(mbss_ssid_le.SSID, "ssid%d", bsscfgidx);
+
+		err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid",
+						&mbss_ssid_le,
+						sizeof(mbss_ssid_le));
+
+		if (err < 0)
+			bphy_err(ifp->drvr, "setting ssid failed %d\n", err);
+	}
+	return err;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/interface_create.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/interface_create.h
new file mode 100644
index 00000000000000..669fa1508b67f6
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/interface_create.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+
+#ifndef _BRCMF_INTERFACE_CREATE_H_
+#define _BRCMF_INTERFACE_CREATE_H_
+#include "core.h"
+
+int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr);
+int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp);
+
+#endif /* _BRCMF_INTERFACE_CREATE_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/join_param.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/join_param.c
new file mode 100644
index 00000000000000..4f026571c7e7eb
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/join_param.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+#include <linux/gcd.h>
+#include <net/cfg80211.h>
+
+#include "core.h"
+#include "debug.h"
+#include "fwil_types.h"
+#include "cfg80211.h"
+#include "join_param.h"
+
+/* These defaults are the same as found in the DHD drivers, and represent
+ * reasonable defaults for various scan dwell and probe times.
+ */
+#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
+/* Most of the actual structure fields we fill in are the same for the
+ * various versions.  However, due to incompatible changes and variants, the
+ * fields are not always in the same place.  This makes for code duplication,
+ * so we try to commonize setting fields where it makes sense.
+ */
+
+static void brcmf_joinscan_set_ssid(struct brcmf_ssid_le *ssid_le,
+				    const u8 *ssid, u32 ssid_len)
+{
+	ssid_len = min_t(u32, ssid_len, IEEE80211_MAX_SSID_LEN);
+	ssid_le->SSID_len = cpu_to_le32(ssid_len);
+	memcpy(ssid_le->SSID, ssid, ssid_len);
+}
+
+static void brcmf_joinscan_set_bssid(u8 out_bssid[ETH_ALEN], const u8 *in_bssid)
+{
+	if (in_bssid)
+		memcpy(out_bssid, in_bssid, ETH_ALEN);
+	else
+		eth_broadcast_addr(out_bssid);
+}
+
+/* Create a single channel chanspec list from a wireless stack channel */
+static void brcmf_joinscan_set_single_chanspec_from_channel(
+	struct brcmf_cfg80211_info *cfg, struct ieee80211_channel *chan,
+	__le32 *chanspec_count, __le16 (*chanspec_list)[])
+{
+	u16 chanspec = channel_to_chanspec(&cfg->d11inf, chan);
+
+	*chanspec_count = cpu_to_le32(1);
+	(*chanspec_list)[0] = cpu_to_le16(chanspec);
+}
+
+/* Create a single channel chanspec list from a wireless stack chandef */
+static void brcmf_joinscan_set_single_chanspec_from_chandef(
+	struct brcmf_cfg80211_info *cfg, struct cfg80211_chan_def *chandef,
+	__le32 *chanspec_count, __le16 (*chanspec_list)[])
+{
+	u16 chanspec = chandef_to_chanspec(&cfg->d11inf, chandef);
+
+	*chanspec_count = cpu_to_le32(1);
+	(*chanspec_list)[0] = cpu_to_le16(chanspec);
+}
+
+static void *brcmf_get_struct_for_ibss_v0(struct brcmf_cfg80211_info *cfg,
+					  u32 *struct_size,
+					  struct cfg80211_ibss_params *params)
+{
+	struct brcmf_join_params *join_params;
+
+	u32 join_params_size = struct_size(join_params, params_le.chanspec_list,
+					   params->chandef.chan != NULL);
+
+	*struct_size = join_params_size;
+	join_params = kzalloc(join_params_size, GFP_KERNEL);
+	if (!join_params) {
+		bphy_err(cfg, "Unable to allocate memory for join params\n");
+		return NULL;
+	}
+	brcmf_joinscan_set_ssid(&join_params->ssid_le, params->ssid,
+				params->ssid_len);
+	brcmf_joinscan_set_bssid(join_params->params_le.bssid, params->bssid);
+	/* Channel */
+	if (cfg->channel) {
+		brcmf_joinscan_set_single_chanspec_from_chandef(
+			cfg, &params->chandef,
+			&join_params->params_le.chanspec_num,
+			&join_params->params_le.chanspec_list);
+	}
+	return join_params;
+}
+
+static void *
+brcmf_get_prepped_struct_for_ibss_v1(struct brcmf_cfg80211_info *cfg,
+				     u32 *struct_size,
+				     struct cfg80211_ibss_params *params)
+{
+	struct brcmf_join_params_v1 *join_params;
+	u32 join_params_size = struct_size(join_params, params_le.chanspec_list,
+					   params->chandef.chan != NULL);
+
+	*struct_size = join_params_size;
+	join_params = kzalloc(join_params_size, GFP_KERNEL);
+	if (!join_params) {
+		bphy_err(cfg, "Unable to allocate memory for join params\n");
+		return NULL;
+	}
+	join_params->params_le.version = cpu_to_le16(1);
+	brcmf_joinscan_set_ssid(&join_params->ssid_le, params->ssid,
+				params->ssid_len);
+	brcmf_joinscan_set_bssid(join_params->params_le.bssid, params->bssid);
+	/* Channel */
+	if (cfg->channel) {
+		brcmf_joinscan_set_single_chanspec_from_chandef(
+			cfg, &params->chandef,
+			&join_params->params_le.chanspec_num,
+			&join_params->params_le.chanspec_list);
+	}
+	return join_params;
+}
+
+static void
+brcmf_joinscan_set_common_v0v1_params(struct brcmf_join_scan_params_le *scan_le,
+				      bool have_channel)
+{
+	/* Set up join scan parameters */
+	scan_le->scan_type = 0;
+	scan_le->home_time = cpu_to_le32(-1);
+
+	if (have_channel) {
+		/* Increase dwell time to receive probe response or detect
+		 * beacon from target AP at a noisy air only during connect
+		 * command.
+		 */
+		scan_le->active_time =
+			cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+		scan_le->passive_time =
+			cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+		/* To sync with presence period of VSDB GO send probe request
+		 * more frequently. Probe request will be stopped when it gets
+		 * probe response from target AP/GO.
+		 */
+		scan_le->nprobes =
+			cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+				    BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+	} else {
+		scan_le->active_time = cpu_to_le32(-1);
+		scan_le->passive_time = cpu_to_le32(-1);
+		scan_le->nprobes = cpu_to_le32(-1);
+	}
+}
+
+static void *
+brcmf_get_struct_for_connect_v0(struct brcmf_cfg80211_info *cfg,
+				u32 *struct_size,
+				struct cfg80211_connect_params *params)
+{
+	struct brcmf_ext_join_params_le *ext_v0;
+	u32 join_params_size =
+		struct_size(ext_v0, assoc_le.chanspec_list, cfg->channel != 0);
+
+	*struct_size = join_params_size;
+	ext_v0 = kzalloc(join_params_size, GFP_KERNEL);
+	if (!ext_v0) {
+		bphy_err(
+			cfg,
+			"Could not allocate memory for extended join parameters\n");
+		return NULL;
+	}
+	brcmf_joinscan_set_ssid(&ext_v0->ssid_le, params->ssid,
+				params->ssid_len);
+	brcmf_joinscan_set_common_v0v1_params(&ext_v0->scan_le,
+					      cfg->channel != 0);
+	brcmf_joinscan_set_bssid(ext_v0->assoc_le.bssid, params->bssid);
+	if (cfg->channel) {
+		struct ieee80211_channel *chan = params->channel_hint ?
+							 params->channel_hint :
+							 params->channel;
+		brcmf_joinscan_set_single_chanspec_from_channel(
+			cfg, chan, &ext_v0->assoc_le.chanspec_num,
+			&ext_v0->assoc_le.chanspec_list);
+	}
+	return ext_v0;
+}
+
+static void *
+brcmf_get_struct_for_connect_v1(struct brcmf_cfg80211_info *cfg,
+				u32 *struct_size,
+				struct cfg80211_connect_params *params)
+{
+	struct brcmf_ext_join_params_v1_le *ext_v1;
+	u32 join_params_size =
+		struct_size(ext_v1, assoc_le.chanspec_list, cfg->channel != 0);
+
+	*struct_size = join_params_size;
+	ext_v1 = kzalloc(join_params_size, GFP_KERNEL);
+	if (!ext_v1) {
+		bphy_err(
+			cfg,
+			"Could not allocate memory for extended join parameters\n");
+		return NULL;
+	}
+	ext_v1->version = cpu_to_le16(1);
+	ext_v1->assoc_le.version = cpu_to_le16(1);
+	brcmf_joinscan_set_ssid(&ext_v1->ssid_le, params->ssid,
+				params->ssid_len);
+	brcmf_joinscan_set_common_v0v1_params(&ext_v1->scan_le,
+					      cfg->channel != 0);
+	brcmf_joinscan_set_bssid(ext_v1->assoc_le.bssid, params->bssid);
+	if (cfg->channel) {
+		struct ieee80211_channel *chan = params->channel_hint ?
+							 params->channel_hint :
+							 params->channel;
+		brcmf_joinscan_set_single_chanspec_from_channel(
+			cfg, chan, &ext_v1->assoc_le.chanspec_num,
+			&ext_v1->assoc_le.chanspec_list);
+	}
+	return ext_v1;
+}
+
+static void *brcmf_get_join_from_ext_join_v0(void *ext_join, u32 *struct_size)
+{
+	struct brcmf_ext_join_params_le *ext_join_v0 =
+		(struct brcmf_ext_join_params_le *)ext_join;
+	u32 chanspec_num = le32_to_cpu(ext_join_v0->assoc_le.chanspec_num);
+	struct brcmf_join_params *join_params;
+	u32 join_params_size =
+		struct_size(join_params, params_le.chanspec_list, chanspec_num);
+	u32 assoc_size = struct_size_t(struct brcmf_assoc_params_le,
+				       chanspec_list, chanspec_num);
+
+	*struct_size = join_params_size;
+	join_params = kzalloc(join_params_size, GFP_KERNEL);
+	if (!join_params)
+		return NULL;
+	memcpy(&join_params->ssid_le, &ext_join_v0->ssid_le,
+	       sizeof(ext_join_v0->ssid_le));
+	memcpy(&join_params->params_le, &ext_join_v0->assoc_le, assoc_size);
+
+	return join_params;
+}
+
+static void *brcmf_get_join_from_ext_join_v1(void *ext_join, u32 *struct_size)
+{
+	struct brcmf_ext_join_params_v1_le *ext_join_v1 =
+		(struct brcmf_ext_join_params_v1_le *)ext_join;
+	u32 chanspec_num = le32_to_cpu(ext_join_v1->assoc_le.chanspec_num);
+	struct brcmf_join_params_v1 *join_params;
+	u32 join_params_size =
+		struct_size(join_params, params_le.chanspec_list, chanspec_num);
+	u32 assoc_size = struct_size_t(struct brcmf_assoc_params_v1_le,
+				       chanspec_list, chanspec_num);
+
+	*struct_size = join_params_size;
+	join_params = kzalloc(join_params_size, GFP_KERNEL);
+	if (!join_params)
+		return NULL;
+	memcpy(&join_params->ssid_le, &ext_join_v1->ssid_le,
+	       sizeof(ext_join_v1->ssid_le));
+	memcpy(&join_params->params_le, &ext_join_v1->assoc_le, assoc_size);
+
+	return join_params;
+}
+
+int brcmf_join_param_setup_for_version(struct brcmf_pub *drvr, u8 version)
+{
+	drvr->join_param_handler.version = version;
+	switch (version) {
+	case 0:
+		drvr->join_param_handler.get_struct_for_ibss =
+			brcmf_get_struct_for_ibss_v0;
+		drvr->join_param_handler.get_struct_for_connect =
+			brcmf_get_struct_for_connect_v0;
+		drvr->join_param_handler.get_join_from_ext_join =
+			brcmf_get_join_from_ext_join_v0;
+		break;
+	case 1:
+		drvr->join_param_handler.get_struct_for_ibss =
+			brcmf_get_prepped_struct_for_ibss_v1;
+		drvr->join_param_handler.get_struct_for_connect =
+			brcmf_get_struct_for_connect_v1;
+		drvr->join_param_handler.get_join_from_ext_join =
+			brcmf_get_join_from_ext_join_v1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/join_param.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/join_param.h
new file mode 100644
index 00000000000000..f549fe2a740823
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/join_param.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+
+#ifndef _BRCMF_JOIN_PARAM_H
+#define _BRCMF_JOIN_PARAM_H
+
+struct brcmf_pub;
+
+/**
+ * brcmf_join_param_setup_for_version() - Set up the driver to handle join structures
+ *
+ * There are a number of different structures and interface versions for
+ * join/extended join parameters. This sets up the driver to handle a
+ * particular interface version.
+ *
+ * @drvr: Driver structure to set up
+ * @ver: Interface version
+ * Return: %0 if okay, error code otherwise
+ */
+int brcmf_join_param_setup_for_version(struct brcmf_pub *drvr, u8 ver);
+#endif /* _BRCMF_JOIN_PARAM_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 45fbcbdc7d9e4b..0e41d618486d39 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -47,9 +47,35 @@
 #define MSGBUF_TYPE_RX_CMPLT			0x12
 #define MSGBUF_TYPE_LPBK_DMAXFER		0x13
 #define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14
+#define MSGBUF_TYPE_FLOW_RING_RESUME		0x15
+#define MSGBUF_TYPE_FLOW_RING_RESUME_CMPLT	0x16
+#define MSGBUF_TYPE_FLOW_RING_SUSPEND		0x17
+#define MSGBUF_TYPE_FLOW_RING_SUSPEND_CMPLT	0x18
+#define MSGBUF_TYPE_INFO_BUF_POST		0x19
+#define MSGBUF_TYPE_INFO_BUF_CMPLT		0x1A
+#define MSGBUF_TYPE_H2D_RING_CREATE		0x1B
+#define MSGBUF_TYPE_D2H_RING_CREATE		0x1C
+#define MSGBUF_TYPE_H2D_RING_CREATE_CMPLT	0x1D
+#define MSGBUF_TYPE_D2H_RING_CREATE_CMPLT	0x1E
+#define MSGBUF_TYPE_H2D_RING_CONFIG		0x1F
+#define MSGBUF_TYPE_D2H_RING_CONFIG		0x20
+#define MSGBUF_TYPE_H2D_RING_CONFIG_CMPLT	0x21
+#define MSGBUF_TYPE_D2H_RING_CONFIG_CMPLT	0x22
+#define MSGBUF_TYPE_H2D_MAILBOX_DATA		0x23
+#define MSGBUF_TYPE_D2H_MAILBOX_DATA		0x24
+#define MSGBUF_TYPE_TIMSTAMP_BUFPOST		0x25
+#define MSGBUF_TYPE_HOSTTIMSTAMP		0x26
+#define MSGBUF_TYPE_HOSTTIMSTAMP_CMPLT		0x27
+#define MSGBUF_TYPE_FIRMWARE_TIMESTAMP		0x28
+#define MSGBUF_TYPE_SNAPSHOT_UPLOAD		0x29
+#define MSGBUF_TYPE_SNAPSHOT_CMPLT		0x2A
+#define MSGBUF_TYPE_H2D_RING_DELETE		0x2B
+#define MSGBUF_TYPE_D2H_RING_DELETE		0x2C
+#define MSGBUF_TYPE_H2D_RING_DELETE_CMPLT	0x2D
+#define MSGBUF_TYPE_D2H_RING_DELETE_CMPLT	0x2E
 
 #define NR_TX_PKTIDS				2048
-#define NR_RX_PKTIDS				1024
+#define NR_RX_PKTIDS				2048
 
 #define BRCMF_IOCTL_REQ_PKTID			0xFFFE
 
@@ -218,6 +244,19 @@ struct msgbuf_flowring_flush_resp {
 	__le32				rsvd0[3];
 };
 
+struct msgbuf_h2d_mailbox_data {
+	struct msgbuf_common_hdr	msg;
+	__le32				data;
+	__le32				rsvd0[7];
+};
+
+struct msgbuf_d2h_mailbox_data {
+	struct msgbuf_common_hdr	msg;
+	struct msgbuf_completion_hdr	compl_hdr;
+	__le32				data;
+	__le32				rsvd0[2];
+};
+
 struct brcmf_msgbuf_work_item {
 	struct list_head queue;
 	u32 flowid;
@@ -1285,6 +1324,16 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
 }
 
 
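+/* Pass device-to-host mailbox data received over the message rings on to
+ * the bus layer.
+ */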
+static void brcmf_msgbuf_process_d2h_mailbox_data(struct brcmf_msgbuf *msgbuf,
+						  void *buf)
+{
+	struct msgbuf_d2h_mailbox_data *d2h_mb_data = buf;
+	struct brcmf_pub *drvr = msgbuf->drvr;
+
+	brcmf_bus_d2h_mb_rx(drvr->bus_if, le32_to_cpu(d2h_mb_data->data));
+}
+
+
 static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
 {
 	struct brcmf_pub *drvr = msgbuf->drvr;
@@ -1327,6 +1376,10 @@ static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
 		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
 		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
 		break;
+	case MSGBUF_TYPE_D2H_MAILBOX_DATA:
+		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_D2H_MAILBOX_DATA\n");
+		brcmf_msgbuf_process_d2h_mailbox_data(msgbuf, buf);
+		break;
 	default:
 		bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
 		break;
@@ -1465,6 +1518,38 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
 	}
 }
 
+
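+/* Send host-to-device mailbox data as a message on the control submit ring,
+ * for firmware interfaces where mailbox data travels over the message rings
+ * instead of the legacy H2D mailbox registers.
+ */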
+int brcmf_msgbuf_h2d_mb_write(struct brcmf_pub *drvr, u32 data)
+{
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	struct msgbuf_h2d_mailbox_data *request;
+	void *ret_ptr;
+	int err;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	brcmf_commonring_lock(commonring);
+	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+	if (!ret_ptr) {
+		bphy_err(drvr, "Failed to reserve space in commonring\n");
+		brcmf_commonring_unlock(commonring);
+		return -ENOMEM;
+	}
+
+	request = (struct msgbuf_h2d_mailbox_data *)ret_ptr;
+	request->msg.msgtype = MSGBUF_TYPE_H2D_MAILBOX_DATA;
+	request->msg.ifidx = -1;
+	request->msg.flags = 0;
+	request->msg.request_id = 0;
+	request->data = cpu_to_le32(data);
+
+	err = brcmf_commonring_write_complete(commonring);
+	brcmf_commonring_unlock(commonring);
+
+	return err;
+}
+
+
 #ifdef DEBUG
 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
 {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 6a849f4a94dd7f..0ed48cf13d93cf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -8,10 +8,10 @@
 #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM	64
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM	1024
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM	2048
 #define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM	64
 #define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM		1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM		1024
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM		2048
 #define BRCMF_H2D_TXFLOWRING_MAX_ITEM			512
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE	40
@@ -32,6 +32,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
 void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
+int brcmf_msgbuf_h2d_mb_write(struct brcmf_pub *drvr, u32 data);
 #else
 static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 6e0c90f4718b58..543d3cba1c6156 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1793,8 +1793,8 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
 		/* do not configure anything. it will be */
 		/* sent with a default configuration     */
 	} else {
-		bphy_err(drvr, "Unknown Frame: category 0x%x, action 0x%x\n",
-			 category, action);
+		bphy_info_once(drvr, "Unknown Frame: category 0x%x, action 0x%x\n",
+			       category, action);
 		return false;
 	}
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index d2caa80e941235..bb534d69e12df0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -71,6 +71,8 @@ BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie");
 BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
 BRCMF_FW_CLM_DEF(4378B3, "brcmfmac4378b3-pcie");
 BRCMF_FW_CLM_DEF(4387C2, "brcmfmac4387c2-pcie");
+BRCMF_FW_CLM_DEF(4388B0, "brcmfmac4388b0-pcie");
+BRCMF_FW_CLM_DEF(4388C0, "brcmfmac4388c0-pcie");
 
 /* firmware config files */
 MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
@@ -110,6 +112,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
 	BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0x0000000F, 4378B1), /* revision ID 3 */
 	BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFE0, 4378B3), /* revision ID 5 */
 	BRCMF_FW_ENTRY(BRCM_CC_4387_CHIP_ID, 0xFFFFFFFF, 4387C2), /* revision ID 7 */
+	BRCMF_FW_ENTRY(BRCM_CC_4388_CHIP_ID, 0x0000000F, 4388B0),
+	BRCMF_FW_ENTRY(BRCM_CC_4388_CHIP_ID, 0xFFFFFFF0, 4388C0), /* revision ID 4 */
 };
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT		5000 /* msec */
@@ -217,11 +221,64 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
 #define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
 #define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
 #define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
+#define BRCMF_PCIE_SHARED_USE_MAILBOX		0x2000000
+#define BRCMF_PCIE_SHARED_TIMESTAMP_DB0		0x8000000
 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1		0x10000000
+#define BRCMF_PCIE_SHARED_NO_OOB_DW		0x20000000
+#define BRCMF_PCIE_SHARED_INBAND_DS		0x40000000
+#define BRCMF_PCIE_SHARED_DAR			0x80000000
+
+#define BRCMF_PCIE_SHARED2_EXTENDED_TRAP_DATA	0x1
+#define BRCMF_PCIE_SHARED2_TXSTATUS_METADATA	0x2
+#define BRCMF_PCIE_SHARED2_BT_LOGGING		0x4
+#define BRCMF_PCIE_SHARED2_SNAPSHOT_UPLOAD	0x8
+#define BRCMF_PCIE_SHARED2_SUBMIT_COUNT_WAR	0x10
+#define BRCMF_PCIE_SHARED2_FAST_DELETE_RING	0x20
+#define BRCMF_PCIE_SHARED2_EVTBUF_MAX_MASK	0xC0
+#define BRCMF_PCIE_SHARED2_PKT_TX_STATUS	0x100
+#define BRCMF_PCIE_SHARED2_FW_SMALL_MEMDUMP	0x200
+#define BRCMF_PCIE_SHARED2_FW_HC_ON_TRAP	0x400
+#define BRCMF_PCIE_SHARED2_HSCB			0x800
+#define BRCMF_PCIE_SHARED2_EDL_RING		0x1000
+#define BRCMF_PCIE_SHARED2_DEBUG_BUF_DEST	0x2000
+#define BRCMF_PCIE_SHARED2_PCIE_ENUM_RESET_FLR	0x4000
+#define BRCMF_PCIE_SHARED2_PKT_TIMESTAMP	0x8000
+#define BRCMF_PCIE_SHARED2_HP2P			0x10000
+#define BRCMF_PCIE_SHARED2_HWA			0x20000
+#define BRCMF_PCIE_SHARED2_TRAP_ON_HOST_DB7	0x40000
+#define BRCMF_PCIE_SHARED2_DURATION_SCALE	0x100000
+#define BRCMF_PCIE_SHARED2_D2H_D11_TX_STATUS	0x40000000
+#define BRCMF_PCIE_SHARED2_H2D_D11_TX_STATUS	0x80000000
 
 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000
 
+#define BRCMF_HOSTCAP_PCIEAPI_VERSION_MASK	0x000000FF
+#define BRCMF_HOSTCAP_H2D_VALID_PHASE		0x00000100
+#define BRCMF_HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE	0x00000200
+#define BRCMF_HOSTCAP_H2D_ENABLE_HOSTRDY	0x400
+#define BRCMF_HOSTCAP_DB0_TIMESTAMP		0x800
+#define BRCMF_HOSTCAP_DS_NO_OOB_DW		0x1000
+#define BRCMF_HOSTCAP_DS_INBAND_DW		0x2000
+#define BRCMF_HOSTCAP_H2D_IDMA			0x4000
+#define BRCMF_HOSTCAP_H2D_IFRM			0x8000
+#define BRCMF_HOSTCAP_H2D_DAR			0x10000
+#define BRCMF_HOSTCAP_EXTENDED_TRAP_DATA	0x20000
+#define BRCMF_HOSTCAP_TXSTATUS_METADATA		0x40000
+#define BRCMF_HOSTCAP_BT_LOGGING		0x80000
+#define BRCMF_HOSTCAP_SNAPSHOT_UPLOAD		0x100000
+#define BRCMF_HOSTCAP_FAST_DELETE_RING		0x200000
+#define BRCMF_HOSTCAP_PKT_TXSTATUS		0x400000
+#define BRCMF_HOSTCAP_UR_FW_NO_TRAP		0x800000
+#define BRCMF_HOSTCAP_HSCB			0x2000000
+#define BRCMF_HOSTCAP_EXT_TRAP_DBGBUF		0x4000000
+#define BRCMF_HOSTCAP_EDL_RING			0x10000000
+#define BRCMF_HOSTCAP_PKT_TIMESTAMP		0x20000000
+#define BRCMF_HOSTCAP_PKT_HP2P			0x40000000
+#define BRCMF_HOSTCAP_HWA			0x80000000
+#define BRCMF_HOSTCAP2_DURATION_SCALE_MASK	0x3F
+
+#define BRCMF_SHARED_FLAGS_OFFSET		0
 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
 #define BRCMF_SHARED_RING_BASE_OFFSET		52
 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
@@ -233,6 +290,11 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68
+#define BRCMF_SHARED_FLAGS2_OFFSET		80
+#define BRCMF_SHARED_HOST_CAP_OFFSET		84
+#define BRCMF_SHARED_FLAGS3_OFFSET		108
+#define BRCMF_SHARED_HOST_CAP2_OFFSET		112
+#define BRCMF_SHARED_HOST_CAP3_OFFSET		116
 
 #define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
 #define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
@@ -278,6 +340,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
+#define BRCMF_PCIE_CFGREG_TLCNTRL_5		0x814
 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3
 
 /* Magic number at a magic location to find RAM size */
@@ -297,6 +360,8 @@ struct brcmf_pcie_console {
 struct brcmf_pcie_shared_info {
 	u32 tcm_base_address;
 	u32 flags;
+	u32 flags2;
+	u32 flags3;
 	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
 	struct brcmf_pcie_ringbuf *flowrings;
 	u16 max_rxbufpost;
@@ -313,6 +378,7 @@ struct brcmf_pcie_shared_info {
 	void *ringupd;
 	dma_addr_t ringupd_dmahandle;
 	u8 version;
+	bool mb_via_ctl;
 };
 
 #define BRCMF_OTP_MAX_PARAM_LEN 16
@@ -329,6 +395,7 @@ struct brcmf_pciedev_info {
 	bool in_irq;
 	struct pci_dev *pdev;
 	char fw_name[BRCMF_FW_NAME_LEN];
+	char sig_name[BRCMF_FW_NAME_LEN];
 	char nvram_name[BRCMF_FW_NAME_LEN];
 	char clm_name[BRCMF_FW_NAME_LEN];
 	char txcap_name[BRCMF_FW_NAME_LEN];
@@ -337,14 +404,16 @@ struct brcmf_pciedev_info {
 	const struct brcmf_pcie_reginfo *reginfo;
 	void __iomem *regs;
 	void __iomem *tcm;
-	u32 ram_base;
-	u32 ram_size;
+	u32 fw_size;
+	bool skip_reset_vector;
 	struct brcmf_chip *ci;
 	u32 coreid;
 	struct brcmf_pcie_shared_info shared;
 	wait_queue_head_t mbdata_resp_wait;
 	bool mbdata_completed;
 	bool irq_allocated;
+	bool irq_ready;
+	bool have_msi;
 	bool wowl_enabled;
 	u8 dma_idx_sz;
 	void *idxbuf;
@@ -431,8 +500,6 @@ struct brcmf_pcie_reginfo {
 	u32 intmask;
 	u32 mailboxint;
 	u32 mailboxmask;
-	u32 h2d_mailbox_0;
-	u32 h2d_mailbox_1;
 	u32 int_d2h_db;
 	u32 int_fn0;
 };
@@ -441,8 +508,6 @@ static const struct brcmf_pcie_reginfo brcmf_reginfo_default = {
 	.intmask = BRCMF_PCIE_PCIE2REG_INTMASK,
 	.mailboxint = BRCMF_PCIE_PCIE2REG_MAILBOXINT,
 	.mailboxmask = BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
-	.h2d_mailbox_0 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0,
-	.h2d_mailbox_1 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1,
 	.int_d2h_db = BRCMF_PCIE_MB_INT_D2H_DB,
 	.int_fn0 = BRCMF_PCIE_MB_INT_FN0,
 };
@@ -451,8 +516,6 @@ static const struct brcmf_pcie_reginfo brcmf_reginfo_64 = {
 	.intmask = BRCMF_PCIE_64_PCIE2REG_INTMASK,
 	.mailboxint = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT,
 	.mailboxmask = BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK,
-	.h2d_mailbox_0 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0,
-	.h2d_mailbox_1 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1,
 	.int_d2h_db = BRCMF_PCIE_64_MB_INT_D2H_DB,
 	.int_fn0 = 0,
 };
@@ -491,6 +554,19 @@ brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
 	iowrite32(value, address);
 }
 
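+/* Accessors for the PCIe core register space, which sits at a fixed 0x2000
+ * offset within the mapped register space.
+ */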
+static u32
+brcmf_pcie_read_pcie32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+{
+	return brcmf_pcie_read_reg32(devinfo, 0x2000 + reg_offset);
+}
+
+
+static void
+brcmf_pcie_write_pcie32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
+		       u32 value)
+{
+	brcmf_pcie_write_reg32(devinfo, 0x2000 + reg_offset, value);
+}
 
 static u8
 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
@@ -682,8 +758,30 @@ static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
 
 	/* Watchdog reset */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
-	WRITECC32(devinfo, watchdog, 4);
-	msleep(100);
+	core = brcmf_chip_get_chipcommon(devinfo->ci);
+
+	if (core->rev >= 65) {
+		u32 mask = CC_WD_SSRESET_PCIE_F0_EN;
+
+		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+		if (core->rev < 66)
+			mask |= CC_WD_SSRESET_PCIE_ALL_FN_EN;
+
+		val = READCC32(devinfo, watchdog);
+		val &= ~CC_WD_ENABLE_MASK;
+		val |= mask;
+		WRITECC32(devinfo, watchdog, val);
+		val &= ~CC_WD_COUNTER_MASK;
+		val |= 4;
+		WRITECC32(devinfo, watchdog, val);
+		msleep(10);
+		val = READCC32(devinfo, intstatus);
+		val |= mask;
+		WRITECC32(devinfo, intstatus, val);
+	} else {
+		WRITECC32(devinfo, watchdog, 4);
+		msleep(100);
+	}
 
 	/* Restore ASPM */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
@@ -693,14 +791,14 @@ static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
 	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
 	if (core->rev <= 13) {
 		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
-			brcmf_pcie_write_reg32(devinfo,
+			brcmf_pcie_write_pcie32(devinfo,
 					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
 					       cfg_offset[i]);
-			val = brcmf_pcie_read_reg32(devinfo,
+			val = brcmf_pcie_read_pcie32(devinfo,
 				BRCMF_PCIE_PCIE2REG_CONFIGDATA);
 			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
 				  cfg_offset[i], val);
-			brcmf_pcie_write_reg32(devinfo,
+			brcmf_pcie_write_pcie32(devinfo,
 					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
 					       val);
 		}
@@ -714,9 +812,9 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
 
 	/* BAR1 window may not be sized properly */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
-	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
-	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
-	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
+	brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
+	config = brcmf_pcie_read_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+	brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
 
 	device_wakeup_enable(&devinfo->pdev->dev);
 }
@@ -735,6 +833,21 @@ static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
 		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
 				       0);
 	}
+
+	/* Ensure all IRQs are masked so the firmware doesn't get
+	 * a hostready notification too early.
+	 */
+
+	brcmf_pcie_write_pcie32(devinfo, devinfo->reginfo->mailboxmask, 0);
+	brcmf_pcie_write_pcie32(devinfo, devinfo->reginfo->mailboxint,
+				0xffffffff);
+
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, 0);
+
+	brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
+				BRCMF_PCIE_CFGREG_TLCNTRL_5);
+	brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
+				0xffffffff);
 	return 0;
 }
 
@@ -765,6 +878,19 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
 	u32 i;
 
 	shared = &devinfo->shared;
+
+	if (shared->mb_via_ctl) {
+		struct pci_dev *pdev = devinfo->pdev;
+		struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
+		int ret;
+
+		ret = brcmf_msgbuf_h2d_mb_write(bus->drvr, htod_mb_data);
+		if (ret < 0)
+			brcmf_err(bus, "Failed to send H2D mailbox data (%d)\n",
+				  ret);
+		return ret;
+	}
+
 	addr = shared->htod_mb_data_addr;
 	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
 
@@ -792,8 +918,29 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
 	return 0;
 }
 
+static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo, u32 data)
+{
+	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", data);
+	if (data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
+		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
+		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
+		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
+	}
+	if (data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
+		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
+	if (data & BRCMF_D2H_DEV_D3_ACK) {
+		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
+		devinfo->mbdata_completed = true;
+		wake_up(&devinfo->mbdata_resp_wait);
+	}
+	if (data & BRCMF_D2H_DEV_FWHALT) {
+		brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
+		brcmf_fw_crashed(&devinfo->pdev->dev);
+	}
+}
 
-static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
+
+static void brcmf_pcie_poll_mb_data(struct brcmf_pciedev_info *devinfo)
 {
 	struct brcmf_pcie_shared_info *shared;
 	u32 addr;
@@ -808,23 +955,16 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
 
 	brcmf_pcie_write_tcm32(devinfo, addr, 0);
 
-	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
-	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ)  {
-		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
-		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
-		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
-	}
-	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
-		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
-	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
-		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
-		devinfo->mbdata_completed = true;
-		wake_up(&devinfo->mbdata_resp_wait);
-	}
-	if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
-		brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
-		brcmf_fw_crashed(&devinfo->pdev->dev);
-	}
+	brcmf_pcie_handle_mb_data(devinfo, dtoh_mb_data);
+}
+
+
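+/* Bus callback: device-to-host mailbox data delivered by the msgbuf layer
+ * via the message rings.
+ */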
+static void brcmf_pcie_d2h_mb_rx(struct device *dev, u32 data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+	brcmf_pcie_handle_mb_data(buspub->devinfo, data);
 }
 
 
@@ -903,33 +1043,45 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
 
 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
 {
-	brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0);
+	brcmf_pcie_write_pcie32(devinfo, devinfo->reginfo->mailboxmask, 0);
+
+	devinfo->irq_ready = false;
 }
 
 
 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
 {
-	brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask,
+	brcmf_pcie_write_pcie32(devinfo, devinfo->reginfo->mailboxmask,
 			       devinfo->reginfo->int_d2h_db |
 			       devinfo->reginfo->int_fn0);
+
+	devinfo->irq_ready = true;
 }
 
 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
 {
-	if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
-		brcmf_pcie_write_reg32(devinfo,
-				       devinfo->reginfo->h2d_mailbox_1, 1);
+	if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1) {
+		if (devinfo->shared.flags & BRCMF_PCIE_SHARED_DAR)
+			brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1, 1);
+		else
+			brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
+	}
 }
 
 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
 {
 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
 
-	if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) {
+	if (brcmf_pcie_read_pcie32(devinfo, devinfo->reginfo->mailboxint)) {
 		brcmf_pcie_intr_disable(devinfo);
 		brcmf_dbg(PCIE, "Enter\n");
 		return IRQ_WAKE_THREAD;
 	}
+
+	/* mailboxint is cleared by the firmware in MSI mode */
+	if (devinfo->have_msi)
+		return IRQ_WAKE_THREAD;
+
 	return IRQ_NONE;
 }
 
@@ -940,19 +1092,19 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
 	u32 status;
 
 	devinfo->in_irq = true;
-	status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
+	status = brcmf_pcie_read_pcie32(devinfo, devinfo->reginfo->mailboxint);
 	brcmf_dbg(PCIE, "Enter %x\n", status);
 	if (status) {
-		brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint,
+		brcmf_pcie_write_pcie32(devinfo, devinfo->reginfo->mailboxint,
 				       status);
 		if (status & devinfo->reginfo->int_fn0)
-			brcmf_pcie_handle_mb_data(devinfo);
-		if (status & devinfo->reginfo->int_d2h_db) {
-			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
-				brcmf_proto_msgbuf_rx_trigger(
-							&devinfo->pdev->dev);
-		}
+			brcmf_pcie_poll_mb_data(devinfo);
 	}
+	if (devinfo->have_msi || (status & devinfo->reginfo->int_d2h_db)) {
+		if (devinfo->state == BRCMFMAC_PCIE_STATE_UP && devinfo->irq_ready)
+			brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
+	}
+
 	brcmf_pcie_bus_console_read(devinfo, false);
 	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
 		brcmf_pcie_intr_enable(devinfo);
@@ -970,7 +1122,10 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
 
 	brcmf_dbg(PCIE, "Enter\n");
 
-	pci_enable_msi(pdev);
+	devinfo->have_msi = pci_enable_msi(pdev) >= 0;
+	if (devinfo->have_msi)
+		brcmf_dbg(PCIE, "MSI enabled\n");
+
 	if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
 				 brcmf_pcie_isr_thread, IRQF_SHARED,
 				 "brcmf_pcie_intr", devinfo)) {
@@ -1006,8 +1161,8 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
 	if (devinfo->in_irq)
 		brcmf_err(bus, "Still in IRQ (processing) !!!\n");
 
-	status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
-	brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status);
+	status = brcmf_pcie_read_pcie32(devinfo, devinfo->reginfo->mailboxint);
+	brcmf_pcie_write_pcie32(devinfo, devinfo->reginfo->mailboxint, status);
 
 	devinfo->irq_allocated = false;
 }
@@ -1059,7 +1214,10 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
 
 	brcmf_dbg(PCIE, "RING !\n");
 	/* Any arbitrary value will do, lets use 1 */
-	brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1);
+	if (devinfo->shared.flags & BRCMF_PCIE_SHARED_DAR)
+		brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0, 1);
+	else
+		brcmf_pcie_write_pcie32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
 
 	return 0;
 }
@@ -1585,6 +1743,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
 	.get_blob = brcmf_pcie_get_blob,
 	.reset = brcmf_pcie_reset,
 	.debugfs_create = brcmf_pcie_debugfs_create,
+	.d2h_mb_rx = brcmf_pcie_d2h_mb_rx,
 };
 
 
@@ -1616,12 +1775,16 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
 {
 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
 	struct brcmf_pcie_shared_info *shared;
+	u32 host_cap;
+	u32 host_cap2;
 	u32 addr;
 
 	shared = &devinfo->shared;
 	shared->tcm_base_address = sharedram_addr;
 
-	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
+	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr +
+	                                      BRCMF_SHARED_FLAGS_OFFSET);
+
 	shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
 	brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
 	if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
@@ -1662,29 +1825,223 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
 	brcmf_pcie_bus_console_init(devinfo);
 	brcmf_pcie_bus_console_read(devinfo, false);
 
+	/* Features added in revision 6 follow */
+	if (shared->version < 6)
+		return 0;
+
+	shared->flags2 = brcmf_pcie_read_tcm32(devinfo, sharedram_addr +
+	                                       BRCMF_SHARED_FLAGS2_OFFSET);
+	shared->flags3 = brcmf_pcie_read_tcm32(devinfo, sharedram_addr +
+	                                       BRCMF_SHARED_FLAGS3_OFFSET);
+
+	/* Check which mailbox mechanism to use */
+	if (!(shared->flags & BRCMF_PCIE_SHARED_USE_MAILBOX))
+		shared->mb_via_ctl = true;
+
+	/* Update host support flags */
+	host_cap = shared->version;
+	host_cap2 = 0;
+
+	if (shared->flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
+		host_cap |= BRCMF_HOSTCAP_H2D_ENABLE_HOSTRDY;
+
+	if (shared->flags & BRCMF_PCIE_SHARED_DAR)
+		host_cap |= BRCMF_HOSTCAP_H2D_DAR;
+
+	/* Disable DS: this is not currently properly supported */
+	host_cap |= BRCMF_HOSTCAP_DS_NO_OOB_DW;
+
+	brcmf_pcie_write_tcm32(devinfo, sharedram_addr +
+			       BRCMF_SHARED_HOST_CAP_OFFSET, host_cap);
+	brcmf_pcie_write_tcm32(devinfo, sharedram_addr +
+			       BRCMF_SHARED_HOST_CAP2_OFFSET, host_cap2);
+
 	return 0;
 }
 
-struct brcmf_random_seed_footer {
+struct brcmf_rtlv_footer {
 	__le32 length;
 	__le32 magic;
 };
 
+/**
+ * struct brcmf_fw_memmap_region - start/end addresses of a chip memory region
+ * @start: region start address
+ * @end: region end address
+ */
+struct brcmf_fw_memmap_region {
+	u32 start;
+	u32 end;
+};
+
+/**
+ * struct brcmf_fw_memmap - memory region map handed to the bootloader
+ *
+ * @reset_vec: reset vector - read only
+ * @int_vec: copied from RAM, jumps here on success
+ * @rom: bootloader at ROM start
+ * @mmap: struct/memory map written by host
+ * @vstatus: verification status
+ * @fw: firmware
+ * @sig: firmware signature
+ * @heap: region for heap allocations
+ * @stack: region for stack allocations
+ * @prng: PRNG data, may be 0 length
+ * @nvram: NVRAM data
+ */
+struct brcmf_fw_memmap {
+	struct brcmf_fw_memmap_region reset_vec;
+	struct brcmf_fw_memmap_region int_vec;
+	struct brcmf_fw_memmap_region rom;
+	struct brcmf_fw_memmap_region mmap;
+	struct brcmf_fw_memmap_region vstatus;
+	struct brcmf_fw_memmap_region fw;
+	struct brcmf_fw_memmap_region sig;
+	struct brcmf_fw_memmap_region heap;
+	struct brcmf_fw_memmap_region stack;
+	struct brcmf_fw_memmap_region prng;
+	struct brcmf_fw_memmap_region nvram;
+};
+
+#define BRCMF_BL_HEAP_START_GAP		0x1000
+#define BRCMF_BL_HEAP_SIZE		0x10000
 #define BRCMF_RANDOM_SEED_MAGIC		0xfeedc0de
 #define BRCMF_RANDOM_SEED_LENGTH	0x100
+#define BRCMF_FW_SIG_MAGIC		0xfeedfe51
+#define BRCMF_NVRAM_SIG_MAGIC		0xfeedfe52
+#define BRCMF_MEMMAP_MAGIC		0xfeedfe53
+#define BRCMF_VSTATUS_MAGIC		0xfeedfe54
+#define BRCMF_VSTATUS_SIZE		0x28
+#define BRCMF_END_MAGIC			0xfeed0e2d
 
-static noinline_for_stack void
-brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
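+/* Reserve space for one reversed-TLV (rTLV) entry below *address in TCM.
+ * Each entry is a payload followed by a {length, magic} footer; entries grow
+ * downward from the top of RAM and may not reach the bootloader heap that
+ * sits just above the firmware image. On success, *address is updated to
+ * point at the start of the new payload.
+ */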
+static int brcmf_alloc_rtlv(struct brcmf_pciedev_info *devinfo, u32 *address, u32 type, u32 length)
 {
+	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
+	u32 fw_top = devinfo->ci->rambase + devinfo->fw_size;
+	u32 ram_start = ALIGN(fw_top + BRCMF_BL_HEAP_START_GAP, 4);
+	u32 ram_end = ram_start + BRCMF_BL_HEAP_SIZE;
+	u32 start_addr;
+	struct brcmf_rtlv_footer footer = {
+		.magic = cpu_to_le32(type),
+	};
+
+	length = ALIGN(length, 4);
+	start_addr = *address - length - sizeof(struct brcmf_rtlv_footer);
+
+	if (length > 0xffff || start_addr > *address || start_addr < ram_end) {
+		brcmf_err(bus, "failed to allocate 0x%x bytes for rTLV type 0x%x\n",
+			  length, type);
+		return -ENOMEM;
+	}
+
+	/* Random seed does not use the length check code */
+	if (type == BRCMF_RANDOM_SEED_MAGIC)
+		footer.length = cpu_to_le32(length);
+	else
+		footer.length = cpu_to_le32(length | ((length ^ 0xffff) << 16));
+
+	memcpy_toio(devinfo->tcm + *address - sizeof(struct brcmf_rtlv_footer),
+		    &footer, sizeof(struct brcmf_rtlv_footer));
+
+	*address = start_addr;
+
+	return 0;
+}
+
+static noinline_for_stack int
+brcmf_pcie_add_random_seed(struct brcmf_pciedev_info *devinfo, u32 *address)
+{
+	int err;
 	u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
 
+	err = brcmf_alloc_rtlv(devinfo, address,
+			       BRCMF_RANDOM_SEED_MAGIC, BRCMF_RANDOM_SEED_LENGTH);
+	if (err)
+		return err;
+
+	/* Some Apple chips/firmwares expect a buffer of random
+	 * data to be present before NVRAM
+	 */
+	brcmf_dbg(PCIE, "Download random seed\n");
+
 	get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
-	memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
+	memcpy_toio(devinfo->tcm + *address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
+
+	return 0;
+}
+
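+/* Lay out the rTLV entries the bootloader uses to verify the firmware: the
+ * signature blob, a zeroed verification-status record, a memory map covering
+ * the firmware, heap, signature and status regions, and an end marker. Also
+ * flags that the reset vector must not be overwritten afterwards.
+ */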
+static int brcmf_pcie_add_signature(struct brcmf_pciedev_info *devinfo,
+				    u32 *address, const struct firmware *fwsig)
+{
+	int err;
+	struct brcmf_fw_memmap memmap;
+
+	brcmf_dbg(PCIE, "Download firmware signature\n");
+
+	memset(&memmap, 0, sizeof(memmap));
+
+	memmap.sig.end = *address;
+	err = brcmf_alloc_rtlv(devinfo, address, BRCMF_FW_SIG_MAGIC, fwsig->size);
+	if (err)
+		return err;
+	memmap.sig.start = *address;
+
+	memmap.vstatus.end = *address;
+	err = brcmf_alloc_rtlv(devinfo, address, BRCMF_VSTATUS_MAGIC, BRCMF_VSTATUS_SIZE);
+	if (err)
+		return err;
+	memmap.vstatus.start = *address;
+
+	err = brcmf_alloc_rtlv(devinfo, address, BRCMF_MEMMAP_MAGIC, sizeof(memmap));
+	if (err)
+		return err;
+
+	memmap.fw.start = devinfo->ci->rambase;
+	memmap.fw.end = memmap.fw.start + devinfo->fw_size;
+	memmap.heap.start = ALIGN(memmap.fw.end + BRCMF_BL_HEAP_START_GAP, 4);
+	memmap.heap.end = memmap.heap.start + BRCMF_BL_HEAP_SIZE;
+
+	if (memmap.heap.end > *address)
+		return -ENOMEM;
+
+	memcpy_toio(devinfo->tcm + memmap.sig.start, fwsig->data, fwsig->size);
+	memset_io(devinfo->tcm + memmap.vstatus.start, 0, BRCMF_VSTATUS_SIZE);
+	memcpy_toio(devinfo->tcm + *address, &memmap, sizeof(memmap));
+
+	err = brcmf_alloc_rtlv(devinfo, address, BRCMF_END_MAGIC, 0);
+	if (err)
+		return err;
+
+	devinfo->skip_reset_vector = true;
+
+	return 0;
+}
+
+static int brcmf_pcie_populate_footers(struct brcmf_pciedev_info *devinfo,
+				       u32 *address, const struct firmware *fwsig)
+{
+	int err;
+
+	/* We only do this for Apple firmwares. If any other
+	 * production firmwares are found to need this, the condition
+	 * needs to be adjusted.
+	 */
+	if (!devinfo->fwseed)
+		return 0;
+
+	err = brcmf_pcie_add_random_seed(devinfo, address);
+	if (err)
+		return err;
+
+	if (fwsig) {
+		err = brcmf_pcie_add_signature(devinfo, address, fwsig);
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
-					const struct firmware *fw, void *nvram,
-					u32 nvram_len)
+					const struct firmware *fw,
+					const struct firmware *fwsig,
+					void *nvram, u32 nvram_len)
 {
 	struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
 	u32 sharedram_addr;
@@ -1704,6 +2061,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
 		    (void *)fw->data, fw->size);
 
 	resetintr = get_unaligned_le32(fw->data);
+	devinfo->fw_size = fw->size;
 	release_firmware(fw);
 
 	/* reset last 4 bytes of RAM address. to be used for shared
@@ -1711,37 +2069,31 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
 	 */
 	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
 
+	address = devinfo->ci->rambase + devinfo->ci->ramsize;
+
 	if (nvram) {
 		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
-		address = devinfo->ci->rambase + devinfo->ci->ramsize -
-			  nvram_len;
+		address -= nvram_len;
 		memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
 		brcmf_fw_nvram_free(nvram);
 
-		if (devinfo->fwseed) {
-			size_t rand_len = BRCMF_RANDOM_SEED_LENGTH;
-			struct brcmf_random_seed_footer footer = {
-				.length = cpu_to_le32(rand_len),
-				.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
-			};
-
-			/* Some chips/firmwares expect a buffer of random
-			 * data to be present before NVRAM
-			 */
-			brcmf_dbg(PCIE, "Download random seed\n");
-
-			address -= sizeof(footer);
-			memcpy_toio(devinfo->tcm + address, &footer,
-				    sizeof(footer));
-
-			address -= rand_len;
-			brcmf_pcie_provide_random_bytes(devinfo, address);
-		}
+		err = brcmf_pcie_populate_footers(devinfo, &address, fwsig);
+		if (err)
+			brcmf_err(bus, "failed to populate firmware footers err=%d\n", err);
 	} else {
 		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
 			  devinfo->nvram_name);
 	}
 
+	release_firmware(fwsig);
+
+	/* Clear free TCM. This isn't really necessary, but it
+	 * makes debugging memory dumps a lot easier since we
+	 * don't get a bunch of junk filling up the free space.
+	 */
+	memset_io(devinfo->tcm + devinfo->ci->rambase + devinfo->fw_size,
+		  0, address - devinfo->fw_size - devinfo->ci->rambase);
+
 	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
 						       devinfo->ci->ramsize -
 						       4);
@@ -1885,9 +2237,9 @@ static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
 	else
 		reg = BRCMF_PCIE_PCIE2REG_MAILBOXINT;
 
-	val = brcmf_pcie_read_reg32(devinfo, reg);
+	val = brcmf_pcie_read_pcie32(devinfo, reg);
 	if (val != 0xffffffff)
-		brcmf_pcie_write_reg32(devinfo, reg, val);
+		brcmf_pcie_write_pcie32(devinfo, reg, val);
 
 	return 0;
 }
@@ -1898,7 +2250,8 @@ static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
 {
 	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
 
-	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
+	if (!devinfo->skip_reset_vector)
+		brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
 }
 
 
@@ -2069,6 +2422,11 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
 		base = 0x113c;
 		words = 0x170;
 		break;
+	case BRCM_CC_4388_CHIP_ID:
+		coreid = BCMA_CORE_GCI;
+		base = 0x115c;
+		words = 0x150;
+		break;
 	default:
 		/* OTP not supported on this chip */
 		return 0;
@@ -2127,11 +2485,12 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
 #define BRCMF_PCIE_FW_NVRAM	1
 #define BRCMF_PCIE_FW_CLM	2
 #define BRCMF_PCIE_FW_TXCAP	3
+#define BRCMF_PCIE_FW_SIG	4
 
 static void brcmf_pcie_setup(struct device *dev, int ret,
 			     struct brcmf_fw_request *fwreq)
 {
-	const struct firmware *fw;
+	const struct firmware *fw, *fwsig;
 	void *nvram;
 	struct brcmf_bus *bus;
 	struct brcmf_pciedev *pcie_bus_dev;
@@ -2150,6 +2509,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
 	brcmf_pcie_attach(devinfo);
 
 	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
+	fwsig = fwreq->items[BRCMF_PCIE_FW_SIG].binary;
 	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
 	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
 	devinfo->clm_fw = fwreq->items[BRCMF_PCIE_FW_CLM].binary;
@@ -2160,6 +2520,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
 	if (ret) {
 		brcmf_err(bus, "Failed to get RAM info\n");
 		release_firmware(fw);
+		release_firmware(fwsig);
 		brcmf_fw_nvram_free(nvram);
 		goto fail;
 	}
@@ -2171,7 +2532,15 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
 	 */
 	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
 
-	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
+	/* Newer firmwares will signal firmware boot via MSI, so make sure we
+	 * initialize that upfront.
+	 */
+	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
+	ret = brcmf_pcie_request_irq(devinfo);
+	if (ret)
+		goto fail;
+
+	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, fwsig, nvram, nvram_len);
 	if (ret)
 		goto fail;
 
@@ -2186,9 +2555,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
 		goto fail;
 
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
-	ret = brcmf_pcie_request_irq(devinfo);
-	if (ret)
-		goto fail;
 
 	/* hook the commonrings in the bus structure. */
 	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
@@ -2236,6 +2602,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
 		{ ".txt", devinfo->nvram_name },
 		{ ".clm_blob", devinfo->clm_name },
 		{ ".txcap_blob", devinfo->txcap_name },
+		{ ".sig", devinfo->sig_name },
 	};
 
 	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
@@ -2246,6 +2613,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
 		return NULL;
 
 	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
+	fwreq->items[BRCMF_PCIE_FW_SIG].type = BRCMF_FW_TYPE_BINARY;
+	fwreq->items[BRCMF_PCIE_FW_SIG].flags = BRCMF_FW_REQF_OPTIONAL;
 	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
 	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
 	fwreq->items[BRCMF_PCIE_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
@@ -2654,12 +3023,13 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
 	brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
 
 	/* Check if device is still up and running, if so we are ready */
-	if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) {
+	if (brcmf_pcie_read_pcie32(devinfo, devinfo->reginfo->intmask) != 0) {
 		brcmf_dbg(PCIE, "Try to wakeup device....\n");
+		/* Set the device up, so we can write the MB data message in ring mode */
+		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
 		if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
 			goto cleanup;
 		brcmf_dbg(PCIE, "Hot resume, continue....\n");
-		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
 		brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
 		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
 		brcmf_pcie_intr_enable(devinfo);
@@ -2669,6 +3039,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
 	}
 
 cleanup:
+	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
 	brcmf_chip_detach(devinfo->ci);
 	devinfo->ci = NULL;
 	pdev = devinfo->pdev;
@@ -2736,6 +3107,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4377_DEVICE_ID, WCC_SEED),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID, WCC_SEED),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4387_DEVICE_ID, WCC_SEED),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4388_DEVICE_ID, WCC_SEED),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43752_DEVICE_ID, WCC_SEED),
 
 	{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index 05f66ab13bed6d..dbeeaef75b165a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -12,8 +12,10 @@
 #include "fwil_types.h"
 #include "cfg80211.h"
 #include "pno.h"
+#include "feature.h"
 
-#define BRCMF_PNO_VERSION		2
+#define BRCMF_PNO_VERSION_2		2
+#define BRCMF_PNO_VERSION_3		3
 #define BRCMF_PNO_REPEAT		4
 #define BRCMF_PNO_FREQ_EXPO_MAX		3
 #define BRCMF_PNO_IMMEDIATE_SCAN_BIT	3
@@ -99,8 +101,62 @@ static int brcmf_pno_channel_config(struct brcmf_if *ifp,
 	return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
 }
 
-static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
-			    u32 mscan, u32 bestn)
+static int brcmf_pno_config_v3(struct brcmf_if *ifp, u32 scan_freq, u32 mscan,
+			       u32 bestn)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct brcmf_pno_param_v3_le pfn_param;
+	u16 flags;
+	u32 pfnmem;
+	s32 err;
+
+	memset(&pfn_param, 0, sizeof(pfn_param));
+	pfn_param.version = cpu_to_le16(BRCMF_PNO_VERSION_3);
+	pfn_param.length = cpu_to_le16(sizeof(struct brcmf_pno_param_v3_le));
+
+	/* set extra pno params */
+	flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
+		BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
+	pfn_param.repeat = BRCMF_PNO_REPEAT;
+	pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
+
+	/* set up pno scan frequency */
+	pfn_param.scan_freq = cpu_to_le32(scan_freq);
+
+	if (mscan) {
+		pfnmem = bestn;
+
+		/* set bestn in firmware */
+		err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
+		if (err < 0) {
+			bphy_err(drvr, "failed to set pfnmem\n");
+			goto exit;
+		}
+		/* get max mscan which the firmware supports */
+		err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
+		if (err < 0) {
+			bphy_err(drvr, "failed to get pfnmem\n");
+			goto exit;
+		}
+		mscan = min_t(u32, mscan, pfnmem);
+		pfn_param.mscan = mscan;
+		pfn_param.bestn = bestn;
+		flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
+		brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
+	}
+
+	pfn_param.flags = cpu_to_le16(flags);
+	err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
+				       sizeof(pfn_param));
+	if (err)
+		bphy_err(drvr, "pfn_set failed, err=%d\n", err);
+
+exit:
+	return err;
+}
+
+static int brcmf_pno_config_v2(struct brcmf_if *ifp, u32 scan_freq, u32 mscan,
+			       u32 bestn)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	struct brcmf_pno_param_le pfn_param;
@@ -109,7 +165,7 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
 	s32 err;
 
 	memset(&pfn_param, 0, sizeof(pfn_param));
-	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
+	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION_2);
 
 	/* set extra pno params */
 	flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
@@ -152,6 +208,12 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
 	return err;
 }
 
+static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq, u32 mscan,
+			    u32 bestn)
+{
+	return ifp->drvr->pno_handler.pno_config(ifp, scan_freq, mscan, bestn);
+}
+
 static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
@@ -275,7 +337,7 @@ static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
 {
 	u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
 	u16 chan;
-	int i, err = 0;
+	int i, err;
 
 	for (i = 0; i < r->n_channels; i++) {
 		if (n_chan >= BRCMF_NUMCHANNELS) {
@@ -562,9 +624,82 @@ u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
 	return reqid;
 }
 
-u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
-			     struct brcmf_pno_net_info_le *ni)
+
+static struct brcmf_pno_net_info_le *
+brcmf_get_netinfo_array(void *pfn_v1_data)
+{
+	struct brcmf_pno_scanresults_le *pfn_v1 =
+		(struct brcmf_pno_scanresults_le *)pfn_v1_data;
+	struct brcmf_pno_scanresults_v2_le *pfn_v2;
+	struct brcmf_pno_net_info_le *netinfo = NULL;
+
+	switch (pfn_v1->version) {
+	default:
+		WARN_ON(1);
+		fallthrough;
+	case cpu_to_le32(1):
+		netinfo = (struct brcmf_pno_net_info_le *)(pfn_v1 + 1);
+		break;
+	case cpu_to_le32(2):
+		pfn_v2 = (struct brcmf_pno_scanresults_v2_le *)pfn_v1;
+		netinfo = (struct brcmf_pno_net_info_le *)(pfn_v2 + 1);
+		break;
+	case cpu_to_le32(3):
+		brcmf_err("Need to use brcmf_get_netinfo_v3_array\n");
+		break;
+	}
+
+	return netinfo;
+}
+
+static struct brcmf_pno_net_info_v3_le *
+brcmf_get_netinfo_v3_array(void *pfn_v3_data)
+{
+	struct brcmf_pno_scanresults_v3_le *pfn_v3 =
+		(struct brcmf_pno_scanresults_v3_le *)pfn_v3_data;
+
+	return (struct brcmf_pno_net_info_v3_le *)(pfn_v3 + 1);
+}
+
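+/* Build a bitmap of the scheduled-scan requests (buckets) whose match sets
+ * the netinfo entry at index idx satisfies, by SSID or BSSID.
+ */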
+static u32 brcmf_pno_get_bucket_map(void *data, int idx, struct brcmf_pno_info *pi)
+{
+	struct brcmf_pno_net_info_le *netinfo_start =
+		brcmf_get_netinfo_array(data);
+	struct brcmf_pno_net_info_le *ni = &netinfo_start[idx];
+	struct cfg80211_sched_scan_request *req;
+	struct cfg80211_match_set *ms;
+	u32 bucket_map = 0;
+	int i, j;
+
+	mutex_lock(&pi->req_lock);
+	for (i = 0; i < pi->n_reqs; i++) {
+		req = pi->reqs[i];
+
+		if (!req->n_match_sets)
+			continue;
+		for (j = 0; j < req->n_match_sets; j++) {
+			ms = &req->match_sets[j];
+			if (ms->ssid.ssid_len == ni->SSID_len &&
+			    !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
+				bucket_map |= BIT(i);
+				break;
+			}
+			if (is_valid_ether_addr(ms->bssid) &&
+			    !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
+				bucket_map |= BIT(i);
+				break;
+			}
+		}
+	}
+	mutex_unlock(&pi->req_lock);
+	return bucket_map;
+}
+
+static u32 brcmf_pno_get_bucket_map_v3(void *data, int idx, struct brcmf_pno_info *pi)
 {
+	struct brcmf_pno_net_info_v3_le *netinfo_v3_start =
+		brcmf_get_netinfo_v3_array(data);
+	struct brcmf_pno_net_info_v3_le *ni = &netinfo_v3_start[idx];
 	struct cfg80211_sched_scan_request *req;
 	struct cfg80211_match_set *ms;
 	u32 bucket_map = 0;
@@ -593,3 +728,148 @@ u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
 	mutex_unlock(&pi->req_lock);
 	return bucket_map;
 }
+
+static u32 brcmf_pno_min_data_len(void)
+{
+	return sizeof(struct brcmf_pno_scanresults_le) +
+	       sizeof(struct brcmf_pno_net_info_le);
+}
+
+static u32 brcmf_pno_min_data_len_v3(void)
+{
+	return sizeof(struct brcmf_pno_scanresults_v3_le) +
+	       sizeof(struct brcmf_pno_net_info_v3_le);
+}
+
+static int brcmf_pno_validate_pfn_results_v3(void *data, u32 eventlen)
+{
+	struct brcmf_pno_scanresults_v3_le *scanresult =
+		(struct brcmf_pno_scanresults_v3_le *)data;
+	struct brcmf_pno_net_info_v3_le *netinfo_v3_start =
+		brcmf_get_netinfo_v3_array(scanresult);
+	u32 datalen;
+
+	if (!netinfo_v3_start) {
+		brcmf_err("did not get netinfo_v3 data\n");
+		return -EINVAL;
+	}
+	datalen = eventlen - ((void *)netinfo_v3_start - (void *)data);
+	if (datalen < le32_to_cpu(scanresult->count) * sizeof(struct brcmf_pno_net_info_v3_le)) {
+		brcmf_err("insufficient event data\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int brcmf_pno_validate_pfn_results(void *data, u32 eventlen)
+{
+	struct brcmf_pno_scanresults_le *scanresult =
+		(struct brcmf_pno_scanresults_le *)data;
+	struct brcmf_pno_net_info_le *netinfo_start =
+		brcmf_get_netinfo_array(scanresult);
+	u32 datalen;
+
+	if (!netinfo_start) {
+		brcmf_err("did not get netinfo data\n");
+		return -EINVAL;
+	}
+	datalen = eventlen - ((void *)netinfo_start - (void *)data);
+	if (datalen < le32_to_cpu(scanresult->count) * sizeof(struct brcmf_pno_net_info_le)) {
+		brcmf_err("insufficient event data\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int brcmf_pno_get_result_info(void *data, int result_idx,
+				     u8 (*ssid)[IEEE80211_MAX_SSID_LEN],
+				     u8 *ssid_len, u8 *channel,
+				     enum nl80211_band *band)
+{
+	struct brcmf_pno_scanresults_le *scanresult =
+		(struct brcmf_pno_scanresults_le *)data;
+	struct brcmf_pno_net_info_le *netinfo_start =
+		brcmf_get_netinfo_array(scanresult);
+	struct brcmf_pno_net_info_le *netinfo = &netinfo_start[result_idx];
+
+	*channel = netinfo->channel;
+	*band = netinfo->channel <= CH_MAX_2G_CHANNEL ? NL80211_BAND_2GHZ :
+							NL80211_BAND_5GHZ;
+	*ssid_len = netinfo->SSID_len;
+	if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+		*ssid_len = IEEE80211_MAX_SSID_LEN;
+	memcpy(ssid, netinfo->SSID, *ssid_len);
+
+	return 0;
+}
+
+static int brcmf_pno_get_result_info_v3(void *data, int result_idx,
+					u8 (*ssid)[IEEE80211_MAX_SSID_LEN],
+					u8 *ssid_len, u8 *channel,
+					enum nl80211_band *band)
+{
+	struct brcmf_pno_scanresults_v3_le *scanresult =
+		(struct brcmf_pno_scanresults_v3_le *)data;
+	struct brcmf_pno_net_info_v3_le *netinfo_v3_start =
+		brcmf_get_netinfo_v3_array(scanresult);
+	struct brcmf_pno_net_info_v3_le *netinfo_v3 =
+		&netinfo_v3_start[result_idx];
+
+	*channel = CHSPEC_CHANNEL(netinfo_v3->chanspec);
+	*band = fwil_band_to_nl80211(CHSPEC_BAND(netinfo_v3->chanspec));
+	*ssid_len = netinfo_v3->SSID_len;
+	if (netinfo_v3->SSID_len > IEEE80211_MAX_SSID_LEN)
+		*ssid_len = IEEE80211_MAX_SSID_LEN;
+	memcpy(ssid, netinfo_v3->SSID, *ssid_len);
+
+	return 0;
+}
+
+/* The count and status fields are in the same place for v1/2/3 */
+static u32 brcmf_pno_get_result_count_v123(void *data)
+{
+	struct brcmf_pno_scanresults_le *results =
+		(struct brcmf_pno_scanresults_le *)data;
+
+	return le32_to_cpu(results->count);
+}
+
+static u32 brcmf_pno_get_result_status_v123(void *data)
+{
+	struct brcmf_pno_scanresults_le *results =
+		(struct brcmf_pno_scanresults_le *)data;
+
+	return le32_to_cpu(results->status);
+}
+
+int brcmf_pno_setup_for_version(struct brcmf_pub *drvr, u8 vers)
+{
+	/* The first version supported by this driver was version 2.
+	 * The v2 functions handle version 1 structures if handed to them,
+	 * but the config was always set to interface version 2.
+	 */
+	switch (vers) {
+	case BRCMF_PNO_VERSION_2: {
+		drvr->pno_handler.version = BRCMF_PNO_VERSION_2;
+		drvr->pno_handler.pno_config = brcmf_pno_config_v2;
+		drvr->pno_handler.get_result_count = brcmf_pno_get_result_count_v123;
+		drvr->pno_handler.get_result_status = brcmf_pno_get_result_status_v123;
+		drvr->pno_handler.get_bucket_map = brcmf_pno_get_bucket_map;
+		drvr->pno_handler.get_min_data_len = brcmf_pno_min_data_len;
+		drvr->pno_handler.get_result_info = brcmf_pno_get_result_info;
+		drvr->pno_handler.validate_pfn_results =
+			brcmf_pno_validate_pfn_results;
+		break;
+	}
+	case BRCMF_PNO_VERSION_3: {
+		drvr->pno_handler.version = BRCMF_PNO_VERSION_3;
+		drvr->pno_handler.pno_config = brcmf_pno_config_v3;
+		drvr->pno_handler.get_result_count = brcmf_pno_get_result_count_v123;
+		drvr->pno_handler.get_result_status = brcmf_pno_get_result_status_v123;
+		drvr->pno_handler.get_bucket_map = brcmf_pno_get_bucket_map_v3;
+		drvr->pno_handler.get_min_data_len = brcmf_pno_min_data_len_v3;
+		drvr->pno_handler.get_result_info = brcmf_pno_get_result_info_v3;
+		drvr->pno_handler.validate_pfn_results =
+			brcmf_pno_validate_pfn_results_v3;
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
index 25d406019ac340..0163c762f5385a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
@@ -61,12 +61,12 @@ void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg);
 u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
 
 /**
- * brcmf_pno_get_bucket_map - determine bucket map for given netinfo.
+ * brcmf_pno_setup_for_version - set up the PNO handler for the interface
+ *	version supported by the chip.
  *
- * @pi: pno instance used.
- * @netinfo: netinfo to compare with bucket configuration.
+ * @drvr: driver instance to set up.
+ * @vers: interface version to use.
  */
-u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
-			     struct brcmf_pno_net_info_le *netinfo);
+int brcmf_pno_setup_for_version(struct brcmf_pub *drvr, u8 vers);
 
 #endif /* _BRCMF_PNO_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ratespec.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ratespec.h
new file mode 100644
index 00000000000000..37e722daab14d4
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ratespec.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+
+#ifndef BRCMFMAC_RATESPEC_H
+#define BRCMFMAC_RATESPEC_H
+/* Rate spec. definitions */
+/* for BRCMF_RSPEC_ENCODING field >= BRCMF_RSPEC_ENCODING_HE, backward compatible */
+
+/**< Legacy rate or MCS or MCS + NSS */
+#define BRCMF_RSPEC_RATE_MASK 0x000000FFu
+/**< Tx chain expansion beyond Nsts */
+#define BRCMF_RSPEC_TXEXP_MASK 0x00000300u
+#define BRCMF_RSPEC_TXEXP_SHIFT 8u
+/* EHT GI indices */
+#define BRCMF_RSPEC_EHT_GI_MASK 0x00000C00u
+#define BRCMF_RSPEC_EHT_GI_SHIFT 10u
+/* HE GI indices */
+#define BRCMF_RSPEC_HE_GI_MASK 0x00000C00u
+#define BRCMF_RSPEC_HE_GI_SHIFT 10u
+/**< Range extension mask */
+#define BRCMF_RSPEC_ER_MASK 0x0000C000u
+#define BRCMF_RSPEC_ER_SHIFT 14u
+/**< Range extension tone config */
+#define BRCMF_RSPEC_ER_TONE_MASK 0x00004000u
+#define BRCMF_RSPEC_ER_TONE_SHIFT 14u
+/**< Range extension enable */
+#define BRCMF_RSPEC_ER_ENAB_MASK 0x00008000u
+#define BRCMF_RSPEC_ER_ENAB_SHIFT 15u
+/**< Bandwidth */
+#define BRCMF_RSPEC_BW_MASK 0x00070000u
+#define BRCMF_RSPEC_BW_SHIFT 16u
+/**< Dual Carrier Modulation */
+#define BRCMF_RSPEC_DCM 0x00080000u
+#define BRCMF_RSPEC_DCM_SHIFT 19u
+/**< STBC expansion, Nsts = 2 * Nss */
+#define BRCMF_RSPEC_STBC 0x00100000u
+#define BRCMF_RSPEC_TXBF 0x00200000u
+#define BRCMF_RSPEC_LDPC 0x00400000u
+/* HT/VHT SGI indication */
+#define BRCMF_RSPEC_SGI 0x00800000u
+/**< DSSS short preamble - Encoding 0 */
+#define BRCMF_RSPEC_SHORT_PREAMBLE 0x00800000u
+/**< Encoding of RSPEC_RATE field */
+#define BRCMF_RSPEC_ENCODING_MASK 0x07000000u
+#define BRCMF_RSPEC_ENCODING_SHIFT 24u
+#define BRCMF_RSPEC_OVERRIDE_RATE 0x40000000u /**< override rate only */
+#define BRCMF_RSPEC_OVERRIDE_MODE 0x80000000u /**< override both rate & mode */
+
+/* ======== RSPEC_EHT_GI|RSPEC_SGI fields for EHT ======== */
+/* 11be Draft 0.4 Table 36-35:Common field for non-OFDMA transmission.
+ * Table 36-32 Common field for OFDMA transmission
+ */
+#define BRCMF_RSPEC_EHT_LTF_GI(rspec) \
+	(((rspec) & BRCMF_RSPEC_EHT_GI_MASK) >> BRCMF_RSPEC_EHT_GI_SHIFT)
+#define BRCMF_RSPEC_EHT_2x_LTF_GI_0_8us (0x0u)
+#define BRCMF_RSPEC_EHT_2x_LTF_GI_1_6us (0x1u)
+#define BRCMF_RSPEC_EHT_4x_LTF_GI_0_8us (0x2u)
+#define BRCMF_RSPEC_EHT_4x_LTF_GI_3_2us (0x3u)
+#define WL_EHT_GI_TO_RSPEC(gi)                             \
+	((u32)(((gi) << BRCMF_RSPEC_EHT_GI_SHIFT) & \
+		      BRCMF_RSPEC_EHT_GI_MASK))
+#define WL_EHT_GI_TO_RSPEC_SET(rspec, gi) \
+	(((rspec) & (~BRCMF_RSPEC_EHT_GI_MASK)) | WL_EHT_GI_TO_RSPEC(gi))
+
+/* Macros for EHT LTF and GI */
+#define EHT_IS_2X_LTF(gi)                             \
+	(((gi) == BRCMF_RSPEC_EHT_2x_LTF_GI_0_8us) || \
+	 ((gi) == BRCMF_RSPEC_EHT_2x_LTF_GI_1_6us))
+#define EHT_IS_4X_LTF(gi)                             \
+	(((gi) == BRCMF_RSPEC_EHT_4x_LTF_GI_0_8us) || \
+	 ((gi) == BRCMF_RSPEC_EHT_4x_LTF_GI_3_2us))
+
+#define EHT_IS_GI_0_8us(gi)                           \
+	(((gi) == BRCMF_RSPEC_EHT_2x_LTF_GI_0_8us) || \
+	 ((gi) == BRCMF_RSPEC_EHT_4x_LTF_GI_0_8us))
+#define EHT_IS_GI_1_6us(gi) ((gi) == BRCMF_RSPEC_EHT_2x_LTF_GI_1_6us)
+#define EHT_IS_GI_3_2us(gi) ((gi) == BRCMF_RSPEC_EHT_4x_LTF_GI_3_2us)
+
+/* ======== RSPEC_HE_GI|RSPEC_SGI fields for HE ======== */
+
+/* GI for HE */
+#define BRCMF_RSPEC_HE_LTF_GI(rspec) \
+	(((rspec) & BRCMF_RSPEC_HE_GI_MASK) >> BRCMF_RSPEC_HE_GI_SHIFT)
+#define BRCMF_RSPEC_HE_1x_LTF_GI_0_8us (0x0u)
+#define BRCMF_RSPEC_HE_2x_LTF_GI_0_8us (0x1u)
+#define BRCMF_RSPEC_HE_2x_LTF_GI_1_6us (0x2u)
+#define BRCMF_RSPEC_HE_4x_LTF_GI_3_2us (0x3u)
+#define BRCMF_RSPEC_ISHEGI(rspec) \
+	(BRCMF_RSPEC_HE_LTF_GI(rspec) > BRCMF_RSPEC_HE_1x_LTF_GI_0_8us)
+#define HE_GI_TO_RSPEC(gi) \
+	(((u32)(gi) << BRCMF_RSPEC_HE_GI_SHIFT) & BRCMF_RSPEC_HE_GI_MASK)
+#define HE_GI_TO_RSPEC_SET(rspec, gi) \
+	(((rspec) & (~BRCMF_RSPEC_HE_GI_MASK)) | HE_GI_TO_RSPEC(gi))
+
+/* Macros for HE LTF and GI */
+#define HE_IS_1X_LTF(gi) ((gi) == BRCMF_RSPEC_HE_1x_LTF_GI_0_8us)
+#define HE_IS_2X_LTF(gi)                             \
+	(((gi) == BRCMF_RSPEC_HE_2x_LTF_GI_0_8us) || \
+	 ((gi) == BRCMF_RSPEC_HE_2x_LTF_GI_1_6us))
+#define HE_IS_4X_LTF(gi) ((gi) == BRCMF_RSPEC_HE_4x_LTF_GI_3_2us)
+
+#define HE_IS_GI_0_8us(gi)                           \
+	(((gi) == BRCMF_RSPEC_HE_1x_LTF_GI_0_8us) || \
+	 ((gi) == BRCMF_RSPEC_HE_2x_LTF_GI_0_8us))
+#define HE_IS_GI_1_6us(gi) ((gi) == BRCMF_RSPEC_HE_2x_LTF_GI_1_6us)
+#define HE_IS_GI_3_2us(gi) ((gi) == BRCMF_RSPEC_HE_4x_LTF_GI_3_2us)
+
+/* RSPEC Macros for extracting and using HE-ER and DCM */
+#define BRCMF_RSPEC_HE_DCM(rspec) \
+	(((rspec) & BRCMF_RSPEC_DCM) >> BRCMF_RSPEC_DCM_SHIFT)
+#define BRCMF_RSPEC_HE_ER(rspec) \
+	(((rspec) & BRCMF_RSPEC_ER_MASK) >> BRCMF_RSPEC_ER_SHIFT)
+#define BRCMF_RSPEC_HE_ER_ENAB(rspec) \
+	(((rspec) & BRCMF_RSPEC_ER_ENAB_MASK) >> BRCMF_RSPEC_ER_ENAB_SHIFT)
+#define BRCMF_RSPEC_HE_ER_TONE(rspec) \
+	(((rspec) & BRCMF_RSPEC_ER_TONE_MASK) >> BRCMF_RSPEC_ER_TONE_SHIFT)
+/* ======== RSPEC_RATE field ======== */
+
+/* Encoding 0 - legacy rate */
+/* DSSS, CCK, and OFDM rates in [500kbps] units */
+#define BRCMF_RSPEC_LEGACY_RATE_MASK 0x0000007F
+#define WLC_RATE_1M 2
+#define WLC_RATE_2M 4
+#define WLC_RATE_5M5 11
+#define WLC_RATE_11M 22
+#define WLC_RATE_6M 12
+#define WLC_RATE_9M 18
+#define WLC_RATE_12M 24
+#define WLC_RATE_18M 36
+#define WLC_RATE_24M 48
+#define WLC_RATE_36M 72
+#define WLC_RATE_48M 96
+#define WLC_RATE_54M 108
+
+/* Encoding 1 - HT MCS */
+/**< HT MCS value mask in rspec */
+#define BRCMF_RSPEC_HT_MCS_MASK 0x0000007F
+
+/* Encoding >= 2 */
+/* NSS & MCS values mask in rspec */
+#define BRCMF_RSPEC_NSS_MCS_MASK 0x000000FF
+/* mimo MCS value mask in rspec */
+#define BRCMF_RSPEC_MCS_MASK 0x0000000F
+/* mimo NSS value mask in rspec */
+#define BRCMF_RSPEC_NSS_MASK 0x000000F0
+/* mimo NSS value shift in rspec */
+#define BRCMF_RSPEC_NSS_SHIFT 4
+
+/* Encoding 2 - VHT MCS + NSS */
+/**< VHT MCS value mask in rspec */
+#define BRCMF_RSPEC_VHT_MCS_MASK BRCMF_RSPEC_MCS_MASK
+/**< VHT Nss value mask in rspec */
+#define BRCMF_RSPEC_VHT_NSS_MASK BRCMF_RSPEC_NSS_MASK
+/**< VHT Nss value shift in rspec */
+#define BRCMF_RSPEC_VHT_NSS_SHIFT BRCMF_RSPEC_NSS_SHIFT
+
+/* Encoding 3 - HE MCS + NSS */
+/**< HE MCS value mask in rspec */
+#define BRCMF_RSPEC_HE_MCS_MASK BRCMF_RSPEC_MCS_MASK
+/**< HE Nss value mask in rspec */
+#define BRCMF_RSPEC_HE_NSS_MASK BRCMF_RSPEC_NSS_MASK
+/**< HE Nss value shift in rspec */
+#define BRCMF_RSPEC_HE_NSS_SHIFT BRCMF_RSPEC_NSS_SHIFT
+
+#define BRCMF_RSPEC_HE_NSS_UNSPECIFIED 0xf
+
+/* Encoding 4 - EHT MCS + NSS */
+/**< EHT MCS value mask in rspec */
+#define BRCMF_RSPEC_EHT_MCS_MASK BRCMF_RSPEC_MCS_MASK
+/**< EHT Nss value mask in rspec */
+#define BRCMF_RSPEC_EHT_NSS_MASK BRCMF_RSPEC_NSS_MASK
+/**< EHT Nss value shift in rspec */
+#define BRCMF_RSPEC_EHT_NSS_SHIFT BRCMF_RSPEC_NSS_SHIFT
+
+/* ======== RSPEC_BW field ======== */
+
+#define BRCMF_RSPEC_BW_UNSPECIFIED 0u
+#define BRCMF_RSPEC_BW_20MHZ 0x00010000u
+#define BRCMF_RSPEC_BW_40MHZ 0x00020000u
+#define BRCMF_RSPEC_BW_80MHZ 0x00030000u
+#define BRCMF_RSPEC_BW_160MHZ 0x00040000u
+#define BRCMF_RSPEC_BW_320MHZ 0x00060000u
+
+/* ======== RSPEC_ENCODING field ======== */
+
+/* NOTE: Assuming the rate field is always NSS+MCS starting from VHT encoding!
+ *       Modify/fix RSPEC_ISNSSMCS() macro if above condition changes any time.
+ */
+/**< Legacy rate is stored in RSPEC_RATE */
+#define BRCMF_RSPEC_ENCODE_RATE 0x00000000u
+/**< HT MCS is stored in RSPEC_RATE */
+#define BRCMF_RSPEC_ENCODE_HT 0x01000000u
+/**< VHT MCS and NSS are stored in RSPEC_RATE */
+#define BRCMF_RSPEC_ENCODE_VHT 0x02000000u
+/**< HE MCS and NSS are stored in RSPEC_RATE */
+#define BRCMF_RSPEC_ENCODE_HE 0x03000000u
+/**< EHT MCS and NSS are stored in RSPEC_RATE */
+#define BRCMF_RSPEC_ENCODE_EHT 0x04000000u
+
+/**
+ * ===============================
+ * Handy macros to parse rate spec
+ * ===============================
+ */
+#define BRCMF_RSPEC_BW(rspec) ((rspec) & BRCMF_RSPEC_BW_MASK)
+#define BRCMF_RSPEC_IS20MHZ(rspec) (BRCMF_RSPEC_BW(rspec) == BRCMF_RSPEC_BW_20MHZ)
+#define BRCMF_RSPEC_IS40MHZ(rspec) (BRCMF_RSPEC_BW(rspec) == BRCMF_RSPEC_BW_40MHZ)
+#define BRCMF_RSPEC_IS80MHZ(rspec) (BRCMF_RSPEC_BW(rspec) == BRCMF_RSPEC_BW_80MHZ)
+#define BRCMF_RSPEC_IS160MHZ(rspec) (BRCMF_RSPEC_BW(rspec) == BRCMF_RSPEC_BW_160MHZ)
+#if defined(WL_BW320MHZ)
+#define BRCMF_RSPEC_IS320MHZ(rspec) (BRCMF_RSPEC_BW(rspec) == BRCMF_RSPEC_BW_320MHZ)
+#else
+#define BRCMF_RSPEC_IS320MHZ(rspec) (false)
+#endif /* WL_BW320MHZ */
+
+#define BRCMF_RSPEC_BW_GE(rspec, rspec_bw) (BRCMF_RSPEC_BW(rspec) >= (rspec_bw))
+#define BRCMF_RSPEC_BW_LE(rspec, rspec_bw) (BRCMF_RSPEC_BW(rspec) <= (rspec_bw))
+#define BRCMF_RSPEC_BW_GT(rspec, rspec_bw) (!BRCMF_RSPEC_BW_LE(rspec, rspec_bw))
+#define BRCMF_RSPEC_BW_LT(rspec, rspec_bw) (!BRCMF_RSPEC_BW_GE(rspec, rspec_bw))
+
+#define BRCMF_RSPEC_ISSGI(rspec) (((rspec) & BRCMF_RSPEC_SGI) != 0)
+#define BRCMF_RSPEC_ISLDPC(rspec) (((rspec) & BRCMF_RSPEC_LDPC) != 0)
+#define BRCMF_RSPEC_ISSTBC(rspec) (((rspec) & BRCMF_RSPEC_STBC) != 0)
+#define BRCMF_RSPEC_ISTXBF(rspec) (((rspec) & BRCMF_RSPEC_TXBF) != 0)
+
+#define BRCMF_RSPEC_TXEXP(rspec) \
+	(((rspec) & BRCMF_RSPEC_TXEXP_MASK) >> BRCMF_RSPEC_TXEXP_SHIFT)
+
+#define BRCMF_RSPEC_ENCODE(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) >> BRCMF_RSPEC_ENCODING_SHIFT)
+#define BRCMF_RSPEC_ISLEGACY(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) == BRCMF_RSPEC_ENCODE_RATE)
+
+#define BRCMF_RSPEC_ISHT(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) == BRCMF_RSPEC_ENCODE_HT)
+#define BRCMF_RSPEC_ISVHT(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) == BRCMF_RSPEC_ENCODE_VHT)
+#define BRCMF_RSPEC_ISHE(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) == BRCMF_RSPEC_ENCODE_HE)
+#define BRCMF_RSPEC_ISEHT(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) == BRCMF_RSPEC_ENCODE_EHT)
+
+/* fast check if rate field is NSS+MCS format (starting from VHT ratespec) */
+#define BRCMF_RSPEC_ISVHTEXT(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) >= BRCMF_RSPEC_ENCODE_VHT)
+/* fast check if rate field is NSS+MCS format (starting from HE ratespec) */
+#define BRCMF_RSPEC_ISHEEXT(rspec) \
+	(((rspec) & BRCMF_RSPEC_ENCODING_MASK) >= BRCMF_RSPEC_ENCODE_HE)
+
+#endif /* BRCMFMAC_RATESPEC_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/scan_param.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/scan_param.c
new file mode 100644
index 00000000000000..4f634509d25256
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/scan_param.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+#include <linux/gcd.h>
+#include <net/cfg80211.h>
+
+#include "core.h"
+#include "debug.h"
+#include "fwil_types.h"
+#include "cfg80211.h"
+#include "scan_param.h"
+
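+/* Fill the fields shared by all scan parameter versions with the firmware
+ * wildcard defaults: broadcast BSSID, any BSS type, and -1 (auto) for the
+ * probe count and dwell times.
+ */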
+static void brcmf_scan_param_set_defaults(u8 (*bssid)[ETH_ALEN], s8 *bss_type,
+					  __le32 *channel_num, __le32 *nprobes,
+					  __le32 *active_time,
+					  __le32 *passive_time,
+					  __le32 *home_time)
+{
+	eth_broadcast_addr(*bssid);
+	*bss_type = DOT11_BSSTYPE_ANY;
+	*channel_num = 0;
+	*nprobes = cpu_to_le32(-1);
+	*active_time = cpu_to_le32(-1);
+	*passive_time = cpu_to_le32(-1);
+	*home_time = cpu_to_le32(-1);
+}
+
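+/* Convert the cfg80211 channel list into firmware chanspecs, stored as
+ * little-endian u16 values in the scan parameter channel list.
+ */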
+static void brcmf_scan_param_copy_chanspecs(
+	struct brcmf_cfg80211_info *cfg, __le16 (*dest_channels)[],
+	struct ieee80211_channel **in_channels, u32 n_channels)
+{
+	int i;
+	for (i = 0; i < n_channels; i++) {
+		u32 chanspec =
+			channel_to_chanspec(&cfg->d11inf, in_channels[i]);
+		brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n",
+			  in_channels[i]->hw_value, chanspec);
+		(*dest_channels)[i] = cpu_to_le16(chanspec);
+	}
+}
+
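+/* Copy the requested SSIDs into the scan parameters as brcmf_ssid_le
+ * entries; a zero-length SSID denotes a broadcast scan.
+ */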
+static void brcmf_scan_param_copy_ssids(char *dest_ssids,
+					struct cfg80211_ssid *in_ssids,
+					u32 n_ssids)
+{
+	int i;
+	for (i = 0; i < n_ssids; i++) {
+		struct brcmf_ssid_le ssid_le;
+		memset(&ssid_le, 0, sizeof(ssid_le));
+		ssid_le.SSID_len = cpu_to_le32(in_ssids[i].ssid_len);
+		memcpy(ssid_le.SSID, in_ssids[i].ssid, in_ssids[i].ssid_len);
+		if (!ssid_le.SSID_len)
+			brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
+		else
+			brcmf_dbg(SCAN, "%d: scan for  %.32s size=%d\n", i,
+				  ssid_le.SSID, ssid_le.SSID_len);
+		memcpy(dest_ssids, &ssid_le, sizeof(ssid_le));
+		dest_ssids += sizeof(ssid_le);
+	}
+}
+
+/* Some scan parameter structures end with an array of SSIDs. In that case
+ * the channel list entries form the lower half of u32 pairs, with the upper
+ * half holding an SSID number, and the SSID array itself follows after.
+ */
+static u32
+brcmf_scan_param_tail_size(const struct cfg80211_scan_request *request,
+			   u32 params_size)
+{
+	if (request != NULL) {
+		/* Allocate space for populating ssid upper half in struct */
+		params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
+		/* Allocate space for populating ssids in struct */
+		params_size += sizeof(struct brcmf_ssid_le) * request->n_ssids;
+	} else {
+		params_size += sizeof(u16);
+	}
+	return params_size;
+}
+
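+/* Translate nl80211 scan flags into the firmware scan flags that get OR'ed
+ * into the scan_type field.
+ */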
+static u32 brcmf_nl80211_scan_flags_to_scan_flags(u32 nl80211_flags)
+{
+	u32 scan_flags = 0;
+	if (nl80211_flags & NL80211_SCAN_FLAG_LOW_SPAN) {
+		scan_flags |= BRCMF_SCANFLAGS_LOW_SPAN;
+		brcmf_dbg(SCAN, "requested low span scan\n");
+	}
+	if (nl80211_flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) {
+		scan_flags |= BRCMF_SCANFLAGS_HIGH_ACCURACY;
+		brcmf_dbg(SCAN, "requested high accuracy scan\n");
+	}
+	if (nl80211_flags & NL80211_SCAN_FLAG_LOW_POWER) {
+		scan_flags |= BRCMF_SCANFLAGS_LOW_POWER;
+		brcmf_dbg(SCAN, "requested low power scan\n");
+	}
+	if (nl80211_flags & NL80211_SCAN_FLAG_LOW_PRIORITY) {
+		scan_flags |= BRCMF_SCANFLAGS_LOW_PRIO;
+		brcmf_dbg(SCAN, "requested low priority scan\n");
+	}
+	return scan_flags;
+}
+
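+/* Build a v1 brcmf_scan_params_le structure for the given request, or a
+ * minimal abort structure (single channel -1) when request is NULL. The
+ * effective size is returned through struct_size.
+ */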
+static void *
+brcmf_scan_param_get_prepped_struct_v1(struct brcmf_cfg80211_info *cfg,
+				       u32 *struct_size,
+				       struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u32 params_size = sizeof(struct brcmf_scan_params_le);
+	u32 length;
+	struct brcmf_scan_params_le *params_le = NULL;
+	u8 scan_type = BRCMF_SCANTYPE_ACTIVE;
+
+	length = offsetof(struct brcmf_scan_params_le, channel_list);
+	params_size = brcmf_scan_param_tail_size(request, params_size);
+	params_le = kzalloc(params_size, GFP_KERNEL);
+	if (!params_le) {
+		bphy_err(cfg, "Could not allocate scan params\n");
+		return NULL;
+	}
+	brcmf_scan_param_set_defaults(&params_le->bssid,
+		&params_le->bss_type, &params_le->channel_num,
+		&params_le->nprobes, &params_le->active_time,
+		&params_le->passive_time, &params_le->home_time);
+
+	/* Scan abort */
+	if (!request) {
+		length += sizeof(u16);
+		params_le->channel_num = cpu_to_le32(1);
+		params_le->channel_list[0] = cpu_to_le16(-1);
+		goto done;
+	}
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+		  n_channels);
+	if (n_channels > 0) {
+		length += roundup(sizeof(u16) * n_channels, sizeof(u32));
+		brcmf_scan_param_copy_chanspecs(cfg, &params_le->channel_list,
+						request->channels, n_channels);
+	} else {
+		brcmf_dbg(SCAN, "Scanning all channels\n");
+	}
+
+	/* Copy ssid array if applicable */
+	brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
+	if (n_ssids > 0) {
+		s32 offset;
+		char *ptr;
+
+		offset =
+			offsetof(struct brcmf_scan_params_le, channel_list) +
+			n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		length += sizeof(struct brcmf_ssid_le) * n_ssids;
+		ptr = (char *)params_le + offset;
+		brcmf_scan_param_copy_ssids(ptr, request->ssids, n_ssids);
+	} else {
+		brcmf_dbg(SCAN, "Performing passive scan\n");
+		scan_type = BRCMF_SCANTYPE_PASSIVE;
+	}
+	scan_type |= brcmf_nl80211_scan_flags_to_scan_flags(request->flags);
+	params_le->scan_type = scan_type;
+	/* Adding mask to channel numbers */
+	params_le->channel_num =
+		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+			    (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+done:
+	*struct_size = length;
+	return params_le;
+}
+
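+/* v2 variant: same preparation as v1, but with explicit version and length
+ * fields and a 32-bit little-endian scan_type.
+ */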
+static void *
+brcmf_scan_param_get_prepped_struct_v2(struct brcmf_cfg80211_info *cfg,
+				       u32 *struct_size,
+				       struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u32 params_size = sizeof(struct brcmf_scan_params_v2_le);
+	u32 length;
+	struct brcmf_scan_params_v2_le *params_le = NULL;
+	u32 scan_type = BRCMF_SCANTYPE_ACTIVE;
+
+	length = offsetof(struct brcmf_scan_params_v2_le, channel_list);
+	params_size = brcmf_scan_param_tail_size(request, params_size);
+	params_le = kzalloc(params_size, GFP_KERNEL);
+	if (!params_le) {
+		bphy_err(cfg, "Could not allocate scan params\n");
+		return NULL;
+	}
+	params_le->version = cpu_to_le16(BRCMF_SCAN_PARAMS_VERSION_V2);
+	brcmf_scan_param_set_defaults(&params_le->bssid,
+		&params_le->bss_type, &params_le->channel_num,
+		&params_le->nprobes, &params_le->active_time,
+		&params_le->passive_time, &params_le->home_time);
+
+	/* Scan abort */
+	if (!request) {
+		length += sizeof(u16);
+		params_le->channel_num = cpu_to_le32(1);
+		params_le->channel_list[0] = cpu_to_le16(-1);
+		params_le->length = cpu_to_le16(length);
+		goto done;
+	}
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+		  n_channels);
+	if (n_channels > 0) {
+		length += roundup(sizeof(u16) * n_channels, sizeof(u32));
+		brcmf_scan_param_copy_chanspecs(cfg, &params_le->channel_list,
+						request->channels, n_channels);
+	} else {
+		brcmf_dbg(SCAN, "Scanning all channels\n");
+	}
+
+	/* Copy ssid array if applicable */
+	brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
+	if (n_ssids > 0) {
+		s32 offset;
+		char *ptr;
+
+		offset =
+			offsetof(struct brcmf_scan_params_v2_le, channel_list) +
+			n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		length += sizeof(struct brcmf_ssid_le) * n_ssids;
+		ptr = (char *)params_le + offset;
+		brcmf_scan_param_copy_ssids(ptr, request->ssids, n_ssids);
+
+	} else {
+		brcmf_dbg(SCAN, "Performing passive scan\n");
+		scan_type = BRCMF_SCANTYPE_PASSIVE;
+	}
+	scan_type |= brcmf_nl80211_scan_flags_to_scan_flags(request->flags);
+	params_le->scan_type = cpu_to_le32(scan_type);
+	params_le->length = cpu_to_le16(length);
+	/* Adding mask to channel numbers */
+	params_le->channel_num =
+		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+			    (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+done:
+	*struct_size = length;
+	return params_le;
+}
+
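+/* v3 variant: adds the ssid_type field, used to request inclusion of RNR
+ * (Reduced Neighbor Report) results for colocated 6 GHz APs.
+ */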
+static void *
+brcmf_scan_param_get_prepped_struct_v3(struct brcmf_cfg80211_info *cfg,
+				       u32 *struct_size,
+				       struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u32 params_size = sizeof(struct brcmf_scan_params_v3_le);
+	u32 length;
+	struct brcmf_scan_params_v3_le *params_le = NULL;
+	u32 scan_type = BRCMF_SCANTYPE_ACTIVE;
+
+	length = offsetof(struct brcmf_scan_params_v3_le, channel_list);
+	params_size = brcmf_scan_param_tail_size(request, params_size);
+	params_le = kzalloc(params_size, GFP_KERNEL);
+	if (!params_le) {
+		bphy_err(cfg, "Could not allocate scan params\n");
+		return NULL;
+	}
+
+	params_le->version = cpu_to_le16(BRCMF_SCAN_PARAMS_VERSION_V3);
+	params_le->ssid_type = 0;
+	brcmf_scan_param_set_defaults(&params_le->bssid,
+		&params_le->bss_type, &params_le->channel_num,
+		&params_le->nprobes, &params_le->active_time,
+		&params_le->passive_time, &params_le->home_time);
+
+	/* Scan abort */
+	if (!request) {
+		length += sizeof(u16);
+		params_le->channel_num = cpu_to_le32(1);
+		params_le->channel_list[0] = cpu_to_le16(-1);
+		params_le->length = cpu_to_le16(length);
+		goto done;
+	}
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+		  n_channels);
+	if (n_channels > 0) {
+		length += roundup(sizeof(u16) * n_channels, sizeof(u32));
+		brcmf_scan_param_copy_chanspecs(cfg, &params_le->channel_list,
+						request->channels, n_channels);
+
+	} else {
+		brcmf_dbg(SCAN, "Scanning all channels\n");
+	}
+
+	/* Copy ssid array if applicable */
+	brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
+	if (n_ssids > 0) {
+		s32 offset;
+		char *ptr;
+
+		offset =
+			offsetof(struct brcmf_scan_params_v3_le, channel_list) +
+			n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		length += sizeof(struct brcmf_ssid_le) * n_ssids;
+		ptr = (char *)params_le + offset;
+		brcmf_scan_param_copy_ssids(ptr, request->ssids, n_ssids);
+
+	} else {
+		brcmf_dbg(SCAN, "Performing passive scan\n");
+		scan_type = BRCMF_SCANTYPE_PASSIVE;
+	}
+	scan_type |= brcmf_nl80211_scan_flags_to_scan_flags(request->flags);
+	params_le->scan_type = cpu_to_le32(scan_type);
+	params_le->length = cpu_to_le16(length);
+	/* Adding mask to channel numbers */
+	params_le->channel_num =
+		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+			    (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+
+	/* Include RNR results if requested */
+	if (request->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+		params_le->ssid_type |= BRCMF_SCANSSID_INC_RNR;
+done:
+	*struct_size = length;
+	return params_le;
+}
+
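+/* v4 variant: identical handling to v3, using the v4 structure and version. */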
+static void *
+brcmf_scan_param_get_prepped_struct_v4(struct brcmf_cfg80211_info *cfg,
+				       u32 *struct_size,
+				       struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u32 params_size = sizeof(struct brcmf_scan_params_v4_le);
+	u32 length;
+	struct brcmf_scan_params_v4_le *params_le = NULL;
+	u32 scan_type = BRCMF_SCANTYPE_ACTIVE;
+
+	length = offsetof(struct brcmf_scan_params_v4_le, channel_list);
+	params_size = brcmf_scan_param_tail_size(request, params_size);
+	params_le = kzalloc(params_size, GFP_KERNEL);
+	if (!params_le) {
+		bphy_err(cfg, "Could not allocate scan params\n");
+		return NULL;
+	}
+	params_le->version = cpu_to_le16(BRCMF_SCAN_PARAMS_VERSION_V4);
+	params_le->ssid_type = 0;
+	brcmf_scan_param_set_defaults(&params_le->bssid,
+		&params_le->bss_type, &params_le->channel_num,
+		&params_le->nprobes, &params_le->active_time,
+		&params_le->passive_time, &params_le->home_time);
+
+	/* Scan abort */
+	if (!request) {
+		length += sizeof(u16);
+		params_le->channel_num = cpu_to_le32(1);
+		params_le->channel_list[0] = cpu_to_le16(-1);
+		params_le->length = cpu_to_le16(length);
+		goto done;
+	}
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+		  n_channels);
+	if (n_channels > 0) {
+		length += roundup(sizeof(u16) * n_channels, sizeof(u32));
+		brcmf_scan_param_copy_chanspecs(cfg, &params_le->channel_list,
+						request->channels, n_channels);
+	} else {
+		brcmf_dbg(SCAN, "Scanning all channels\n");
+	}
+
+	/* Copy ssid array if applicable */
+	brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
+	if (n_ssids > 0) {
+		s32 offset;
+		char *ptr;
+
+		offset =
+			offsetof(struct brcmf_scan_params_v4_le, channel_list) +
+			n_channels * sizeof(u16);
+		offset = roundup(offset, sizeof(u32));
+		length += sizeof(struct brcmf_ssid_le) * n_ssids;
+		ptr = (char *)params_le + offset;
+		brcmf_scan_param_copy_ssids(ptr, request->ssids, n_ssids);
+	} else {
+		brcmf_dbg(SCAN, "Performing passive scan\n");
+		scan_type = BRCMF_SCANTYPE_PASSIVE;
+	}
+	scan_type |= brcmf_nl80211_scan_flags_to_scan_flags(request->flags);
+	params_le->scan_type = cpu_to_le32(scan_type);
+	params_le->length = cpu_to_le16(length);
+	/* Adding mask to channel numbers */
+	params_le->channel_num =
+		cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+			    (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+	/* Include RNR results if requested */
+	if (request->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+		params_le->ssid_type |= BRCMF_SCANSSID_INC_RNR;
+done:
+	*struct_size = length;
+	return params_le;
+}
+
+int brcmf_scan_param_setup_for_version(struct brcmf_pub *drvr, u8 version)
+{
+	drvr->scan_param_handler.version = version;
+	switch (version) {
+	case 1:
+		drvr->scan_param_handler.get_struct_for_request =
+			brcmf_scan_param_get_prepped_struct_v1;
+		break;
+	case 2:
+		drvr->scan_param_handler.get_struct_for_request =
+			brcmf_scan_param_get_prepped_struct_v2;
+		break;
+	case 3:
+		drvr->scan_param_handler.get_struct_for_request =
+			brcmf_scan_param_get_prepped_struct_v3;
+		break;
+	case 4:
+		drvr->scan_param_handler.get_struct_for_request =
+			brcmf_scan_param_get_prepped_struct_v4;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/scan_param.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/scan_param.h
new file mode 100644
index 00000000000000..577de083c6e3cd
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/scan_param.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2023 Daniel Berlin
+ */
+
+#ifndef _BRCMF_SCAN_PARAM_H
+#define _BRCMF_SCAN_PARAM_H
+
+struct brcmf_pub;
+
+/**
+ * brcmf_scan_param_setup_for_version() - set up scan parameter handling
+ *
+ * There are a number of different structures and interface versions for
+ * scan parameters. This sets up the driver to handle a particular
+ * interface version.
+ *
+ * @drvr: driver structure to set up
+ * @version: scan parameter interface version
+ *
+ * Return: %0 if okay, error code otherwise
+ */
+int brcmf_scan_param_setup_for_version(struct brcmf_pub *drvr, u8 version);
+#endif /* _BRCMF_SCAN_PARAM_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index 1e2b1e487eb76e..faf7eeeeb2d57e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -87,10 +87,20 @@ static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
 			0, d11ac_bw(ch->bw));
 
 	ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
-	if (ch->chnum <= CH_MAX_2G_CHANNEL)
-		ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
-	else
+	switch (ch->band) {
+	case BRCMU_CHAN_BAND_6G:
+		ch->chspec |= BRCMU_CHSPEC_D11AC_BND_6G;
+		break;
+	case BRCMU_CHAN_BAND_5G:
 		ch->chspec |= BRCMU_CHSPEC_D11AC_BND_5G;
+		break;
+	case BRCMU_CHAN_BAND_2G:
+		ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
+		break;
+	default:
+		WARN_ONCE(1, "Invalid band 0x%04x\n", ch->band);
+		break;
+	}
 }
 
 static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
@@ -117,7 +127,9 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
 		}
 		break;
 	default:
-		WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+		WARN_ONCE(1,
+			  "Invalid chanspec - unknown 11n bandwidth 0x%04x\n",
+			  ch->chspec);
 		break;
 	}
 
@@ -129,7 +141,8 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
 		ch->band = BRCMU_CHAN_BAND_2G;
 		break;
 	default:
-		WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+		WARN_ONCE(1, "Invalid chanspec - unknown 11n band 0x%04x\n",
+			  ch->chspec);
 		break;
 	}
 }
@@ -156,7 +169,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
 			ch->sb = BRCMU_CHAN_SB_U;
 			ch->control_ch_num += CH_10MHZ_APART;
 		} else {
-			WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+			WARN_ONCE(1,
+				  "Invalid chanspec - unknown 11ac channel distance 0x%04x\n",
+				  ch->chspec);
 		}
 		break;
 	case BRCMU_CHSPEC_D11AC_BW_80:
@@ -177,7 +192,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
 			ch->control_ch_num += CH_30MHZ_APART;
 			break;
 		default:
-			WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+			WARN_ONCE(1,
+				  "Invalid chanspec - unknown 11ac channel distance 0x%04x\n",
+				  ch->chspec);
 			break;
 		}
 		break;
@@ -211,17 +228,24 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
 			ch->control_ch_num += CH_70MHZ_APART;
 			break;
 		default:
-			WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+			WARN_ONCE(1,
+				  "Invalid chanspec - unknown 11ac channel distance 0x%04x\n",
+				  ch->chspec);
 			break;
 		}
 		break;
 	case BRCMU_CHSPEC_D11AC_BW_8080:
 	default:
-		WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+		WARN_ONCE(1,
+			  "Invalid chanspec - unknown 11ac channel bandwidth 0x%04x\n",
+			  ch->chspec);
 		break;
 	}
 
 	switch (ch->chspec & BRCMU_CHSPEC_D11AC_BND_MASK) {
+	case BRCMU_CHSPEC_D11AC_BND_6G:
+		ch->band = BRCMU_CHAN_BAND_6G;
+		break;
 	case BRCMU_CHSPEC_D11AC_BND_5G:
 		ch->band = BRCMU_CHAN_BAND_5G;
 		break;
@@ -229,7 +253,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
 		ch->band = BRCMU_CHAN_BAND_2G;
 		break;
 	default:
-		WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
+		WARN_ONCE(1,
+			  "Invalid chanspec - unknown 11ac channel band 0x%04x\n",
+			  ch->chspec);
 		break;
 	}
 }
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index c1e22c589d85eb..424afe4b8f2e0b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -56,6 +56,7 @@
 #define BRCM_CC_4377_CHIP_ID		0x4377
 #define BRCM_CC_4378_CHIP_ID		0x4378
 #define BRCM_CC_4387_CHIP_ID		0x4387
+#define BRCM_CC_4388_CHIP_ID		0x4388
 #define CY_CC_4373_CHIP_ID		0x4373
 #define CY_CC_43012_CHIP_ID		43012
 #define CY_CC_43439_CHIP_ID		43439
@@ -99,6 +100,7 @@
 #define BRCM_PCIE_4377_DEVICE_ID	0x4488
 #define BRCM_PCIE_4378_DEVICE_ID	0x4425
 #define BRCM_PCIE_4387_DEVICE_ID	0x4433
+#define BRCM_PCIE_4388_DEVICE_ID	0x4434
 
 /* brcmsmac IDs */
 #define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
index f6344023855c36..bb48b744206223 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
@@ -69,24 +69,44 @@
 #define  BRCMU_CHSPEC_D11AC_SB_UU	BRCMU_CHSPEC_D11AC_SB_LUU
 #define  BRCMU_CHSPEC_D11AC_SB_L	BRCMU_CHSPEC_D11AC_SB_LLL
 #define  BRCMU_CHSPEC_D11AC_SB_U	BRCMU_CHSPEC_D11AC_SB_LLU
+/* channel sideband indication for frequency >= 240MHz */
+#define BRCMU_CHSPEC_D11AC_320_SB_MASK	0x0780
+#define BRCMU_CHSPEC_D11AC_320_SB_SHIFT	7
+#define BRCMU_CHSPEC_D11AC_SB_LLLL	0x0000
+#define BRCMU_CHSPEC_D11AC_SB_LLLU	0x0080
+#define BRCMU_CHSPEC_D11AC_SB_LLUL	0x0100
+#define BRCMU_CHSPEC_D11AC_SB_LLUU	0x0180
+#define BRCMU_CHSPEC_D11AC_SB_LULL	0x0200
+#define BRCMU_CHSPEC_D11AC_SB_LULU	0x0280
+#define BRCMU_CHSPEC_D11AC_SB_LUUL	0x0300
+#define BRCMU_CHSPEC_D11AC_SB_LUUU	0x0380
+#define BRCMU_CHSPEC_D11AC_SB_ULLL	0x0400
+#define BRCMU_CHSPEC_D11AC_SB_ULLU	0x0480
+#define BRCMU_CHSPEC_D11AC_SB_ULUL	0x0500
+#define BRCMU_CHSPEC_D11AC_SB_ULUU	0x0580
+#define BRCMU_CHSPEC_D11AC_SB_UULL	0x0600
+#define BRCMU_CHSPEC_D11AC_SB_UULU	0x0680
+#define BRCMU_CHSPEC_D11AC_SB_UUUL	0x0700
+#define BRCMU_CHSPEC_D11AC_SB_UUUU	0x0780
 #define BRCMU_CHSPEC_D11AC_BW_MASK	0x3800
 #define BRCMU_CHSPEC_D11AC_BW_SHIFT	11
-#define  BRCMU_CHSPEC_D11AC_BW_5	0x0000
-#define  BRCMU_CHSPEC_D11AC_BW_10	0x0800
-#define  BRCMU_CHSPEC_D11AC_BW_20	0x1000
-#define  BRCMU_CHSPEC_D11AC_BW_40	0x1800
-#define  BRCMU_CHSPEC_D11AC_BW_80	0x2000
-#define  BRCMU_CHSPEC_D11AC_BW_160	0x2800
-#define  BRCMU_CHSPEC_D11AC_BW_8080	0x3000
-#define BRCMU_CHSPEC_D11AC_BND_MASK	0xc000
-#define BRCMU_CHSPEC_D11AC_BND_SHIFT	14
-#define  BRCMU_CHSPEC_D11AC_BND_2G	0x0000
-#define  BRCMU_CHSPEC_D11AC_BND_3G	0x4000
-#define  BRCMU_CHSPEC_D11AC_BND_4G	0x8000
-#define  BRCMU_CHSPEC_D11AC_BND_5G	0xc000
+#define BRCMU_CHSPEC_D11AC_BW_10    0x0800
+#define BRCMU_CHSPEC_D11AC_BW_20    0x1000
+#define BRCMU_CHSPEC_D11AC_BW_40    0x1800
+#define BRCMU_CHSPEC_D11AC_BW_80    0x2000
+#define BRCMU_CHSPEC_D11AC_BW_160   0x2800
+#define BRCMU_CHSPEC_D11AC_BW_320   0x0000
+#define BRCMU_CHSPEC_D11AC_BW_8080  0x3000
+#define BRCMU_CHSPEC_D11AC_BND_MASK 0xc000
+#define BRCMU_CHSPEC_D11AC_BND_SHIFT 14
+#define BRCMU_CHSPEC_D11AC_BND_2G   0x0000
+#define BRCMU_CHSPEC_D11AC_BND_4G   0x8000
+#define BRCMU_CHSPEC_D11AC_BND_5G   0xc000
+#define BRCMU_CHSPEC_D11AC_BND_6G   0x4000
 
 #define BRCMU_CHAN_BAND_2G		0
 #define BRCMU_CHAN_BAND_5G		1
+#define BRCMU_CHAN_BAND_6G		2
 
 enum brcmu_chan_bw {
 	BRCMU_CHAN_BW_20,
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index 7552bdb91991ce..ef042beeb586f9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -31,6 +31,7 @@
 /* bandstate array indices */
 #define BAND_2G_INDEX		0	/* wlc->bandstate[x] index */
 #define BAND_5G_INDEX		1	/* wlc->bandstate[x] index */
+#define BAND_6G_INDEX		2	/* wlc->bandstate[x] index */
 
 /*
  * max # supported channels. The max channel no is 216, this is that + 1
@@ -48,17 +49,22 @@
 #define WL_CHANSPEC_CTL_SB_UPPER	0x0200
 #define WL_CHANSPEC_CTL_SB_NONE		0x0300
 
-#define WL_CHANSPEC_BW_MASK		0x0C00
-#define WL_CHANSPEC_BW_SHIFT		    10
+#define WL_CHANSPEC_BW_MASK		0x3800
+#define WL_CHANSPEC_BW_SHIFT	11
 #define WL_CHANSPEC_BW_10		0x0400
 #define WL_CHANSPEC_BW_20		0x0800
 #define WL_CHANSPEC_BW_40		0x0C00
 #define WL_CHANSPEC_BW_80		0x2000
-
-#define WL_CHANSPEC_BAND_MASK		0xf000
-#define WL_CHANSPEC_BAND_SHIFT		12
-#define WL_CHANSPEC_BAND_5G		0x1000
-#define WL_CHANSPEC_BAND_2G		0x2000
+#define WL_CHANSPEC_BW_160	0x2800
+#define WL_CHANSPEC_BW_8080 0x3000
+#define WL_CHANSPEC_BW_320  0x0000
+
+#define WL_CHANSPEC_BAND_MASK		0xc000
+#define WL_CHANSPEC_BAND_SHIFT		14
+#define WL_CHANSPEC_BAND_2G		0x0000
+#define WL_CHANSPEC_BAND_4G		0x8000
+#define WL_CHANSPEC_BAND_5G		0xc000
+#define WL_CHANSPEC_BAND_6G		0x4000
 #define INVCHANSPEC			255
 
 #define WL_CHAN_VALID_HW		(1 << 0) /* valid with current HW */
@@ -93,6 +99,9 @@
 #define	WLC_BAND_5G			1	/* 5 Ghz */
 #define	WLC_BAND_2G			2	/* 2.4 Ghz */
 #define	WLC_BAND_ALL			3	/* all bands */
+#define WLC_BAND_6G			4	/* 6 Ghz */
+
+#define WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE	2
 
 #define CHSPEC_CHANNEL(chspec)	((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
 #define CHSPEC_BAND(chspec)	((chspec) & WL_CHANSPEC_BAND_MASK)
@@ -112,6 +121,12 @@
 #define CHSPEC_IS80(chspec) \
 	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
 
+#define CHSPEC_IS160(chspec) \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+
+#define CHSPEC_IS6G(chspec) \
+	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_6G)
+
 #define CHSPEC_IS5G(chspec) \
 	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
 
@@ -200,6 +215,13 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
 #define CRYPTO_ALGO_AES_RESERVED1	5
 #define CRYPTO_ALGO_AES_RESERVED2	6
 #define CRYPTO_ALGO_NALG		7
+#define CRYPTO_ALGO_AES_GCM     14  /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256  15  /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256  16  /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17  /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC    18  /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19  /* 256 bit BIP GMAC */
 
 /* wireless security bitvec */
 
@@ -232,6 +254,13 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
 #define WPA2_AUTH_PSK_SHA256	0x8000	/* PSK with SHA256 key derivation */
 
 #define WPA3_AUTH_SAE_PSK	0x40000	/* SAE with 4-way handshake */
+#define WPA3_AUTH_OWE		0x100000 /* OWE */
+#define WFA_AUTH_DPP		0x200000 /* WFA DPP AUTH */
+#define WPA3_AUTH_1X_SUITE_B_SHA384	0x400000 /* Suite B-192 SHA384 */
+
+#define WFA_OUI			"\x50\x6F\x9A"	/* WFA OUI */
+#define DPP_VER			0x1A	/* WFA DPP v1.0 */
 
 #define DOT11_DEFAULT_RTS_LEN		2347
 #define DOT11_DEFAULT_FRAG_LEN		2346
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
index 0340bba968688f..5c3b8fb41194ae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
@@ -302,6 +302,14 @@ struct chipcregs {
 #define PMU_RCTL_LOGIC_DISABLE_MASK         (1 << 27)
 
 
+/* watchdog */
+#define CC_WD_SSRESET_PCIE_F0_EN	0x10000000
+#define CC_WD_SSRESET_PCIE_F1_EN	0x20000000
+#define CC_WD_SSRESET_PCIE_F2_EN	0x40000000
+#define CC_WD_SSRESET_PCIE_ALL_FN_EN	0x80000000
+#define CC_WD_COUNTER_MASK		0x0fffffff
+#define CC_WD_ENABLE_MASK		0xf0000000
+
 /*
 * Maximum delay for the PMU state transition in us.
 * This is an upper bound intended for spinwaits etc.
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 8971aca41e63dc..c5f7b342cd8c87 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -195,8 +195,20 @@ struct apple_nvme {
 
 	int irq;
 	spinlock_t lock;
+
+	/*
+	 * Delayed cache flush handling state
+	 */
+	struct nvme_ns *flush_ns;
+	unsigned long flush_interval;
+	unsigned long last_flush;
+	struct delayed_work flush_dwork;
 };
 
+static unsigned int flush_interval = 1000;
+module_param(flush_interval, uint, 0644);
+MODULE_PARM_DESC(flush_interval, "Grace period in msecs between flushes");
+
 static_assert(sizeof(struct nvme_command) == 64);
 static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
 
@@ -221,7 +233,7 @@ static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
 	return APPLE_ANS_MAX_QUEUE_DEPTH;
 }
 
-static void apple_nvme_rtkit_crashed(void *cookie)
+static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
 {
 	struct apple_nvme *anv = cookie;
 
@@ -730,6 +742,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv)
 	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
 }
 
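+/*
+ * Coalesce flushes that arrive within flush_interval of the previous one:
+ * the request is completed right away and a single deferred flush is issued
+ * later from flush_dwork. Returns true if the flush was absorbed and must
+ * not be sent to the controller.
+ */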
+static bool apple_nvme_delayed_flush(struct apple_nvme *anv, struct nvme_ns *ns,
+				     struct request *req)
+{
+	if (!anv->flush_interval || req_op(req) != REQ_OP_FLUSH)
+		return false;
+	if (delayed_work_pending(&anv->flush_dwork))
+		return true;
+	if (time_before(jiffies, anv->last_flush + anv->flush_interval)) {
+		kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &anv->flush_dwork,
+						anv->flush_interval);
+		if (WARN_ON_ONCE(anv->flush_ns && anv->flush_ns != ns))
+			goto out;
+		anv->flush_ns = ns;
+		return true;
+	}
+out:
+	anv->last_flush = jiffies;
+	return false;
+}
+
 static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 					const struct blk_mq_queue_data *bd)
 {
@@ -765,6 +797,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	nvme_start_request(req);
+
+	if (apple_nvme_delayed_flush(anv, ns, req)) {
+		blk_mq_complete_request(req);
+		return BLK_STS_OK;
+	}
+
 	apple_nvme_submit_cmd(q, cmnd);
 	return BLK_STS_OK;
 
@@ -1399,6 +1437,28 @@ static void devm_apple_nvme_mempool_destroy(void *data)
 	mempool_destroy(data);
 }
 
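+/*
+ * Deferred work issuing the coalesced flush for the namespace recorded in
+ * flush_ns; refreshes last_flush on success.
+ */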
+static void apple_nvme_flush_work(struct work_struct *work)
+{
+	struct nvme_command c = { };
+	struct apple_nvme *anv;
+	struct nvme_ns *ns;
+	int err;
+
+	anv = container_of(work, struct apple_nvme, flush_dwork.work);
+	ns = anv->flush_ns;
+	if (WARN_ON_ONCE(!ns))
+		return;
+
+	c.common.opcode = nvme_cmd_flush;
+	c.common.nsid = cpu_to_le32(anv->flush_ns->head->ns_id);
+	err = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
+	if (err) {
+		dev_err(anv->dev, "Deferred flush failed: %d\n", err);
+	} else {
+		anv->last_flush = jiffies;
+	}
+}
+
 static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -1554,6 +1614,14 @@ static int apple_nvme_probe(struct platform_device *pdev)
 		goto out_uninit_ctrl;
 	}
 
+	if (flush_interval) {
+		anv->flush_interval = msecs_to_jiffies(flush_interval);
+		anv->flush_ns = NULL;
+		anv->last_flush = jiffies - anv->flush_interval;
+	}
+
+	INIT_DELAYED_WORK(&anv->flush_dwork, apple_nvme_flush_work);
+
 	nvme_reset_ctrl(&anv->ctrl);
 	async_schedule(apple_nvme_async_probe, anv);
 
@@ -1591,6 +1659,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
 {
 	struct apple_nvme *anv = platform_get_drvdata(pdev);
 
+	flush_delayed_work(&anv->flush_dwork);
 	apple_nvme_disable(anv, true);
 	if (apple_rtkit_is_running(anv->rtk)) {
 		apple_rtkit_shutdown(anv->rtk);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 8671b7c974b933..66c1d12ccdaac1 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -310,6 +310,19 @@ config NVMEM_SNVS_LPGPR
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem-snvs-lpgpr.
 
+config NVMEM_SPMI_MFD
+	tristate "Generic SPMI MFD NVMEM"
+	depends on MFD_SIMPLE_MFD_SPMI || COMPILE_TEST
+	default ARCH_APPLE
+	help
+	  Say y here to build a generic driver to expose an SPMI MFD device
+	  as a NVMEM provider. This can be used for PMIC/PMU devices which
+	  are used to store power and RTC-related settings on certain
+	  platforms, such as Apple Silicon Macs.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-spmi-mfd.
+
 config NVMEM_SPMI_SDAM
 	tristate "SPMI SDAM Support"
 	depends on SPMI
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 5b77bbb6488bf8..2765f642c4b582 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_NVMEM_SC27XX_EFUSE)	+= nvmem-sc27xx-efuse.o
 nvmem-sc27xx-efuse-y			:= sc27xx-efuse.o
 obj-$(CONFIG_NVMEM_SNVS_LPGPR)		+= nvmem_snvs_lpgpr.o
 nvmem_snvs_lpgpr-y			:= snvs_lpgpr.o
+obj-$(CONFIG_NVMEM_SPMI_MFD)		+= nvmem_spmi_mfd.o
+nvmem_spmi_mfd-y			:= spmi-mfd-nvmem.o
 obj-$(CONFIG_NVMEM_SPMI_SDAM)		+= nvmem_qcom-spmi-sdam.o
 nvmem_qcom-spmi-sdam-y			+= qcom-spmi-sdam.o
 obj-$(CONFIG_NVMEM_SPRD_EFUSE)		+= nvmem_sprd_efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index fff85bbf0ecd0f..0d877071f02c05 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -595,8 +595,8 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
 	cell->np = info->np;
 
 	if (cell->nbits)
-		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
-					   BITS_PER_BYTE);
+		cell->bytes = round_up(DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE), nvmem->word_size);
 
 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
 		dev_err(&nvmem->dev,
@@ -837,11 +837,6 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
 		if (addr && len == (2 * sizeof(u32))) {
 			info.bit_offset = be32_to_cpup(addr++);
 			info.nbits = be32_to_cpup(addr);
-			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
-				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
-				of_node_put(child);
-				return -EINVAL;
-			}
 		}
 
 		info.np = of_node_get(child);
@@ -1630,15 +1625,23 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
 {
 	u8 *p, *b;
-	int i, extra, bit_offset = cell->bit_offset;
+	int i, padding, extra, bit_offset = cell->bit_offset;
+	int bytes = cell->bytes;
 
 	p = b = buf;
 	if (bit_offset) {
+		padding = bit_offset / 8;
+		if (padding) {
+			memmove(buf, buf + padding, bytes - padding);
+			bit_offset -= BITS_PER_BYTE * padding;
+			bytes -= padding;
+		}
+
 		/* First shift */
 		*b++ >>= bit_offset;
 
 		/* setup rest of the bytes if any */
-		for (i = 1; i < cell->bytes; i++) {
+		for (i = 1; i < bytes; i++) {
 			/* Get bits from next byte and shift them towards msb */
 			*p |= *b << (BITS_PER_BYTE - bit_offset);
 
@@ -1651,7 +1654,7 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void
 	}
 
 	/* result fits in less bytes */
-	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
+	extra = bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
 	while (--extra >= 0)
 		*p-- = 0;
 
diff --git a/drivers/nvmem/spmi-mfd-nvmem.c b/drivers/nvmem/spmi-mfd-nvmem.c
new file mode 100644
index 00000000000000..462f350640d1e4
--- /dev/null
+++ b/drivers/nvmem/spmi-mfd-nvmem.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Generic SPMI MFD NVMEM driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct spmi_mfd_nvmem {
+	struct regmap *regmap;
+	unsigned int base;
+};
+
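+/* NVMEM read callback: forward to the parent MFD regmap at base + offset */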
+static int spmi_mfd_nvmem_read(void *priv, unsigned int offset,
+			       void *val, size_t bytes)
+{
+	struct spmi_mfd_nvmem *nvmem = priv;
+
+	return regmap_bulk_read(nvmem->regmap, nvmem->base + offset, val, bytes);
+}
+
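+/* NVMEM write callback: forward to the parent MFD regmap at base + offset */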
+static int spmi_mfd_nvmem_write(void *priv, unsigned int offset,
+				void *val, size_t bytes)
+{
+	struct spmi_mfd_nvmem *nvmem = priv;
+
+	return regmap_bulk_write(nvmem->regmap, nvmem->base + offset, val, bytes);
+}
+
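+/*
+ * Look up the parent's regmap, read the NVMEM window (base and size) from
+ * the "reg" property and register an NVMEM provider backed by it.
+ */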
+static int spmi_mfd_nvmem_probe(struct platform_device *pdev)
+{
+	struct spmi_mfd_nvmem *nvmem;
+	const __be32 *addr;
+	int len;
+	struct nvmem_config nvmem_cfg = {
+		.dev = &pdev->dev,
+		.name = "spmi_mfd_nvmem",
+		.id = NVMEM_DEVID_AUTO,
+		.word_size = 1,
+		.stride = 1,
+		.reg_read = spmi_mfd_nvmem_read,
+		.reg_write = spmi_mfd_nvmem_write,
+		.add_legacy_fixed_of_cells = true,
+	};
+
+	nvmem = devm_kzalloc(&pdev->dev, sizeof(*nvmem), GFP_KERNEL);
+	if (!nvmem)
+		return -ENOMEM;
+
+	nvmem_cfg.priv = nvmem;
+
+	nvmem->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!nvmem->regmap) {
+		dev_err(&pdev->dev, "Parent regmap unavailable.\n");
+		return -ENXIO;
+	}
+
+	addr = of_get_property(pdev->dev.of_node, "reg", &len);
+	if (!addr) {
+		dev_err(&pdev->dev, "no reg property\n");
+		return -EINVAL;
+	}
+	if (len != 2 * sizeof(u32)) {
+		dev_err(&pdev->dev, "invalid reg property\n");
+		return -EINVAL;
+	}
+
+	nvmem->base = be32_to_cpup(&addr[0]);
+	nvmem_cfg.size = be32_to_cpup(&addr[1]);
+
+	return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &nvmem_cfg));
+}
+
+static const struct of_device_id spmi_mfd_nvmem_id_table[] = {
+	{ .compatible = "apple,spmi-pmu-nvmem" },
+	{ .compatible = "spmi-mfd-nvmem" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, spmi_mfd_nvmem_id_table);
+
+static struct platform_driver spmi_mfd_nvmem_driver = {
+	.probe = spmi_mfd_nvmem_probe,
+	.driver = {
+		.name = "spmi-mfd-nvmem",
+		.of_match_table	= spmi_mfd_nvmem_id_table,
+	},
+};
+
+module_platform_driver(spmi_mfd_nvmem_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("SPMI MFD NVMEM driver");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d177a2b9edaf8d..3081197c555f89 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -328,6 +328,15 @@ static int of_bus_default_flags_match(struct device_node *np)
 
 static int of_bus_default_match(struct device_node *np)
 {
+	/*
+	 * Avoid issues with "missing" '#{address,size}-cells' properties
+	 * in dcp/dcpext nodes while evaluating the 'piodma' sub-device.
+	 * Keep this at least until v6.13 + 2 to ensure fixed devicetrees
+	 * are deployed.
+	 */
+	if (of_device_is_compatible(np, "apple,dcp") ||
+		of_device_is_compatible(np, "apple,dcpext"))
+		return true;
 	/*
 	 * Check for presence first since of_bus_n_addr_cells() will warn when
 	 * walking parent nodes.
@@ -564,7 +573,7 @@ static u64 __of_translate_address(struct device_node *node,
 			return OF_BAD_ADDR;
 		pbus->count_cells(dev, &pna, &pns);
 		if (!OF_CHECK_COUNTS(pna, pns)) {
-			pr_err("Bad cell count for %pOF\n", dev);
+			pr_debug("Bad cell count for %pOF\n", dev);
 			return OF_BAD_ADDR;
 		}
 
diff --git a/drivers/of/base.c b/drivers/of/base.c
index af6c68bbb4277e..c068ed603db70d 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -89,6 +89,8 @@ static bool __of_node_is_type(const struct device_node *np, const char *type)
 
 #define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
 	IS_ENABLED(CONFIG_SPARC) || \
+	of_find_compatible_node(NULL, NULL, "apple,dcp") || \
+	of_find_compatible_node(NULL, NULL, "apple,dcpext") || \
 	of_find_compatible_node(NULL, NULL, "coreboot") \
 )
 
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 9800b768105402..507e6ac5d65257 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -39,6 +39,7 @@ config PCIE_APPLE
 	depends on ARCH_APPLE || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI
+	depends on PAGE_SIZE_16KB || COMPILE_TEST
 	select PCI_HOST_COMMON
 	help
 	  Say Y here if you want to enable PCIe controller support on Apple
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index f441bfd6f96a8b..466a1e6a7ffcdc 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -49,23 +49,17 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
 	return cfg;
 }
 
-int pci_host_common_probe(struct platform_device *pdev)
+int pci_host_common_init(struct platform_device *pdev,
+			 const struct pci_ecam_ops *ops)
 {
 	struct device *dev = &pdev->dev;
 	struct pci_host_bridge *bridge;
 	struct pci_config_window *cfg;
-	const struct pci_ecam_ops *ops;
-
-	ops = of_device_get_match_data(&pdev->dev);
-	if (!ops)
-		return -ENODEV;
 
 	bridge = devm_pci_alloc_host_bridge(dev, 0);
 	if (!bridge)
 		return -ENOMEM;
 
-	platform_set_drvdata(pdev, bridge);
-
 	of_pci_check_probe_only();
 
 	/* Parse and map our Configuration Space windows */
@@ -73,6 +67,8 @@ int pci_host_common_probe(struct platform_device *pdev)
 	if (IS_ERR(cfg))
 		return PTR_ERR(cfg);
 
+	platform_set_drvdata(pdev, bridge);
+
 	bridge->sysdata = cfg;
 	bridge->ops = (struct pci_ops *)&ops->pci_ops;
 	bridge->enable_device = ops->enable_device;
@@ -81,6 +77,18 @@ int pci_host_common_probe(struct platform_device *pdev)
 
 	return pci_host_probe(bridge);
 }
+EXPORT_SYMBOL_GPL(pci_host_common_init);
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+	const struct pci_ecam_ops *ops;
+
+	ops = of_device_get_match_data(&pdev->dev);
+	if (!ops)
+		return -ENODEV;
+
+	return pci_host_common_init(pdev, ops);
+}
 EXPORT_SYMBOL_GPL(pci_host_common_probe);
 
 void pci_host_common_remove(struct platform_device *pdev)
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index a7e51bc1c2fe8e..c0cbd25f71e030 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -18,6 +18,7 @@
  * Author: Marc Zyngier <maz@kernel.org>
  */
 
+#include <linux/bitfield.h>
 #include <linux/gpio/consumer.h>
 #include <linux/kernel.h>
 #include <linux/iopoll.h>
@@ -29,6 +30,11 @@
 #include <linux/of_irq.h>
 #include <linux/pci-ecam.h>
 
+static int link_up_timeout = 500;
+module_param(link_up_timeout, int, 0644);
+MODULE_PARM_DESC(link_up_timeout, "PCIe link training timeout in milliseconds");
+
+/* T8103 (original M1) and related SoCs */
 #define CORE_RC_PHYIF_CTL		0x00024
 #define   CORE_RC_PHYIF_CTL_RUN		BIT(0)
 #define CORE_RC_PHYIF_STAT		0x00028
@@ -39,14 +45,18 @@
 #define   CORE_RC_STAT_READY		BIT(0)
 #define CORE_FABRIC_STAT		0x04000
 #define   CORE_FABRIC_STAT_MASK		0x001F001F
-#define CORE_LANE_CFG(port)		(0x84000 + 0x4000 * (port))
-#define   CORE_LANE_CFG_REFCLK0REQ	BIT(0)
-#define   CORE_LANE_CFG_REFCLK1REQ	BIT(1)
-#define   CORE_LANE_CFG_REFCLK0ACK	BIT(2)
-#define   CORE_LANE_CFG_REFCLK1ACK	BIT(3)
-#define   CORE_LANE_CFG_REFCLKEN	(BIT(9) | BIT(10))
-#define CORE_LANE_CTL(port)		(0x84004 + 0x4000 * (port))
-#define   CORE_LANE_CTL_CFGACC		BIT(15)
+
+#define CORE_PHY_DEFAULT_BASE(port)	(0x84000 + 0x4000 * (port))
+
+#define PHY_LANE_CFG			0x00000
+#define   PHY_LANE_CFG_REFCLK0REQ	BIT(0)
+#define   PHY_LANE_CFG_REFCLK1REQ	BIT(1)
+#define   PHY_LANE_CFG_REFCLK0ACK	BIT(2)
+#define   PHY_LANE_CFG_REFCLK1ACK	BIT(3)
+#define   PHY_LANE_CFG_REFCLKEN		(BIT(9) | BIT(10))
+#define   PHY_LANE_CFG_REFCLKCGEN	(BIT(30) | BIT(31))
+#define PHY_LANE_CTL			0x00004
+#define   PHY_LANE_CTL_CFGACC		BIT(15)
 
 #define PORT_LTSSMCTL			0x00080
 #define   PORT_LTSSMCTL_START		BIT(0)
@@ -100,7 +110,7 @@
 #define   PORT_REFCLK_CGDIS		BIT(8)
 #define PORT_PERST			0x00814
 #define   PORT_PERST_OFF		BIT(0)
-#define PORT_RID2SID(i16)		(0x00828 + 4 * (i16))
+#define PORT_RID2SID			0x00828
 #define   PORT_RID2SID_VALID		BIT(31)
 #define   PORT_RID2SID_SID_SHIFT	16
 #define   PORT_RID2SID_BUS_SHIFT	8
@@ -118,7 +128,15 @@
 #define   PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
 #define PORT_PREFMEM_ENABLE		0x00994
 
-#define MAX_RID2SID			64
+/* T602x (M2-pro and co) */
+#define PORT_T602X_MSIADDR	0x016c
+#define PORT_T602X_MSIADDR_HI	0x0170
+#define PORT_T602X_PERST	0x082c
+#define PORT_T602X_RID2SID	0x3000
+#define PORT_T602X_MSIMAP	0x3800
+
+#define PORT_MSIMAP_ENABLE	BIT(31)
+#define PORT_MSIMAP_TARGET	GENMASK(7, 0)
 
 /*
  * The doorbell address is set to 0xfffff000, which by convention
@@ -129,10 +147,45 @@
  */
 #define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
 
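+/*
+ * Register layout differences between PCIe controller generations, captured
+ * by the t8103_hw and t602x_hw instances below.
+ */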
+struct hw_info {
+	u32 phy_lane_ctl;
+	u32 port_msiaddr;
+	u32 port_msiaddr_hi;
+	u32 port_refclk;
+	u32 port_perst;
+	u32 port_rid2sid;
+	u32 port_msimap;
+	u32 max_rid2sid;
+};
+
+static const struct hw_info t8103_hw = {
+	.phy_lane_ctl		= PHY_LANE_CTL,
+	.port_msiaddr		= PORT_MSIADDR,
+	.port_msiaddr_hi	= 0,
+	.port_refclk		= PORT_REFCLK,
+	.port_perst		= PORT_PERST,
+	.port_rid2sid		= PORT_RID2SID,
+	.port_msimap		= 0,
+	.max_rid2sid		= 64,
+};
+
+static const struct hw_info t602x_hw = {
+	.phy_lane_ctl		= 0,
+	.port_msiaddr		= PORT_T602X_MSIADDR,
+	.port_msiaddr_hi	= PORT_T602X_MSIADDR_HI,
+	.port_refclk		= 0,
+	.port_perst		= PORT_T602X_PERST,
+	.port_rid2sid		= PORT_T602X_RID2SID,
+	.port_msimap		= PORT_T602X_MSIMAP,
+	/* 16 on t602x, guess for autodetect on future HW */
+	.max_rid2sid		= 512,
+};
+
 struct apple_pcie {
 	struct mutex		lock;
 	struct device		*dev;
 	void __iomem            *base;
+	const struct hw_info	*hw;
 	struct irq_domain	*domain;
 	unsigned long		*bitmap;
 	struct list_head	ports;
@@ -142,12 +195,14 @@ struct apple_pcie {
 };
 
 struct apple_pcie_port {
+	raw_spinlock_t		lock;
 	struct apple_pcie	*pcie;
 	struct device_node	*np;
 	void __iomem		*base;
+	void __iomem		*phy;
 	struct irq_domain	*domain;
 	struct list_head	entry;
-	DECLARE_BITMAP(sid_map, MAX_RID2SID);
+	unsigned long		*sid_map;
 	int			sid_map_sz;
 	int			idx;
 };
@@ -261,14 +316,16 @@ static void apple_port_irq_mask(struct irq_data *data)
 {
 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
 
-	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+	guard(raw_spinlock_irqsave)(&port->lock);
+	rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
 }
 
 static void apple_port_irq_unmask(struct irq_data *data)
 {
 	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
 
-	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+	guard(raw_spinlock_irqsave)(&port->lock);
+	rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
 }
 
 static bool hwirq_is_intx(unsigned int hwirq)
@@ -372,7 +429,9 @@ static void apple_port_irq_handler(struct irq_desc *desc)
 static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
 {
 	struct fwnode_handle *fwnode = &port->np->fwnode;
+	struct apple_pcie *pcie = port->pcie;
 	unsigned int irq;
+	u32 val = 0;
 
 	/* FIXME: consider moving each interrupt under each port */
 	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
@@ -387,20 +446,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
 		return -ENOMEM;
 
 	/* Disable all interrupts */
-	writel_relaxed(~0, port->base + PORT_INTMSKSET);
+	writel_relaxed(~0, port->base + PORT_INTMSK);
 	writel_relaxed(~0, port->base + PORT_INTSTAT);
+	writel_relaxed(~0, port->base + PORT_LINKCMDSTS);
 
 	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
 
 	/* Configure MSI base address */
 	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
-	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+	writel_relaxed(lower_32_bits(DOORBELL_ADDR),
+		       port->base + pcie->hw->port_msiaddr);
+	if (pcie->hw->port_msiaddr_hi)
+		writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);
 
 	/* Enable MSIs, shared between all ports */
-	writel_relaxed(0, port->base + PORT_MSIBASE);
-	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
-		       PORT_MSICFG_EN, port->base + PORT_MSICFG);
+	if (pcie->hw->port_msimap) {
+		for (int i = 0; i < pcie->nvecs; i++)
+			writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
+				       PORT_MSIMAP_ENABLE,
+				       port->base + pcie->hw->port_msimap + 4 * i);
+	} else {
+		writel_relaxed(0, port->base + PORT_MSIBASE);
+		val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
+	}
 
+	writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
 	return 0;
 }
 
@@ -467,91 +537,101 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
 	u32 stat;
 	int res;
 
-	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
-					 stat & CORE_RC_PHYIF_STAT_REFCLK,
-					 100, 50000);
-	if (res < 0)
-		return res;
+	if (pcie->hw->phy_lane_ctl)
+		rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
 
-	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
-	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+	rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);
 
-	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
-					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+	res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+					 stat, stat & PHY_LANE_CFG_REFCLK0ACK,
 					 100, 50000);
 	if (res < 0)
 		return res;
 
-	rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
-	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
-					 stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+	rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
+	res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+					 stat, stat & PHY_LANE_CFG_REFCLK1ACK,
 					 100, 50000);
 
 	if (res < 0)
 		return res;
 
-	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+	if (pcie->hw->phy_lane_ctl)
+		rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
+
+	rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);
 
-	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
-	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+	if (pcie->hw->port_refclk)
+		rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);
 
 	return 0;
 }
 
+static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
+{
+	return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
+}
+
 static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
 				    int idx, u32 val)
 {
-	writel_relaxed(val, port->base + PORT_RID2SID(idx));
+	writel_relaxed(val, port_rid2sid_addr(port, idx));
 	/* Read back to ensure completion of the write */
-	return readl_relaxed(port->base + PORT_RID2SID(idx));
+	return readl_relaxed(port_rid2sid_addr(port, idx));
 }
 
-static int apple_pcie_setup_port(struct apple_pcie *pcie,
+static int apple_pcie_setup_link(struct apple_pcie *pcie,
+				 struct apple_pcie_port *port,
 				 struct device_node *np)
 {
-	struct platform_device *platform = to_platform_device(pcie->dev);
-	struct apple_pcie_port *port;
-	struct gpio_desc *reset;
-	u32 stat, idx;
-	int ret, i;
+	struct gpio_desc *reset, *pwren = NULL;
+	u32 stat;
+	int ret;
 
+	/*
+	 * Assert PERST# and configure the pin as output.
+	 * The Aquantia AQC113 10GbE NIC used in desktop Macs is sensitive to
+	 * deasserting it without prior clock setup.
+	 * Observed on M1 Max/Ultra Mac Studios under m1n1's hypervisor.
+	 */
 	reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
-				      GPIOD_OUT_LOW, "PERST#");
+				      GPIOD_OUT_HIGH, "PERST#");
 	if (IS_ERR(reset))
 		return PTR_ERR(reset);
 
-	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
-	if (!port)
-		return -ENOMEM;
-
-	ret = of_property_read_u32_index(np, "reg", 0, &idx);
-	if (ret)
-		return ret;
-
-	/* Use the first reg entry to work out the port index */
-	port->idx = idx >> 11;
-	port->pcie = pcie;
-	port->np = np;
-
-	port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
-	if (IS_ERR(port->base))
-		return PTR_ERR(port->base);
+	pwren = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "pwren",
+					    GPIOD_ASIS, "PWREN");
+	if (IS_ERR(pwren)) {
+		if (PTR_ERR(pwren) == -ENOENT)
+			pwren = NULL;
+		else
+			return PTR_ERR(pwren);
+	}
 
 	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
 
 	/* Assert PERST# before setting up the clock */
-	gpiod_set_value(reset, 1);
+	gpiod_set_value_cansleep(reset, 1);
+
+	/* Power on the device if required */
+	gpiod_set_value_cansleep(pwren, 1);
 
 	ret = apple_pcie_setup_refclk(pcie, port);
 	if (ret < 0)
 		return ret;
 
-	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
-	usleep_range(100, 200);
+	/*
+	 * The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2).
+	 * If the device is also being powered up, the minimal Tpvperl is 100ms.
+	 */
+	if (pwren)
+		msleep(100);
+	else
+		usleep_range(100, 200);
 
 	/* Deassert PERST# */
-	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
-	gpiod_set_value(reset, 0);
+	rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
+	gpiod_set_value_cansleep(reset, 0);
 
 	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
 	msleep(100);
@@ -563,7 +643,67 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
 		return ret;
 	}
 
-	rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+	return 0;
+}
+
+static int apple_pcie_setup_port(struct apple_pcie *pcie,
+				 struct device_node *np)
+{
+	struct platform_device *platform = to_platform_device(pcie->dev);
+	struct apple_pcie_port *port;
+	struct resource *res;
+	char name[16];
+	u32 link_stat, idx;
+	int ret, i;
+
+	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
+	if (!port->sid_map)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_index(np, "reg", 0, &idx);
+	if (ret)
+		return ret;
+
+	/* Use the first reg entry to work out the port index */
+	port->idx = idx >> 11;
+	port->pcie = pcie;
+	port->np = np;
+
+	raw_spin_lock_init(&port->lock);
+
+	snprintf(name, sizeof(name), "port%d", port->idx);
+	res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+	if (!res)
+		res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);
+
+	port->base = devm_ioremap_resource(&platform->dev, res);
+	if (IS_ERR(port->base))
+		return PTR_ERR(port->base);
+
+	snprintf(name, sizeof(name), "phy%d", port->idx);
+	res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+	if (res) {
+		port->phy = devm_ioremap_resource(&platform->dev, res);
+		if (IS_ERR(port->phy))
+			return PTR_ERR(port->phy);
+	} else {
+		port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);
+	}
+
+	/* The link may already have been brought up by U-Boot; skip setup in that case */
+	link_stat = readl_relaxed(port->base + PORT_LINKSTS);
+	if (!(link_stat & PORT_LINKSTS_UP)) {
+		ret = apple_pcie_setup_link(pcie, port, np);
+		if (ret)
+			return ret;
+	}
+
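+	/* Enable refclk clock gating via the port register if present, else the lane PHY */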
+	if (pcie->hw->port_refclk)
+		rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
+	else
+		rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);
+
 	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
 
 	ret = apple_pcie_port_setup_irq(port);
@@ -571,7 +711,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
 		return ret;
 
 	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
-	for (i = 0; i < MAX_RID2SID; i++) {
+	for (i = 0; i < pcie->hw->max_rid2sid; i++) {
 		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
 			break;
 		apple_pcie_rid2sid_write(port, i, 0);
@@ -584,13 +724,27 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
 	list_add_tail(&port->entry, &pcie->ports);
 	init_completion(&pcie->event);
 
+	/* In the success path, we keep a reference to np around */
+	of_node_get(np);
+
 	ret = apple_pcie_port_register_irqs(port);
 	WARN_ON(ret);
 
-	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);
+	link_stat = readl_relaxed(port->base + PORT_LINKSTS);
+	if (!(link_stat & PORT_LINKSTS_UP)) {
+		unsigned long timeout, left;
+
+		/* Start link training */
+		writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);
 
-	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
-		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);
+		timeout = link_up_timeout * HZ / 1000;
+		left = wait_for_completion_timeout(&pcie->event, timeout);
+		if (!left)
+			dev_warn(pcie->dev, "%pOF link didn't come up\n", np);
+		else
+			dev_info(pcie->dev, "%pOF link up after %ldms\n", np,
+				 (timeout - left) * 1000 / HZ);
+	}
 
 	return 0;
 }
@@ -716,7 +870,7 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
 	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
 		u32 val;
 
-		val = readl_relaxed(port->base + PORT_RID2SID(idx));
+		val = readl_relaxed(port_rid2sid_addr(port, idx));
 		if ((val & 0xffff) == rid) {
 			apple_pcie_rid2sid_write(port, idx, 0);
 			bitmap_release_region(port->sid_map, idx, 0);
@@ -730,35 +884,15 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
 
 static int apple_pcie_init(struct pci_config_window *cfg)
 {
+	struct apple_pcie *pcie = cfg->priv;
 	struct device *dev = cfg->parent;
-	struct platform_device *platform = to_platform_device(dev);
 	struct device_node *of_port;
-	struct apple_pcie *pcie;
 	int ret;
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
-		return -ENOMEM;
-
-	pcie->dev = dev;
-
-	mutex_init(&pcie->lock);
-
-	pcie->base = devm_platform_ioremap_resource(platform, 1);
-	if (IS_ERR(pcie->base))
-		return PTR_ERR(pcie->base);
-
-	cfg->priv = pcie;
-	INIT_LIST_HEAD(&pcie->ports);
-
-	ret = apple_msi_init(pcie);
-	if (ret)
-		return ret;
-
-	for_each_child_of_node(dev->of_node, of_port) {
+	for_each_available_child_of_node(dev->of_node, of_port) {
 		ret = apple_pcie_setup_port(pcie, of_port);
 		if (ret) {
-			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+			dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
 			of_node_put(of_port);
 			return ret;
 		}
@@ -778,14 +912,78 @@ static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
 	}
 };
 
+static int apple_pcie_probe_port(struct device_node *np)
+{
+	struct gpio_desc *gd;
+
+	/* Check whether the GPIO pin exists but leave it as is */
+	gd = fwnode_gpiod_get_index(of_fwnode_handle(np), "reset", 0,
+				    GPIOD_ASIS, "PERST#");
+	if (IS_ERR(gd))
+		return PTR_ERR(gd);
+
+	gpiod_put(gd);
+
+	gd = fwnode_gpiod_get_index(of_fwnode_handle(np), "pwren", 0,
+				    GPIOD_ASIS, "PWREN");
+	if (IS_ERR(gd)) {
+		if (PTR_ERR(gd) != -ENOENT)
+			return PTR_ERR(gd);
+	} else {
+		gpiod_put(gd);
+	}
+
+	return 0;
+}
+
+static int apple_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *of_port;
+	struct apple_pcie *pcie;
+	int ret;
+
+	/* Check for probe dependencies for all ports first */
+	for_each_available_child_of_node(dev->of_node, of_port) {
+		ret = apple_pcie_probe_port(of_port);
+		if (ret) {
+			of_node_put(of_port);
+			return dev_err_probe(dev, ret, "Port %pOF probe fail\n", of_port);
+		}
+	}
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->dev = dev;
+	pcie->hw = of_device_get_match_data(dev);
+	if (!pcie->hw)
+		return -ENODEV;
+	pcie->base = devm_platform_ioremap_resource(pdev, 1);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	mutex_init(&pcie->lock);
+	INIT_LIST_HEAD(&pcie->ports);
+	dev_set_drvdata(dev, pcie);
+
+	ret = apple_msi_init(pcie);
+	if (ret)
+		return ret;
+
+	return pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops);
+}
+
 static const struct of_device_id apple_pcie_of_match[] = {
-	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+	{ .compatible = "apple,t6020-pcie",	.data = &t602x_hw },
+	{ .compatible = "apple,pcie",		.data = &t8103_hw },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
 
 static struct platform_driver apple_pcie_driver = {
-	.probe	= pci_host_common_probe,
+	.probe	= apple_pcie_probe,
 	.driver	= {
 		.name			= "pcie-apple",
 		.of_match_table		= apple_pcie_of_match,
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 260b7de2dbd578..2c5e6446e00eed 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -84,6 +84,8 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
 			goto err_exit_iomap;
 	}
 
+	cfg->priv = dev_get_drvdata(dev);
+
 	if (ops->init) {
 		err = ops->init(cfg);
 		if (err)
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 06fd317529fcba..6be703619a977e 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -12,6 +12,7 @@
 
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
 #include <linux/platform_device.h>
 
 #include <asm/apple_m1_pmu.h>
@@ -120,6 +121,8 @@ enum m1_pmu_events {
 	 */
 	M1_PMU_CFG_COUNT_USER					= BIT(8),
 	M1_PMU_CFG_COUNT_KERNEL					= BIT(9),
+	M1_PMU_CFG_COUNT_HOST					= BIT(10),
+	M1_PMU_CFG_COUNT_GUEST					= BIT(11),
 };
 
 /*
@@ -172,6 +175,17 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BRANCH_MISSES]		= M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
 };
 
+#define M1_PMUV3_EVENT_MAP(pmuv3_event, m1_event)				\
+	[ARMV8_PMUV3_PERFCTR_##pmuv3_event]	= M1_PMU_PERFCTR_##m1_event
+
+static const u16 m1_pmu_pmceid_map[ARMV8_PMUV3_MAX_COMMON_EVENTS] = {
+	[0 ... ARMV8_PMUV3_MAX_COMMON_EVENTS - 1]	= HW_OP_UNSUPPORTED,
+	M1_PMUV3_EVENT_MAP(INST_RETIRED,	INST_ALL),
+	M1_PMUV3_EVENT_MAP(CPU_CYCLES,		CORE_ACTIVE_CYCLE),
+	M1_PMUV3_EVENT_MAP(BR_RETIRED,		INST_BRANCH),
+	M1_PMUV3_EVENT_MAP(BR_MIS_PRED_RETIRED,	BRANCH_MISPRED_NONSPEC),
+};
+
 /* sysfs definitions */
 static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
 					struct device_attribute *attr,
@@ -327,11 +341,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
 	__m1_pmu_enable_counter_interrupt(index, false);
 }
 
-static void m1_pmu_configure_counter(unsigned int index, u8 event,
-				     bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+					    bool kernel, bool host)
 {
-	u64 val, user_bit, kernel_bit;
-	int shift;
+	u64 clear, set, user_bit, kernel_bit;
 
 	switch (index) {
 	case 0 ... 7:
@@ -346,19 +359,27 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
 		BUG();
 	}
 
-	val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+	clear = set = 0;
 	if (user)
-		val |= user_bit;
+		set |= user_bit;
 	else
-		val &= ~user_bit;
+		clear |= user_bit;
 
 	if (kernel)
-		val |= kernel_bit;
+		set |= kernel_bit;
 	else
-		val &= ~kernel_bit;
+		clear |= kernel_bit;
+
+	if (host)
+		sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+	else if (is_kernel_in_hyp_mode())
+		sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL12, clear, set);
+}
 
-	write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+	u64 clear = 0, set = 0;
+	int shift;
 
 	/*
 	 * Counters 0 and 1 have fixed events. For anything else,
@@ -371,21 +392,32 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
 		break;
 	case 2 ... 5:
 		shift = (index - 2) * 8;
-		val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
-		val &= ~((u64)0xff << shift);
-		val |= (u64)event << shift;
-		write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+		clear |= (u64)0xff << shift;
+		set |= (u64)event << shift;
+		sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
 		break;
 	case 6 ... 9:
 		shift = (index - 6) * 8;
-		val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
-		val &= ~((u64)0xff << shift);
-		val |= (u64)event << shift;
-		write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+		clear |= (u64)0xff << shift;
+		set |= (u64)event << shift;
+		sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
 		break;
 	}
 }
 
+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+	bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+	bool guest = config_base & M1_PMU_CFG_COUNT_GUEST;
+	bool host = config_base & M1_PMU_CFG_COUNT_HOST;
+	bool user = config_base & M1_PMU_CFG_COUNT_USER;
+	u8 evt = config_base & M1_PMU_CFG_EVENT;
+
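+	/* Program the EL filters for host and guest separately, then the event */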
+	__m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
+	__m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
+	__m1_pmu_configure_eventsel(index, evt);
+}
+
 /* arm_pmu backend */
 static void m1_pmu_enable_event(struct perf_event *event)
 {
@@ -400,7 +432,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
 	m1_pmu_disable_counter(event->hw.idx);
 	isb();
 
-	m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+	m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
 	m1_pmu_enable_counter(event->hw.idx);
 	m1_pmu_enable_counter_interrupt(event->hw.idx);
 	isb();
@@ -538,6 +570,26 @@ static int m2_pmu_map_event(struct perf_event *event)
 	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
 }
 
+static int m1_pmu_map_pmuv3_event(unsigned int eventsel)
+{
+	u16 m1_event = HW_OP_UNSUPPORTED;
+
+	if (eventsel < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+		m1_event = m1_pmu_pmceid_map[eventsel];
+
+	return m1_event == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : m1_event;
+}
+
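+/* Advertise the mappable PMUv3 common events via the PMCEID bitmap */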
+static void m1_pmu_init_pmceid(struct arm_pmu *pmu)
+{
+	unsigned int event;
+
+	for (event = 0; event < ARMV8_PMUV3_MAX_COMMON_EVENTS; event++) {
+		if (m1_pmu_map_pmuv3_event(event) >= 0)
+			set_bit(event, pmu->pmceid_bitmap);
+	}
+}
+
 static void m1_pmu_reset(void *info)
 {
 	int i;
@@ -558,7 +610,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
 {
 	unsigned long config_base = 0;
 
-	if (!attr->exclude_guest) {
+	if (!attr->exclude_guest && !is_kernel_in_hyp_mode()) {
 		pr_debug("ARM performance counters do not support mode exclusion\n");
 		return -EOPNOTSUPP;
 	}
@@ -566,6 +618,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
 		config_base |= M1_PMU_CFG_COUNT_KERNEL;
 	if (!attr->exclude_user)
 		config_base |= M1_PMU_CFG_COUNT_USER;
+	if (!attr->exclude_host)
+		config_base |= M1_PMU_CFG_COUNT_HOST;
+	if (!attr->exclude_guest)
+		config_base |= M1_PMU_CFG_COUNT_GUEST;
 
 	event->config_base = config_base;
 
@@ -594,6 +650,9 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
 	cpu_pmu->reset		  = m1_pmu_reset;
 	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
 
+	cpu_pmu->map_pmuv3_event  = m1_pmu_map_pmuv3_event;
+	m1_pmu_init_pmceid(cpu_pmu);
+
 	bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 8d58efe998ec5f..d93d50a114ebf6 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -95,6 +95,7 @@ config PHY_NXP_PTN3222
 
 source "drivers/phy/allwinner/Kconfig"
 source "drivers/phy/amlogic/Kconfig"
+source "drivers/phy/apple/Kconfig"
 source "drivers/phy/broadcom/Kconfig"
 source "drivers/phy/cadence/Kconfig"
 source "drivers/phy/freescale/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index e281442acc7528..be7e3b40bd7abc 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PHY_AIROHA_PCIE)		+= phy-airoha-pcie.o
 obj-$(CONFIG_PHY_NXP_PTN3222)		+= phy-nxp-ptn3222.o
 obj-y					+= allwinner/	\
 					   amlogic/	\
+					   apple/	\
 					   broadcom/	\
 					   cadence/	\
 					   freescale/	\
diff --git a/drivers/phy/apple/Kconfig b/drivers/phy/apple/Kconfig
new file mode 100644
index 00000000000000..66f251e6eda70e
--- /dev/null
+++ b/drivers/phy/apple/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+config PHY_APPLE_ATC
+	tristate "Apple Type-C PHY"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on USB_SUPPORT
+	depends on TYPEC
+	default ARCH_APPLE
+	select GENERIC_PHY
+	help
+	  Enable this to add support for the Apple Type-C PHY, switch
+	  and mux found in Apple SoCs such as the M1.
+	  This driver currently provides support for USB2 and USB3.
+
+config PHY_APPLE_DPTX
+	tristate "Apple DPTX PHY"
+	depends on ARCH_APPLE || COMPILE_TEST
+	default ARCH_APPLE
+	select GENERIC_PHY
+	help
+	  Enable this to add support for the Apple DPTX PHY found on Apple SoCs
+	  such as the M2.
+	  This driver provides support for DisplayPort and is used on the
+	  Mac mini (M2, 2023).
diff --git a/drivers/phy/apple/Makefile b/drivers/phy/apple/Makefile
new file mode 100644
index 00000000000000..f8900fef11610b
--- /dev/null
+++ b/drivers/phy/apple/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+CFLAGS_trace.o			:= -I$(src)
+
+obj-$(CONFIG_PHY_APPLE_ATC)		+= phy-apple-atc.o
+phy-apple-atc-y			:= atc.o
+phy-apple-atc-$(CONFIG_TRACING)	+= trace.o
+
+obj-$(CONFIG_PHY_APPLE_DPTX)	+= phy-apple-dptx.o
+phy-apple-dptx-y		+= dptx.o
diff --git a/drivers/phy/apple/atc.c b/drivers/phy/apple/atc.c
new file mode 100644
index 00000000000000..f1f633e023bc83
--- /dev/null
+++ b/drivers/phy/apple/atc.c
@@ -0,0 +1,2510 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Apple Type-C PHY driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ * Author: Sven Peter <sven@svenpeter.dev>
+ */
+
+#include "atc.h"
+#include "trace.h"
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/types.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_tbt.h>
+
+#define rcdev_to_apple_atcphy(_rcdev) \
+	container_of(_rcdev, struct apple_atcphy, rcdev)
+
+#define AUSPLL_APB_CMD_OVERRIDE 0x2000
+#define AUSPLL_APB_CMD_OVERRIDE_REQ BIT(0)
+#define AUSPLL_APB_CMD_OVERRIDE_ACK BIT(1)
+#define AUSPLL_APB_CMD_OVERRIDE_UNK28 BIT(28)
+#define AUSPLL_APB_CMD_OVERRIDE_CMD GENMASK(27, 3)
+
+#define AUSPLL_FREQ_DESC_A 0x2080
+#define AUSPLL_FD_FREQ_COUNT_TARGET GENMASK(9, 0)
+#define AUSPLL_FD_FBDIVN_HALF BIT(10)
+#define AUSPLL_FD_REV_DIVN GENMASK(13, 11)
+#define AUSPLL_FD_KI_MAN GENMASK(17, 14)
+#define AUSPLL_FD_KI_EXP GENMASK(21, 18)
+#define AUSPLL_FD_KP_MAN GENMASK(25, 22)
+#define AUSPLL_FD_KP_EXP GENMASK(29, 26)
+#define AUSPLL_FD_KPKI_SCALE_HBW GENMASK(31, 30)
+
+#define AUSPLL_FREQ_DESC_B 0x2084
+#define AUSPLL_FD_FBDIVN_FRAC_DEN GENMASK(13, 0)
+#define AUSPLL_FD_FBDIVN_FRAC_NUM GENMASK(27, 14)
+
+#define AUSPLL_FREQ_DESC_C 0x2088
+#define AUSPLL_FD_SDM_SSC_STEP GENMASK(7, 0)
+#define AUSPLL_FD_SDM_SSC_EN BIT(8)
+#define AUSPLL_FD_PCLK_DIV_SEL GENMASK(13, 9)
+#define AUSPLL_FD_LFSDM_DIV GENMASK(15, 14)
+#define AUSPLL_FD_LFCLK_CTRL GENMASK(19, 16)
+#define AUSPLL_FD_VCLK_OP_DIVN GENMASK(21, 20)
+#define AUSPLL_FD_VCLK_PRE_DIVN BIT(22)
+
+#define AUSPLL_DCO_EFUSE_SPARE 0x222c
+#define AUSPLL_RODCO_ENCAP_EFUSE GENMASK(10, 9)
+#define AUSPLL_RODCO_BIAS_ADJUST_EFUSE GENMASK(14, 12)
+
+#define AUSPLL_FRACN_CAN 0x22a4
+#define AUSPLL_DLL_START_CAPCODE GENMASK(18, 17)
+
+#define AUSPLL_CLKOUT_MASTER 0x2200
+#define AUSPLL_CLKOUT_MASTER_PCLK_DRVR_EN BIT(2)
+#define AUSPLL_CLKOUT_MASTER_PCLK2_DRVR_EN BIT(4)
+#define AUSPLL_CLKOUT_MASTER_REFBUFCLK_DRVR_EN BIT(6)
+
+#define AUSPLL_CLKOUT_DIV 0x2208
+#define AUSPLL_CLKOUT_PLLA_REFBUFCLK_DI GENMASK(20, 16)
+
+#define AUSPLL_BGR 0x2214
+#define AUSPLL_BGR_CTRL_AVAIL BIT(0)
+
+#define AUSPLL_CLKOUT_DTC_VREG 0x2220
+#define AUSPLL_DTC_VREG_ADJUST GENMASK(16, 14)
+#define AUSPLL_DTC_VREG_BYPASS BIT(7)
+
+#define AUSPLL_FREQ_CFG 0x2224
+#define AUSPLL_FREQ_REFCLK GENMASK(1, 0)
+
+#define AUS_COMMON_SHIM_BLK_VREG 0x0a04
+#define AUS_VREG_TRIM GENMASK(6, 2)
+
+#define CIO3PLL_CLK_CTRL 0x2a00
+#define CIO3PLL_CLK_PCLK_EN BIT(1)
+#define CIO3PLL_CLK_REFCLK_EN BIT(5)
+
+#define CIO3PLL_DCO_NCTRL 0x2a38
+#define CIO3PLL_DCO_COARSEBIN_EFUSE0 GENMASK(6, 0)
+#define CIO3PLL_DCO_COARSEBIN_EFUSE1 GENMASK(23, 17)
+
+#define CIO3PLL_FRACN_CAN 0x2aa4
+#define CIO3PLL_DLL_CAL_START_CAPCODE GENMASK(18, 17)
+
+#define CIO3PLL_DTC_VREG 0x2a20
+#define CIO3PLL_DTC_VREG_ADJUST GENMASK(16, 14)
+
+#define ACIOPHY_CROSSBAR 0x4c
+#define ACIOPHY_CROSSBAR_PROTOCOL GENMASK(4, 0)
+#define ACIOPHY_CROSSBAR_PROTOCOL_USB4 0x0
+#define ACIOPHY_CROSSBAR_PROTOCOL_USB4_SWAPPED 0x1
+#define ACIOPHY_CROSSBAR_PROTOCOL_USB3 0xa
+#define ACIOPHY_CROSSBAR_PROTOCOL_USB3_SWAPPED 0xb
+#define ACIOPHY_CROSSBAR_PROTOCOL_USB3_DP 0x10
+#define ACIOPHY_CROSSBAR_PROTOCOL_USB3_DP_SWAPPED 0x11
+#define ACIOPHY_CROSSBAR_PROTOCOL_DP 0x14
+#define ACIOPHY_CROSSBAR_DP_SINGLE_PMA GENMASK(16, 5)
+#define ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE 0x0000
+#define ACIOPHY_CROSSBAR_DP_SINGLE_PMA_UNK100 0x100
+#define ACIOPHY_CROSSBAR_DP_SINGLE_PMA_UNK008 0x008
+#define ACIOPHY_CROSSBAR_DP_BOTH_PMA BIT(17)
+
+#define ACIOPHY_LANE_MODE 0x48
+#define ACIOPHY_LANE_MODE_RX0 GENMASK(2, 0)
+#define ACIOPHY_LANE_MODE_TX0 GENMASK(5, 3)
+#define ACIOPHY_LANE_MODE_RX1 GENMASK(8, 6)
+#define ACIOPHY_LANE_MODE_TX1 GENMASK(11, 9)
+#define ACIOPHY_LANE_MODE_USB4 0
+#define ACIOPHY_LANE_MODE_USB3 1
+#define ACIOPHY_LANE_MODE_DP 2
+#define ACIOPHY_LANE_MODE_OFF 3
+
+#define ACIOPHY_TOP_BIST_CIOPHY_CFG1 0x84
+#define ACIOPHY_TOP_BIST_CIOPHY_CFG1_CLK_EN BIT(27)
+#define ACIOPHY_TOP_BIST_CIOPHY_CFG1_BIST_EN BIT(28)
+
+#define ACIOPHY_TOP_BIST_OV_CFG 0x8c
+#define ACIOPHY_TOP_BIST_OV_CFG_LN0_RESET_N_OV BIT(13)
+#define ACIOPHY_TOP_BIST_OV_CFG_LN0_PWR_DOWN_OV BIT(25)
+
+#define ACIOPHY_TOP_BIST_READ_CTRL 0x90
+#define ACIOPHY_TOP_BIST_READ_CTRL_LN0_PHY_STATUS_RE BIT(2)
+
+#define ACIOPHY_TOP_PHY_STAT 0x9c
+#define ACIOPHY_TOP_PHY_STAT_LN0_UNK0 BIT(0)
+#define ACIOPHY_TOP_PHY_STAT_LN0_UNK23 BIT(23)
+
+#define ACIOPHY_TOP_BIST_PHY_CFG0 0xa8
+#define ACIOPHY_TOP_BIST_PHY_CFG0_LN0_RESET_N BIT(0)
+
+#define ACIOPHY_TOP_BIST_PHY_CFG1 0xac
+#define ACIOPHY_TOP_BIST_PHY_CFG1_LN0_PWR_DOWN GENMASK(13, 10)
+
+#define ACIOPHY_PLL_COMMON_CTRL 0x1028
+#define ACIOPHY_PLL_WAIT_FOR_CMN_READY_BEFORE_RESET_EXIT BIT(24)
+
+#define ATCPHY_POWER_CTRL 0x20000
+#define ATCPHY_POWER_STAT 0x20004
+#define ATCPHY_POWER_SLEEP_SMALL BIT(0)
+#define ATCPHY_POWER_SLEEP_BIG BIT(1)
+#define ATCPHY_POWER_CLAMP_EN BIT(2)
+#define ATCPHY_POWER_APB_RESET_N BIT(3)
+#define ATCPHY_POWER_PHY_RESET_N BIT(4)
+
+#define ATCPHY_MISC 0x20008
+#define ATCPHY_MISC_RESET_N BIT(0)
+#define ATCPHY_MISC_LANE_SWAP BIT(2)
+
+#define ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0 0x7000
+#define DP_PMA_BYTECLK_RESET BIT(0)
+#define DP_MAC_DIV20_CLK_SEL BIT(1)
+#define DPTXPHY_PMA_LANE_RESET_N BIT(2)
+#define DPTXPHY_PMA_LANE_RESET_N_OV BIT(3)
+#define DPTX_PCLK1_SELECT GENMASK(6, 4)
+#define DPTX_PCLK2_SELECT GENMASK(9, 7)
+#define DPRX_PCLK_SELECT GENMASK(12, 10)
+#define DPTX_PCLK1_ENABLE BIT(13)
+#define DPTX_PCLK2_ENABLE BIT(14)
+#define DPRX_PCLK_ENABLE BIT(15)
+
+#define ACIOPHY_DP_PCLK_STAT 0x7044
+#define ACIOPHY_AUSPLL_LOCK BIT(3)
+
+#define LN0_AUSPMA_RX_TOP 0x9000
+#define LN0_AUSPMA_RX_EQ 0xA000
+#define LN0_AUSPMA_RX_SHM 0xB000
+#define LN0_AUSPMA_TX_TOP 0xC000
+#define LN0_AUSPMA_TX_SHM 0xD000
+
+#define LN1_AUSPMA_RX_TOP 0x10000
+#define LN1_AUSPMA_RX_EQ 0x11000
+#define LN1_AUSPMA_RX_SHM 0x12000
+#define LN1_AUSPMA_TX_TOP 0x13000
+#define LN1_AUSPMA_TX_SHM 0x14000
+
+#define LN_AUSPMA_RX_TOP_PMAFSM 0x0010
+#define LN_AUSPMA_RX_TOP_PMAFSM_PCS_OV BIT(0)
+#define LN_AUSPMA_RX_TOP_PMAFSM_PCS_REQ BIT(9)
+
+#define LN_AUSPMA_RX_TOP_TJ_CFG_RX_TXMODE 0x00F0
+#define LN_RX_TXMODE BIT(0)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_CTLE_CTRL0 0x00
+#define LN_TX_CLK_EN BIT(20)
+#define LN_TX_CLK_EN_OV BIT(21)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_AFE_CTRL1 0x04
+#define LN_RX_DIV20_RESET_N_OV BIT(29)
+#define LN_RX_DIV20_RESET_N BIT(30)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL2 0x08
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL3 0x0C
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL4 0x10
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL5 0x14
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL6 0x18
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL7 0x1C
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL8 0x20
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL9 0x24
+#define LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL10 0x28
+#define LN_DTVREG_ADJUST GENMASK(31, 27)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL11 0x2C
+#define LN_DTVREG_BIG_EN BIT(23)
+#define LN_DTVREG_BIG_EN_OV BIT(24)
+#define LN_DTVREG_SML_EN BIT(25)
+#define LN_DTVREG_SML_EN_OV BIT(26)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12 0x30
+#define LN_TX_BYTECLK_RESET_SYNC_CLR BIT(22)
+#define LN_TX_BYTECLK_RESET_SYNC_CLR_OV BIT(23)
+#define LN_TX_BYTECLK_RESET_SYNC_EN BIT(24)
+#define LN_TX_BYTECLK_RESET_SYNC_EN_OV BIT(25)
+#define LN_TX_HRCLK_SEL BIT(28)
+#define LN_TX_HRCLK_SEL_OV BIT(29)
+#define LN_TX_PBIAS_EN BIT(30)
+#define LN_TX_PBIAS_EN_OV BIT(31)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13 0x34
+#define LN_TX_PRE_EN BIT(0)
+#define LN_TX_PRE_EN_OV BIT(1)
+#define LN_TX_PST1_EN BIT(2)
+#define LN_TX_PST1_EN_OV BIT(3)
+#define LN_DTVREG_ADJUST_OV BIT(15)
+
+#define LN_AUSPMA_RX_SHM_TJ_UNK_CTRL14A 0x38
+#define LN_AUSPMA_RX_SHM_TJ_UNK_CTRL14B 0x3C
+#define LN_AUSPMA_RX_SHM_TJ_UNK_CTRL15A 0x40
+#define LN_AUSPMA_RX_SHM_TJ_UNK_CTRL15B 0x44
+#define LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16 0x48
+#define LN_RXTERM_EN BIT(21)
+#define LN_RXTERM_EN_OV BIT(22)
+#define LN_RXTERM_PULLUP_LEAK_EN BIT(23)
+#define LN_RXTERM_PULLUP_LEAK_EN_OV BIT(24)
+#define LN_TX_CAL_CODE GENMASK(29, 25)
+#define LN_TX_CAL_CODE_OV BIT(30)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17 0x4C
+#define LN_TX_MARGIN GENMASK(19, 15)
+#define LN_TX_MARGIN_OV BIT(20)
+#define LN_TX_MARGIN_LSB BIT(21)
+#define LN_TX_MARGIN_LSB_OV BIT(22)
+#define LN_TX_MARGIN_P1 GENMASK(26, 23)
+#define LN_TX_MARGIN_P1_OV BIT(27)
+#define LN_TX_MARGIN_P1_LSB GENMASK(29, 28)
+#define LN_TX_MARGIN_P1_LSB_OV BIT(30)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18 0x50
+#define LN_TX_P1_CODE GENMASK(3, 0)
+#define LN_TX_P1_CODE_OV BIT(4)
+#define LN_TX_P1_LSB_CODE GENMASK(6, 5)
+#define LN_TX_P1_LSB_CODE_OV BIT(7)
+#define LN_TX_MARGIN_PRE GENMASK(10, 8)
+#define LN_TX_MARGIN_PRE_OV BIT(11)
+#define LN_TX_MARGIN_PRE_LSB GENMASK(13, 12)
+#define LN_TX_MARGIN_PRE_LSB_OV BIT(14)
+#define LN_TX_PRE_LSB_CODE GENMASK(16, 15)
+#define LN_TX_PRE_LSB_CODE_OV BIT(17)
+#define LN_TX_PRE_CODE GENMASK(21, 18)
+#define LN_TX_PRE_CODE_OV BIT(22)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19 0x54
+#define LN_TX_TEST_EN BIT(21)
+#define LN_TX_TEST_EN_OV BIT(22)
+#define LN_TX_EN BIT(23)
+#define LN_TX_EN_OV BIT(24)
+#define LN_TX_CLK_DLY_CTRL_TAPGEN GENMASK(27, 25)
+#define LN_TX_CLK_DIV2_EN BIT(28)
+#define LN_TX_CLK_DIV2_EN_OV BIT(29)
+#define LN_TX_CLK_DIV2_RST BIT(30)
+#define LN_TX_CLK_DIV2_RST_OV BIT(31)
+
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL20 0x58
+#define LN_AUSPMA_RX_SHM_TJ_RXA_UNK_CTRL21 0x5C
+#define LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22 0x60
+#define LN_VREF_ADJUST_GRAY GENMASK(11, 7)
+#define LN_VREF_ADJUST_GRAY_OV BIT(12)
+#define LN_VREF_BIAS_SEL GENMASK(14, 13)
+#define LN_VREF_BIAS_SEL_OV BIT(15)
+#define LN_VREF_BOOST_EN BIT(16)
+#define LN_VREF_BOOST_EN_OV BIT(17)
+#define LN_VREF_EN BIT(18)
+#define LN_VREF_EN_OV BIT(19)
+#define LN_VREF_LPBKIN_DATA GENMASK(29, 28)
+#define LN_VREF_TEST_RXLPBKDT_EN BIT(30)
+#define LN_VREF_TEST_RXLPBKDT_EN_OV BIT(31)
+
+#define LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG0 0x00
+#define LN_BYTECLK_RESET_SYNC_EN_OV BIT(2)
+#define LN_BYTECLK_RESET_SYNC_EN BIT(3)
+#define LN_BYTECLK_RESET_SYNC_CLR_OV BIT(4)
+#define LN_BYTECLK_RESET_SYNC_CLR BIT(5)
+#define LN_BYTECLK_RESET_SYNC_SEL_OV BIT(6)
+
+#define LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1 0x04
+#define LN_TXA_DIV2_EN_OV BIT(8)
+#define LN_TXA_DIV2_EN BIT(9)
+#define LN_TXA_DIV2_RESET_OV BIT(10)
+#define LN_TXA_DIV2_RESET BIT(11)
+#define LN_TXA_CLK_EN_OV BIT(22)
+#define LN_TXA_CLK_EN BIT(23)
+
+#define LN_AUSPMA_TX_SHM_TXA_IMP_REG0 0x08
+#define LN_TXA_CAL_CTRL_OV BIT(0)
+#define LN_TXA_CAL_CTRL GENMASK(18, 1)
+#define LN_TXA_CAL_CTRL_BASE_OV BIT(19)
+#define LN_TXA_CAL_CTRL_BASE GENMASK(23, 20)
+#define LN_TXA_HIZ_OV BIT(29)
+#define LN_TXA_HIZ BIT(30)
+
+#define LN_AUSPMA_TX_SHM_TXA_IMP_REG1 0x0C
+#define LN_AUSPMA_TX_SHM_TXA_IMP_REG2 0x10
+#define LN_TXA_MARGIN_OV BIT(0)
+#define LN_TXA_MARGIN GENMASK(18, 1)
+#define LN_TXA_MARGIN_2R_OV BIT(19)
+#define LN_TXA_MARGIN_2R BIT(20)
+
+#define LN_AUSPMA_TX_SHM_TXA_IMP_REG3 0x14
+#define LN_TXA_MARGIN_POST_OV BIT(0)
+#define LN_TXA_MARGIN_POST GENMASK(10, 1)
+#define LN_TXA_MARGIN_POST_2R_OV BIT(11)
+#define LN_TXA_MARGIN_POST_2R BIT(12)
+#define LN_TXA_MARGIN_POST_4R_OV BIT(13)
+#define LN_TXA_MARGIN_POST_4R BIT(14)
+#define LN_TXA_MARGIN_PRE_OV BIT(15)
+#define LN_TXA_MARGIN_PRE GENMASK(21, 16)
+#define LN_TXA_MARGIN_PRE_2R_OV BIT(22)
+#define LN_TXA_MARGIN_PRE_2R BIT(23)
+#define LN_TXA_MARGIN_PRE_4R_OV BIT(24)
+#define LN_TXA_MARGIN_PRE_4R BIT(25)
+
+#define LN_AUSPMA_TX_SHM_TXA_UNK_REG0 0x18
+#define LN_AUSPMA_TX_SHM_TXA_UNK_REG1 0x1C
+#define LN_AUSPMA_TX_SHM_TXA_UNK_REG2 0x20
+
+#define LN_AUSPMA_TX_SHM_TXA_LDOCLK 0x24
+#define LN_LDOCLK_BYPASS_SML_OV BIT(8)
+#define LN_LDOCLK_BYPASS_SML BIT(9)
+#define LN_LDOCLK_BYPASS_BIG_OV BIT(10)
+#define LN_LDOCLK_BYPASS_BIG BIT(11)
+#define LN_LDOCLK_EN_SML_OV BIT(12)
+#define LN_LDOCLK_EN_SML BIT(13)
+#define LN_LDOCLK_EN_BIG_OV BIT(14)
+#define LN_LDOCLK_EN_BIG BIT(15)
+
+/* LPDPTX registers */
+#define LPDPTX_AUX_CFG_BLK_AUX_CTRL 0x0000
+#define LPDPTX_BLK_AUX_CTRL_PWRDN BIT(4)
+#define LPDPTX_BLK_AUX_RXOFFSET GENMASK(25, 22)
+
+#define LPDPTX_AUX_CFG_BLK_AUX_LDO_CTRL 0x0008
+
+#define LPDPTX_AUX_CFG_BLK_AUX_MARGIN 0x000c
+#define LPDPTX_MARGIN_RCAL_RXOFFSET_EN BIT(5)
+#define LPDPTX_AUX_MARGIN_RCAL_TXSWING GENMASK(10, 6)
+
+#define LPDPTX_AUX_SHM_CFG_BLK_AUX_CTRL_REG0 0x0204
+#define LPDPTX_CFG_PMA_AUX_SEL_LF_DATA BIT(15)
+
+#define LPDPTX_AUX_SHM_CFG_BLK_AUX_CTRL_REG1 0x0208
+#define LPDPTX_CFG_PMA_PHYS_ADJ GENMASK(22, 20)
+#define LPDPTX_CFG_PMA_PHYS_ADJ_OV BIT(19)
+
+#define LPDPTX_AUX_CONTROL 0x4000
+#define LPDPTX_AUX_PWN_DOWN 0x10
+#define LPDPTX_AUX_CLAMP_EN 0x04
+#define LPDPTX_SLEEP_B_BIG_IN 0x02
+#define LPDPTX_SLEEP_B_SML_IN 0x01
+#define LPDPTX_TXTERM_CODEMSB 0x400
+#define LPDPTX_TXTERM_CODE GENMASK(9, 5)
+
+/* pipehandler registers */
+#define PIPEHANDLER_OVERRIDE 0x00
+#define PIPEHANDLER_OVERRIDE_RXVALID BIT(0)
+#define PIPEHANDLER_OVERRIDE_RXDETECT BIT(2)
+
+#define PIPEHANDLER_OVERRIDE_VALUES 0x04
+
+#define PIPEHANDLER_MUX_CTRL 0x0c
+#define PIPEHANDLER_MUX_MODE GENMASK(1, 0)
+#define PIPEHANDLER_MUX_MODE_USB3PHY 0
+#define PIPEHANDLER_MUX_MODE_DUMMY_PHY 2
+#define PIPEHANDLER_CLK_SELECT GENMASK(5, 3)
+#define PIPEHANDLER_CLK_USB3PHY 1
+#define PIPEHANDLER_CLK_DUMMY_PHY 4
+#define PIPEHANDLER_LOCK_REQ 0x10
+#define PIPEHANDLER_LOCK_ACK 0x14
+#define PIPEHANDLER_LOCK_EN BIT(0)
+
+#define PIPEHANDLER_AON_GEN 0x1C
+#define PIPEHANDLER_AON_GEN_DWC3_FORCE_CLAMP_EN BIT(4)
+#define PIPEHANDLER_AON_GEN_DWC3_RESET_N BIT(0)
+
+#define PIPEHANDLER_NONSELECTED_OVERRIDE 0x20
+#define PIPEHANDLER_NONSELECTED_NATIVE_RESET BIT(12)
+#define PIPEHANDLER_DUMMY_PHY_EN BIT(15)
+#define PIPEHANDLER_NONSELECTED_NATIVE_POWER_DOWN GENMASK(3, 0)
+
+/* USB2 PHY regs */
+#define USB2PHY_USBCTL 0x00
+#define USB2PHY_USBCTL_HOST_EN BIT(1)
+
+#define USB2PHY_CTL 0x04
+#define USB2PHY_CTL_RESET BIT(0)
+#define USB2PHY_CTL_PORT_RESET BIT(1)
+#define USB2PHY_CTL_APB_RESET_N BIT(2)
+#define USB2PHY_CTL_SIDDQ BIT(3)
+
+#define USB2PHY_SIG 0x08
+#define USB2PHY_SIG_VBUSDET_FORCE_VAL BIT(0)
+#define USB2PHY_SIG_VBUSDET_FORCE_EN BIT(1)
+#define USB2PHY_SIG_VBUSVLDEXT_FORCE_VAL BIT(2)
+#define USB2PHY_SIG_VBUSVLDEXT_FORCE_EN BIT(3)
+#define USB2PHY_SIG_HOST (7 << 12)
+
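+/*
+ * Crossbar, lane and pipehandler configuration for each PHY mode, with
+ * separate settings for the normal and lane-swapped cable orientations.
+ */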
+static const struct {
+	const struct atcphy_mode_configuration normal;
+	const struct atcphy_mode_configuration swapped;
+	bool enable_dp_aux;
+	enum atcphy_pipehandler_state pipehandler_state;
+} atcphy_modes[] = {
+	[APPLE_ATCPHY_MODE_OFF] = {
+		.normal = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_OFF, ACIOPHY_LANE_MODE_OFF},
+			.dp_lane = {false, false},
+			.set_swap = false,
+		},
+		.swapped = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3_SWAPPED,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_OFF, ACIOPHY_LANE_MODE_OFF},
+			.dp_lane = {false, false},
+			.set_swap = false, /* doesn't matter since the SS lanes are off */
+		},
+		.enable_dp_aux = false,
+		.pipehandler_state = ATCPHY_PIPEHANDLER_STATE_USB2,
+	},
+	[APPLE_ATCPHY_MODE_USB2] = {
+		.normal = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_OFF, ACIOPHY_LANE_MODE_OFF},
+			.dp_lane = {false, false},
+			.set_swap = false,
+		},
+		.swapped = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3_SWAPPED,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_OFF, ACIOPHY_LANE_MODE_OFF},
+			.dp_lane = {false, false},
+			.set_swap = false, /* doesn't matter since the SS lanes are off */
+		},
+		.enable_dp_aux = false,
+		.pipehandler_state = ATCPHY_PIPEHANDLER_STATE_USB2,
+	},
+	[APPLE_ATCPHY_MODE_USB3] = {
+		.normal = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_USB3, ACIOPHY_LANE_MODE_OFF},
+			.dp_lane = {false, false},
+			.set_swap = false,
+		},
+		.swapped = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3_SWAPPED,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_OFF, ACIOPHY_LANE_MODE_USB3},
+			.dp_lane = {false, false},
+			.set_swap = true,
+		},
+		.enable_dp_aux = false,
+		.pipehandler_state = ATCPHY_PIPEHANDLER_STATE_USB3,
+	},
+	[APPLE_ATCPHY_MODE_USB3_DP] = {
+		.normal = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3_DP,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_UNK008,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_USB3, ACIOPHY_LANE_MODE_DP},
+			.dp_lane = {false, true},
+			.set_swap = false,
+		},
+		.swapped = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB3_DP_SWAPPED,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_UNK008,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_DP, ACIOPHY_LANE_MODE_USB3},
+			.dp_lane = {true, false},
+			.set_swap = true,
+		},
+		.enable_dp_aux = true,
+		.pipehandler_state = ATCPHY_PIPEHANDLER_STATE_USB3,
+	},
+	[APPLE_ATCPHY_MODE_USB4] = {
+		.normal = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB4,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_USB4, ACIOPHY_LANE_MODE_USB4},
+			.dp_lane = {false, false},
+			.set_swap = false,
+		},
+		.swapped = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_USB4_SWAPPED,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_NONE,
+			.crossbar_dp_both_pma = false,
+			.lane_mode = {ACIOPHY_LANE_MODE_USB4, ACIOPHY_LANE_MODE_USB4},
+			.dp_lane = {false, false},
+			.set_swap = false, /* intentionally false */
+		},
+		.enable_dp_aux = false,
+		.pipehandler_state = ATCPHY_PIPEHANDLER_STATE_USB2,
+	},
+	[APPLE_ATCPHY_MODE_DP] = {
+		.normal = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_DP,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_UNK100,
+			.crossbar_dp_both_pma = true,
+			.lane_mode = {ACIOPHY_LANE_MODE_DP, ACIOPHY_LANE_MODE_DP},
+			.dp_lane = {true, true},
+			.set_swap = false,
+		},
+		.swapped = {
+			.crossbar = ACIOPHY_CROSSBAR_PROTOCOL_DP,
+			.crossbar_dp_single_pma = ACIOPHY_CROSSBAR_DP_SINGLE_PMA_UNK008,
+			.crossbar_dp_both_pma = false, /* intentionally false */
+			.lane_mode = {ACIOPHY_LANE_MODE_DP, ACIOPHY_LANE_MODE_DP},
+			.dp_lane = {true, true},
+			.set_swap = false, /* intentionally false */
+		},
+		.enable_dp_aux = true,
+		.pipehandler_state = ATCPHY_PIPEHANDLER_STATE_USB2,
+	},
+};
+
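+/* AUSPLL divider and clock settings for each supported DP link rate */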
+static const struct atcphy_dp_link_rate_configuration dp_lr_config[] = {
+	[ATCPHY_DP_LINK_RATE_RBR] = {
+		.freqinit_count_target = 0x21c,
+		.fbdivn_frac_den = 0x0,
+		.fbdivn_frac_num = 0x0,
+		.pclk_div_sel = 0x13,
+		.lfclk_ctrl = 0x5,
+		.vclk_op_divn = 0x2,
+		.plla_clkout_vreg_bypass = true,
+		.bypass_txa_ldoclk = true,
+		.txa_div2_en = true,
+	},
+	[ATCPHY_DP_LINK_RATE_HBR] = {
+		.freqinit_count_target = 0x1c2,
+		.fbdivn_frac_den = 0x3ffe,
+		.fbdivn_frac_num = 0x1fff,
+		.pclk_div_sel = 0x9,
+		.lfclk_ctrl = 0x5,
+		.vclk_op_divn = 0x2,
+		.plla_clkout_vreg_bypass = true,
+		.bypass_txa_ldoclk = true,
+		.txa_div2_en = false,
+	},
+	[ATCPHY_DP_LINK_RATE_HBR2] = {
+		.freqinit_count_target = 0x1c2,
+		.fbdivn_frac_den = 0x3ffe,
+		.fbdivn_frac_num = 0x1fff,
+		.pclk_div_sel = 0x4,
+		.lfclk_ctrl = 0x5,
+		.vclk_op_divn = 0x0,
+		.plla_clkout_vreg_bypass = true,
+		.bypass_txa_ldoclk = true,
+		.txa_div2_en = false,
+	},
+	[ATCPHY_DP_LINK_RATE_HBR3] = {
+		.freqinit_count_target = 0x2a3,
+		.fbdivn_frac_den = 0x3ffc,
+		.fbdivn_frac_num = 0x2ffd,
+		.pclk_div_sel = 0x4,
+		.lfclk_ctrl = 0x6,
+		.vclk_op_divn = 0x0,
+		.plla_clkout_vreg_bypass = false,
+		.bypass_txa_ldoclk = false,
+		.txa_div2_en = false,
+	},
+};
+
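+/*
+ * Read-modify-write helpers: mask32() reads a register, clears the bits in
+ * @mask, sets the bits in @set and writes the result back. The core_*()
+ * variants take an offset into the core register block.
+ */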
+static inline void mask32(void __iomem *reg, u32 mask, u32 set)
+{
+	u32 value = readl(reg);
+	value &= ~mask;
+	value |= set;
+	writel(value, reg);
+}
+
+static inline void core_mask32(struct apple_atcphy *atcphy, u32 reg, u32 mask,
+			       u32 set)
+{
+	mask32(atcphy->regs.core + reg, mask, set);
+}
+
+static inline void set32(void __iomem *reg, u32 set)
+{
+	mask32(reg, 0, set);
+}
+
+static inline void core_set32(struct apple_atcphy *atcphy, u32 reg, u32 set)
+{
+	core_mask32(atcphy, reg, 0, set);
+}
+
+static inline void clear32(void __iomem *reg, u32 clear)
+{
+	mask32(reg, clear, 0);
+}
+
+static inline void core_clear32(struct apple_atcphy *atcphy, u32 reg, u32 clear)
+{
+	core_mask32(atcphy, reg, clear, 0);
+}
+
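+/* Apply a table of (offset, mask, value) overrides to a register block */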
+static void atcphy_apply_tunable(struct apple_atcphy *atcphy,
+				 void __iomem *regs,
+				 struct atcphy_tunable *tunable)
+{
+	size_t i;
+
+	for (i = 0; i < tunable->sz; ++i)
+		mask32(regs + tunable->values[i].offset,
+		       tunable->values[i].mask, tunable->values[i].value);
+}
+
+static void atcphy_apply_tunables(struct apple_atcphy *atcphy,
+				  enum atcphy_mode mode)
+{
+	int lane0 = atcphy->swap_lanes ? 1 : 0;
+	int lane1 = atcphy->swap_lanes ? 0 : 1;
+
+	atcphy_apply_tunable(atcphy, atcphy->regs.axi2af,
+			     &atcphy->tunables.axi2af);
+	atcphy_apply_tunable(atcphy, atcphy->regs.core,
+			     &atcphy->tunables.common);
+
+	switch (mode) {
+	case APPLE_ATCPHY_MODE_USB3:
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_usb3[lane0]);
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_usb3[lane1]);
+		break;
+
+	case APPLE_ATCPHY_MODE_USB3_DP:
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_usb3[lane0]);
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_displayport[lane1]);
+		break;
+
+	case APPLE_ATCPHY_MODE_DP:
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_displayport[lane0]);
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_displayport[lane1]);
+		break;
+
+	case APPLE_ATCPHY_MODE_USB4:
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_usb4[lane0]);
+		atcphy_apply_tunable(atcphy, atcphy->regs.core,
+				     &atcphy->tunables.lane_usb4[lane1]);
+		break;
+
+	default:
+		dev_warn(atcphy->dev,
+			 "Unknown mode %d in atcphy_apply_tunables\n", mode);
+		fallthrough;
+	case APPLE_ATCPHY_MODE_OFF:
+	case APPLE_ATCPHY_MODE_USB2:
+		break;
+	}
+}
+
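+/* Program the per-chip efuse calibration values into the CIO3PLL and AUSPLL */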
+static void atcphy_setup_pll_fuses(struct apple_atcphy *atcphy)
+{
+	void __iomem *regs = atcphy->regs.core;
+
+	if (!atcphy->fuses.present)
+		return;
+
+	/* CIO3PLL fuses */
+	mask32(regs + CIO3PLL_DCO_NCTRL, CIO3PLL_DCO_COARSEBIN_EFUSE0,
+	       FIELD_PREP(CIO3PLL_DCO_COARSEBIN_EFUSE0,
+			  atcphy->fuses.cio3pll_dco_coarsebin[0]));
+	mask32(regs + CIO3PLL_DCO_NCTRL, CIO3PLL_DCO_COARSEBIN_EFUSE1,
+	       FIELD_PREP(CIO3PLL_DCO_COARSEBIN_EFUSE1,
+			  atcphy->fuses.cio3pll_dco_coarsebin[1]));
+	mask32(regs + CIO3PLL_FRACN_CAN, CIO3PLL_DLL_CAL_START_CAPCODE,
+	       FIELD_PREP(CIO3PLL_DLL_CAL_START_CAPCODE,
+			  atcphy->fuses.cio3pll_dll_start_capcode[0]));
+
+	if (atcphy->quirks.t8103_cio3pll_workaround) {
+		mask32(regs + AUS_COMMON_SHIM_BLK_VREG, AUS_VREG_TRIM,
+		       FIELD_PREP(AUS_VREG_TRIM,
+				  atcphy->fuses.aus_cmn_shm_vreg_trim));
+		mask32(regs + CIO3PLL_FRACN_CAN, CIO3PLL_DLL_CAL_START_CAPCODE,
+		       FIELD_PREP(CIO3PLL_DLL_CAL_START_CAPCODE,
+				  atcphy->fuses.cio3pll_dll_start_capcode[1]));
+		mask32(regs + CIO3PLL_DTC_VREG, CIO3PLL_DTC_VREG_ADJUST,
+		       FIELD_PREP(CIO3PLL_DTC_VREG_ADJUST,
+				  atcphy->fuses.cio3pll_dtc_vreg_adjust));
+	} else {
+		mask32(regs + CIO3PLL_DTC_VREG, CIO3PLL_DTC_VREG_ADJUST,
+		       FIELD_PREP(CIO3PLL_DTC_VREG_ADJUST,
+				  atcphy->fuses.cio3pll_dtc_vreg_adjust));
+		mask32(regs + AUS_COMMON_SHIM_BLK_VREG, AUS_VREG_TRIM,
+		       FIELD_PREP(AUS_VREG_TRIM,
+				  atcphy->fuses.aus_cmn_shm_vreg_trim));
+	}
+
+	/* AUSPLL fuses */
+	mask32(regs + AUSPLL_DCO_EFUSE_SPARE, AUSPLL_RODCO_ENCAP_EFUSE,
+	       FIELD_PREP(AUSPLL_RODCO_ENCAP_EFUSE,
+			  atcphy->fuses.auspll_rodco_encap));
+	mask32(regs + AUSPLL_DCO_EFUSE_SPARE, AUSPLL_RODCO_BIAS_ADJUST_EFUSE,
+	       FIELD_PREP(AUSPLL_RODCO_BIAS_ADJUST_EFUSE,
+			  atcphy->fuses.auspll_rodco_bias_adjust));
+	mask32(regs + AUSPLL_FRACN_CAN, AUSPLL_DLL_START_CAPCODE,
+	       FIELD_PREP(AUSPLL_DLL_START_CAPCODE,
+			  atcphy->fuses.auspll_fracn_dll_start_capcode));
+	mask32(regs + AUSPLL_CLKOUT_DTC_VREG, AUSPLL_DTC_VREG_ADJUST,
+	       FIELD_PREP(AUSPLL_DTC_VREG_ADJUST,
+			  atcphy->fuses.auspll_dtc_vreg_adjust));
+
+	/* TODO: is this actually required again? */
+	mask32(regs + AUS_COMMON_SHIM_BLK_VREG, AUS_VREG_TRIM,
+	       FIELD_PREP(AUS_VREG_TRIM, atcphy->fuses.aus_cmn_shm_vreg_trim));
+}
+
+static int atcphy_cio_power_off(struct apple_atcphy *atcphy)
+{
+	u32 reg;
+	int ret;
+
+	/* enable all reset lines */
+	core_clear32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_PHY_RESET_N);
+	core_clear32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_APB_RESET_N);
+	core_set32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_CLAMP_EN);
+	core_clear32(atcphy, ATCPHY_MISC, ATCPHY_MISC_RESET_N);
+
+	// TODO: why clear? is this SLEEP_N? or do we enable some power management here?
+	core_clear32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_SLEEP_BIG);
+	ret = readl_poll_timeout(atcphy->regs.core + ATCPHY_POWER_STAT, reg,
+				 !(reg & ATCPHY_POWER_SLEEP_BIG), 100, 100000);
+	if (ret) {
+		dev_err(atcphy->dev, "failed to sleep atcphy \"big\"\n");
+		return ret;
+	}
+
+	core_clear32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_SLEEP_SMALL);
+	ret = readl_poll_timeout(atcphy->regs.core + ATCPHY_POWER_STAT, reg,
+				 !(reg & ATCPHY_POWER_SLEEP_SMALL), 100,
+				 100000);
+	if (ret) {
+		dev_err(atcphy->dev, "failed to sleep atcphy \"small\"\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int atcphy_cio_power_on(struct apple_atcphy *atcphy)
+{
+	u32 reg;
+	int ret;
+
+	core_set32(atcphy, ATCPHY_MISC, ATCPHY_MISC_RESET_N);
+
+	// TODO: why set?! see above
+	core_set32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_SLEEP_SMALL);
+	ret = readl_poll_timeout(atcphy->regs.core + ATCPHY_POWER_STAT, reg,
+				 reg & ATCPHY_POWER_SLEEP_SMALL, 100, 100000);
+	if (ret) {
+		dev_err(atcphy->dev, "failed to wakeup atcphy \"small\"\n");
+		return ret;
+	}
+
+	core_set32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_SLEEP_BIG);
+	ret = readl_poll_timeout(atcphy->regs.core + ATCPHY_POWER_STAT, reg,
+				 reg & ATCPHY_POWER_SLEEP_BIG, 100, 100000);
+	if (ret) {
+		dev_err(atcphy->dev, "failed to wakeup atcphy \"big\"\n");
+		return ret;
+	}
+
+	core_clear32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_CLAMP_EN);
+	core_set32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_APB_RESET_N);
+
+	return 0;
+}
+
+static void atcphy_configure_lanes(struct apple_atcphy *atcphy,
+				   enum atcphy_mode mode)
+{
+	const struct atcphy_mode_configuration *mode_cfg;
+
+	if (atcphy->swap_lanes)
+		mode_cfg = &atcphy_modes[mode].swapped;
+	else
+		mode_cfg = &atcphy_modes[mode].normal;
+
+	trace_atcphy_configure_lanes(mode, mode_cfg);
+
+	if (mode_cfg->dp_lane[0]) {
+		core_set32(atcphy, LN0_AUSPMA_RX_TOP + LN_AUSPMA_RX_TOP_PMAFSM,
+			   LN_AUSPMA_RX_TOP_PMAFSM_PCS_OV);
+		core_clear32(atcphy,
+			     LN0_AUSPMA_RX_TOP + LN_AUSPMA_RX_TOP_PMAFSM,
+			     LN_AUSPMA_RX_TOP_PMAFSM_PCS_REQ);
+	}
+	if (mode_cfg->dp_lane[1]) {
+		core_set32(atcphy, LN1_AUSPMA_RX_TOP + LN_AUSPMA_RX_TOP_PMAFSM,
+			   LN_AUSPMA_RX_TOP_PMAFSM_PCS_OV);
+		core_clear32(atcphy,
+			     LN1_AUSPMA_RX_TOP + LN_AUSPMA_RX_TOP_PMAFSM,
+			     LN_AUSPMA_RX_TOP_PMAFSM_PCS_REQ);
+	}
+
+	core_mask32(atcphy, ACIOPHY_LANE_MODE, ACIOPHY_LANE_MODE_RX0,
+		    FIELD_PREP(ACIOPHY_LANE_MODE_RX0, mode_cfg->lane_mode[0]));
+	core_mask32(atcphy, ACIOPHY_LANE_MODE, ACIOPHY_LANE_MODE_TX0,
+		    FIELD_PREP(ACIOPHY_LANE_MODE_TX0, mode_cfg->lane_mode[0]));
+	core_mask32(atcphy, ACIOPHY_LANE_MODE, ACIOPHY_LANE_MODE_RX1,
+		    FIELD_PREP(ACIOPHY_LANE_MODE_RX1, mode_cfg->lane_mode[1]));
+	core_mask32(atcphy, ACIOPHY_LANE_MODE, ACIOPHY_LANE_MODE_TX1,
+		    FIELD_PREP(ACIOPHY_LANE_MODE_TX1, mode_cfg->lane_mode[1]));
+	core_mask32(atcphy, ACIOPHY_CROSSBAR, ACIOPHY_CROSSBAR_PROTOCOL,
+		    FIELD_PREP(ACIOPHY_CROSSBAR_PROTOCOL, mode_cfg->crossbar));
+
+	if (mode_cfg->set_swap)
+		core_set32(atcphy, ATCPHY_MISC, ATCPHY_MISC_LANE_SWAP);
+	else
+		core_clear32(atcphy, ATCPHY_MISC, ATCPHY_MISC_LANE_SWAP);
+
+	if (mode_cfg->crossbar_dp_both_pma)
+		core_set32(atcphy, ACIOPHY_CROSSBAR,
+			   ACIOPHY_CROSSBAR_DP_BOTH_PMA);
+	else
+		core_clear32(atcphy, ACIOPHY_CROSSBAR,
+			     ACIOPHY_CROSSBAR_DP_BOTH_PMA);
+
+	core_mask32(atcphy, ACIOPHY_CROSSBAR, ACIOPHY_CROSSBAR_DP_SINGLE_PMA,
+		    FIELD_PREP(ACIOPHY_CROSSBAR_DP_SINGLE_PMA,
+			       mode_cfg->crossbar_dp_single_pma));
+}
+
+static int atcphy_pipehandler_lock(struct apple_atcphy *atcphy)
+{
+	int ret;
+	u32 reg;
+
+	if (readl_relaxed(atcphy->regs.pipehandler + PIPEHANDLER_LOCK_REQ) &
+	    PIPEHANDLER_LOCK_EN)
+		dev_warn(atcphy->dev, "pipehandler already locked\n");
+
+	set32(atcphy->regs.pipehandler + PIPEHANDLER_LOCK_REQ,
+	      PIPEHANDLER_LOCK_EN);
+
+	ret = readl_poll_timeout(atcphy->regs.pipehandler +
+					 PIPEHANDLER_LOCK_ACK,
+				 reg, reg & PIPEHANDLER_LOCK_EN, 1000, 1000000);
+	if (ret) {
+		clear32(atcphy->regs.pipehandler + PIPEHANDLER_LOCK_REQ, 1);
+		dev_err(atcphy->dev,
+			"pipehandler lock not acked, this type-c port is probably dead until the next reboot.\n");
+	}
+
+	return ret;
+}
+
+static int atcphy_pipehandler_unlock(struct apple_atcphy *atcphy)
+{
+	int ret;
+	u32 reg;
+
+	clear32(atcphy->regs.pipehandler + PIPEHANDLER_LOCK_REQ,
+		PIPEHANDLER_LOCK_EN);
+	ret = readl_poll_timeout(
+		atcphy->regs.pipehandler + PIPEHANDLER_LOCK_ACK, reg,
+		!(reg & PIPEHANDLER_LOCK_EN), 1000, 1000000);
+	if (ret)
+		dev_err(atcphy->dev,
+			"pipehandler lock release not acked, this type-c port is probably dead until the next reboot.\n");
+
+	return ret;
+}
+
+static int atcphy_configure_pipehandler(struct apple_atcphy *atcphy,
+					enum atcphy_pipehandler_state state)
+{
+	int ret;
+	u32 reg;
+
+	if (atcphy->pipehandler_state == state)
+		return 0;
+
+	clear32(atcphy->regs.pipehandler + PIPEHANDLER_OVERRIDE_VALUES,
+		14); // TODO: why 14?
+	set32(atcphy->regs.pipehandler + PIPEHANDLER_OVERRIDE,
+	      PIPEHANDLER_OVERRIDE_RXVALID | PIPEHANDLER_OVERRIDE_RXDETECT);
+
+	ret = atcphy_pipehandler_lock(atcphy);
+	if (ret)
+		return ret;
+
+	switch (state) {
+	case ATCPHY_PIPEHANDLER_STATE_USB3:
+		core_set32(atcphy, ACIOPHY_TOP_BIST_PHY_CFG0,
+			   ACIOPHY_TOP_BIST_PHY_CFG0_LN0_RESET_N);
+		core_set32(atcphy, ACIOPHY_TOP_BIST_OV_CFG,
+			   ACIOPHY_TOP_BIST_OV_CFG_LN0_RESET_N_OV);
+		ret = readl_poll_timeout(
+			atcphy->regs.core + ACIOPHY_TOP_PHY_STAT, reg,
+			!(reg & ACIOPHY_TOP_PHY_STAT_LN0_UNK23), 100, 100000);
+		if (ret)
+			dev_warn(
+				atcphy->dev,
+				"timed out waiting for ACIOPHY_TOP_PHY_STAT_LN0_UNK23\n");
+
+			// TODO: macOS does this, but it then breaks waiting for
+			//       ACIOPHY_TOP_PHY_STAT_LN0_UNK0 for some reason :/
+			//       This is probably a status reset which clears the ln0
+			//       ready status, but the ready status then never comes
+			//       up again.
+#if 0
+		core_set32(atcphy, ACIOPHY_TOP_BIST_READ_CTRL,
+			   ACIOPHY_TOP_BIST_READ_CTRL_LN0_PHY_STATUS_RE);
+		core_clear32(atcphy, ACIOPHY_TOP_BIST_READ_CTRL,
+			     ACIOPHY_TOP_BIST_READ_CTRL_LN0_PHY_STATUS_RE);
+#endif
+		core_mask32(atcphy, ACIOPHY_TOP_BIST_PHY_CFG1,
+			    ACIOPHY_TOP_BIST_PHY_CFG1_LN0_PWR_DOWN,
+			    FIELD_PREP(ACIOPHY_TOP_BIST_PHY_CFG1_LN0_PWR_DOWN,
+				       3));
+		core_set32(atcphy, ACIOPHY_TOP_BIST_OV_CFG,
+			   ACIOPHY_TOP_BIST_OV_CFG_LN0_PWR_DOWN_OV);
+		core_set32(atcphy, ACIOPHY_TOP_BIST_CIOPHY_CFG1,
+			   ACIOPHY_TOP_BIST_CIOPHY_CFG1_CLK_EN);
+		core_set32(atcphy, ACIOPHY_TOP_BIST_CIOPHY_CFG1,
+			   ACIOPHY_TOP_BIST_CIOPHY_CFG1_BIST_EN);
+		writel(0, atcphy->regs.core + ACIOPHY_TOP_BIST_CIOPHY_CFG1);
+
+		ret = readl_poll_timeout(
+			atcphy->regs.core + ACIOPHY_TOP_PHY_STAT, reg,
+			(reg & ACIOPHY_TOP_PHY_STAT_LN0_UNK0), 100, 100000);
+		if (ret)
+			dev_warn(
+				atcphy->dev,
+				"timed out waiting for ACIOPHY_TOP_PHY_STAT_LN0_UNK0\n");
+
+		ret = readl_poll_timeout(
+			atcphy->regs.core + ACIOPHY_TOP_PHY_STAT, reg,
+			!(reg & ACIOPHY_TOP_PHY_STAT_LN0_UNK23), 100, 100000);
+		if (ret)
+			dev_warn(
+				atcphy->dev,
+				"timed out waiting for ACIOPHY_TOP_PHY_STAT_LN0_UNK23\n");
+
+		writel(0, atcphy->regs.core + ACIOPHY_TOP_BIST_OV_CFG);
+		core_set32(atcphy, ACIOPHY_TOP_BIST_CIOPHY_CFG1,
+			   ACIOPHY_TOP_BIST_CIOPHY_CFG1_CLK_EN);
+		core_set32(atcphy, ACIOPHY_TOP_BIST_CIOPHY_CFG1,
+			   ACIOPHY_TOP_BIST_CIOPHY_CFG1_BIST_EN);
+
+		/* switch dwc3's superspeed PHY to the real physical PHY */
+		clear32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+			PIPEHANDLER_CLK_SELECT);
+		clear32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+			PIPEHANDLER_MUX_MODE);
+		mask32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+		       PIPEHANDLER_CLK_SELECT,
+		       FIELD_PREP(PIPEHANDLER_CLK_SELECT,
+				  PIPEHANDLER_CLK_USB3PHY));
+		mask32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+		       PIPEHANDLER_MUX_MODE,
+		       FIELD_PREP(PIPEHANDLER_MUX_MODE,
+				  PIPEHANDLER_MUX_MODE_USB3PHY));
+
+		/* use real rx detect/valid values again */
+		clear32(atcphy->regs.pipehandler + PIPEHANDLER_OVERRIDE,
+			PIPEHANDLER_OVERRIDE_RXVALID |
+				PIPEHANDLER_OVERRIDE_RXDETECT);
+		break;
+	default:
+		dev_warn(
+			atcphy->dev,
+			"unknown mode in pipehandler_configure: %d, switching to safe state\n",
+			state);
+		fallthrough;
+	case ATCPHY_PIPEHANDLER_STATE_USB2:
+		/* switch dwc3's superspeed PHY back to the dummy (and also USB4 PHY?) */
+		clear32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+			PIPEHANDLER_CLK_SELECT);
+		clear32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+			PIPEHANDLER_MUX_MODE);
+		mask32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+		       PIPEHANDLER_CLK_SELECT,
+		       FIELD_PREP(PIPEHANDLER_CLK_SELECT,
+				  PIPEHANDLER_CLK_DUMMY_PHY));
+		mask32(atcphy->regs.pipehandler + PIPEHANDLER_MUX_CTRL,
+		       PIPEHANDLER_MUX_MODE,
+		       FIELD_PREP(PIPEHANDLER_MUX_MODE,
+				  PIPEHANDLER_MUX_MODE_DUMMY_PHY));
+
+		/* keep ignoring rx detect and valid values from the USB3/4 PHY? */
+		set32(atcphy->regs.pipehandler + PIPEHANDLER_OVERRIDE,
+		      PIPEHANDLER_OVERRIDE_RXVALID |
+			      PIPEHANDLER_OVERRIDE_RXDETECT);
+		break;
+	}
+
+	ret = atcphy_pipehandler_unlock(atcphy);
+	if (ret)
+		return ret;
+
+	// TODO: macos seems to always clear it for USB3 - what about USB2/4?
+	clear32(atcphy->regs.pipehandler + PIPEHANDLER_NONSELECTED_OVERRIDE,
+		PIPEHANDLER_NONSELECTED_NATIVE_RESET);
+
+	// TODO: why? without this superspeed devices sometimes come up as highspeed
+	msleep(500);
+
+	atcphy->pipehandler_state = state;
+
+	return 0;
+}
+
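+/* Release the DP PMA lane reset, route the PCLKs and power up the AUX block */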
+static void atcphy_enable_dp_aux(struct apple_atcphy *atcphy)
+{
+	core_set32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		   DPTXPHY_PMA_LANE_RESET_N);
+	core_set32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		   DPTXPHY_PMA_LANE_RESET_N_OV);
+
+	core_mask32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		    DPRX_PCLK_SELECT, FIELD_PREP(DPRX_PCLK_SELECT, 1));
+	core_set32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		   DPRX_PCLK_ENABLE);
+
+	core_mask32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		    DPTX_PCLK1_SELECT, FIELD_PREP(DPTX_PCLK1_SELECT, 1));
+	core_set32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		   DPTX_PCLK1_ENABLE);
+
+	core_mask32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		    DPTX_PCLK2_SELECT, FIELD_PREP(DPTX_PCLK2_SELECT, 1));
+	core_set32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		   DPTX_PCLK2_ENABLE);
+
+	core_set32(atcphy, ACIOPHY_PLL_COMMON_CTRL,
+		   ACIOPHY_PLL_WAIT_FOR_CMN_READY_BEFORE_RESET_EXIT);
+
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_AUX_CLAMP_EN);
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_SLEEP_B_SML_IN);
+	udelay(2);
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_SLEEP_B_BIG_IN);
+	udelay(2);
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_AUX_CLAMP_EN);
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_AUX_PWN_DOWN);
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL,
+		LPDPTX_TXTERM_CODEMSB);
+	mask32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_TXTERM_CODE,
+	       FIELD_PREP(LPDPTX_TXTERM_CODE, 0x16));
+
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CFG_BLK_AUX_LDO_CTRL, 0x1c00);
+	mask32(atcphy->regs.lpdptx + LPDPTX_AUX_SHM_CFG_BLK_AUX_CTRL_REG1,
+	       LPDPTX_CFG_PMA_PHYS_ADJ, FIELD_PREP(LPDPTX_CFG_PMA_PHYS_ADJ, 5));
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_SHM_CFG_BLK_AUX_CTRL_REG1,
+	      LPDPTX_CFG_PMA_PHYS_ADJ_OV);
+
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CFG_BLK_AUX_MARGIN,
+		LPDPTX_MARGIN_RCAL_RXOFFSET_EN);
+
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CFG_BLK_AUX_CTRL,
+		LPDPTX_BLK_AUX_CTRL_PWRDN);
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_SHM_CFG_BLK_AUX_CTRL_REG0,
+	      LPDPTX_CFG_PMA_AUX_SEL_LF_DATA);
+	mask32(atcphy->regs.lpdptx + LPDPTX_AUX_CFG_BLK_AUX_CTRL,
+	       LPDPTX_BLK_AUX_RXOFFSET, FIELD_PREP(LPDPTX_BLK_AUX_RXOFFSET, 3));
+
+	mask32(atcphy->regs.lpdptx + LPDPTX_AUX_CFG_BLK_AUX_MARGIN,
+	       LPDPTX_AUX_MARGIN_RCAL_TXSWING,
+	       FIELD_PREP(LPDPTX_AUX_MARGIN_RCAL_TXSWING, 12));
+
+	atcphy->dp_link_rate = -1;
+}
+
+static void atcphy_disable_dp_aux(struct apple_atcphy *atcphy)
+{
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_AUX_PWN_DOWN);
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CFG_BLK_AUX_CTRL,
+	      LPDPTX_BLK_AUX_CTRL_PWRDN);
+	set32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL, LPDPTX_AUX_CLAMP_EN);
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL,
+		LPDPTX_SLEEP_B_SML_IN);
+	udelay(2);
+	clear32(atcphy->regs.lpdptx + LPDPTX_AUX_CONTROL,
+		LPDPTX_SLEEP_B_BIG_IN);
+	udelay(2);
+
+	// TODO: maybe?
+	core_clear32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		     DPTXPHY_PMA_LANE_RESET_N);
+	// _OV?
+	core_clear32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		     DPRX_PCLK_ENABLE);
+	core_clear32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		     DPTX_PCLK1_ENABLE);
+	core_clear32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		     DPTX_PCLK2_ENABLE);
+
+	// clear 0x1000000 / BIT(24) maybe
+	// writel(0x1830630, atcphy->regs.core + 0x1028);
+}
+
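+/* Enable the lane's LDO clocks and apply the TX calibration/margin overrides */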
+static int
+atcphy_dp_configure_lane(struct apple_atcphy *atcphy, unsigned int lane,
+			 const struct atcphy_dp_link_rate_configuration *cfg)
+{
+	void __iomem *tx_shm, *rx_shm, *rx_top;
+
+	switch (lane) {
+	case 0:
+		tx_shm = atcphy->regs.core + LN0_AUSPMA_TX_SHM;
+		rx_shm = atcphy->regs.core + LN0_AUSPMA_RX_SHM;
+		rx_top = atcphy->regs.core + LN0_AUSPMA_RX_TOP;
+		break;
+	case 1:
+		tx_shm = atcphy->regs.core + LN1_AUSPMA_TX_SHM;
+		rx_shm = atcphy->regs.core + LN1_AUSPMA_RX_SHM;
+		rx_top = atcphy->regs.core + LN1_AUSPMA_RX_TOP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK, LN_LDOCLK_EN_SML);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK, LN_LDOCLK_EN_SML_OV);
+	udelay(2);
+
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK, LN_LDOCLK_EN_BIG);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK, LN_LDOCLK_EN_BIG_OV);
+	udelay(2);
+
+	if (cfg->bypass_txa_ldoclk) {
+		set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+		      LN_LDOCLK_BYPASS_SML);
+		set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+		      LN_LDOCLK_BYPASS_SML_OV);
+		udelay(2);
+
+		set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+		      LN_LDOCLK_BYPASS_BIG);
+		set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+		      LN_LDOCLK_BYPASS_BIG_OV);
+		udelay(2);
+	} else {
+		clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+			LN_LDOCLK_BYPASS_SML);
+		clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+			LN_LDOCLK_BYPASS_SML_OV);
+		udelay(2);
+
+		clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+			LN_LDOCLK_BYPASS_BIG);
+		clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_LDOCLK,
+			LN_LDOCLK_BYPASS_BIG_OV);
+		udelay(2);
+	}
+
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG0,
+	      LN_BYTECLK_RESET_SYNC_SEL_OV);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG0,
+	      LN_BYTECLK_RESET_SYNC_EN);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG0,
+	      LN_BYTECLK_RESET_SYNC_EN_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG0,
+		LN_BYTECLK_RESET_SYNC_CLR);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG0,
+	      LN_BYTECLK_RESET_SYNC_CLR_OV);
+
+	if (cfg->txa_div2_en)
+		set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1,
+		      LN_TXA_DIV2_EN);
+	else
+		clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1,
+			LN_TXA_DIV2_EN);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1, LN_TXA_DIV2_EN_OV);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1, LN_TXA_CLK_EN);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1, LN_TXA_CLK_EN_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1, LN_TXA_DIV2_RESET);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_CFG_MAIN_REG1,
+	      LN_TXA_DIV2_RESET_OV);
+
+	mask32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG0, LN_TXA_CAL_CTRL_BASE,
+	       FIELD_PREP(LN_TXA_CAL_CTRL_BASE, 0xf));
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG0, LN_TXA_CAL_CTRL_BASE_OV);
+	mask32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG0, LN_TXA_CAL_CTRL,
+	       FIELD_PREP(LN_TXA_CAL_CTRL, 0x3f)); // TODO: 3f?
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG0, LN_TXA_CAL_CTRL_OV);
+
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG2, LN_TXA_MARGIN);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG2, LN_TXA_MARGIN_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG2, LN_TXA_MARGIN_2R);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG2, LN_TXA_MARGIN_2R_OV);
+
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_POST);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_POST_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_POST_2R);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_POST_2R_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_POST_4R);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_POST_4R_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_PRE);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_PRE_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_PRE_2R);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_PRE_2R_OV);
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_PRE_4R);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG3, LN_TXA_MARGIN_PRE_4R_OV);
+
+	clear32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG0, LN_TXA_HIZ);
+	set32(tx_shm + LN_AUSPMA_TX_SHM_TXA_IMP_REG0, LN_TXA_HIZ_OV);
+
+	return 0;
+}
+
+static int
+atcphy_dp_configure_lane2(struct apple_atcphy *atcphy, unsigned int lane,
+			 const struct atcphy_dp_link_rate_configuration *cfg)
+{
+	void __iomem *tx_shm, *rx_shm, *rx_top;
+
+	switch (lane) {
+	case 0:
+		tx_shm = atcphy->regs.core + LN0_AUSPMA_TX_SHM;
+		rx_shm = atcphy->regs.core + LN0_AUSPMA_RX_SHM;
+		rx_top = atcphy->regs.core + LN0_AUSPMA_RX_TOP;
+		break;
+	case 1:
+		tx_shm = atcphy->regs.core + LN1_AUSPMA_TX_SHM;
+		rx_shm = atcphy->regs.core + LN1_AUSPMA_RX_SHM;
+		rx_top = atcphy->regs.core + LN1_AUSPMA_RX_TOP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_AFE_CTRL1,
+		LN_RX_DIV20_RESET_N);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_AFE_CTRL1,
+	      LN_RX_DIV20_RESET_N_OV);
+	udelay(2);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_AFE_CTRL1, LN_RX_DIV20_RESET_N);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12,
+	      LN_TX_BYTECLK_RESET_SYNC_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12,
+	      LN_TX_BYTECLK_RESET_SYNC_EN_OV);
+
+	mask32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16, LN_TX_CAL_CODE,
+	       FIELD_PREP(LN_TX_CAL_CODE, 6)); // TODO 6?
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16, LN_TX_CAL_CODE_OV);
+
+	mask32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19,
+	       LN_TX_CLK_DLY_CTRL_TAPGEN,
+	       FIELD_PREP(LN_TX_CLK_DLY_CTRL_TAPGEN, 3));
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL10, LN_DTVREG_ADJUST);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13, LN_DTVREG_ADJUST_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16, LN_RXTERM_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16, LN_RXTERM_EN_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19, LN_TX_TEST_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19, LN_TX_TEST_EN_OV);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	      LN_VREF_TEST_RXLPBKDT_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	      LN_VREF_TEST_RXLPBKDT_EN_OV);
+	mask32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	       LN_VREF_LPBKIN_DATA, FIELD_PREP(LN_VREF_LPBKIN_DATA, 3));
+	mask32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22, LN_VREF_BIAS_SEL,
+	       FIELD_PREP(LN_VREF_BIAS_SEL, 2));
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	      LN_VREF_BIAS_SEL_OV);
+	mask32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	       LN_VREF_ADJUST_GRAY, FIELD_PREP(LN_VREF_ADJUST_GRAY, 0x18));
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	      LN_VREF_ADJUST_GRAY_OV);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22, LN_VREF_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22, LN_VREF_EN_OV);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22, LN_VREF_BOOST_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	      LN_VREF_BOOST_EN_OV);
+	udelay(2);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22, LN_VREF_BOOST_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_VREF_CTRL22,
+	      LN_VREF_BOOST_EN_OV);
+	udelay(2);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13, LN_TX_PRE_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13, LN_TX_PRE_EN_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13, LN_TX_PST1_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13, LN_TX_PST1_EN_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12, LN_TX_PBIAS_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12, LN_TX_PBIAS_EN_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16,
+		LN_RXTERM_PULLUP_LEAK_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_SAVOS_CTRL16,
+	      LN_RXTERM_PULLUP_LEAK_EN_OV);
+
+	set32(rx_top + LN_AUSPMA_RX_TOP_TJ_CFG_RX_TXMODE, LN_RX_TXMODE);
+
+	if (cfg->txa_div2_en)
+		set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19,
+		      LN_TX_CLK_DIV2_EN);
+	else
+		clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19,
+			LN_TX_CLK_DIV2_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19,
+	      LN_TX_CLK_DIV2_EN_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19,
+		LN_TX_CLK_DIV2_RST);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19,
+	      LN_TX_CLK_DIV2_RST_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12, LN_TX_HRCLK_SEL);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12, LN_TX_HRCLK_SEL_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17, LN_TX_MARGIN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17, LN_TX_MARGIN_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17, LN_TX_MARGIN_LSB);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17, LN_TX_MARGIN_LSB_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17, LN_TX_MARGIN_P1);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17, LN_TX_MARGIN_P1_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17,
+		LN_TX_MARGIN_P1_LSB);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL17,
+	      LN_TX_MARGIN_P1_LSB_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_P1_CODE);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_P1_CODE_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_P1_LSB_CODE);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_P1_LSB_CODE_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_MARGIN_PRE);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_MARGIN_PRE_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18,
+		LN_TX_MARGIN_PRE_LSB);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18,
+	      LN_TX_MARGIN_PRE_LSB_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_PRE_LSB_CODE);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18,
+	      LN_TX_PRE_LSB_CODE_OV);
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_PRE_CODE);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TX_CTRL18, LN_TX_PRE_CODE_OV);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL11, LN_DTVREG_SML_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL11, LN_DTVREG_SML_EN_OV);
+	udelay(2);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL11, LN_DTVREG_BIG_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL11, LN_DTVREG_BIG_EN_OV);
+	udelay(2);
+
+	mask32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL10, LN_DTVREG_ADJUST,
+	       FIELD_PREP(LN_DTVREG_ADJUST, 0xa));
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL13, LN_DTVREG_ADJUST_OV);
+	udelay(2);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19, LN_TX_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_TERM_CTRL19, LN_TX_EN_OV);
+	udelay(2);
+
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_CTLE_CTRL0, LN_TX_CLK_EN);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_CTLE_CTRL0, LN_TX_CLK_EN_OV);
+
+	clear32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12,
+		LN_TX_BYTECLK_RESET_SYNC_CLR);
+	set32(rx_shm + LN_AUSPMA_RX_SHM_TJ_RXA_DFE_CTRL12,
+	      LN_TX_BYTECLK_RESET_SYNC_CLR_OV);
+
+	return 0;
+}
+
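+/*
+ * Send a command to the AUSPLL block: write it to the override register with
+ * the request bit set, wait for the acknowledge bit and drop the request.
+ */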
+static int atcphy_auspll_apb_command(struct apple_atcphy *atcphy, u32 command)
+{
+	int ret;
+	u32 reg;
+
+	reg = readl(atcphy->regs.core + AUSPLL_APB_CMD_OVERRIDE);
+	reg &= ~AUSPLL_APB_CMD_OVERRIDE_CMD;
+	reg |= FIELD_PREP(AUSPLL_APB_CMD_OVERRIDE_CMD, command);
+	reg |= AUSPLL_APB_CMD_OVERRIDE_REQ;
+	reg |= AUSPLL_APB_CMD_OVERRIDE_UNK28;
+	writel(reg, atcphy->regs.core + AUSPLL_APB_CMD_OVERRIDE);
+
+	ret = readl_poll_timeout(atcphy->regs.core + AUSPLL_APB_CMD_OVERRIDE,
+				 reg, (reg & AUSPLL_APB_CMD_OVERRIDE_ACK), 100,
+				 100000);
+	if (ret) {
+		dev_err(atcphy->dev, "AUSPLL APB command was not acked.\n");
+		return ret;
+	}
+
+	core_clear32(atcphy, AUSPLL_APB_CMD_OVERRIDE,
+		     AUSPLL_APB_CMD_OVERRIDE_REQ);
+
+	return 0;
+}
+
+static int atcphy_dp_configure(struct apple_atcphy *atcphy,
+			       enum atcphy_dp_link_rate lr)
+{
+	const struct atcphy_dp_link_rate_configuration *cfg = &dp_lr_config[lr];
+	const struct atcphy_mode_configuration *mode_cfg;
+	int ret;
+	u32 reg;
+
+	trace_atcphy_dp_configure(atcphy, lr);
+
+	if (atcphy->dp_link_rate == lr)
+		return 0;
+
+	if (atcphy->swap_lanes)
+		mode_cfg = &atcphy_modes[atcphy->mode].swapped;
+	else
+		mode_cfg = &atcphy_modes[atcphy->mode].normal;
+
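+	/* program the AUSPLL dividers for the requested link rate */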
+	core_clear32(atcphy, AUSPLL_FREQ_CFG, AUSPLL_FREQ_REFCLK);
+
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_FREQ_COUNT_TARGET,
+		    FIELD_PREP(AUSPLL_FD_FREQ_COUNT_TARGET,
+			       cfg->freqinit_count_target));
+	core_clear32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_FBDIVN_HALF);
+	core_clear32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_REV_DIVN);
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_KI_MAN,
+		    FIELD_PREP(AUSPLL_FD_KI_MAN, 8));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_KI_EXP,
+		    FIELD_PREP(AUSPLL_FD_KI_EXP, 3));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_KP_MAN,
+		    FIELD_PREP(AUSPLL_FD_KP_MAN, 8));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_KP_EXP,
+		    FIELD_PREP(AUSPLL_FD_KP_EXP, 7));
+	core_clear32(atcphy, AUSPLL_FREQ_DESC_A, AUSPLL_FD_KPKI_SCALE_HBW);
+
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_B, AUSPLL_FD_FBDIVN_FRAC_DEN,
+		    FIELD_PREP(AUSPLL_FD_FBDIVN_FRAC_DEN,
+			       cfg->fbdivn_frac_den));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_B, AUSPLL_FD_FBDIVN_FRAC_NUM,
+		    FIELD_PREP(AUSPLL_FD_FBDIVN_FRAC_NUM,
+			       cfg->fbdivn_frac_num));
+
+	core_clear32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_SDM_SSC_STEP);
+	core_clear32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_SDM_SSC_EN);
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_PCLK_DIV_SEL,
+		    FIELD_PREP(AUSPLL_FD_PCLK_DIV_SEL, cfg->pclk_div_sel));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_LFSDM_DIV,
+		    FIELD_PREP(AUSPLL_FD_LFSDM_DIV, 1));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_LFCLK_CTRL,
+		    FIELD_PREP(AUSPLL_FD_LFCLK_CTRL, cfg->lfclk_ctrl));
+	core_mask32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_VCLK_OP_DIVN,
+		    FIELD_PREP(AUSPLL_FD_VCLK_OP_DIVN, cfg->vclk_op_divn));
+	core_set32(atcphy, AUSPLL_FREQ_DESC_C, AUSPLL_FD_VCLK_PRE_DIVN);
+
+	core_mask32(atcphy, AUSPLL_CLKOUT_DIV, AUSPLL_CLKOUT_PLLA_REFBUFCLK_DI,
+		    FIELD_PREP(AUSPLL_CLKOUT_PLLA_REFBUFCLK_DI, 7));
+
+	if (cfg->plla_clkout_vreg_bypass)
+		core_set32(atcphy, AUSPLL_CLKOUT_DTC_VREG,
+			   AUSPLL_DTC_VREG_BYPASS);
+	else
+		core_clear32(atcphy, AUSPLL_CLKOUT_DTC_VREG,
+			     AUSPLL_DTC_VREG_BYPASS);
+
+	core_set32(atcphy, AUSPLL_BGR, AUSPLL_BGR_CTRL_AVAIL);
+
+	core_set32(atcphy, AUSPLL_CLKOUT_MASTER,
+		   AUSPLL_CLKOUT_MASTER_PCLK_DRVR_EN);
+	core_set32(atcphy, AUSPLL_CLKOUT_MASTER,
+		   AUSPLL_CLKOUT_MASTER_PCLK2_DRVR_EN);
+	core_set32(atcphy, AUSPLL_CLKOUT_MASTER,
+		   AUSPLL_CLKOUT_MASTER_REFBUFCLK_DRVR_EN);
+
+	ret = atcphy_auspll_apb_command(atcphy, 0);
+	if (ret)
+		return ret;
+
+	ret = readl_poll_timeout(atcphy->regs.core + ACIOPHY_DP_PCLK_STAT, reg,
+				 (reg & ACIOPHY_AUSPLL_LOCK), 100, 100000);
+	if (ret) {
+		dev_err(atcphy->dev, "ACIOPHY_DP_PCLK did not lock.\n");
+		return ret;
+	}
+
+	ret = atcphy_auspll_apb_command(atcphy, 0x2800);
+	if (ret)
+		return ret;
+
+	if (mode_cfg->dp_lane[0]) {
+		ret = atcphy_dp_configure_lane(atcphy, 0, cfg);
+		if (ret)
+			return ret;
+	}
+
+	if (mode_cfg->dp_lane[1]) {
+		ret = atcphy_dp_configure_lane(atcphy, 1, cfg);
+		if (ret)
+			return ret;
+	}
+
+	if (mode_cfg->dp_lane[0]) {
+		ret = atcphy_dp_configure_lane2(atcphy, 0, cfg);
+		if (ret)
+			return ret;
+	}
+
+	if (mode_cfg->dp_lane[1]) {
+		ret = atcphy_dp_configure_lane2(atcphy, 1, cfg);
+		if (ret)
+			return ret;
+	}
+
+	core_clear32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		     DP_PMA_BYTECLK_RESET);
+	core_clear32(atcphy, ACIOPHY_LANE_DP_CFG_BLK_TX_DP_CTRL0,
+		     DP_MAC_DIV20_CLK_SEL);
+
+	atcphy->dp_link_rate = lr;
+	return 0;
+}
+
+static int atcphy_cio_configure(struct apple_atcphy *atcphy,
+				enum atcphy_mode mode)
+{
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&atcphy->lock));
+
+	ret = atcphy_cio_power_on(atcphy);
+	if (ret)
+		return ret;
+
+	atcphy_setup_pll_fuses(atcphy);
+	atcphy_apply_tunables(atcphy, mode);
+
+	// TODO: without this, devices sometimes aren't recognized, but it's unclear what it does
+	// ACIOPHY_PLL_TOP_BLK_AUSPLL_PCTL_FSM_CTRL1.APB_REQ_OV_SEL = 255
+	core_set32(atcphy, 0x1014, 255 << 13);
+	core_set32(atcphy, AUSPLL_APB_CMD_OVERRIDE,
+		   AUSPLL_APB_CMD_OVERRIDE_UNK28);
+
+	writel(0x10000cef, atcphy->regs.core + 0x8); // ACIOPHY_CFG0
+	writel(0x15570cff, atcphy->regs.core + 0x1b0); // ACIOPHY_SLEEP_CTRL
+	writel(0x11833fef, atcphy->regs.core + 0x8); // ACIOPHY_CFG0
+
+	/* enable clocks and configure lanes */
+	core_set32(atcphy, CIO3PLL_CLK_CTRL, CIO3PLL_CLK_PCLK_EN);
+	core_set32(atcphy, CIO3PLL_CLK_CTRL, CIO3PLL_CLK_REFCLK_EN);
+	atcphy_configure_lanes(atcphy, mode);
+
+	/* take the USB3 PHY out of reset */
+	core_set32(atcphy, ATCPHY_POWER_CTRL, ATCPHY_POWER_PHY_RESET_N);
+
+	/* setup AUX channel if DP altmode is requested */
+	if (atcphy_modes[mode].enable_dp_aux)
+		atcphy_enable_dp_aux(atcphy);
+
+	atcphy->mode = mode;
+	return 0;
+}
+
+static int atcphy_usb3_power_on(struct phy *phy)
+{
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+	enum atcphy_pipehandler_state state;
+	int ret = 0;
+
+	/*
+	 * The usb role switch and the mux_set work can run concurrently.
+	 * Make sure atcphy_mux_set_work is done bringing up ATCPHY before
+	 * trying to switch dwc3 to the correct PHY.
+	 */
+	mutex_lock(&atcphy->lock);
+	if (atcphy->mode != atcphy->target_mode) {
+		reinit_completion(&atcphy->atcphy_online_event);
+		mutex_unlock(&atcphy->lock);
+		wait_for_completion_timeout(&atcphy->atcphy_online_event,
+					msecs_to_jiffies(1000));
+		mutex_lock(&atcphy->lock);
+	}
+
+	if (atcphy->mode != atcphy->target_mode) {
+		dev_err(atcphy->dev, "ATCPHY did not come up; won't allow dwc3 to come up.\n");
+		mutex_unlock(&atcphy->lock);
+		return -EINVAL;
+	}
+
+	atcphy->dwc3_online = true;
+	state = atcphy_modes[atcphy->mode].pipehandler_state;
+	switch (state) {
+	case ATCPHY_PIPEHANDLER_STATE_USB2:
+	case ATCPHY_PIPEHANDLER_STATE_USB3:
+		ret = atcphy_configure_pipehandler(atcphy, state);
+		break;
+
+	case ATCPHY_PIPEHANDLER_STATE_INVALID:
+	default:
+		dev_warn(atcphy->dev, "Invalid state %d in %s\n", state,
+			 __func__);
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&atcphy->lock);
+
+	return ret;
+}
+
+static int atcphy_usb3_power_off(struct phy *phy)
+{
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+
+	mutex_lock(&atcphy->lock);
+
+	atcphy_configure_pipehandler(atcphy, ATCPHY_PIPEHANDLER_STATE_USB2);
+
+	atcphy->dwc3_online = false;
+	complete(&atcphy->dwc3_shutdown_event);
+
+	mutex_unlock(&atcphy->lock);
+
+	return 0;
+}
+
+static const struct phy_ops apple_atc_usb3_phy_ops = {
+	.owner = THIS_MODULE,
+	.power_on = atcphy_usb3_power_on,
+	.power_off = atcphy_usb3_power_off,
+};
+
+static int atcphy_usb2_power_on(struct phy *phy)
+{
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+
+	mutex_lock(&atcphy->lock);
+
+	/* take the PHY out of its low power state */
+	clear32(atcphy->regs.usb2phy + USB2PHY_CTL, USB2PHY_CTL_SIDDQ);
+	udelay(10);
+
+	/* reset the PHY for good measure */
+	clear32(atcphy->regs.usb2phy + USB2PHY_CTL, USB2PHY_CTL_APB_RESET_N);
+	set32(atcphy->regs.usb2phy + USB2PHY_CTL,
+	      USB2PHY_CTL_RESET | USB2PHY_CTL_PORT_RESET);
+	udelay(10);
+	set32(atcphy->regs.usb2phy + USB2PHY_CTL, USB2PHY_CTL_APB_RESET_N);
+	clear32(atcphy->regs.usb2phy + USB2PHY_CTL,
+		USB2PHY_CTL_RESET | USB2PHY_CTL_PORT_RESET);
+
+	set32(atcphy->regs.usb2phy + USB2PHY_SIG,
+	      USB2PHY_SIG_VBUSDET_FORCE_VAL | USB2PHY_SIG_VBUSDET_FORCE_EN |
+		      USB2PHY_SIG_VBUSVLDEXT_FORCE_VAL |
+		      USB2PHY_SIG_VBUSVLDEXT_FORCE_EN);
+
+	/* enable the dummy PHY for the SS lanes */
+	set32(atcphy->regs.pipehandler + PIPEHANDLER_NONSELECTED_OVERRIDE,
+	      PIPEHANDLER_DUMMY_PHY_EN);
+
+	mutex_unlock(&atcphy->lock);
+
+	return 0;
+}
+
+static int atcphy_usb2_power_off(struct phy *phy)
+{
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+
+	mutex_lock(&atcphy->lock);
+
+	/* reset the PHY before transitioning to low power mode */
+	clear32(atcphy->regs.usb2phy + USB2PHY_CTL, USB2PHY_CTL_APB_RESET_N);
+	set32(atcphy->regs.usb2phy + USB2PHY_CTL,
+	      USB2PHY_CTL_RESET | USB2PHY_CTL_PORT_RESET);
+
+	/* switch the PHY to low power mode */
+	set32(atcphy->regs.usb2phy + USB2PHY_CTL, USB2PHY_CTL_SIDDQ);
+
+	mutex_unlock(&atcphy->lock);
+
+	return 0;
+}
+
+static int atcphy_usb2_set_mode(struct phy *phy, enum phy_mode mode,
+				int submode)
+{
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+	int ret;
+
+	mutex_lock(&atcphy->lock);
+
+	switch (mode) {
+	case PHY_MODE_USB_HOST:
+	case PHY_MODE_USB_HOST_LS:
+	case PHY_MODE_USB_HOST_FS:
+	case PHY_MODE_USB_HOST_HS:
+	case PHY_MODE_USB_HOST_SS:
+		set32(atcphy->regs.usb2phy + USB2PHY_SIG, USB2PHY_SIG_HOST);
+		set32(atcphy->regs.usb2phy + USB2PHY_USBCTL,
+		      USB2PHY_USBCTL_HOST_EN);
+		ret = 0;
+		break;
+
+	case PHY_MODE_USB_DEVICE:
+	case PHY_MODE_USB_DEVICE_LS:
+	case PHY_MODE_USB_DEVICE_FS:
+	case PHY_MODE_USB_DEVICE_HS:
+	case PHY_MODE_USB_DEVICE_SS:
+		clear32(atcphy->regs.usb2phy + USB2PHY_SIG, USB2PHY_SIG_HOST);
+		clear32(atcphy->regs.usb2phy + USB2PHY_USBCTL,
+			USB2PHY_USBCTL_HOST_EN);
+		ret = 0;
+		break;
+
+	default:
+		dev_err(atcphy->dev, "Unknown mode for usb2 phy: %d\n", mode);
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&atcphy->lock);
+	return ret;
+}
+
+static const struct phy_ops apple_atc_usb2_phy_ops = {
+	.owner = THIS_MODULE,
+	.set_mode = atcphy_usb2_set_mode,
+	/*
+	 * This PHY is always paired with a dwc3 controller. Currently, dwc3
+	 * first initializes the PHY, then soft-resets itself and finally
+	 * powers on the PHY. That order would be reasonable, but the dwc3
+	 * soft reset never completes while the USB2 PHY is powered off, so
+	 * we have to expose power_on/power_off as init/exit here to ensure
+	 * the PHY is powered on and out of reset early enough.
+	 */
+	.init = atcphy_usb2_power_on,
+	.exit = atcphy_usb2_power_off,
+};
+
+static int atcphy_dpphy_mux_set(struct apple_atcphy *atcphy, enum atcphy_mode target)
+{
+	int ret = 0;
+
+	// TODO:
+	flush_work(&atcphy->mux_set_work);
+
+	mutex_lock(&atcphy->lock);
+
+	if (atcphy->mode == target)
+		goto out_unlock;
+
+	atcphy->target_mode = target;
+
+	WARN_ON(!schedule_work(&atcphy->mux_set_work));
+	ret = wait_for_completion_timeout(&atcphy->atcphy_online_event,
+					  msecs_to_jiffies(1000));
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	else if (ret > 0)
+		ret = 0;
+
+out_unlock:
+	mutex_unlock(&atcphy->lock);
+	return ret;
+}
+
+static int atcphy_dpphy_set_mode(struct phy *phy, enum phy_mode mode,
+				 int submode)
+{
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+
+	if (!atcphy->dp_only)
+		return 0;
+
+	dev_info(atcphy->dev, "%s(mode=%u, submode=%d)\n", __func__, mode, submode);
+
+	switch (mode) {
+	case PHY_MODE_INVALID:
+		if (atcphy->mode == APPLE_ATCPHY_MODE_OFF)
+			return 0;
+		return atcphy_dpphy_mux_set(atcphy, APPLE_ATCPHY_MODE_OFF);
+	case PHY_MODE_DP:
+		if (atcphy->mode == APPLE_ATCPHY_MODE_DP)
+			return 0;
+		return atcphy_dpphy_mux_set(atcphy, APPLE_ATCPHY_MODE_DP);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int atcphy_dpphy_validate(struct phy *phy, enum phy_mode mode,
+				 int submode, union phy_configure_opts *opts_)
+{
+	struct phy_configure_opts_dp *opts = &opts_->dp;
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+
+	if (mode == PHY_MODE_INVALID) {
+		memset(opts, 0, sizeof(*opts));
+		return 0;
+	}
+
+	if (mode != PHY_MODE_DP)
+		return -EINVAL;
+	if (submode != 0)
+		return -EINVAL;
+
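+	/*
+	 * The available lane count depends on the mux mode: two lanes when
+	 * the port is shared with USB3, all four in DP-only mode.
+	 */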
+	switch (atcphy->mode) {
+	case APPLE_ATCPHY_MODE_USB3_DP:
+		opts->lanes = 2;
+		break;
+	case APPLE_ATCPHY_MODE_DP:
+		opts->lanes = 4;
+		break;
+	default:
+		opts->lanes = 0;
+	}
+
+	opts->link_rate = 8100;
+
+	for (int i = 0; i < 4; ++i) {
+		opts->voltage[i] = 3;
+		opts->pre[i] = 3;
+	}
+
+	return 0;
+}
+
+static int atcphy_dpphy_configure(struct phy *phy,
+				  union phy_configure_opts *opts_)
+{
+	struct phy_configure_opts_dp *opts = &opts_->dp;
+	struct apple_atcphy *atcphy = phy_get_drvdata(phy);
+	enum atcphy_dp_link_rate link_rate;
+	int ret = 0;
+
+	/* might be possible but we don't know how */
+	if (opts->set_voltages)
+		return -EINVAL;
+
+	/*
+	 * Just ack set_lanes for compatibility with the (lp)dptx-phy;
+	 * mux_set should have configured the lanes already.
+	 */
+	if (opts->set_lanes) {
+		if ((atcphy->mode == APPLE_ATCPHY_MODE_DP && opts->lanes != 4) ||
+		    (atcphy->mode == APPLE_ATCPHY_MODE_USB3_DP &&
+		     opts->lanes != 2) ||
+		    (atcphy->mode == APPLE_ATCPHY_MODE_OFF && opts->lanes != 0))
+			dev_warn(atcphy->dev, "Unexpected lane count %u for mode %u\n",
+				 opts->lanes, atcphy->mode);
+	}
+
+	if (opts->set_rate) {
+		switch (opts->link_rate) {
+		case 1620:
+			link_rate = ATCPHY_DP_LINK_RATE_RBR;
+			break;
+		case 2700:
+			link_rate = ATCPHY_DP_LINK_RATE_HBR;
+			break;
+		case 5400:
+			link_rate = ATCPHY_DP_LINK_RATE_HBR2;
+			break;
+		case 8100:
+			link_rate = ATCPHY_DP_LINK_RATE_HBR3;
+			break;
+		case 0:
+			// TODO: disable!
+			return 0;
+		default:
+			dev_err(atcphy->dev, "Unsupported link rate: %d\n",
+				opts->link_rate);
+			return -EINVAL;
+		}
+
+		mutex_lock(&atcphy->lock);
+		ret = atcphy_dp_configure(atcphy, link_rate);
+		mutex_unlock(&atcphy->lock);
+	}
+
+	return ret;
+}
+
+static const struct phy_ops apple_atc_dp_phy_ops = {
+	.owner = THIS_MODULE,
+	.configure = atcphy_dpphy_configure,
+	.validate = atcphy_dpphy_validate,
+	.set_mode = atcphy_dpphy_set_mode,
+};
+
+static struct phy *atcphy_xlate(struct device *dev,
+				const struct of_phandle_args *args)
+{
+	struct apple_atcphy *atcphy = dev_get_drvdata(dev);
+
+	switch (args->args[0]) {
+	case PHY_TYPE_USB2:
+		return atcphy->phy_usb2;
+	case PHY_TYPE_USB3:
+		return atcphy->phy_usb3;
+	case PHY_TYPE_DP:
+		return atcphy->phy_dp;
+	}
+	return ERR_PTR(-ENODEV);
+}
+
+static int atcphy_probe_phy(struct apple_atcphy *atcphy)
+{
+	atcphy->phy_usb2 =
+		devm_phy_create(atcphy->dev, NULL, &apple_atc_usb2_phy_ops);
+	if (IS_ERR(atcphy->phy_usb2))
+		return PTR_ERR(atcphy->phy_usb2);
+	phy_set_drvdata(atcphy->phy_usb2, atcphy);
+
+	atcphy->phy_usb3 =
+		devm_phy_create(atcphy->dev, NULL, &apple_atc_usb3_phy_ops);
+	if (IS_ERR(atcphy->phy_usb3))
+		return PTR_ERR(atcphy->phy_usb3);
+	phy_set_drvdata(atcphy->phy_usb3, atcphy);
+
+	atcphy->phy_dp =
+		devm_phy_create(atcphy->dev, NULL, &apple_atc_dp_phy_ops);
+	if (IS_ERR(atcphy->phy_dp))
+		return PTR_ERR(atcphy->phy_dp);
+	phy_set_drvdata(atcphy->phy_dp, atcphy);
+
+	atcphy->phy_provider =
+		devm_of_phy_provider_register(atcphy->dev, atcphy_xlate);
+	if (IS_ERR(atcphy->phy_provider))
+		return PTR_ERR(atcphy->phy_provider);
+
+	return 0;
+}
+
+static int atcphy_dwc3_reset_assert(struct reset_controller_dev *rcdev,
+				    unsigned long id)
+{
+	struct apple_atcphy *atcphy = rcdev_to_apple_atcphy(rcdev);
+
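+	/* hold dwc3 in reset and clamp its interface */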
+	clear32(atcphy->regs.pipehandler + PIPEHANDLER_AON_GEN,
+		PIPEHANDLER_AON_GEN_DWC3_RESET_N);
+	set32(atcphy->regs.pipehandler + PIPEHANDLER_AON_GEN,
+	      PIPEHANDLER_AON_GEN_DWC3_FORCE_CLAMP_EN);
+
+	return 0;
+}
+
+static int atcphy_dwc3_reset_deassert(struct reset_controller_dev *rcdev,
+				      unsigned long id)
+{
+	struct apple_atcphy *atcphy = rcdev_to_apple_atcphy(rcdev);
+
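+	/* release the clamp and take dwc3 out of reset */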
+	clear32(atcphy->regs.pipehandler + PIPEHANDLER_AON_GEN,
+		PIPEHANDLER_AON_GEN_DWC3_FORCE_CLAMP_EN);
+	set32(atcphy->regs.pipehandler + PIPEHANDLER_AON_GEN,
+	      PIPEHANDLER_AON_GEN_DWC3_RESET_N);
+
+	return 0;
+}
+
+static const struct reset_control_ops atcphy_dwc3_reset_ops = {
+	.assert = atcphy_dwc3_reset_assert,
+	.deassert = atcphy_dwc3_reset_deassert,
+};
+
+static int atcphy_reset_xlate(struct reset_controller_dev *rcdev,
+			      const struct of_phandle_args *reset_spec)
+{
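+	/* there is only a single reset line, map every specifier to it */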
+	return 0;
+}
+
+static int atcphy_probe_rcdev(struct apple_atcphy *atcphy)
+{
+	atcphy->rcdev.owner = THIS_MODULE;
+	atcphy->rcdev.nr_resets = 1;
+	atcphy->rcdev.ops = &atcphy_dwc3_reset_ops;
+	atcphy->rcdev.of_node = atcphy->dev->of_node;
+	atcphy->rcdev.of_reset_n_cells = 0;
+	atcphy->rcdev.of_xlate = atcphy_reset_xlate;
+
+	return devm_reset_controller_register(atcphy->dev, &atcphy->rcdev);
+}
+
+static int atcphy_sw_set(struct typec_switch_dev *sw,
+			 enum typec_orientation orientation)
+{
+	struct apple_atcphy *atcphy = typec_switch_get_drvdata(sw);
+
+	trace_atcphy_sw_set(orientation);
+
+	mutex_lock(&atcphy->lock);
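+	/* just record the orientation; applied on the next configuration */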
+	switch (orientation) {
+	case TYPEC_ORIENTATION_NONE:
+		break;
+	case TYPEC_ORIENTATION_NORMAL:
+		atcphy->swap_lanes = false;
+		break;
+	case TYPEC_ORIENTATION_REVERSE:
+		atcphy->swap_lanes = true;
+		break;
+	}
+	mutex_unlock(&atcphy->lock);
+
+	return 0;
+}
+
+static int atcphy_probe_switch(struct apple_atcphy *atcphy)
+{
+	struct typec_switch_desc sw_desc = {
+		.drvdata = atcphy,
+		.fwnode = atcphy->dev->fwnode,
+		.set = atcphy_sw_set,
+	};
+
+	return PTR_ERR_OR_ZERO(typec_switch_register(atcphy->dev, &sw_desc));
+}
+
+static void atcphy_mux_set_work(struct work_struct *work)
+{
+	struct apple_atcphy *atcphy = container_of(work, struct apple_atcphy, mux_set_work);
+
+	mutex_lock(&atcphy->lock);
+	/*
+	 * If we're transitioning to TYPEC_STATE_SAFE, dwc3 will have gotten
+	 * a usb-role-switch event to ROLE_NONE which is deferred to a work
+	 * queue. dwc3 will try to switch the pipehandler mux to USB2 and
+	 * we have to make sure that has happened before we disable ATCPHY.
+	 * If we disable ATCPHY first instead, dwc3 will get stuck and the
+	 * port won't work anymore until a full SoC reset.
+	 * We're guaranteed that no other role switch event will be generated
+	 * before we return because the mux_set callback runs in the same
+	 * thread that generates these. We can thus unlock the mutex, wait
+	 * for dwc3_shutdown_event from the usb3 phy's power_off callback and
+	 * then take the lock again.
+	 */
+	if (atcphy->dwc3_online && atcphy->target_mode == APPLE_ATCPHY_MODE_OFF) {
+		reinit_completion(&atcphy->dwc3_shutdown_event);
+		mutex_unlock(&atcphy->lock);
+		wait_for_completion_timeout(&atcphy->dwc3_shutdown_event,
+					    msecs_to_jiffies(1000));
+		mutex_lock(&atcphy->lock);
+		WARN_ON(atcphy->dwc3_online);
+	}
+
+	switch (atcphy->target_mode) {
+	case APPLE_ATCPHY_MODE_DP:
+	case APPLE_ATCPHY_MODE_USB3_DP:
+	case APPLE_ATCPHY_MODE_USB3:
+	case APPLE_ATCPHY_MODE_USB4:
+		atcphy_cio_configure(atcphy, atcphy->target_mode);
+		break;
+	default:
+		dev_warn(atcphy->dev, "Unknown mode %d in atcphy_mux_set\n",
+			 atcphy->target_mode);
+		fallthrough;
+	case APPLE_ATCPHY_MODE_USB2:
+	case APPLE_ATCPHY_MODE_OFF:
+		atcphy->mode = APPLE_ATCPHY_MODE_OFF;
+		atcphy_disable_dp_aux(atcphy);
+		atcphy_cio_power_off(atcphy);
+	}
+
+	complete(&atcphy->atcphy_online_event);
+	mutex_unlock(&atcphy->lock);
+}
+
+static int atcphy_mux_set(struct typec_mux_dev *mux,
+			  struct typec_mux_state *state)
+{
+	struct apple_atcphy *atcphy = typec_mux_get_drvdata(mux);
+
+	// TODO:
+	flush_work(&atcphy->mux_set_work);
+
+	mutex_lock(&atcphy->lock);
+	trace_atcphy_mux_set(state);
+
+	if (state->mode == TYPEC_STATE_SAFE) {
+		atcphy->target_mode = APPLE_ATCPHY_MODE_OFF;
+	} else if (state->mode == TYPEC_STATE_USB) {
+		atcphy->target_mode = APPLE_ATCPHY_MODE_USB3;
+	} else if (state->alt && state->alt->svid == USB_TYPEC_DP_SID) {
+		switch (state->mode) {
+		case TYPEC_DP_STATE_C:
+		case TYPEC_DP_STATE_E:
+			atcphy->target_mode = APPLE_ATCPHY_MODE_DP;
+			break;
+		case TYPEC_DP_STATE_D:
+			atcphy->target_mode = APPLE_ATCPHY_MODE_USB3_DP;
+			break;
+		default:
+			dev_err(atcphy->dev,
+				"Unsupported DP pin assignment: 0x%lx.\n",
+				state->mode);
+			atcphy->target_mode = APPLE_ATCPHY_MODE_OFF;
+		}
+	} else if (state->alt && state->alt->svid == USB_TYPEC_TBT_SID) {
+		dev_err(atcphy->dev, "USB4/TBT mode is not supported yet.\n");
+		atcphy->target_mode = APPLE_ATCPHY_MODE_OFF;
+	} else if (state->alt) {
+		dev_err(atcphy->dev, "Unknown alternate mode SVID: 0x%x\n",
+			state->alt->svid);
+		atcphy->target_mode = APPLE_ATCPHY_MODE_OFF;
+	} else {
+		dev_err(atcphy->dev, "Unknown mode: 0x%lx\n", state->mode);
+		atcphy->target_mode = APPLE_ATCPHY_MODE_OFF;
+	}
+
+	if (atcphy->mode != atcphy->target_mode)
+		WARN_ON(!schedule_work(&atcphy->mux_set_work));
+
+	mutex_unlock(&atcphy->lock);
+
+	return 0;
+}
+
+static int atcphy_probe_mux(struct apple_atcphy *atcphy)
+{
+	struct typec_mux_desc mux_desc = {
+		.drvdata = atcphy,
+		.fwnode = atcphy->dev->fwnode,
+		.set = atcphy_mux_set,
+	};
+
+	return PTR_ERR_OR_ZERO(typec_mux_register(atcphy->dev, &mux_desc));
+}
+
+static int atcphy_parse_legacy_tunable(struct apple_atcphy *atcphy,
+				       struct atcphy_tunable *tunable,
+				       const char *name)
+{
+	struct property *prop;
+	const __le32 *p = NULL;
+	int i;
+
+#if 0
+	WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+			"parsing legacy tunable; please update m1n1");
+#endif
+
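+	/* legacy format: a flat list of <offset mask value> u32 triplets */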
+	prop = of_find_property(atcphy->np, name, NULL);
+	if (!prop) {
+		dev_err(atcphy->dev, "tunable %s not found\n", name);
+		return -ENOENT;
+	}
+
+	if (prop->length % (3 * sizeof(u32)))
+		return -EINVAL;
+
+	tunable->sz = prop->length / (3 * sizeof(u32));
+	tunable->values = devm_kcalloc(atcphy->dev, tunable->sz,
+				       sizeof(*tunable->values), GFP_KERNEL);
+	if (!tunable->values)
+		return -ENOMEM;
+
+	for (i = 0; i < tunable->sz; ++i) {
+		p = of_prop_next_u32(prop, p, &tunable->values[i].offset);
+		p = of_prop_next_u32(prop, p, &tunable->values[i].mask);
+		p = of_prop_next_u32(prop, p, &tunable->values[i].value);
+	}
+
+	trace_atcphy_parsed_tunable(name, tunable);
+
+	return 0;
+}
+
+static int atcphy_parse_new_tunable(struct apple_atcphy *atcphy,
+				    struct atcphy_tunable *tunable,
+				    const char *name)
+{
+	struct property *prop;
+	u64 *fdt_tunable;
+	int ret, i;
+
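+	/* new format: <offset size mask value> u64 quadruplets */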
+	prop = of_find_property(atcphy->np, name, NULL);
+	if (!prop) {
+		dev_err(atcphy->dev, "tunable %s not found\n", name);
+		return -ENOENT;
+	}
+
+	if (prop->length % (4 * sizeof(u64)))
+		return -EINVAL;
+
+	fdt_tunable = kzalloc(prop->length, GFP_KERNEL);
+	if (!fdt_tunable)
+		return -ENOMEM;
+
+	tunable->sz = prop->length / (4 * sizeof(u64));
+	ret = of_property_read_variable_u64_array(atcphy->np, name, fdt_tunable,
+						  tunable->sz, tunable->sz);
+	if (ret < 0)
+		goto err_free_fdt;
+
+	tunable->values = devm_kcalloc(atcphy->dev, tunable->sz,
+				       sizeof(*tunable->values), GFP_KERNEL);
+	if (!tunable->values) {
+		ret = -ENOMEM;
+		goto err_free_fdt;
+	}
+
+	for (i = 0; i < tunable->sz; ++i) {
+		u32 offset, size, mask, value;
+
+		offset = fdt_tunable[4 * i];
+		size = fdt_tunable[4 * i + 1];
+		mask = fdt_tunable[4 * i + 2];
+		value = fdt_tunable[4 * i + 3];
+
+		if (offset > U32_MAX || size != 4 || mask > U32_MAX ||
+		    value > U32_MAX) {
+			ret = -EINVAL;
+			goto err_free_values;
+		}
+
+		tunable->values[i].offset = offset;
+		tunable->values[i].mask = mask;
+		tunable->values[i].value = value;
+	}
+
+	trace_atcphy_parsed_tunable(name, tunable);
+	kfree(fdt_tunable);
+
+	return 0;
+
+err_free_values:
+	devm_kfree(atcphy->dev, tunable->values);
+err_free_fdt:
+	kfree(fdt_tunable);
+	return ret;
+}
+
+static int atcphy_parse_tunable(struct apple_atcphy *atcphy,
+				struct atcphy_tunable *tunable,
+				const char *name)
+{
+	int ret;
+
+	if (!of_find_property(atcphy->np, name, NULL)) {
+		dev_err(atcphy->dev, "tunable %s not found\n", name);
+		return -ENOENT;
+	}
+
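+	/* try the new 64-bit format first, fall back to the legacy one */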
+	ret = atcphy_parse_new_tunable(atcphy, tunable, name);
+	if (ret)
+		ret = atcphy_parse_legacy_tunable(atcphy, tunable, name);
+
+	return ret;
+}
+
+static int atcphy_load_tunables(struct apple_atcphy *atcphy)
+{
+	int ret;
+
+	ret = atcphy_parse_tunable(atcphy, &atcphy->tunables.axi2af,
+				   "apple,tunable-axi2af");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy, &atcphy->tunables.common,
+				   "apple,tunable-common");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy, &atcphy->tunables.lane_usb3[0],
+				   "apple,tunable-lane0-usb");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy, &atcphy->tunables.lane_usb3[1],
+				   "apple,tunable-lane1-usb");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy, &atcphy->tunables.lane_usb4[0],
+				   "apple,tunable-lane0-cio");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy, &atcphy->tunables.lane_usb4[1],
+				   "apple,tunable-lane1-cio");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy,
+				   &atcphy->tunables.lane_displayport[0],
+				   "apple,tunable-lane0-dp");
+	if (ret)
+		return ret;
+	ret = atcphy_parse_tunable(atcphy,
+				   &atcphy->tunables.lane_displayport[1],
+				   "apple,tunable-lane1-dp");
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int atcphy_load_fuses(struct apple_atcphy *atcphy)
+{
+	int ret;
+
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "aus_cmn_shm_vreg_trim",
+		&atcphy->fuses.aus_cmn_shm_vreg_trim);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "auspll_rodco_encap",
+		&atcphy->fuses.auspll_rodco_encap);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "auspll_rodco_bias_adjust",
+		&atcphy->fuses.auspll_rodco_bias_adjust);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "auspll_fracn_dll_start_capcode",
+		&atcphy->fuses.auspll_fracn_dll_start_capcode);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "auspll_dtc_vreg_adjust",
+		&atcphy->fuses.auspll_dtc_vreg_adjust);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "cio3pll_dco_coarsebin0",
+		&atcphy->fuses.cio3pll_dco_coarsebin[0]);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "cio3pll_dco_coarsebin1",
+		&atcphy->fuses.cio3pll_dco_coarsebin[1]);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "cio3pll_dll_start_capcode",
+		&atcphy->fuses.cio3pll_dll_start_capcode[0]);
+	if (ret)
+		return ret;
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "cio3pll_dtc_vreg_adjust",
+		&atcphy->fuses.cio3pll_dtc_vreg_adjust);
+	if (ret)
+		return ret;
+
+	/*
+	 * Only one of the two t8103 PHYs requires the following additional
+	 * fuse and, if it's present, a slightly different configuration
+	 * sequence. The other t8103 instance and all t6000 instances don't,
+	 * which means we must not fail here in case the fuse isn't present.
+	 */
+	ret = nvmem_cell_read_variable_le_u32(
+		atcphy->dev, "cio3pll_dll_start_capcode_workaround",
+		&atcphy->fuses.cio3pll_dll_start_capcode[1]);
+	switch (ret) {
+	case 0:
+		atcphy->quirks.t8103_cio3pll_workaround = true;
+		break;
+	case -ENOENT:
+		atcphy->quirks.t8103_cio3pll_workaround = false;
+		break;
+	default:
+		return ret;
+	}
+
+	atcphy->fuses.present = true;
+
+	trace_atcphy_fuses(atcphy);
+	return 0;
+}
+
+static int atcphy_probe(struct platform_device *pdev)
+{
+	struct apple_atcphy *atcphy;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	atcphy = devm_kzalloc(&pdev->dev, sizeof(*atcphy), GFP_KERNEL);
+	if (!atcphy)
+		return -ENOMEM;
+
+	atcphy->dev = dev;
+	atcphy->np = dev->of_node;
+	platform_set_drvdata(pdev, atcphy);
+
+	mutex_init(&atcphy->lock);
+	init_completion(&atcphy->dwc3_shutdown_event);
+	init_completion(&atcphy->atcphy_online_event);
+	INIT_WORK(&atcphy->mux_set_work, atcphy_mux_set_work);
+
+	atcphy->regs.core = devm_platform_ioremap_resource_byname(pdev, "core");
+	if (IS_ERR(atcphy->regs.core))
+		return PTR_ERR(atcphy->regs.core);
+	atcphy->regs.lpdptx =
+		devm_platform_ioremap_resource_byname(pdev, "lpdptx");
+	if (IS_ERR(atcphy->regs.lpdptx))
+		return PTR_ERR(atcphy->regs.lpdptx);
+	atcphy->regs.axi2af =
+		devm_platform_ioremap_resource_byname(pdev, "axi2af");
+	if (IS_ERR(atcphy->regs.axi2af))
+		return PTR_ERR(atcphy->regs.axi2af);
+	atcphy->regs.usb2phy =
+		devm_platform_ioremap_resource_byname(pdev, "usb2phy");
+	if (IS_ERR(atcphy->regs.usb2phy))
+		return PTR_ERR(atcphy->regs.usb2phy);
+	atcphy->regs.pipehandler =
+		devm_platform_ioremap_resource_byname(pdev, "pipehandler");
+	if (IS_ERR(atcphy->regs.pipehandler))
+		return PTR_ERR(atcphy->regs.pipehandler);
+
+	if (of_property_present(dev->of_node, "nvmem-cells")) {
+		ret = atcphy_load_fuses(atcphy);
+		if (ret)
+			return ret;
+	}
+
+	ret = atcphy_load_tunables(atcphy);
+	if (ret)
+		return ret;
+
+	atcphy->dp_only = of_property_read_bool(dev->of_node, "apple,mode-fixed-dp");
+
+	atcphy->mode = APPLE_ATCPHY_MODE_OFF;
+	atcphy->pipehandler_state = ATCPHY_PIPEHANDLER_STATE_INVALID;
+
+	if (!atcphy->dp_only) {
+		ret = atcphy_probe_rcdev(atcphy);
+		if (ret)
+			return ret;
+		ret = atcphy_probe_mux(atcphy);
+		if (ret)
+			return ret;
+		ret = atcphy_probe_switch(atcphy);
+		if (ret)
+			return ret;
+	}
+
+	ret = atcphy_probe_phy(atcphy);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct of_device_id atcphy_match[] = {
+	{
+		.compatible = "apple,t8103-atcphy",
+	},
+	{
+		.compatible = "apple,t6000-atcphy",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, atcphy_match);
+
+static struct platform_driver atcphy_driver = {
+	.driver = {
+		.name = "phy-apple-atc",
+		.of_match_table = atcphy_match,
+	},
+	.probe = atcphy_probe,
+};
+
+module_platform_driver(atcphy_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Type-C PHY driver");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/apple/atc.h b/drivers/phy/apple/atc.h
new file mode 100644
index 00000000000000..922f68c0100782
--- /dev/null
+++ b/drivers/phy/apple/atc.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Apple Type-C PHY driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ * Author: Sven Peter <sven@svenpeter.dev>
+ */
+
+#ifndef APPLE_PHY_ATC_H
+#define APPLE_PHY_ATC_H 1
+
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_tbt.h>
+#include <linux/workqueue.h>
+
+enum atcphy_dp_link_rate {
+	ATCPHY_DP_LINK_RATE_RBR,
+	ATCPHY_DP_LINK_RATE_HBR,
+	ATCPHY_DP_LINK_RATE_HBR2,
+	ATCPHY_DP_LINK_RATE_HBR3,
+};
+
+enum atcphy_pipehandler_state {
+	ATCPHY_PIPEHANDLER_STATE_INVALID,
+	ATCPHY_PIPEHANDLER_STATE_USB2,
+	ATCPHY_PIPEHANDLER_STATE_USB3,
+};
+
+enum atcphy_mode {
+	APPLE_ATCPHY_MODE_OFF,
+	APPLE_ATCPHY_MODE_USB2,
+	APPLE_ATCPHY_MODE_USB3,
+	APPLE_ATCPHY_MODE_USB3_DP,
+	APPLE_ATCPHY_MODE_USB4,
+	APPLE_ATCPHY_MODE_DP,
+};
+
+struct atcphy_dp_link_rate_configuration {
+	u16 freqinit_count_target;
+	u16 fbdivn_frac_den;
+	u16 fbdivn_frac_num;
+	u16 pclk_div_sel;
+	u8 lfclk_ctrl;
+	u8 vclk_op_divn;
+	bool plla_clkout_vreg_bypass;
+	bool bypass_txa_ldoclk;
+	bool txa_div2_en;
+};
+
+struct atcphy_mode_configuration {
+	u32 crossbar;
+	u32 crossbar_dp_single_pma;
+	bool crossbar_dp_both_pma;
+	u32 lane_mode[2];
+	bool dp_lane[2];
+	bool set_swap;
+};
+
+struct atcphy_tunable {
+	size_t sz;
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 value;
+	} *values;
+};
+
+struct apple_atcphy {
+	struct device_node *np;
+	struct device *dev;
+
+	struct {
+		unsigned int t8103_cio3pll_workaround : 1;
+	} quirks;
+
+	/* calibration fuse values */
+	struct {
+		bool present;
+		u32 aus_cmn_shm_vreg_trim;
+		u32 auspll_rodco_encap;
+		u32 auspll_rodco_bias_adjust;
+		u32 auspll_fracn_dll_start_capcode;
+		u32 auspll_dtc_vreg_adjust;
+		u32 cio3pll_dco_coarsebin[2];
+		u32 cio3pll_dll_start_capcode[2];
+		u32 cio3pll_dtc_vreg_adjust;
+	} fuses;
+
+	/* tunables provided by firmware through the device tree */
+	struct {
+		struct atcphy_tunable axi2af;
+		struct atcphy_tunable common;
+		struct atcphy_tunable lane_usb3[2];
+		struct atcphy_tunable lane_displayport[2];
+		struct atcphy_tunable lane_usb4[2];
+	} tunables;
+
+	bool usb3_power_on;
+	bool swap_lanes;
+
+	enum atcphy_mode mode;
+	int dp_link_rate;
+
+	struct {
+		void __iomem *core;
+		void __iomem *axi2af;
+		void __iomem *usb2phy;
+		void __iomem *pipehandler;
+		void __iomem *lpdptx;
+	} regs;
+
+	struct phy *phy_usb2;
+	struct phy *phy_usb3;
+	struct phy *phy_dp;
+	struct phy_provider *phy_provider;
+	struct reset_controller_dev rcdev;
+	struct typec_switch *sw;
+	struct typec_mux *mux;
+
+	bool dwc3_online;
+	struct completion dwc3_shutdown_event;
+	struct completion atcphy_online_event;
+
+	enum atcphy_pipehandler_state pipehandler_state;
+
+	struct mutex lock;
+
+	struct work_struct mux_set_work;
+	enum atcphy_mode target_mode;
+	bool dp_only;
+};
+
+#endif
diff --git a/drivers/phy/apple/dptx.c b/drivers/phy/apple/dptx.c
new file mode 100644
index 00000000000000..f0df2d40a18023
--- /dev/null
+++ b/drivers/phy/apple/dptx.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Apple dptx PHY driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ * Author: Janne Grunau <j@jannau.net>
+ *
+ * based on drivers/phy/apple/atc.c
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ * Author: Sven Peter <sven@svenpeter.dev>
+ */
+
+#include "dptx.h"
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <dt-bindings/phy/phy.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DPTX_MAX_LANES    4
+#define DPTX_LANE0_OFFSET 0x5000
+#define DPTX_LANE_STRIDE  0x1000
+#define DPTX_LANE_END     (DPTX_LANE0_OFFSET + DPTX_MAX_LANES * DPTX_LANE_STRIDE)
+
+enum apple_dptx_type {
+	DPTX_PHY_T8112,
+	DPTX_PHY_T6020,
+};
+
+struct apple_dptx_phy_hw {
+	enum apple_dptx_type type;
+};
+
+struct apple_dptx_phy {
+	struct device *dev;
+
+	struct apple_dptx_phy_hw hw;
+
+	int dp_link_rate;
+
+	struct {
+		void __iomem *core;
+		void __iomem *dptx;
+	} regs;
+
+	struct phy *phy_dp;
+	struct phy_provider *phy_provider;
+
+	struct mutex lock;
+
+	// TODO: m1n1 port things to clean up
+	u32 active_lanes;
+};
+
+
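+/* simple read-modify-write MMIO helpers, mirroring those in atc.c */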
+static inline void mask32(void __iomem *reg, u32 mask, u32 set)
+{
+	u32 value = readl(reg);
+	value &= ~mask;
+	value |= set;
+	writel(value, reg);
+}
+
+static inline void set32(void __iomem *reg, u32 set)
+{
+	mask32(reg, 0, set);
+}
+
+static inline void clear32(void __iomem *reg, u32 clear)
+{
+	mask32(reg, clear, 0);
+}
+
+
+static int dptx_phy_set_active_lane_count(struct apple_dptx_phy *phy, u32 num_lanes)
+{
+	u32 l, ctrl;
+
+	dev_dbg(phy->dev, "set_active_lane_count(%u)\n", num_lanes);
+
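+	/*
+	 * The lane control values below are opaque constants carried over
+	 * from the m1n1 port; their exact meaning is not known.
+	 */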
+	if (num_lanes == 3 || num_lanes > DPTX_MAX_LANES)
+		return -EINVAL;
+
+	ctrl = readl(phy->regs.dptx + 0x4000);
+	writel(ctrl, phy->regs.dptx + 0x4000);
+
+	for (l = 0; l < num_lanes; l++) {
+		u64 offset = DPTX_LANE0_OFFSET + DPTX_LANE_STRIDE * l;
+		readl(phy->regs.dptx + offset);
+		writel(0x100, phy->regs.dptx + offset);
+	}
+	for (; l < DPTX_MAX_LANES; l++) {
+		u64 offset = DPTX_LANE0_OFFSET + DPTX_LANE_STRIDE * l;
+		readl(phy->regs.dptx + offset);
+		writel(0x300, phy->regs.dptx + offset);
+	}
+	for (l = 0; l < num_lanes; l++) {
+		u64 offset = DPTX_LANE0_OFFSET + DPTX_LANE_STRIDE * l;
+		readl(phy->regs.dptx + offset);
+		writel(0x0, phy->regs.dptx + offset);
+	}
+	for (; l < DPTX_MAX_LANES; l++) {
+		u64 offset = DPTX_LANE0_OFFSET + DPTX_LANE_STRIDE * l;
+		readl(phy->regs.dptx + offset);
+		writel(0x300, phy->regs.dptx + offset);
+	}
+
+	if (num_lanes > 0) {
+		// clear32(phy->regs.dptx + 0x4000, 0x4000000);
+		ctrl = readl(phy->regs.dptx + 0x4000);
+		ctrl &= ~0x4000000;
+		writel(ctrl, phy->regs.dptx + 0x4000);
+	}
+	phy->active_lanes = num_lanes;
+
+	return 0;
+}
+
+static int dptx_phy_activate(struct apple_dptx_phy *phy, u32 dcp_index)
+{
+	u32 val_2014;
+	u32 val_4008;
+	u32 val_4408;
+
+	dev_dbg(phy->dev, "activate(dcp:%u)\n", dcp_index);
+
+	// MMIO: R.4   0x23c500010 (dptx-phy[1], offset 0x10) = 0x0
+	// MMIO: W.4   0x23c500010 (dptx-phy[1], offset 0x10) = 0x0
+	readl(phy->regs.core + 0x10);
+	writel(dcp_index, phy->regs.core + 0x10);
+
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x444
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x454
+	set32(phy->regs.core + 0x48, 0x010);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x454
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x474
+	set32(phy->regs.core + 0x48, 0x020);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x474
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x434
+	clear32(phy->regs.core + 0x48, 0x040);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x434
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x534
+	set32(phy->regs.core + 0x48, 0x100);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x534
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x734
+	set32(phy->regs.core + 0x48, 0x200);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x734
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x334
+	clear32(phy->regs.core + 0x48, 0x400);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x334
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x335
+	set32(phy->regs.core + 0x48, 0x001);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x335
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x337
+	set32(phy->regs.core + 0x48, 0x002);
+	// MMIO: R.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x337
+	// MMIO: W.4   0x23c500048 (dptx-phy[1], offset 0x48) = 0x333
+	clear32(phy->regs.core + 0x48, 0x004);
+
+	// MMIO: R.4   0x23c542014 (dptx-phy[0], offset 0x2014) = 0x80a0c
+	val_2014 = readl(phy->regs.dptx + 0x2014);
+	// MMIO: W.4   0x23c542014 (dptx-phy[0], offset 0x2014) = 0x300a0c
+	writel((0x30 << 16) | (val_2014 & 0xffff), phy->regs.dptx + 0x2014);
+
+	// MMIO: R.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x644800
+	// MMIO: W.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	set32(phy->regs.dptx + 0x20b8, 0x010000);
+
+	// MMIO: R.4   0x23c542220 (dptx-phy[0], offset 0x2220) = 0x11090a2
+	// MMIO: W.4   0x23c542220 (dptx-phy[0], offset 0x2220) = 0x11090a0
+	clear32(phy->regs.dptx + 0x2220, 0x0000002);
+
+	// MMIO: R.4   0x23c54222c (dptx-phy[0], offset 0x222c) = 0x103003
+	// MMIO: W.4   0x23c54222c (dptx-phy[0], offset 0x222c) = 0x103803
+	set32(phy->regs.dptx + 0x222c, 0x000800);
+	// MMIO: R.4   0x23c54222c (dptx-phy[0], offset 0x222c) = 0x103803
+	// MMIO: W.4   0x23c54222c (dptx-phy[0], offset 0x222c) = 0x103903
+	set32(phy->regs.dptx + 0x222c, 0x000100);
+
+	// MMIO: R.4   0x23c542230 (dptx-phy[0], offset 0x2230) = 0x2308804
+	// MMIO: W.4   0x23c542230 (dptx-phy[0], offset 0x2230) = 0x2208804
+	clear32(phy->regs.dptx + 0x2230, 0x0100000);
+
+	// MMIO: R.4   0x23c542278 (dptx-phy[0], offset 0x2278) = 0x18300811
+	// MMIO: W.4   0x23c542278 (dptx-phy[0], offset 0x2278) = 0x10300811
+	clear32(phy->regs.dptx + 0x2278, 0x08000000);
+
+	// MMIO: R.4   0x23c5422a4 (dptx-phy[0], offset 0x22a4) = 0x1044200
+	// MMIO: W.4   0x23c5422a4 (dptx-phy[0], offset 0x22a4) = 0x1044201
+	set32(phy->regs.dptx + 0x22a4, 0x0000001);
+
+	// MMIO: R.4   0x23c544008 (dptx-phy[0], offset 0x4008) = 0x18030
+	val_4008 = readl(phy->regs.dptx + 0x4008);
+	// MMIO: W.4   0x23c544008 (dptx-phy[0], offset 0x4008) = 0x30030
+	writel((0x6 << 15) | (val_4008 & 0x7fff), phy->regs.dptx + 0x4008);
+	// MMIO: R.4   0x23c544008 (dptx-phy[0], offset 0x4008) = 0x30030
+	// MMIO: W.4   0x23c544008 (dptx-phy[0], offset 0x4008) = 0x30010
+	clear32(phy->regs.dptx + 0x4008, 0x00020);
+
+	// MMIO: R.4   0x23c54420c (dptx-phy[0], offset 0x420c) = 0x88e3
+	// MMIO: W.4   0x23c54420c (dptx-phy[0], offset 0x420c) = 0x88c3
+	clear32(phy->regs.dptx + 0x420c, 0x0020);
+
+	// MMIO: R.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x0
+	// MMIO: W.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000000
+	set32(phy->regs.dptx + 0x4600, 0x8000000);
+
+	// MMIO: R.4   0x23c545040 (dptx-phy[0], offset 0x5040) = 0x21780
+	// MMIO: W.4   0x23c545040 (dptx-phy[0], offset 0x5040) = 0x221780
+	// MMIO: R.4   0x23c546040 (dptx-phy[0], offset 0x6040) = 0x21780
+	// MMIO: W.4   0x23c546040 (dptx-phy[0], offset 0x6040) = 0x221780
+	// MMIO: R.4   0x23c547040 (dptx-phy[0], offset 0x7040) = 0x21780
+	// MMIO: W.4   0x23c547040 (dptx-phy[0], offset 0x7040) = 0x221780
+	// MMIO: R.4   0x23c548040 (dptx-phy[0], offset 0x8040) = 0x21780
+	// MMIO: W.4   0x23c548040 (dptx-phy[0], offset 0x8040) = 0x221780
+	for (u32 loff = DPTX_LANE0_OFFSET; loff < DPTX_LANE_END;
+	     loff += DPTX_LANE_STRIDE)
+		set32(phy->regs.dptx + loff + 0x40, 0x200000);
+
+	// MMIO: R.4   0x23c545040 (dptx-phy[0], offset 0x5040) = 0x221780
+	// MMIO: W.4   0x23c545040 (dptx-phy[0], offset 0x5040) = 0x2a1780
+	// MMIO: R.4   0x23c546040 (dptx-phy[0], offset 0x6040) = 0x221780
+	// MMIO: W.4   0x23c546040 (dptx-phy[0], offset 0x6040) = 0x2a1780
+	// MMIO: R.4   0x23c547040 (dptx-phy[0], offset 0x7040) = 0x221780
+	// MMIO: W.4   0x23c547040 (dptx-phy[0], offset 0x7040) = 0x2a1780
+	// MMIO: R.4   0x23c548040 (dptx-phy[0], offset 0x8040) = 0x221780
+	// MMIO: W.4   0x23c548040 (dptx-phy[0], offset 0x8040) = 0x2a1780
+	for (u32 loff = DPTX_LANE0_OFFSET; loff < DPTX_LANE_END;
+	     loff += DPTX_LANE_STRIDE)
+		set32(phy->regs.dptx + loff + 0x40, 0x080000);
+
+	// MMIO: R.4   0x23c545244 (dptx-phy[0], offset 0x5244) = 0x18
+	// MMIO: W.4   0x23c545244 (dptx-phy[0], offset 0x5244) = 0x8
+	// MMIO: R.4   0x23c546244 (dptx-phy[0], offset 0x6244) = 0x18
+	// MMIO: W.4   0x23c546244 (dptx-phy[0], offset 0x6244) = 0x8
+	// MMIO: R.4   0x23c547244 (dptx-phy[0], offset 0x7244) = 0x18
+	// MMIO: W.4   0x23c547244 (dptx-phy[0], offset 0x7244) = 0x8
+	// MMIO: R.4   0x23c548244 (dptx-phy[0], offset 0x8244) = 0x18
+	// MMIO: W.4   0x23c548244 (dptx-phy[0], offset 0x8244) = 0x8
+	for (u32 loff = DPTX_LANE0_OFFSET; loff < DPTX_LANE_END;
+	     loff += DPTX_LANE_STRIDE)
+		clear32(phy->regs.dptx + loff + 0x244, 0x10);
+
+	// MMIO: R.4   0x23c542214 (dptx-phy[0], offset 0x2214) = 0x1e0
+	// MMIO: W.4   0x23c542214 (dptx-phy[0], offset 0x2214) = 0x1e1
+	set32(phy->regs.dptx + 0x2214, 0x001);
+
+	// MMIO: R.4   0x23c542224 (dptx-phy[0], offset 0x2224) = 0x20086001
+	// MMIO: W.4   0x23c542224 (dptx-phy[0], offset 0x2224) = 0x20086000
+	clear32(phy->regs.dptx + 0x2224, 0x00000001);
+
+	// MMIO: R.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2000
+	// MMIO: W.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2002
+	set32(phy->regs.dptx + 0x2200, 0x0002);
+
+	// MMIO: R.4   0x23c541000 (dptx-phy[0], offset 0x1000) = 0xe0000003
+	// MMIO: W.4   0x23c541000 (dptx-phy[0], offset 0x1000) = 0xe0000001
+	clear32(phy->regs.dptx + 0x1000, 0x00000002);
+
+	// MMIO: R.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x41
+	// MMIO: W.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x49
+	set32(phy->regs.dptx + 0x4004, 0x08);
+
+	/* TODO: no idea what happens here, supposedly setting/clearing some bits */
+	// MMIO: R.4   0x23c544404 (dptx-phy[0], offset 0x4404) = 0x555d444
+	readl(phy->regs.dptx + 0x4404);
+	// MMIO: W.4   0x23c544404 (dptx-phy[0], offset 0x4404) = 0x555d444
+	writel(0x555d444, phy->regs.dptx + 0x4404);
+	// MMIO: R.4   0x23c544404 (dptx-phy[0], offset 0x4404) = 0x555d444
+	readl(phy->regs.dptx + 0x4404);
+	// MMIO: W.4   0x23c544404 (dptx-phy[0], offset 0x4404) = 0x555d444
+	writel(0x555d444, phy->regs.dptx + 0x4404);
+
+	dptx_phy_set_active_lane_count(phy, 0);
+
+	// MMIO: R.4   0x23c544200 (dptx-phy[0], offset 0x4200) = 0x4002430
+	// MMIO: W.4   0x23c544200 (dptx-phy[0], offset 0x4200) = 0x4002420
+	clear32(phy->regs.dptx + 0x4200, 0x0000010);
+
+	// MMIO: R.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000000
+	// MMIO: W.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000000
+	clear32(phy->regs.dptx + 0x4600, 0x0000001);
+	// MMIO: R.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000000
+	// MMIO: W.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000001
+	set32(phy->regs.dptx + 0x4600, 0x0000001);
+	// MMIO: R.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000001
+	// MMIO: W.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000003
+	set32(phy->regs.dptx + 0x4600, 0x0000002);
+	// MMIO: R.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000043
+	// MMIO: R.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000043
+	// MMIO: W.4   0x23c544600 (dptx-phy[0], offset 0x4600) = 0x8000041
+	/* TODO: read first to check if the previous set(...,0x2) stuck? */
+	readl(phy->regs.dptx + 0x4600);
+	clear32(phy->regs.dptx + 0x4600, 0x0000001);
+
+	// MMIO: R.4   0x23c544408 (dptx-phy[0], offset 0x4408) = 0x482
+	// MMIO: W.4   0x23c544408 (dptx-phy[0], offset 0x4408) = 0x482
+	/* TODO: probably a set32 of an already set bit */
+	val_4408 = readl(phy->regs.dptx + 0x4408);
+	if (val_4408 != 0x482 && val_4408 != 0x483)
+		dev_warn(
+			phy->dev,
+			"unexpected initial value at regs.dptx offset 0x4408: 0x%03x\n",
+			val_4408);
+	writel(val_4408, phy->regs.dptx + 0x4408);
+	// MMIO: R.4   0x23c544408 (dptx-phy[0], offset 0x4408) = 0x482
+	// MMIO: W.4   0x23c544408 (dptx-phy[0], offset 0x4408) = 0x483
+	set32(phy->regs.dptx + 0x4408, 0x001);
+
+	return 0;
+}
+
+static int dptx_phy_deactivate(struct apple_dptx_phy *phy)
+{
+	return 0;
+}
+
+static int dptx_phy_set_link_rate(struct apple_dptx_phy *phy, u32 link_rate)
+{
+    u32 sts_1008, sts_1014, val_100c, val_20b0, val_20b4;
+
+	dev_dbg(phy->dev, "set_link_rate(%u)\n", link_rate);
+
+	// MMIO: R.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x49
+	// MMIO: W.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x49
+	set32(phy->regs.dptx + 0x4004, 0x08);
+
+	// MMIO: R.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	// MMIO: W.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	clear32(phy->regs.dptx + 0x4000, 0x0000040);
+
+	// MMIO: R.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x49
+	// MMIO: W.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x41
+	clear32(phy->regs.dptx + 0x4004, 0x08);
+
+	// MMIO: R.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	// MMIO: W.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	clear32(phy->regs.dptx + 0x4000, 0x2000000);
+	// MMIO: R.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	// MMIO: W.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	set32(phy->regs.dptx + 0x4000, 0x1000000);
+
+	// MMIO: R.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2002
+	// MMIO: R.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2002
+	// MMIO: W.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2000
+	/* TODO: what is this read checking for? */
+	readl(phy->regs.dptx + 0x2200);
+	clear32(phy->regs.dptx + 0x2200, 0x0002);
+
+	// MMIO: R.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf000
+	// MMIO: W.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf000
+	// MMIO: R.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf000
+	// MMIO: W.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf008
+	/* TODO: what is the setting/clearing? */
+	val_100c = readl(phy->regs.dptx + 0x100c);
+	writel(val_100c, phy->regs.dptx + 0x100c);
+	set32(phy->regs.dptx + 0x100c, 0x0008);
+
+	// MMIO: R.4   0x23c541014 (dptx-phy[0], offset 0x1014) = 0x1
+	sts_1014 = readl(phy->regs.dptx + 0x1014);
+	if (sts_1014 != 0x1)
+		dev_dbg(phy->dev, "unexpected?: dptx[0x1014]: %02x\n", sts_1014);
+
+	// MMIO: R.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf008
+	// MMIO: W.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf000
+	clear32(phy->regs.dptx + 0x100c, 0x0008);
+
+	// MMIO: R.4   0x23c541008 (dptx-phy[0], offset 0x1008) = 0x1
+	sts_1008 = readl(phy->regs.dptx + 0x1008);
+	if (sts_1008 != 0x1)
+		dev_dbg(phy->dev, "unexpected?: dptx[0x1008]: %02x\n", sts_1008);
+
+	// MMIO: R.4   0x23c542220 (dptx-phy[0], offset 0x2220) = 0x11090a0
+	// MMIO: W.4   0x23c542220 (dptx-phy[0], offset 0x2220) = 0x1109020
+	clear32(phy->regs.dptx + 0x2220, 0x0000080);
+
+	// MMIO: R.4   0x23c5420b0 (dptx-phy[0], offset 0x20b0) = 0x1e0e01c2
+	// MMIO: W.4   0x23c5420b0 (dptx-phy[0], offset 0x20b0) = 0x1e0e01c2
+	val_20b0 = readl(phy->regs.dptx + 0x20b0);
+	/* TODO: what happens on dptx-phy */
+	if (phy->hw.type == DPTX_PHY_T6020)
+		val_20b0 = (val_20b0 & ~0x3ff) | 0x2a3;
+	writel(val_20b0, phy->regs.dptx + 0x20b0);
+
+	// MMIO: R.4   0x23c5420b4 (dptx-phy[0], offset 0x20b4) = 0x7fffffe
+	// MMIO: W.4   0x23c5420b4 (dptx-phy[0], offset 0x20b4) = 0x7fffffe
+	val_20b4 = readl(phy->regs.dptx + 0x20b4);
+	/* TODO: what happens on dptx-phy */
+	if (phy->hw.type == DPTX_PHY_T6020)
+		val_20b4 = (val_20b4 | 0x4000000) & ~0x0008000;
+	writel(val_20b4, phy->regs.dptx + 0x20b4);
+
+	// MMIO: R.4   0x23c5420b4 (dptx-phy[0], offset 0x20b4) = 0x7fffffe
+	// MMIO: W.4   0x23c5420b4 (dptx-phy[0], offset 0x20b4) = 0x7fffffe
+	val_20b4 = readl(phy->regs.dptx + 0x20b4);
+	/* TODO: what happens on dptx-phy */
+	if (phy->hw.type == DPTX_PHY_T6020)
+		val_20b4 = (val_20b4 | 0x0000001) & ~0x0000004;
+	writel(val_20b4, phy->regs.dptx + 0x20b4);
+
+	// MMIO: R.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	// MMIO: W.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	/* TODO: unclear */
+	set32(phy->regs.dptx + 0x20b8, 0);
+	// MMIO: R.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	// MMIO: W.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	/* TODO: unclear */
+	set32(phy->regs.dptx + 0x20b8, 0);
+	// MMIO: R.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	// MMIO: W.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	/* TODO: unclear */
+	if (phy->hw.type == DPTX_PHY_T6020)
+		set32(phy->regs.dptx + 0x20b8, 0x010000);
+	else
+		set32(phy->regs.dptx + 0x20b8, 0);
+	// MMIO: R.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x654800
+	// MMIO: W.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x454800
+	clear32(phy->regs.dptx + 0x20b8, 0x200000);
+
+	// MMIO: R.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x454800
+	// MMIO: W.4   0x23c5420b8 (dptx-phy[0], offset 0x20b8) = 0x454800
+	/* TODO: unclear */
+	set32(phy->regs.dptx + 0x20b8, 0);
+
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x0
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x8
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x8
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0xc
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0xc
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x4000c
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x4000c
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0xc
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0xc
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x8000c
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x8000c
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0xc
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0xc
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x8
+	// MMIO: R.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x8
+	// MMIO: W.4   0x23c5000a0 (dptx-phy[1], offset 0xa0) = 0x0
+	set32(phy->regs.core + 0xa0, 0x8);
+	set32(phy->regs.core + 0xa0, 0x4);
+	set32(phy->regs.core + 0xa0, 0x40000);
+	clear32(phy->regs.core + 0xa0, 0x40000);
+	set32(phy->regs.core + 0xa0, 0x80000);
+	clear32(phy->regs.core + 0xa0, 0x80000);
+	clear32(phy->regs.core + 0xa0, 0x4);
+	clear32(phy->regs.core + 0xa0, 0x8);
+
+	// MMIO: R.4   0x23c542000 (dptx-phy[0], offset 0x2000) = 0x2
+	// MMIO: W.4   0x23c542000 (dptx-phy[0], offset 0x2000) = 0x2
+	/* TODO: unclear */
+	set32(phy->regs.dptx + 0x2000, 0x0);
+
+	// MMIO: R.4   0x23c542018 (dptx-phy[0], offset 0x2018) = 0x0
+	// MMIO: W.4   0x23c542018 (dptx-phy[0], offset 0x2018) = 0x0
+	clear32(phy->regs.dptx + 0x2018, 0x0);
+
+	// MMIO: R.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf000
+	// MMIO: W.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf007
+	set32(phy->regs.dptx + 0x100c, 0x0007);
+	// MMIO: R.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf007
+	// MMIO: W.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf00f
+	set32(phy->regs.dptx + 0x100c, 0x0008);
+
+	// MMIO: R.4   0x23c541014 (dptx-phy[0], offset 0x1014) = 0x38f
+	sts_1014 = readl(phy->regs.dptx + 0x1014);
+	if (sts_1014 != 0x38f)
+		dev_dbg(phy->dev, "unexpected?: dptx[0x1014]: %02x\n", sts_1014);
+
+	// MMIO: R.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf00f
+	// MMIO: W.4   0x23c54100c (dptx-phy[0], offset 0x100c) = 0xf007
+	clear32(phy->regs.dptx + 0x100c, 0x0008);
+
+	// MMIO: R.4   0x23c541008 (dptx-phy[0], offset 0x1008) = 0x9
+	sts_1008 = readl(phy->regs.dptx + 0x1008);
+	if (sts_1008 != 0x9)
+		dev_dbg(phy->dev, "unexpected?: dptx[0x1008]: %02x\n", sts_1008);
+
+	// MMIO: R.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2000
+	// MMIO: W.4   0x23c542200 (dptx-phy[0], offset 0x2200) = 0x2002
+	set32(phy->regs.dptx + 0x2200, 0x0002);
+
+	// MMIO: R.4   0x23c545010 (dptx-phy[0], offset 0x5010) = 0x18003000
+	// MMIO: W.4   0x23c545010 (dptx-phy[0], offset 0x5010) = 0x18003000
+	// MMIO: R.4   0x23c546010 (dptx-phy[0], offset 0x6010) = 0x18003000
+	// MMIO: W.4   0x23c546010 (dptx-phy[0], offset 0x6010) = 0x18003000
+	// MMIO: R.4   0x23c547010 (dptx-phy[0], offset 0x7010) = 0x18003000
+	// MMIO: W.4   0x23c547010 (dptx-phy[0], offset 0x7010) = 0x18003000
+	// MMIO: R.4   0x23c548010 (dptx-phy[0], offset 0x8010) = 0x18003000
+	// MMIO: W.4   0x23c548010 (dptx-phy[0], offset 0x8010) = 0x18003000
+	writel(0x18003000, phy->regs.dptx + 0x8010);
+	for (u32 loff = DPTX_LANE0_OFFSET; loff < DPTX_LANE_END; loff += DPTX_LANE_STRIDE) {
+		u32 val_l010 = readl(phy->regs.dptx + loff + 0x10);
+		writel(val_l010, phy->regs.dptx + loff + 0x10);
+	}
+
+	// MMIO: R.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x41021ac
+	// MMIO: W.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x51021ac
+	set32(phy->regs.dptx + 0x4000, 0x1000000);
+	// MMIO: R.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x51021ac
+	// MMIO: W.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x71021ac
+	set32(phy->regs.dptx + 0x4000, 0x2000000);
+
+	// MMIO: R.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x41
+	// MMIO: W.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x49
+	set32(phy->regs.dptx + 0x4004, 0x08);
+
+	// MMIO: R.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x71021ac
+	// MMIO: W.4   0x23c544000 (dptx-phy[0], offset 0x4000) = 0x71021ec
+	set32(phy->regs.dptx + 0x4000, 0x0000040);
+
+	// MMIO: R.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x49
+	// MMIO: W.4   0x23c544004 (dptx-phy[0], offset 0x4004) = 0x48
+	clear32(phy->regs.dptx + 0x4004, 0x01);
+
+	return 0;
+}
+
+static int dptx_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+	struct apple_dptx_phy *dptx_phy = phy_get_drvdata(phy);
+
+	switch (mode) {
+	case PHY_MODE_INVALID:
+		return dptx_phy_deactivate(dptx_phy);
+	case PHY_MODE_DP:
+		if (submode < 0 || submode > 5)
+			return -EINVAL;
+		return dptx_phy_activate(dptx_phy, submode);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int dptx_phy_validate(struct phy *phy, enum phy_mode mode, int submode,
+			     union phy_configure_opts *opts_)
+{
+	struct phy_configure_opts_dp *opts = &opts_->dp;
+
+	if (mode == PHY_MODE_INVALID) {
+		memset(opts, 0, sizeof(*opts));
+		return 0;
+	}
+
+	if (mode != PHY_MODE_DP)
+		return -EINVAL;
+	if (submode < 0 || submode > 5)
+		return -EINVAL;
+
+	opts->lanes = 4;
+	opts->link_rate = 8100;
+
+	for (int i = 0; i < 4; ++i) {
+		opts->voltage[i] = 3;
+		opts->pre[i] = 3;
+	}
+
+	return 0;
+}
+
+static int dptx_phy_configure(struct phy *phy, union phy_configure_opts *opts_)
+{
+	struct phy_configure_opts_dp *opts = &opts_->dp;
+	struct apple_dptx_phy *dptx_phy = phy_get_drvdata(phy);
+	enum dptx_phy_link_rate link_rate;
+	int ret = 0;
+
+	if (opts->set_lanes) {
+		mutex_lock(&dptx_phy->lock);
+		ret = dptx_phy_set_active_lane_count(dptx_phy, opts->lanes);
+		mutex_unlock(&dptx_phy->lock);
+	}
+
+	if (opts->set_rate) {
+		switch (opts->link_rate) {
+		case 1620:
+			link_rate = DPTX_PHY_LINK_RATE_RBR;
+			break;
+		case 2700:
+			link_rate = DPTX_PHY_LINK_RATE_HBR;
+			break;
+		case 5400:
+			link_rate = DPTX_PHY_LINK_RATE_HBR2;
+			break;
+		case 8100:
+			link_rate = DPTX_PHY_LINK_RATE_HBR3;
+			break;
+		case 0:
+			// TODO: disable!
+			return 0;
+			break;
+		default:
+			dev_err(dptx_phy->dev, "Unsupported link rate: %d\n",
+				opts->link_rate);
+			return -EINVAL;
+		}
+
+		mutex_lock(&dptx_phy->lock);
+		ret = dptx_phy_set_link_rate(dptx_phy, link_rate);
+		mutex_unlock(&dptx_phy->lock);
+	}
+
+	return ret;
+}
+
+static const struct phy_ops apple_atc_dp_phy_ops = {
+	.owner = THIS_MODULE,
+	.configure = dptx_phy_configure,
+	.validate = dptx_phy_validate,
+	.set_mode = dptx_phy_set_mode,
+};
+
+static int dptx_phy_probe(struct platform_device *pdev)
+{
+	struct apple_dptx_phy *dptx_phy;
+	struct device *dev = &pdev->dev;
+
+	dptx_phy = devm_kzalloc(dev, sizeof(*dptx_phy), GFP_KERNEL);
+	if (!dptx_phy)
+		return -ENOMEM;
+
+	dptx_phy->dev = dev;
+	dptx_phy->hw =
+		*(struct apple_dptx_phy_hw *)of_device_get_match_data(dev);
+	platform_set_drvdata(pdev, dptx_phy);
+
+	mutex_init(&dptx_phy->lock);
+
+	dptx_phy->regs.core =
+		devm_platform_ioremap_resource_byname(pdev, "core");
+	if (IS_ERR(dptx_phy->regs.core))
+		return PTR_ERR(dptx_phy->regs.core);
+	dptx_phy->regs.dptx =
+		devm_platform_ioremap_resource_byname(pdev, "dptx");
+	if (IS_ERR(dptx_phy->regs.dptx))
+		return PTR_ERR(dptx_phy->regs.dptx);
+
+	/* create phy */
+	dptx_phy->phy_dp =
+		devm_phy_create(dptx_phy->dev, NULL, &apple_atc_dp_phy_ops);
+	if (IS_ERR(dptx_phy->phy_dp))
+		return PTR_ERR(dptx_phy->phy_dp);
+	phy_set_drvdata(dptx_phy->phy_dp, dptx_phy);
+
+	dptx_phy->phy_provider =
+		devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(dptx_phy->phy_provider))
+		return PTR_ERR(dptx_phy->phy_provider);
+
+	return 0;
+}
+
+static const struct apple_dptx_phy_hw apple_dptx_hw_t6020 = {
+	.type = DPTX_PHY_T6020,
+};
+
+static const struct apple_dptx_phy_hw apple_dptx_hw_t8112 = {
+	.type = DPTX_PHY_T8112,
+};
+
+static const struct of_device_id dptx_phy_match[] = {
+	{ .compatible = "apple,t6020-dptx-phy", .data = &apple_dptx_hw_t6020 },
+	{ .compatible = "apple,t8112-dptx-phy", .data = &apple_dptx_hw_t8112 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dptx_phy_match);
+
+static struct platform_driver dptx_phy_driver = {
+	.driver = {
+		.name = "phy-apple-dptx",
+		.of_match_table = dptx_phy_match,
+	},
+	.probe = dptx_phy_probe,
+};
+
+module_platform_driver(dptx_phy_driver);
+
+MODULE_AUTHOR("Janne Grunau <j@jannau.net>");
+MODULE_DESCRIPTION("Apple DP TX PHY driver");
+
+MODULE_LICENSE("GPL");
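For context, the ops above are reached through the generic PHY framework rather than called directly. A display controller driver that has obtained this PHY (e.g. via devm_phy_get()) would drive it roughly as in the minimal sketch below; the example_dptx_bringup() helper and the chosen link rate are illustrative, not part of this series.

#include <linux/phy/phy.h>

static int example_dptx_bringup(struct phy *dptx_phy)
{
	union phy_configure_opts opts = {};
	int ret;

	/* Activate the PHY; the DP submode (0..5) is forwarded to dptx_phy_activate(). */
	ret = phy_set_mode_ext(dptx_phy, PHY_MODE_DP, 0);
	if (ret)
		return ret;

	/* Ask for HBR2 on all four lanes, handled by dptx_phy_configure(). */
	opts.dp.set_lanes = 1;
	opts.dp.lanes = 4;
	opts.dp.set_rate = 1;
	opts.dp.link_rate = 5400;	/* Mbit/s per lane */

	return phy_configure(dptx_phy, &opts);
}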
diff --git a/drivers/phy/apple/dptx.h b/drivers/phy/apple/dptx.h
new file mode 100644
index 00000000000000..2dd36d753eb357
--- /dev/null
+++ b/drivers/phy/apple/dptx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Apple DP TX PHY driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ * Author: Janne Grunau <j@jannau.net>
+ */
+
+#ifndef PHY_APPLE_DPTX_H
+#define PHY_APPLE_DPTX_H
+
+enum dptx_phy_link_rate {
+	DPTX_PHY_LINK_RATE_RBR,
+	DPTX_PHY_LINK_RATE_HBR,
+	DPTX_PHY_LINK_RATE_HBR2,
+	DPTX_PHY_LINK_RATE_HBR3,
+};
+#endif /* PHY_APPLE_DPTX_H */
diff --git a/drivers/phy/apple/trace.c b/drivers/phy/apple/trace.c
new file mode 100644
index 00000000000000..a82dc089f6caa8
--- /dev/null
+++ b/drivers/phy/apple/trace.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
diff --git a/drivers/phy/apple/trace.h b/drivers/phy/apple/trace.h
new file mode 100644
index 00000000000000..bcee8c52b0a1fd
--- /dev/null
+++ b/drivers/phy/apple/trace.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Apple Type-C PHY driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ * Author: Sven Peter <sven@svenpeter.dev>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM appletypecphy
+
+#if !defined(_APPLETYPECPHY_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _APPLETYPECPHY_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "atc.h"
+
+#define show_dp_lr(lr)                                  \
+	__print_symbolic(lr, { ATCPHY_DP_LINK_RATE_RBR, "RBR" }, \
+			 { ATCPHY_DP_LINK_RATE_HBR, "HBR" },          \
+			 { ATCPHY_DP_LINK_RATE_HBR2, "HBR2" },          \
+			 { ATCPHY_DP_LINK_RATE_HBR3, "HBR3" })
+
+#define show_sw_orientation(orientation)                                  \
+	__print_symbolic(orientation, { TYPEC_ORIENTATION_NONE, "none" }, \
+			 { TYPEC_ORIENTATION_NORMAL, "normal" },          \
+			 { TYPEC_ORIENTATION_REVERSE, "reverse" })
+
+TRACE_EVENT(atcphy_sw_set, TP_PROTO(enum typec_orientation orientation),
+	    TP_ARGS(orientation),
+
+	    TP_STRUCT__entry(__field(enum typec_orientation, orientation)),
+
+	    TP_fast_assign(__entry->orientation = orientation;),
+
+	    TP_printk("orientation: %s",
+		      show_sw_orientation(__entry->orientation)));
+
+#define show_mux_state(state)                                                 \
+	__print_symbolic(state.mode, { TYPEC_STATE_SAFE, "USB Safe State" }, \
+			 { TYPEC_STATE_USB, "USB" })
+
+#define show_atcphy_mode(mode)                                      \
+	__print_symbolic(mode, { APPLE_ATCPHY_MODE_OFF, "off" },    \
+			 { APPLE_ATCPHY_MODE_USB2, "USB2" },        \
+			 { APPLE_ATCPHY_MODE_USB3, "USB3" },        \
+			 { APPLE_ATCPHY_MODE_USB3_DP, "DP + USB" }, \
+			 { APPLE_ATCPHY_MODE_USB4, "USB4" },        \
+			 { APPLE_ATCPHY_MODE_DP, "DP-only" })
+
+TRACE_EVENT(atcphy_usb3_set_mode,
+	    TP_PROTO(struct apple_atcphy *atcphy, enum phy_mode mode,
+		     int submode),
+	    TP_ARGS(atcphy, mode, submode),
+
+	    TP_STRUCT__entry(__field(enum atcphy_mode, mode)
+					     __field(enum phy_mode, phy_mode)
+						     __field(int, submode)),
+
+	    TP_fast_assign(__entry->mode = atcphy->mode;
+			   __entry->phy_mode = mode;
+			   __entry->submode = submode;),
+
+	    TP_printk("mode: %s, phy_mode: %d, submode: %d",
+		      show_atcphy_mode(__entry->mode), __entry->phy_mode,
+		      __entry->submode));
+
+TRACE_EVENT(
+	atcphy_configure_lanes,
+	TP_PROTO(enum atcphy_mode mode,
+		 const struct atcphy_mode_configuration *cfg),
+	TP_ARGS(mode, cfg),
+
+	TP_STRUCT__entry(__field(enum atcphy_mode, mode) __field_struct(
+		struct atcphy_mode_configuration, cfg)),
+
+	TP_fast_assign(__entry->mode = mode; __entry->cfg = *cfg;),
+
+	TP_printk(
+		"mode: %s, crossbar: 0x%02x, lanes: {0x%02x, 0x%02x}, swap: %d",
+		show_atcphy_mode(__entry->mode), __entry->cfg.crossbar,
+		__entry->cfg.lane_mode[0], __entry->cfg.lane_mode[1],
+		__entry->cfg.set_swap));
+
+TRACE_EVENT(atcphy_mux_set, TP_PROTO(struct typec_mux_state *state),
+	    TP_ARGS(state),
+
+	    TP_STRUCT__entry(__field_struct(struct typec_mux_state, state)),
+
+	    TP_fast_assign(__entry->state = *state;),
+
+	    TP_printk("state: %s", show_mux_state(__entry->state)));
+
+TRACE_EVENT(atcphy_parsed_tunable,
+	    TP_PROTO(const char *name, struct atcphy_tunable *tunable),
+	    TP_ARGS(name, tunable),
+
+	    TP_STRUCT__entry(__field(const char *, name)
+				     __field(size_t, sz)),
+
+	    TP_fast_assign(__entry->name = name; __entry->sz = tunable->sz;),
+
+	    TP_printk("%s with %zu entries", __entry->name,
+		      __entry->sz));
+
+TRACE_EVENT(
+	atcphy_fuses, TP_PROTO(struct apple_atcphy *atcphy), TP_ARGS(atcphy),
+	TP_STRUCT__entry(__field(struct apple_atcphy *, atcphy)),
+	TP_fast_assign(__entry->atcphy = atcphy;),
+	TP_printk(
+		"aus_cmn_shm_vreg_trim: 0x%02x; auspll_rodco_encap: 0x%02x; auspll_rodco_bias_adjust: 0x%02x; auspll_fracn_dll_start_capcode: 0x%02x; auspll_dtc_vreg_adjust: 0x%02x; cio3pll_dco_coarsebin: 0x%02x, 0x%02x; cio3pll_dll_start_capcode: 0x%02x, 0x%02x; cio3pll_dtc_vreg_adjust: 0x%02x",
+		__entry->atcphy->fuses.aus_cmn_shm_vreg_trim,
+		__entry->atcphy->fuses.auspll_rodco_encap,
+		__entry->atcphy->fuses.auspll_rodco_bias_adjust,
+		__entry->atcphy->fuses.auspll_fracn_dll_start_capcode,
+		__entry->atcphy->fuses.auspll_dtc_vreg_adjust,
+		__entry->atcphy->fuses.cio3pll_dco_coarsebin[0],
+		__entry->atcphy->fuses.cio3pll_dco_coarsebin[1],
+		__entry->atcphy->fuses.cio3pll_dll_start_capcode[0],
+		__entry->atcphy->fuses.cio3pll_dll_start_capcode[1],
+		__entry->atcphy->fuses.cio3pll_dtc_vreg_adjust));
+
+
+
+TRACE_EVENT(atcphy_dp_configure,
+	    TP_PROTO(struct apple_atcphy *atcphy, enum atcphy_dp_link_rate lr),
+	    TP_ARGS(atcphy, lr),
+
+	    TP_STRUCT__entry(__string(devname, dev_name(atcphy->dev))
+				     __field(enum atcphy_dp_link_rate, lr)),
+
+	    TP_fast_assign(__assign_str(devname);
+			   __entry->lr = lr;),
+
+	    TP_printk("%s: link rate: %s", __get_str(devname),
+		      show_dp_lr(__entry->lr)));
+
+#endif /* _APPLETYPECPHY_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 960fd6a82450a4..9f5d5251161b0d 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -17,4 +17,6 @@ source "drivers/platform/surface/Kconfig"
 
 source "drivers/platform/x86/Kconfig"
 
+source "drivers/platform/apple/Kconfig"
+
 source "drivers/platform/arm64/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 19ac54648586eb..1e35f82c01e224 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -12,4 +12,5 @@ obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_CHROME_PLATFORMS)	+= chrome/
 obj-$(CONFIG_CZNIC_PLATFORMS)	+= cznic/
 obj-$(CONFIG_SURFACE_PLATFORMS)	+= surface/
+obj-$(CONFIG_APPLE_PLATFORMS)	+= apple/
 obj-$(CONFIG_ARM64_PLATFORM_DEVICES)	+= arm64/
diff --git a/drivers/platform/apple/Kconfig b/drivers/platform/apple/Kconfig
new file mode 100644
index 00000000000000..5bcadd349493ac
--- /dev/null
+++ b/drivers/platform/apple/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Apple Platform-Specific Drivers
+#
+
+menuconfig APPLE_PLATFORMS
+	bool "Apple Mac Platform-Specific Device Drivers"
+	default y
+	help
+	  Say Y here to get to see options for platform-specific device drivers
+	  for Apple devices. This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if APPLE_PLATFORMS
+
+config APPLE_SMC
+	tristate "Apple SMC Driver"
+	depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+	default ARCH_APPLE
+	select MFD_CORE
+	help
+	  Build support for the Apple System Management Controller present in
+	  Apple Macs. This driver currently supports the SMC in Apple Silicon
+	  Macs. For x86 Macs, see the applesmc driver (SENSORS_APPLESMC).
+
+	  Say Y here if you have an Apple Silicon Mac.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called macsmc.
+
+if APPLE_SMC
+
+config APPLE_SMC_RTKIT
+	tristate "RTKit (Apple Silicon) backend"
+	depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+	depends on APPLE_RTKIT
+	default ARCH_APPLE
+	help
+	  Build support for SMC communications via the RTKit backend. This is
+	  required for Apple Silicon Macs.
+
+	  Say Y here if you have an Apple Silicon Mac.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called macsmc-rtkit.
+
+endif
+endif
diff --git a/drivers/platform/apple/Makefile b/drivers/platform/apple/Makefile
new file mode 100644
index 00000000000000..79fac195398b0c
--- /dev/null
+++ b/drivers/platform/apple/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/apple
+# Apple Platform-Specific Drivers
+#
+
+macsmc-y				+= smc_core.o
+macsmc-rtkit-y				+= smc_rtkit.o
+
+obj-$(CONFIG_APPLE_SMC)			+= macsmc.o
+obj-$(CONFIG_APPLE_SMC_RTKIT)		+= macsmc-rtkit.o
diff --git a/drivers/platform/apple/smc.h b/drivers/platform/apple/smc.h
new file mode 100644
index 00000000000000..34131f77fe09cb
--- /dev/null
+++ b/drivers/platform/apple/smc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SMC internal core definitions
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _SMC_H
+#define _SMC_H
+
+#include <linux/mfd/macsmc.h>
+
+struct apple_smc_backend_ops {
+	int (*read_key)(void *cookie, smc_key key, void *buf, size_t size);
+	int (*write_key)(void *cookie, smc_key key, void *buf, size_t size);
+	int (*write_key_atomic)(void *cookie, smc_key key, void *buf, size_t size);
+	int (*rw_key)(void *cookie, smc_key key, void *wbuf, size_t wsize,
+		      void *rbuf, size_t rsize);
+	int (*get_key_by_index)(void *cookie, int index, smc_key *key);
+	int (*get_key_info)(void *cookie, smc_key key, struct apple_smc_key_info *info);
+};
+
+int apple_smc_probe(struct device *dev, const struct apple_smc_backend_ops *ops, void *cookie);
+void *apple_smc_get_cookie(struct apple_smc *smc);
+int apple_smc_remove(struct apple_smc *smc);
+void apple_smc_event_received(struct apple_smc *smc, uint32_t event);
+
+#endif
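The header above is the whole contract between the SMC core and a backend: the backend fills in an apple_smc_backend_ops table and hands the core an opaque cookie, which the core passes back on every call. A minimal sketch of that flow follows; all macsmc_foo_* names and the MMIO field are illustrative, and the RTKit backend later in this series is the real reference.

#include <linux/platform_device.h>
#include "smc.h"

struct macsmc_foo {
	void __iomem *regs;		/* hypothetical backend state */
};

static int macsmc_foo_read_key(void *cookie, smc_key key, void *buf, size_t size)
{
	struct macsmc_foo *foo = cookie;

	/* ...fetch the key value from the hardware behind foo->regs into buf... */
	return size;			/* on success, return the number of bytes read */
}

static const struct apple_smc_backend_ops macsmc_foo_ops = {
	.read_key = macsmc_foo_read_key,
	/* .write_key, .get_key_by_index, ... as declared above */
};

static int macsmc_foo_probe(struct platform_device *pdev)
{
	struct macsmc_foo *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* The core stores the cookie and hands it to every backend op. */
	return apple_smc_probe(&pdev->dev, &macsmc_foo_ops, foo);
}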
diff --git a/drivers/platform/apple/smc_core.c b/drivers/platform/apple/smc_core.c
new file mode 100644
index 00000000000000..ae85ef2aad9d33
--- /dev/null
+++ b/drivers/platform/apple/smc_core.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC core framework
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include "smc.h"
+
+struct apple_smc {
+	struct device *dev;
+
+	void *be_cookie;
+	const struct apple_smc_backend_ops *be;
+
+	struct mutex mutex;
+
+	u32 key_count;
+	smc_key first_key;
+	smc_key last_key;
+
+	struct blocking_notifier_head event_handlers;
+};
+
+static const struct mfd_cell apple_smc_devs[] = {
+	{
+		.name = "macsmc-gpio",
+	},
+	{
+		.name = "macsmc-hid",
+	},
+	{
+		.name = "macsmc-power",
+	},
+	{
+		.name = "macsmc-reboot",
+	},
+	{
+		.name = "macsmc-rtc",
+	},
+	{
+		.name = "macsmc_hwmon",
+	},
+};
+
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+	int ret;
+
+	mutex_lock(&smc->mutex);
+	ret = smc->be->read_key(smc->be_cookie, key, buf, size);
+	mutex_unlock(&smc->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_read);
+
+int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+	int ret;
+
+	mutex_lock(&smc->mutex);
+	ret = smc->be->write_key(smc->be_cookie, key, buf, size);
+	mutex_unlock(&smc->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_write);
+
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+	int ret;
+
+	/*
+	 * Will fail if SMC is busy. This is only used by SMC reboot/poweroff
+	 * final calls, so it doesn't really matter at that point.
+	 */
+	if (!mutex_trylock(&smc->mutex))
+		return -EBUSY;
+
+	ret = smc->be->write_key_atomic(smc->be_cookie, key, buf, size);
+	mutex_unlock(&smc->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_write_atomic);
+
+int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize,
+		 void *rbuf, size_t rsize)
+{
+	int ret;
+
+	mutex_lock(&smc->mutex);
+	ret = smc->be->rw_key(smc->be_cookie, key, wbuf, wsize, rbuf, rsize);
+	mutex_unlock(&smc->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_rw);
+
+int apple_smc_read_f32_scaled(struct apple_smc *smc, smc_key key, int *p, int scale)
+{
+	u32 fval;
+	u64 val;
+	int ret, exp;
+
+	ret = apple_smc_read_u32(smc, key, &fval);
+	if (ret < 0)
+		return ret;
+
+	val = ((u64)((fval & GENMASK(22, 0)) | BIT(23)));
+	exp = ((fval >> 23) & 0xff) - 127 - 23;
+	if (scale < 0) {
+		val <<= 32;
+		exp -= 32;
+		val /= -scale;
+	} else {
+		val *= scale;
+	}
+
+	if (exp > 63)
+		val = U64_MAX;
+	else if (exp < -63)
+		val = 0;
+	else if (exp < 0)
+		val >>= -exp;
+	else if (exp != 0 && (val & ~((1UL << (64 - exp)) - 1))) /* overflow */
+		val = U64_MAX;
+	else
+		val <<= exp;
+
+	if (fval & BIT(31)) {
+		if (val > (-(s64)INT_MIN))
+			*p = INT_MIN;
+		else
+			*p = -val;
+	} else {
+		if (val > INT_MAX)
+			*p = INT_MAX;
+		else
+			*p = val;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_read_f32_scaled);
+
+#define FLT_SIGN_MASK BIT(31)
+#define FLT_EXP_MASK GENMASK(30, 23)
+#define FLT_MANT_MASK GENMASK(22, 0)
+#define FLT_EXP_BIAS 127
+
+int apple_smc_write_f32_scaled(struct apple_smc *smc, smc_key key, int value,
+			       int scale)
+{
+	u64 val;
+	u32 fval = 0;
+	int exp = 0, neg;
+
+	val = abs(value);
+	neg = val != value;
+
+	if (scale > 1) {
+		val <<= 32;
+		exp = 32;
+		val /= scale;
+	} else if (scale < 1)
+		val *= -scale;
+
+	if (val) {
+		int msb = __fls(val) - exp;
+		if (msb > 23) {
+			val >>= msb - 23;
+			exp -= msb - 23;
+		} else if (msb < 23) {
+			val <<= 23 - msb;
+			exp += msb;
+		}
+
+		fval = FIELD_PREP(FLT_SIGN_MASK, neg) |
+		       FIELD_PREP(FLT_EXP_MASK, exp + FLT_EXP_BIAS) |
+		       FIELD_PREP(FLT_MANT_MASK, val);
+	}
+
+	return apple_smc_write_u32(smc, key, fval);
+}
+EXPORT_SYMBOL(apple_smc_write_f32_scaled);
+
+/*
+ * ioft is a 48.16 fixed point type
+ */
+int apple_smc_read_ioft_scaled(struct apple_smc *smc, smc_key key, u64 *p,
+			       int scale)
+{
+	u64 val;
+	int ret;
+
+	ret = apple_smc_read_u64(smc, key, &val);
+	if (ret < 0)
+		return ret;
+
+	*p = mult_frac(val, scale, 65536);
+
+	return 0;
+}
+EXPORT_SYMBOL(apple_smc_read_ioft_scaled);
+
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key)
+{
+	int ret;
+
+	mutex_lock(&smc->mutex);
+	ret = smc->be->get_key_by_index(smc->be_cookie, index, key);
+	mutex_unlock(&smc->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_get_key_by_index);
+
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info)
+{
+	int ret;
+
+	mutex_lock(&smc->mutex);
+	ret = smc->be->get_key_info(smc->be_cookie, key, info);
+	mutex_unlock(&smc->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(apple_smc_get_key_info);
+
+int apple_smc_find_first_key_index(struct apple_smc *smc, smc_key key)
+{
+	int start = 0, count = smc->key_count;
+	int ret;
+
+	if (key <= smc->first_key)
+		return 0;
+	if (key > smc->last_key)
+		return smc->key_count;
+
+	while (count > 1) {
+		int pivot = start + ((count - 1) >> 1);
+		smc_key pkey;
+
+		ret = apple_smc_get_key_by_index(smc, pivot, &pkey);
+		if (ret < 0)
+			return ret;
+
+		if (pkey == key)
+			return pivot;
+
+		pivot++;
+
+		if (pkey < key) {
+			count -= pivot - start;
+			start = pivot;
+		} else {
+			count = pivot - start;
+		}
+	}
+
+	return start;
+}
+EXPORT_SYMBOL(apple_smc_find_first_key_index);
+
+int apple_smc_get_key_count(struct apple_smc *smc)
+{
+	return smc->key_count;
+}
+EXPORT_SYMBOL(apple_smc_get_key_count);
+
+void apple_smc_event_received(struct apple_smc *smc, uint32_t event)
+{
+	dev_dbg(smc->dev, "Event: 0x%08x\n", event);
+	blocking_notifier_call_chain(&smc->event_handlers, event, NULL);
+}
+EXPORT_SYMBOL(apple_smc_event_received);
+
+int apple_smc_register_notifier(struct apple_smc *smc, struct notifier_block *n)
+{
+	return blocking_notifier_chain_register(&smc->event_handlers, n);
+}
+EXPORT_SYMBOL(apple_smc_register_notifier);
+
+int apple_smc_unregister_notifier(struct apple_smc *smc, struct notifier_block *n)
+{
+	return blocking_notifier_chain_unregister(&smc->event_handlers, n);
+}
+EXPORT_SYMBOL(apple_smc_unregister_notifier);
+
+void *apple_smc_get_cookie(struct apple_smc *smc)
+{
+	return smc->be_cookie;
+}
+EXPORT_SYMBOL(apple_smc_get_cookie);
+
+int apple_smc_probe(struct device *dev, const struct apple_smc_backend_ops *ops, void *cookie)
+{
+	struct apple_smc *smc;
+	u32 count;
+	int ret;
+
+	smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL);
+	if (!smc)
+		return -ENOMEM;
+
+	smc->dev = dev;
+	smc->be_cookie = cookie;
+	smc->be = ops;
+	mutex_init(&smc->mutex);
+	BLOCKING_INIT_NOTIFIER_HEAD(&smc->event_handlers);
+
+	ret = apple_smc_read_u32(smc, SMC_KEY(#KEY), &count);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to get key count");
+	smc->key_count = be32_to_cpu(count);
+
+	ret = apple_smc_get_key_by_index(smc, 0, &smc->first_key);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to get first key");
+
+	ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &smc->last_key);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to get last key");
+
+	dev_set_drvdata(dev, smc);
+
+	/* Enable notifications */
+	apple_smc_write_flag(smc, SMC_KEY(NTAP), 1);
+
+	dev_info(dev, "Initialized (%d keys %p4ch..%p4ch)\n",
+		 smc->key_count, &smc->first_key, &smc->last_key);
+
+	ret = mfd_add_devices(dev, -1, apple_smc_devs, ARRAY_SIZE(apple_smc_devs), NULL, 0, NULL);
+	if (ret)
+		return dev_err_probe(dev, ret, "Subdevice initialization failed");
+
+	return 0;
+}
+EXPORT_SYMBOL(apple_smc_probe);
+
+int apple_smc_remove(struct apple_smc *smc)
+{
+	mfd_remove_devices(smc->dev);
+
+	/* Disable notifications */
+	apple_smc_write_flag(smc, SMC_KEY(NTAP), 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(apple_smc_remove);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC core");
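As a sanity check on the float conversion above, a worked example: for a key whose raw value is the IEEE-754 float 30.0 (0x41f00000), the mantissa with its implicit bit is 0xf00000 and the exponent works out to 0x83 - 127 - 23 = -19, so with scale = 1000 the result is 0xf00000 * 1000 >> 19 = 30000, i.e. the reading in thousandths. A caller would look roughly like the sketch below; the VD0R key name is illustrative only.

#include <linux/mfd/macsmc.h>

static int example_read_rail_mv(struct apple_smc *smc, int *mv)
{
	/* A 30.0 V reading reported as an f32 comes back as 30000 with scale = 1000. */
	return apple_smc_read_f32_scaled(smc, SMC_KEY(VD0R), mv, 1000);
}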
diff --git a/drivers/platform/apple/smc_rtkit.c b/drivers/platform/apple/smc_rtkit.c
new file mode 100644
index 00000000000000..ac313cce786adc
--- /dev/null
+++ b/drivers/platform/apple/smc_rtkit.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC RTKit backend
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/unaligned.h>
+#include "smc.h"
+
+#define SMC_ENDPOINT			0x20
+
+/* Guess */
+#define SMC_SHMEM_SIZE			0x1000
+
+#define SMC_MSG_READ_KEY		0x10
+#define SMC_MSG_WRITE_KEY		0x11
+#define SMC_MSG_GET_KEY_BY_INDEX	0x12
+#define SMC_MSG_GET_KEY_INFO		0x13
+#define SMC_MSG_INITIALIZE		0x17
+#define SMC_MSG_NOTIFICATION		0x18
+#define SMC_MSG_RW_KEY			0x20
+
+#define SMC_DATA			GENMASK(63, 32)
+#define SMC_WSIZE			GENMASK(31, 24)
+#define SMC_SIZE			GENMASK(23, 16)
+#define SMC_ID				GENMASK(15, 12)
+#define SMC_MSG				GENMASK(7, 0)
+#define SMC_RESULT			SMC_MSG
+
+#define SMC_RECV_TIMEOUT		500
+
+struct apple_smc_rtkit {
+	struct device *dev;
+	struct apple_rtkit *rtk;
+
+	struct completion init_done;
+	bool initialized;
+	bool alive;
+
+	struct resource *sram;
+	void __iomem *sram_base;
+	struct apple_rtkit_shmem shmem;
+
+	unsigned int msg_id;
+
+	bool atomic_pending;
+	struct completion cmd_done;
+	u64 cmd_ret;
+};
+
+static int apple_smc_rtkit_write_key_atomic(void *cookie, smc_key key, void *buf, size_t size)
+{
+	struct apple_smc_rtkit *smc = cookie;
+	int ret;
+	u64 msg;
+	u8 result;
+
+	if (size > SMC_SHMEM_SIZE || size == 0)
+		return -EINVAL;
+
+	if (!smc->alive)
+		return -EIO;
+
+	memcpy_toio(smc->shmem.iomem, buf, size);
+	smc->msg_id = (smc->msg_id + 1) & 0xf;
+	msg = (FIELD_PREP(SMC_MSG, SMC_MSG_WRITE_KEY) |
+	       FIELD_PREP(SMC_SIZE, size) |
+	       FIELD_PREP(SMC_ID, smc->msg_id) |
+	       FIELD_PREP(SMC_DATA, key));
+	smc->atomic_pending = true;
+
+	ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, true);
+	if (ret < 0) {
+		dev_err(smc->dev, "Failed to send command (%d)\n", ret);
+		return ret;
+	}
+
+	while (smc->atomic_pending) {
+		ret = apple_rtkit_poll(smc->rtk);
+		if (ret < 0) {
+			dev_err(smc->dev, "RTKit poll failed (%llx)\n", msg);
+			return ret;
+		}
+		udelay(100);
+	}
+
+	if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) {
+		dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n",
+			smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret));
+		return -EIO;
+	}
+
+	result = FIELD_GET(SMC_RESULT, smc->cmd_ret);
+	if (result != 0)
+		return -result;
+
+	return FIELD_GET(SMC_SIZE, smc->cmd_ret);
+}
+
+static int apple_smc_cmd(struct apple_smc_rtkit *smc, u64 cmd, u64 arg,
+			 u64 size, u64 wsize, u32 *ret_data)
+{
+	int ret;
+	u64 msg;
+	u8 result;
+
+	if (!smc->alive)
+		return -EIO;
+
+	reinit_completion(&smc->cmd_done);
+
+	smc->msg_id = (smc->msg_id + 1) & 0xf;
+	msg = (FIELD_PREP(SMC_MSG, cmd) |
+	       FIELD_PREP(SMC_SIZE, size) |
+	       FIELD_PREP(SMC_WSIZE, wsize) |
+	       FIELD_PREP(SMC_ID, smc->msg_id) |
+	       FIELD_PREP(SMC_DATA, arg));
+
+	ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, false);
+	if (ret < 0) {
+		dev_err(smc->dev, "Failed to send command\n");
+		return ret;
+	}
+
+	do {
+		if (wait_for_completion_timeout(&smc->cmd_done,
+						msecs_to_jiffies(SMC_RECV_TIMEOUT)) == 0) {
+			dev_err(smc->dev, "Command timed out (%llx)\n", msg);
+			return -ETIMEDOUT;
+		}
+		if (FIELD_GET(SMC_ID, smc->cmd_ret) == smc->msg_id)
+			break;
+		dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n",
+			smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret));
+	} while (1);
+
+	result = FIELD_GET(SMC_RESULT, smc->cmd_ret);
+	if (result != 0)
+		return -EIO;
+
+	if (ret_data)
+		*ret_data = FIELD_GET(SMC_DATA, smc->cmd_ret);
+
+	return FIELD_GET(SMC_SIZE, smc->cmd_ret);
+}
+
+static int _apple_smc_rtkit_read_key(struct apple_smc_rtkit *smc, smc_key key,
+				     void *buf, size_t size, size_t wsize)
+{
+	int ret;
+	u32 rdata;
+	u64 cmd;
+
+	if (size > SMC_SHMEM_SIZE || size == 0)
+		return -EINVAL;
+
+	cmd = wsize ? SMC_MSG_RW_KEY : SMC_MSG_READ_KEY;
+
+	ret = apple_smc_cmd(smc, cmd, key, size, wsize, &rdata);
+	if (ret < 0)
+		return ret;
+
+	if (size <= 4)
+		memcpy(buf, &rdata, size);
+	else
+		memcpy_fromio(buf, smc->shmem.iomem, size);
+
+	return ret;
+}
+
+static int apple_smc_rtkit_read_key(void *cookie, smc_key key, void *buf, size_t size)
+{
+	return _apple_smc_rtkit_read_key(cookie, key, buf, size, 0);
+}
+
+static int apple_smc_rtkit_write_key(void *cookie, smc_key key, void *buf, size_t size)
+{
+	struct apple_smc_rtkit *smc = cookie;
+
+	if (size > SMC_SHMEM_SIZE || size == 0)
+		return -EINVAL;
+
+	memcpy_toio(smc->shmem.iomem, buf, size);
+	return apple_smc_cmd(smc, SMC_MSG_WRITE_KEY, key, size, 0, NULL);
+}
+
+static int apple_smc_rtkit_rw_key(void *cookie, smc_key key,
+				  void *wbuf, size_t wsize, void *rbuf, size_t rsize)
+{
+	struct apple_smc_rtkit *smc = cookie;
+
+	if (wsize > SMC_SHMEM_SIZE || wsize == 0)
+		return -EINVAL;
+
+	memcpy_toio(smc->shmem.iomem, wbuf, wsize);
+	return _apple_smc_rtkit_read_key(smc, key, rbuf, rsize, wsize);
+}
+
+static int apple_smc_rtkit_get_key_by_index(void *cookie, int index, smc_key *key)
+{
+	struct apple_smc_rtkit *smc = cookie;
+	int ret;
+
+	ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_BY_INDEX, index, 0, 0, key);
+
+	*key = swab32(*key);
+	return ret;
+}
+
+static int apple_smc_rtkit_get_key_info(void *cookie, smc_key key, struct apple_smc_key_info *info)
+{
+	struct apple_smc_rtkit *smc = cookie;
+	u8 key_info[6];
+	int ret;
+
+	ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_INFO, key, 0, 0, NULL);
+	if (ret >= 0 && info) {
+		memcpy_fromio(key_info, smc->shmem.iomem, sizeof(key_info));
+		info->size = key_info[0];
+		info->type_code = get_unaligned_be32(&key_info[1]);
+		info->flags = key_info[5];
+	}
+	return ret;
+}
+
+static const struct apple_smc_backend_ops apple_smc_rtkit_be_ops = {
+	.read_key = apple_smc_rtkit_read_key,
+	.write_key = apple_smc_rtkit_write_key,
+	.write_key_atomic = apple_smc_rtkit_write_key_atomic,
+	.rw_key = apple_smc_rtkit_rw_key,
+	.get_key_by_index = apple_smc_rtkit_get_key_by_index,
+	.get_key_info = apple_smc_rtkit_get_key_info,
+};
+
+static void apple_smc_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
+{
+	struct apple_smc_rtkit *smc = cookie;
+
+	dev_err(smc->dev, "SMC crashed! Your system will reboot in a few seconds...\n");
+	smc->alive = false;
+}
+
+static int apple_smc_rtkit_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	struct apple_smc_rtkit *smc = cookie;
+	struct resource res = {
+		.start = bfr->iova,
+		.end = bfr->iova + bfr->size - 1,
+		.name = "rtkit_map",
+		.flags = smc->sram->flags,
+	};
+
+	if (!bfr->iova) {
+		dev_err(smc->dev, "RTKit wants a RAM buffer\n");
+		return -EIO;
+	}
+
+	if (res.end < res.start || !resource_contains(smc->sram, &res)) {
+		dev_err(smc->dev,
+			"RTKit buffer request outside SRAM region: %pR\n", &res);
+		return -EFAULT;
+	}
+
+	bfr->iomem = smc->sram_base + (res.start - smc->sram->start);
+	bfr->is_mapped = true;
+
+	return 0;
+}
+
+static void apple_smc_rtkit_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	// no-op
+}
+
+static bool apple_smc_rtkit_recv_early(void *cookie, u8 endpoint, u64 message)
+{
+	struct apple_smc_rtkit *smc = cookie;
+
+	if (endpoint != SMC_ENDPOINT) {
+		dev_err(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint);
+		return false;
+	}
+
+	if (!smc->initialized) {
+		int ret;
+
+		smc->shmem.iova = message;
+		smc->shmem.size = SMC_SHMEM_SIZE;
+		ret = apple_smc_rtkit_shmem_setup(smc, &smc->shmem);
+		if (ret < 0)
+			dev_err(smc->dev, "Failed to initialize shared memory\n");
+		else
+			smc->alive = true;
+		smc->initialized = true;
+		complete(&smc->init_done);
+	} else if (FIELD_GET(SMC_MSG, message) == SMC_MSG_NOTIFICATION) {
+		/* Handle these in the RTKit worker thread */
+		return false;
+	} else {
+		smc->cmd_ret = message;
+		if (smc->atomic_pending) {
+			smc->atomic_pending = false;
+		} else {
+			complete(&smc->cmd_done);
+		}
+	}
+
+	return true;
+}
+
+static void apple_smc_rtkit_recv(void *cookie, u8 endpoint, u64 message)
+{
+	struct apple_smc_rtkit *smc = cookie;
+	struct apple_smc *core = dev_get_drvdata(smc->dev);
+
+	if (endpoint != SMC_ENDPOINT) {
+		dev_err(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint);
+		return;
+	}
+
+	if (FIELD_GET(SMC_MSG, message) != SMC_MSG_NOTIFICATION) {
+		dev_err(smc->dev, "Received unknown message from worker: 0x%llx\n", message);
+		return;
+	}
+
+	apple_smc_event_received(core, FIELD_GET(SMC_DATA, message));
+}
+
+static const struct apple_rtkit_ops apple_smc_rtkit_ops = {
+	.crashed = apple_smc_rtkit_crashed,
+	.recv_message = apple_smc_rtkit_recv,
+	.recv_message_early = apple_smc_rtkit_recv_early,
+	.shmem_setup = apple_smc_rtkit_shmem_setup,
+	.shmem_destroy = apple_smc_rtkit_shmem_destroy,
+};
+
+static int apple_smc_rtkit_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct apple_smc_rtkit *smc;
+	int ret;
+
+	smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL);
+	if (!smc)
+		return -ENOMEM;
+
+	smc->dev = dev;
+
+	smc->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	if (!smc->sram)
+		return dev_err_probe(dev, -EIO,
+				     "No SRAM region");
+
+	smc->sram_base = devm_ioremap_resource(dev, smc->sram);
+	if (IS_ERR(smc->sram_base))
+		return dev_err_probe(dev, PTR_ERR(smc->sram_base),
+				     "Failed to map SRAM region");
+
+	smc->rtk =
+		devm_apple_rtkit_init(dev, smc, NULL, 0, &apple_smc_rtkit_ops);
+	if (IS_ERR(smc->rtk))
+		return dev_err_probe(dev, PTR_ERR(smc->rtk),
+				     "Failed to initialize RTKit");
+
+	ret = apple_rtkit_wake(smc->rtk);
+	if (ret != 0)
+		return dev_err_probe(dev, ret,
+				     "Failed to wake up SMC");
+
+	ret = apple_rtkit_start_ep(smc->rtk, SMC_ENDPOINT);
+	if (ret != 0) {
+		dev_err(dev, "Failed to start endpoint\n");
+		goto cleanup;
+	}
+
+	init_completion(&smc->init_done);
+	init_completion(&smc->cmd_done);
+
+	ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT,
+				       FIELD_PREP(SMC_MSG, SMC_MSG_INITIALIZE), NULL, false);
+	if (ret < 0)
+		return dev_err_probe(dev, ret,
+				     "Failed to send init message");
+
+	if (wait_for_completion_timeout(&smc->init_done,
+					msecs_to_jiffies(SMC_RECV_TIMEOUT)) == 0) {
+		ret = -ETIMEDOUT;
+		dev_err(dev, "Timed out initializing SMC\n");
+		goto cleanup;
+	}
+
+	if (!smc->alive) {
+		ret = -EIO;
+		goto cleanup;
+	}
+
+	ret = apple_smc_probe(dev, &apple_smc_rtkit_be_ops, smc);
+	if (ret)
+		goto cleanup;
+
+	return 0;
+
+cleanup:
+	/* Try to shut down RTKit, if it's not completely wedged */
+	if (apple_rtkit_is_running(smc->rtk))
+		apple_rtkit_quiesce(smc->rtk);
+
+	return ret;
+}
+
+static void apple_smc_rtkit_remove(struct platform_device *pdev)
+{
+	struct apple_smc *core = platform_get_drvdata(pdev);
+	struct apple_smc_rtkit *smc = apple_smc_get_cookie(core);
+
+	apple_smc_remove(core);
+
+	if (apple_rtkit_is_running(smc->rtk))
+		apple_rtkit_quiesce(smc->rtk);
+}
+
+static const struct of_device_id apple_smc_rtkit_of_match[] = {
+	{ .compatible = "apple,smc" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, apple_smc_rtkit_of_match);
+
+static struct platform_driver apple_smc_rtkit_driver = {
+	.driver = {
+		.name = "macsmc-rtkit",
+		.of_match_table = apple_smc_rtkit_of_match,
+	},
+	.probe = apple_smc_rtkit_probe,
+	.remove = apple_smc_rtkit_remove,
+};
+module_platform_driver(apple_smc_rtkit_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC RTKit backend driver");
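To summarise the mailbox protocol implemented above: every request is a single 64-bit word built from the SMC_* field masks, the firmware echoes the 4-bit SMC_ID tag in its reply, and replies of at most four bytes are returned inline in SMC_DATA while larger ones are written to the shared-memory buffer announced at initialization. Below is a sketch of how a READ_KEY request word is packed using the masks defined above; the CHLS key is illustrative.

#include <linux/bitfield.h>

static u64 example_read_key_msg(u8 tag)
{
	/* Read 4 bytes of the key 'CHLS'; tag must fit in the 4-bit SMC_ID field. */
	return FIELD_PREP(SMC_MSG, SMC_MSG_READ_KEY) |
	       FIELD_PREP(SMC_SIZE, 4) |
	       FIELD_PREP(SMC_ID, tag) |
	       FIELD_PREP(SMC_DATA, SMC_KEY(CHLS));
}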
diff --git a/drivers/pmdomain/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c
index 9467235110f465..1d017ff8653a0a 100644
--- a/drivers/pmdomain/apple/pmgr-pwrstate.c
+++ b/drivers/pmdomain/apple/pmgr-pwrstate.c
@@ -21,7 +21,8 @@
 #define APPLE_PMGR_AUTO_ENABLE  BIT(28)
 #define APPLE_PMGR_PS_AUTO      GENMASK(27, 24)
 #define APPLE_PMGR_PS_MIN       GENMASK(19, 16)
-#define APPLE_PMGR_PARENT_OFF   BIT(11)
+#define APPLE_PMGR_PS_RESET     BIT(12)
+#define APPLE_PMGR_BUSY         BIT(11)
 #define APPLE_PMGR_DEV_DISABLE  BIT(10)
 #define APPLE_PMGR_WAS_CLKGATED BIT(9)
 #define APPLE_PMGR_WAS_PWRGATED BIT(8)
@@ -44,6 +45,9 @@ struct apple_pmgr_ps {
 	struct regmap *regmap;
 	u32 offset;
 	u32 min_state;
+	bool force_disable;
+	bool force_reset;
+	bool externally_clocked;
 };
 
 #define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd)
@@ -53,7 +57,7 @@ static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool a
 {
 	int ret;
 	struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd);
-	u32 reg;
+	u32 reg, cur;
 
 	ret = regmap_read(ps->regmap, ps->offset, &reg);
 	if (ret < 0)
@@ -64,24 +68,57 @@ static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool a
 		dev_err(ps->dev, "PS %s: powering off with RESET active\n",
 			genpd->name);
 
-	reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET);
+	if (pstate != APPLE_PMGR_PS_ACTIVE && (ps->force_disable || ps->force_reset)) {
+		u32 reg_pre = reg & ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS);
+
+		if (ps->force_disable)
+			reg_pre |= APPLE_PMGR_DEV_DISABLE;
+		if (ps->force_reset)
+			reg_pre |= APPLE_PMGR_PS_RESET;
+
+		regmap_write(ps->regmap, ps->offset, reg_pre);
+
+		ret = regmap_read_poll_timeout_atomic(
+			ps->regmap, ps->offset, cur,
+			(cur & (APPLE_PMGR_DEV_DISABLE | APPLE_PMGR_PS_RESET)) ==
+			(reg_pre & (APPLE_PMGR_DEV_DISABLE | APPLE_PMGR_PS_RESET)), 1,
+			APPLE_PMGR_PS_SET_TIMEOUT);
+
+		if (ret < 0)
+			dev_err(ps->dev, "PS %s: Failed to set reset/disable bits (now: 0x%x)\n",
+				genpd->name, cur);
+	}
+
+	reg &= ~(APPLE_PMGR_DEV_DISABLE | APPLE_PMGR_PS_RESET |
+		 APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET);
 	reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate);
 
 	dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg);
 
 	regmap_write(ps->regmap, ps->offset, reg);
 
-	ret = regmap_read_poll_timeout_atomic(
-		ps->regmap, ps->offset, reg,
-		(FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1,
-		APPLE_PMGR_PS_SET_TIMEOUT);
+	if (ps->externally_clocked && pstate == APPLE_PMGR_PS_ACTIVE) {
+		/*
+		 * If this clock domain requires an external clock, then
+		 * consider the "clock gated" state to be good enough.
+		 */
+		ret = regmap_read_poll_timeout_atomic(
+			ps->regmap, ps->offset, cur,
+			FIELD_GET(APPLE_PMGR_PS_ACTUAL, cur) >= APPLE_PMGR_PS_CLKGATE, 1,
+			APPLE_PMGR_PS_SET_TIMEOUT);
+	} else {
+		ret = regmap_read_poll_timeout_atomic(
+			ps->regmap, ps->offset, cur,
+			FIELD_GET(APPLE_PMGR_PS_ACTUAL, cur) == pstate, 1,
+			APPLE_PMGR_PS_SET_TIMEOUT);
+	}
+
 	if (ret < 0)
 		dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n",
 			genpd->name, pstate, reg);
 
 	if (auto_enable) {
 		/* Not all devices implement this; this is a no-op where not implemented. */
-		reg &= ~APPLE_PMGR_FLAGS;
 		reg |= APPLE_PMGR_AUTO_ENABLE;
 		regmap_write(ps->regmap, ps->offset, reg);
 	}
@@ -234,6 +271,15 @@ static int apple_pmgr_ps_probe(struct platform_device *pdev)
 		regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN,
 				   FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state));
 
+	if (of_property_read_bool(node, "apple,force-disable"))
+		ps->force_disable = true;
+
+	if (of_property_read_bool(node, "apple,force-reset"))
+		ps->force_reset = true;
+
+	if (of_property_read_bool(node, "apple,externally-clocked"))
+		ps->externally_clocked = true;
+
 	active = apple_pmgr_ps_is_active(ps);
 	if (of_property_read_bool(node, "apple,always-on")) {
 		ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
@@ -242,6 +288,8 @@ static int apple_pmgr_ps_probe(struct platform_device *pdev)
 			/* Turn it on so pm_genpd_init does not fail */
 			active = apple_pmgr_ps_power_on(&ps->genpd) == 0;
 		}
+	} else if (active) {
+		ps->genpd.flags |= GENPD_FLAG_DEFER_OFF | GENPD_FLAG_ACTIVE_WAKEUP;
 	}
 
 	/* Turn on auto-PM if the domain is already on */
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 6c94137865c9b5..ebff814ce5bfae 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) "PM: " fmt
 
 #include <linux/delay.h>
+#include <linux/fwnode.h>
 #include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
@@ -176,6 +177,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = {
 #define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 #define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
 #define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
+#define genpd_is_defer_off(genpd)	(genpd->flags & GENPD_FLAG_DEFER_OFF)
 
 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
 		const struct generic_pm_domain *genpd)
@@ -810,6 +812,27 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 	queue_work(pm_wq, &genpd->power_off_work);
 }
 
+/**
+ * genpd_must_defer - Check whether the genpd cannot be safely powered off.
+ * @genpd: PM domain about to be powered down.
+ * @one_dev_probing: True if we are being called from the RPM callbacks of a device
+ * that is still probing, to allow power-off when that device is the only consumer left probing.
+ *
+ * Returns true if @genpd has the GENPD_FLAG_DEFER_OFF flag set and there
+ * are consumer devices which either do not exist yet (represented only by
+ * fw_devlink links) or whose drivers have not probed yet.
+ */
+static bool genpd_must_defer(struct generic_pm_domain *genpd, bool one_dev_probing)
+{
+	if (genpd_is_defer_off(genpd) && genpd->has_provider) {
+		int absent = fw_devlink_count_absent_consumers(genpd->provider);
+
+		if (absent > (one_dev_probing ? 1 : 0))
+			return true;
+	}
+	return false;
+}
+
 /**
  * genpd_power_off - Remove power from a given PM domain.
  * @genpd: PM domain to power down.
@@ -823,7 +846,7 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
  * have been powered down, remove power from @genpd.
  */
 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
-			   unsigned int depth)
+			   bool one_dev_probing, unsigned int depth)
 {
 	struct pm_domain_data *pdd;
 	struct gpd_link *link;
@@ -873,6 +896,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
 		return -EBUSY;
 
+	/*
+	 * Do not allow PM domain to be powered off if it is marked
+	 * as GENPD_FLAG_DEFER_OFF and there are consumer devices
+	 * which have not probed yet.
+	 */
+	if (genpd_must_defer(genpd, one_dev_probing))
+		return -EBUSY;
+
 	if (genpd->gov && genpd->gov->power_down_ok) {
 		if (!genpd->gov->power_down_ok(&genpd->domain))
 			return -EAGAIN;
@@ -899,7 +930,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 	list_for_each_entry(link, &genpd->child_links, child_node) {
 		genpd_sd_counter_dec(link->parent);
 		genpd_lock_nested(link->parent, depth + 1);
-		genpd_power_off(link->parent, false, depth + 1);
+		genpd_power_off(link->parent, false, false, depth + 1);
 		genpd_unlock(link->parent);
 	}
 
@@ -957,7 +988,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 					child_node) {
 		genpd_sd_counter_dec(link->parent);
 		genpd_lock_nested(link->parent, depth + 1);
-		genpd_power_off(link->parent, false, depth + 1);
+		genpd_power_off(link->parent, false, false, depth + 1);
 		genpd_unlock(link->parent);
 	}
 
@@ -1024,7 +1055,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, false, 0);
+	genpd_power_off(genpd, false, false, 0);
 	genpd_unlock(genpd);
 }
 
@@ -1089,6 +1120,7 @@ static int genpd_runtime_suspend(struct device *dev)
 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 	struct gpd_timing_data *td = gpd_data->td;
 	bool runtime_pm = pm_runtime_enabled(dev);
+	bool probing = dev->links.status != DL_DEV_DRIVER_BOUND;
 	ktime_t time_start = 0;
 	s64 elapsed_ns;
 	int ret;
@@ -1143,7 +1175,7 @@ static int genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, true, 0);
+	genpd_power_off(genpd, true, probing, 0);
 	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 	genpd_unlock(genpd);
 
@@ -1164,6 +1196,7 @@ static int genpd_runtime_resume(struct device *dev)
 	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
 	struct gpd_timing_data *td = gpd_data->td;
 	bool timed = td && pm_runtime_enabled(dev);
+	bool probing = dev->links.status != DL_DEV_DRIVER_BOUND;
 	ktime_t time_start = 0;
 	s64 elapsed_ns;
 	int ret;
@@ -1221,7 +1254,7 @@ static int genpd_runtime_resume(struct device *dev)
 err_poweroff:
 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
 		genpd_lock(genpd);
-		genpd_power_off(genpd, true, 0);
+		genpd_power_off(genpd, true, probing, 0);
 		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 		genpd_unlock(genpd);
 	}
@@ -1288,6 +1321,9 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
+	if (genpd_must_defer(genpd, false))
+		return;
+
 	/* Check that the children are in their deepest (powered-off) state. */
 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
 		struct generic_pm_domain *child = link->child;
@@ -2288,6 +2324,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 		return -EINVAL;
 	}
 
+	/* Deferred-off power domains should be powered on at initialization. */
+	if (genpd_is_defer_off(genpd) && !genpd_status_on(genpd)) {
+		pr_warn("deferred-off PM domain %s is not on at init\n", genpd->name);
+		genpd->flags &= ~GENPD_FLAG_DEFER_OFF;
+	}
+
 	/* Multiple states but no governor doesn't make sense. */
 	if (!gov && genpd->state_count > 1)
 		pr_warn("%s: no governor for states\n", genpd->name);
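For providers, the new behaviour is opt-in: set GENPD_FLAG_DEFER_OFF on a domain that is already powered before registering it, which is exactly what the pmgr-pwrstate change above does. A minimal sketch (the helper name is illustrative):

#include <linux/pm_domain.h>

static int example_register_domain(struct generic_pm_domain *genpd)
{
	/*
	 * Keep the domain powered until every fw_devlink consumer has a bound
	 * driver; pm_genpd_init() clears the flag again (with a warning) if
	 * the domain turns out not to be on at this point.
	 */
	genpd->flags |= GENPD_FLAG_DEFER_OFF;

	return pm_genpd_init(genpd, NULL, false);
}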
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 60bf0ca64cf395..469b1b5d01606f 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -128,6 +128,18 @@ config POWER_RESET_LINKSTATION
 
 	  Say Y here if you have a Buffalo LinkStation LS421D/E.
 
+config POWER_RESET_MACSMC
+	tristate "Apple SMC reset/power-off driver"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on APPLE_SMC
+	depends on OF
+	default ARCH_APPLE
+	help
+	  This driver supports reset and power-off on Apple Mac machines
+	  that implement this functionality via the SMC.
+
+	  Say Y here if you have an Apple Silicon Mac.
+
 config POWER_RESET_MSM
 	bool "Qualcomm MSM power-off driver"
 	depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 10782d32e1da39..887dd9e49b7293 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
 obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
 obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
 obj-$(CONFIG_POWER_RESET_LINKSTATION) += linkstation-poweroff.o
+obj-$(CONFIG_POWER_RESET_MACSMC) += macsmc-reboot.o
 obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
 obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
 obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
diff --git a/drivers/power/reset/macsmc-reboot.c b/drivers/power/reset/macsmc-reboot.c
new file mode 100644
index 00000000000000..780f7cd7b50e1c
--- /dev/null
+++ b/drivers/power/reset/macsmc-reboot.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC Reboot/Poweroff Handler
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+struct macsmc_reboot_nvmem {
+	struct nvmem_cell *shutdown_flag;
+	struct nvmem_cell *pm_setting;
+	struct nvmem_cell *boot_stage;
+	struct nvmem_cell *boot_error_count;
+	struct nvmem_cell *panic_count;
+};
+
+static const char *nvmem_names[] = {
+	"shutdown_flag",
+	"pm_setting",
+	"boot_stage",
+	"boot_error_count",
+	"panic_count",
+};
+
+enum boot_stage {
+	BOOT_STAGE_SHUTDOWN		= 0x00, /* Clean shutdown */
+	BOOT_STAGE_IBOOT_DONE		= 0x2f, /* Last stage of bootloader */
+	BOOT_STAGE_KERNEL_STARTED	= 0x30, /* Normal OS booting */
+};
+
+enum pm_setting {
+	PM_SETTING_AC_POWER_RESTORE	= 0x02,
+	PM_SETTING_AC_POWER_OFF		= 0x03,
+};
+
+static const char *ac_power_modes[] = { "off", "restore" };
+
+static int ac_power_mode_map[] = {
+	PM_SETTING_AC_POWER_OFF,
+	PM_SETTING_AC_POWER_RESTORE,
+};
+
+struct macsmc_reboot {
+	struct device *dev;
+	struct apple_smc *smc;
+	struct notifier_block reboot_notify;
+
+	union {
+		struct macsmc_reboot_nvmem nvm;
+		struct nvmem_cell *nvm_cells[ARRAY_SIZE(nvmem_names)];
+	};
+};
+
+/* Helpers to read/write a u8 given a struct nvmem_cell */
+static int nvmem_cell_get_u8(struct nvmem_cell *cell)
+{
+	size_t len;
+	u8 val;
+	void *ret = nvmem_cell_read(cell, &len);
+
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	if (len < 1) {
+		kfree(ret);
+		return -EINVAL;
+	}
+
+	val = *(u8 *)ret;
+	kfree(ret);
+	return val;
+}
+
+static int nvmem_cell_set_u8(struct nvmem_cell *cell, u8 val)
+{
+	return nvmem_cell_write(cell, &val, sizeof(val));
+}
+
+static ssize_t macsmc_ac_power_mode_store(struct device *dev, struct device_attribute *attr,
+					  const char *buf, size_t n)
+{
+	struct macsmc_reboot *reboot = dev_get_drvdata(dev);
+	int mode;
+	int ret;
+
+	mode = sysfs_match_string(ac_power_modes, buf);
+	if (mode < 0)
+		return mode;
+
+	ret = nvmem_cell_set_u8(reboot->nvm.pm_setting, ac_power_mode_map[mode]);
+	if (ret < 0)
+		return ret;
+
+	return n;
+}
+
+static ssize_t macsmc_ac_power_mode_show(struct device *dev,
+					 struct device_attribute *attr, char *buf)
+{
+	struct macsmc_reboot *reboot = dev_get_drvdata(dev);
+	int len = 0;
+	int i;
+	int mode = nvmem_cell_get_u8(reboot->nvm.pm_setting);
+
+	if (mode < 0)
+		return mode;
+
+	for (i = 0; i < ARRAY_SIZE(ac_power_mode_map); i++) {
+		if (mode == ac_power_mode_map[i])
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					 "[%s] ", ac_power_modes[i]);
+		else
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					 "%s ", ac_power_modes[i]);
+	}
+	buf[len - 1] = '\n';
+	return len;
+}
+
+static DEVICE_ATTR(ac_power_mode, 0644, macsmc_ac_power_mode_show,
+		   macsmc_ac_power_mode_store);
+
+/*
+ * SMC 'MBSE' key actions:
+ *
+ * 'offw' - shutdown warning
+ * 'slpw' - sleep warning
+ * 'rest' - restart warning
+ * 'off1' - shutdown (needs PMU bit set to stay on)
+ * 'susp' - suspend
+ * 'phra' - restart ("PE Halt Restart Action"?)
+ * 'panb' - panic beginning
+ * 'pane' - panic end
+ */
+
+static int macsmc_power_off(struct sys_off_data *data)
+{
+	struct macsmc_reboot *reboot = data->cb_data;
+
+	dev_info(reboot->dev, "Issuing power off (off1)\n");
+
+	if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(off1)) < 0) {
+		dev_err(reboot->dev, "Failed to issue MBSE = off1 (power_off)\n");
+	} else {
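+		/* The SMC should cut power during this delay; warn if we survive it */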
+		mdelay(100);
+		WARN_ON(1);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int macsmc_restart(struct sys_off_data *data)
+{
+	struct macsmc_reboot *reboot = data->cb_data;
+
+	dev_info(reboot->dev, "Issuing restart (phra)\n");
+
+	if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(phra)) < 0) {
+		dev_err(reboot->dev, "Failed to issue MBSE = phra (restart)\n");
+	} else {
+		mdelay(100);
+		WARN_ON(1);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int macsmc_reboot_notify(struct notifier_block *this, unsigned long action, void *data)
+{
+	struct macsmc_reboot *reboot = container_of(this, struct macsmc_reboot, reboot_notify);
+	u32 val;
+	u8 shutdown_flag;
+
+	switch (action) {
+	case SYS_RESTART:
+		val = SMC_KEY(rest);
+		shutdown_flag = 0;
+		break;
+	case SYS_POWER_OFF:
+		val = SMC_KEY(offw);
+		shutdown_flag = 1;
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	dev_info(reboot->dev, "Preparing for reboot (%p4ch)\n", &val);
+
+	/* On the Mac Mini, this will turn off the LED for power off */
+	if (apple_smc_write_u32(reboot->smc, SMC_KEY(MBSE), val) < 0)
+		dev_err(reboot->dev, "Failed to issue MBSE = %p4ch (reboot_prepare)\n", &val);
+
+	/* Set the boot_stage to 0, which means we're doing a clean shutdown/reboot. */
+	if (reboot->nvm.boot_stage &&
+	    nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_SHUTDOWN) < 0)
+		dev_err(reboot->dev, "Failed to write boot_stage\n");
+
+	/*
+	 * Set the PMU flag to actually reboot into the off state.
+	 * Without this, the device will just reboot. We make it optional in case it is no longer
+	 * necessary on newer hardware.
+	 */
+	if (reboot->nvm.shutdown_flag &&
+	    nvmem_cell_set_u8(reboot->nvm.shutdown_flag, shutdown_flag) < 0)
+		dev_err(reboot->dev, "Failed to write shutdown_flag\n");
+
+	return NOTIFY_OK;
+}
+
+static void macsmc_power_init_error_counts(struct macsmc_reboot *reboot)
+{
+	int boot_error_count, panic_count;
+
+	if (!reboot->nvm.boot_error_count || !reboot->nvm.panic_count)
+		return;
+
+	boot_error_count = nvmem_cell_get_u8(reboot->nvm.boot_error_count);
+	if (boot_error_count < 0) {
+		dev_err(reboot->dev, "Failed to read boot_error_count (%d)\n", boot_error_count);
+		return;
+	}
+
+	panic_count = nvmem_cell_get_u8(reboot->nvm.panic_count);
+	if (panic_count < 0) {
+		dev_err(reboot->dev, "Failed to read panic_count (%d)\n", panic_count);
+		return;
+	}
+
+	if (!boot_error_count && !panic_count)
+		return;
+
+	dev_warn(reboot->dev, "PMU logged %d boot error(s) and %d panic(s)\n",
+		 boot_error_count, panic_count);
+
+	if (nvmem_cell_set_u8(reboot->nvm.panic_count, 0) < 0)
+		dev_err(reboot->dev, "Failed to reset panic_count\n");
+	if (nvmem_cell_set_u8(reboot->nvm.boot_error_count, 0) < 0)
+		dev_err(reboot->dev, "Failed to reset boot_error_count\n");
+}
+
+static int macsmc_reboot_probe(struct platform_device *pdev)
+{
+	struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+	struct macsmc_reboot *reboot;
+	int ret, i;
+
+	/* Ignore devices without this functionality */
+	if (!apple_smc_key_exists(smc, SMC_KEY(MBSE)))
+		return -ENODEV;
+
+	reboot = devm_kzalloc(&pdev->dev, sizeof(*reboot), GFP_KERNEL);
+	if (!reboot)
+		return -ENOMEM;
+
+	reboot->dev = &pdev->dev;
+	reboot->smc = smc;
+
+	platform_set_drvdata(pdev, reboot);
+
+	pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "reboot");
+
+	for (i = 0; i < ARRAY_SIZE(nvmem_names); i++) {
+		struct nvmem_cell *cell;
+
+		cell = devm_nvmem_cell_get(&pdev->dev, nvmem_names[i]);
+		if (IS_ERR(cell)) {
+			if (PTR_ERR(cell) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			dev_warn(&pdev->dev, "Missing NVMEM cell %s (%ld)\n",
+				 nvmem_names[i], PTR_ERR(cell));
+			/* Non fatal, we'll deal with it */
+			cell = NULL;
+		}
+		reboot->nvm_cells[i] = cell;
+	}
+
+	/* Set the boot_stage to indicate we're running the OS kernel */
+	if (reboot->nvm.boot_stage &&
+	    nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_KERNEL_STARTED) < 0)
+		dev_err(reboot->dev, "Failed to write boot_stage\n");
+
+	/* Display and clear the error counts */
+	macsmc_power_init_error_counts(reboot);
+
+	reboot->reboot_notify.notifier_call = macsmc_reboot_notify;
+
+	ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_HIGH,
+					    macsmc_power_off, reboot);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to register power-off handler\n");
+
+	ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+					    macsmc_restart, reboot);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to register restart handler\n");
+
+	ret = devm_register_reboot_notifier(&pdev->dev, &reboot->reboot_notify);
+	if (ret)
+		return dev_err_probe(&pdev->dev, ret, "Failed to register reboot notifier\n");
+
+	dev_info(&pdev->dev, "Handling reboot and poweroff requests via SMC\n");
+
+	if (device_create_file(&pdev->dev, &dev_attr_ac_power_mode))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+
+	return 0;
+}
+
+static void macsmc_reboot_remove(struct platform_device *pdev)
+{
+	device_remove_file(&pdev->dev, &dev_attr_ac_power_mode);
+}
+
+static struct platform_driver macsmc_reboot_driver = {
+	.driver = {
+		.name = "macsmc-reboot",
+	},
+	.probe = macsmc_reboot_probe,
+	.remove = macsmc_reboot_remove,
+};
+module_platform_driver(macsmc_reboot_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC reboot/poweroff driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_ALIAS("platform:macsmc-reboot");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 7b18358f194a70..c6825a626e6bf8 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -1037,4 +1037,11 @@ config FUEL_GAUGE_MM8013
 	  the state of charge, temperature, cycle count, actual and design
 	  capacity, etc.
 
+config CHARGER_MACSMC
+	tristate "Apple SMC Charger / Battery support"
+	depends on APPLE_SMC
+	help
+	  Say Y here to enable support for the charger and battery controls on
+	  Apple SMC controllers, as used on Apple Silicon Macs.
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index b55cc48a4c86f8..6c998eb9573c66 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_CHARGER_GPIO)	+= gpio-charger.o
 obj-$(CONFIG_CHARGER_MANAGER)	+= charger-manager.o
 obj-$(CONFIG_CHARGER_LT3651)	+= lt3651-charger.o
 obj-$(CONFIG_CHARGER_LTC4162L)	+= ltc4162-l-charger.o
+obj-$(CONFIG_CHARGER_MACSMC)	+= macsmc_power.o
 obj-$(CONFIG_CHARGER_MAX14577)	+= max14577_charger.o
 obj-$(CONFIG_CHARGER_DETECTOR_MAX14656)	+= max14656_charger_detector.o
 obj-$(CONFIG_CHARGER_MAX77650)	+= max77650-charger.o
diff --git a/drivers/power/supply/macsmc_power.c b/drivers/power/supply/macsmc_power.c
new file mode 100644
index 00000000000000..eed0664615896a
--- /dev/null
+++ b/drivers/power/supply/macsmc_power.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC Power/Battery Management
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/power_supply.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#define MAX_STRING_LENGTH 256
+
+/*
+ * This number is not reported anywhere by SMC, but seems to be a good
+ * conversion factor for charge to energy across machines. We need this
+ * to convert in the driver, since if we don't, userspace will try to do
+ * the conversion with a randomly guessed voltage and get it wrong.
+ *
+ * Ideally there would be a power supply prop to inform userspace of this
+ * number, but there isn't, only min/max.
+ */
+#define MACSMC_NOMINAL_CELL_VOLTAGE_MV 3800
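+
+/*
+ * Example with hypothetical numbers: a 3-cell pack reporting B0FC = 4500 (mAh)
+ * is exposed as 4500 * 3 * 3800 = 51,300,000 uWh (51.3 Wh), since mAh * mV
+ * yields uWh.
+ */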
+
+struct macsmc_power {
+	struct device *dev;
+	struct apple_smc *smc;
+	struct power_supply_desc ac_desc;
+	struct power_supply_desc batt_desc;
+
+	struct power_supply *batt;
+	char model_name[MAX_STRING_LENGTH];
+	char serial_number[MAX_STRING_LENGTH];
+	char mfg_date[MAX_STRING_LENGTH];
+	bool has_chwa;
+	bool has_chls;
+	u8 num_cells;
+	int nominal_voltage_mv;
+
+	struct power_supply *ac;
+
+	struct notifier_block nb;
+
+	struct work_struct critical_work;
+	bool shutdown_started;
+
+	struct delayed_work dbg_log_work;
+};
+
+static int macsmc_log_power_set(const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops macsmc_log_power_ops = {
+	.set = macsmc_log_power_set,
+	.get = param_get_bool,
+};
+
+static bool log_power;
+module_param_cb(log_power, &macsmc_log_power_ops, &log_power, 0644);
+MODULE_PARM_DESC(log_power, "Periodically log power consumption for debugging");
+
+#define POWER_LOG_INTERVAL (HZ)
+
+static struct macsmc_power *g_power;
+
+#define CHNC_BATTERY_FULL	BIT(0)
+#define CHNC_NO_CHARGER		BIT(7)
+#define CHNC_NOCHG_CH0C		BIT(14)
+#define CHNC_NOCHG_CH0B_CH0K	BIT(15)
+#define CHNC_BATTERY_FULL_2	BIT(18)
+#define CHNC_BMS_BUSY		BIT(23)
+#define CHNC_CHLS_LIMIT		BIT(24)
+#define CHNC_NOAC_CH0J		BIT(53)
+#define CHNC_NOAC_CH0I		BIT(54)
+
+#define CH0R_LOWER_FLAGS	GENMASK(15, 0)
+#define CH0R_NOAC_CH0I		BIT(0)
+#define CH0R_NOAC_DISCONNECTED	BIT(4)
+#define CH0R_NOAC_CH0J		BIT(5)
+#define CH0R_BMS_BUSY		BIT(8)
+#define CH0R_NOAC_CH0K		BIT(9)
+#define CH0R_NOAC_CHWA		BIT(11)
+
+#define CH0X_CH0C		BIT(0)
+#define CH0X_CH0B		BIT(1)
+
+#define ACSt_CAN_BOOT_AP	BIT(2)
+#define ACSt_CAN_BOOT_IBOOT	BIT(1)
+
+#define CHWA_CHLS_FIXED_START_OFFSET	5
+#define CHLS_MIN_END_THRESHOLD		10
+#define CHLS_FORCE_DISCHARGE		0x100
+#define CHWA_FIXED_END_THRESHOLD	80
+#define CHWA_PROP_WRITE_THRESHOLD	95
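+
+/*
+ * CHWA is a simple on/off limit fixed at 80%; CHLS takes a configurable end
+ * threshold in its low byte. In both cases the start threshold is reported
+ * as 5% below the end threshold (see the property handlers below).
+ */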
+
+static void macsmc_do_dbg(struct macsmc_power *power)
+{
+	int p_in = 0, p_sys = 0, p_3v8 = 0, p_mpmu = 0, p_spmu = 0, p_clvr = 0, p_cpu = 0;
+	s32 p_bat = 0;
+	s16 t_full = 0, t_empty = 0;
+	u8 charge = 0;
+
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PDTR), &p_in, 1000);
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PSTR), &p_sys, 1000);
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PMVR), &p_3v8, 1000);
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PHPC), &p_cpu, 1000);
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PSVR), &p_clvr, 1000);
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PPMC), &p_mpmu, 1000);
+	apple_smc_read_f32_scaled(power->smc, SMC_KEY(PPSC), &p_spmu, 1000);
+	apple_smc_read_s32(power->smc, SMC_KEY(B0AP), &p_bat);
+	apple_smc_read_s16(power->smc, SMC_KEY(B0TE), &t_empty);
+	apple_smc_read_s16(power->smc, SMC_KEY(B0TF), &t_full);
+	apple_smc_read_u8(power->smc, SMC_KEY(BUIC), &charge);
+
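+/* FD3 yields two printf arguments: whole watts and the milliwatt remainder */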
+#define FD3(x) ((x) / 1000), abs((x) % 1000)
+	dev_info(power->dev,
+		 "In %2d.%03dW Sys %2d.%03dW 3V8 %2d.%03dW MPMU %2d.%03dW SPMU %2d.%03dW "
+		 "CLVR %2d.%03dW CPU %2d.%03dW Batt %2d.%03dW %d%% T%s %dm\n",
+		 FD3(p_in), FD3(p_sys), FD3(p_3v8), FD3(p_mpmu), FD3(p_spmu), FD3(p_clvr),
+		 FD3(p_cpu), FD3(p_bat), charge,
+		 t_full >= 0 ? "full" : "empty",
+		 t_full >= 0 ? t_full : t_empty);
+#undef FD3
+}
+
+static int macsmc_battery_get_status(struct macsmc_power *power)
+{
+	u64 nocharge_flags;
+	u32 nopower_flags;
+	u16 ac_current;
+	int charge_limit = 0;
+	bool limited = false;
+	int ret;
+
+	/*
+	 * Note: there are fallbacks in case some of these SMC keys disappear in the future
+	 * or are not present on some machines. We treat the absence of the CHCE/CHCC/BSFC/CHSC
+	 * flags as an error, since they are quite fundamental and simple booleans.
+	 */
+
+	/*
+	 * If power input is inhibited, we are definitely discharging.
+	 * However, if the only reason is the BMS is doing a balancing cycle,
+	 * go ahead and ignore that one to avoid spooking users.
+	 */
+	ret = apple_smc_read_u32(power->smc, SMC_KEY(CH0R), &nopower_flags);
+	if (!ret && (nopower_flags & CH0R_LOWER_FLAGS & ~CH0R_BMS_BUSY))
+		return POWER_SUPPLY_STATUS_DISCHARGING;
+
+	/* If no charger is present, we are definitely discharging. */
+	ret = apple_smc_read_flag(power->smc, SMC_KEY(CHCE));
+	if (ret < 0)
+		return ret;
+	else if (!ret)
+		return POWER_SUPPLY_STATUS_DISCHARGING;
+
+	/* If AC is not charge capable, we are definitely discharging. */
+	ret = apple_smc_read_flag(power->smc, SMC_KEY(CHCC));
+	if (ret < 0)
+		return ret;
+	else if (!ret)
+		return POWER_SUPPLY_STATUS_DISCHARGING;
+
+	/*
+	 * If the AC input current limit is tiny or 0, we are discharging no matter
+	 * how much the BMS believes it can charge.
+	 */
+	ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-i), &ac_current);
+	if (!ret && ac_current < 100)
+		return POWER_SUPPLY_STATUS_DISCHARGING;
+
+	/* If the battery is full, report it as such. */
+	ret = apple_smc_read_flag(power->smc, SMC_KEY(BSFC));
+	if (ret < 0)
+		return ret;
+	else if (ret)
+		return POWER_SUPPLY_STATUS_FULL;
+
+	/*
+	 * If charge limits are supported and enabled and the state of charge
+	 * is above the start threshold, then the limit is the reason we are
+	 * not charging (assuming we are not actually charging).
+	 */
+	if (power->has_chls) {
+		u16 vu16;
+
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(CHLS), &vu16);
+		if (ret == sizeof(vu16) && (vu16 & 0xff) >= CHLS_MIN_END_THRESHOLD)
+			charge_limit = (vu16 & 0xff) - CHWA_CHLS_FIXED_START_OFFSET;
+	} else if (power->has_chwa &&
+		   apple_smc_read_flag(power->smc, SMC_KEY(CHWA)) == 1) {
+		charge_limit = CHWA_FIXED_END_THRESHOLD - CHWA_CHLS_FIXED_START_OFFSET;
+	}
+
+	if (charge_limit > 0) {
+		u8 buic = 0;
+
+		if (apple_smc_read_u8(power->smc, SMC_KEY(BUIC), &buic) >= 0 &&
+		    buic >= charge_limit)
+			limited = true;
+	}
+
+	/* If there are reasons we aren't charging... */
+	ret = apple_smc_read_u64(power->smc, SMC_KEY(CHNC), &nocharge_flags);
+	if (!ret) {
+		/* Perhaps the battery is full after all */
+		if (nocharge_flags & CHNC_BATTERY_FULL)
+			return POWER_SUPPLY_STATUS_FULL;
+		/*
+		 * Or maybe the BMS is just busy doing something, if so call it charging anyway.
+		 * But CHWA limits show up as this, so exclude those.
+		 */
+		else if (nocharge_flags == CHNC_BMS_BUSY && !limited)
+			return POWER_SUPPLY_STATUS_CHARGING;
+		/* If we have other reasons we aren't charging, say we aren't */
+		else if (nocharge_flags)
+			return POWER_SUPPLY_STATUS_NOT_CHARGING;
+		/* Else we're either charging or about to charge */
+		else
+			return POWER_SUPPLY_STATUS_CHARGING;
+	}
+
+	/* As a fallback, use the system charging flag. */
+	ret = apple_smc_read_flag(power->smc, SMC_KEY(CHSC));
+	if (ret < 0)
+		return ret;
+	if (!ret)
+		return POWER_SUPPLY_STATUS_NOT_CHARGING;
+	else
+		return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+static int macsmc_battery_get_charge_behaviour(struct macsmc_power *power)
+{
+	int ret;
+	u8 val;
+
+	/* CH0I returns a bitmask like the low byte of CH0R */
+	ret = apple_smc_read_u8(power->smc, SMC_KEY(CH0I), &val);
+	if (ret)
+		return ret;
+	if (val & CH0R_NOAC_CH0I)
+		return POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE;
+
+	/* CH0C returns a bitmask containing CH0B/CH0C flags */
+	ret = apple_smc_read_u8(power->smc, SMC_KEY(CH0C), &val);
+	if (ret)
+		return ret;
+	if (val & CH0X_CH0C)
+		return POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+	else
+		return POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+}
+
+static int macsmc_battery_set_charge_behaviour(struct macsmc_power *power, int val)
+{
+	u8 ch0i, ch0c;
+	int ret;
+
+	/*
+	 * CH0I/CH0C are "hard" controls that will allow the battery to run down to 0.
+	 * CH0K/CH0B are "soft" controls that are reset to 0 when SOC drops below 50%;
+	 * we don't expose these yet.
+	 */
+
+	switch (val) {
+	case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+		ch0i = ch0c = 0;
+		break;
+	case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+		ch0i = 0;
+		ch0c = 1;
+		break;
+	case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE:
+		ch0i = 1;
+		ch0c = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	ret = apple_smc_write_u8(power->smc, SMC_KEY(CH0I), ch0i);
+	if (ret)
+		return ret;
+	return apple_smc_write_u8(power->smc, SMC_KEY(CH0C), ch0c);
+}
+
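+/* Parse one two-digit field of the BMDT manufacture date string (YYMMDD...) */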
+static int macsmc_battery_get_date(const char *s, int *out)
+{
+	if (!isdigit(s[0]) || !isdigit(s[1]))
+		return -ENOTSUPP;
+
+	*out = (s[0] - '0') * 10 + s[1] - '0';
+	return 0;
+}
+
+static int macsmc_battery_get_capacity_level(struct macsmc_power *power)
+{
+	u32 val;
+	int ret;
+
+	/* Check for emergency shutdown condition */
+	if (apple_smc_read_u32(power->smc, SMC_KEY(BCF0), &val) >= 0 && val)
+		return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+	/* Check AC status for whether we could boot in this state */
+	if (apple_smc_read_u32(power->smc, SMC_KEY(ACSt), &val) >= 0) {
+		if (!(val & ACSt_CAN_BOOT_IBOOT))
+			return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+		if (!(val & ACSt_CAN_BOOT_AP))
+			return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+	}
+
+	/* Check battery full flag */
+	ret = apple_smc_read_flag(power->smc, SMC_KEY(BSFC));
+	if (ret > 0)
+		return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+	else if (ret == 0)
+		return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+	else
+		return POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+}
+
+static int macsmc_battery_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	struct macsmc_power *power = power_supply_get_drvdata(psy);
+	int ret = 0;
+	u8 vu8;
+	u16 vu16;
+	s16 vs16;
+	s32 vs32;
+	s64 vs64;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = macsmc_battery_get_status(power);
+		ret = val->intval < 0 ? val->intval : 0;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = 1;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+		val->intval = macsmc_battery_get_charge_behaviour(power);
+		ret = val->intval < 0 ? val->intval : 0;
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0TE), &vu16);
+		val->intval = vu16 == 0xffff ? 0 : vu16 * 60;
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0TF), &vu16);
+		val->intval = vu16 == 0xffff ? 0 : vu16 * 60;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		ret = apple_smc_read_u8(power->smc, SMC_KEY(BUIC), &vu8);
+		val->intval = vu8;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+		val->intval = macsmc_battery_get_capacity_level(power);
+		ret = val->intval < 0 ? val->intval : 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0AV), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		ret = apple_smc_read_s16(power->smc, SMC_KEY(B0AC), &vs16);
+		val->intval = vs16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_POWER_NOW:
+		ret = apple_smc_read_s32(power->smc, SMC_KEY(B0AP), &vs32);
+		val->intval = vs32 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(BITV), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		/*
+		 * Battery cell max voltage? BVV* seem to return per-cell voltages,
+		 * BVV[NOP] are probably the max voltages for the 3 cells but we don't
+		 * know what will happen if they ever change the number of cells.
+		 * So go with BVVN and multiply by the cell count (BNCB).
+		 * BVVL seems to be the per-cell limit adjusted dynamically.
+		 * Guess: BVVL = Limit, BVVN = Nominal, and the other cells got filled
+		 * in around nearby letters?
+		 */
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(BVVN), &vu16);
+		val->intval = vu16 * 1000 * power->num_cells;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		/* Lifetime min */
+		ret = apple_smc_read_s16(power->smc, SMC_KEY(BLPM), &vs16);
+		val->intval = vs16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		/* Lifetime max */
+		ret = apple_smc_read_s16(power->smc, SMC_KEY(BLPX), &vs16);
+		val->intval = vs16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RC), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RI), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RV), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0DC), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0FC), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RM), &vu16);
+		val->intval = swab16(vu16) * 1000;
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0DC), &vu16);
+		val->intval = vu16 * power->nominal_voltage_mv;
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_FULL:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0FC), &vu16);
+		val->intval = vu16 * power->nominal_voltage_mv;
+		break;
+	case POWER_SUPPLY_PROP_ENERGY_NOW:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RM), &vu16);
+		val->intval = swab16(vu16) * power->nominal_voltage_mv;
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0AT), &vu16);
+		val->intval = vu16 - 2732;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		ret = apple_smc_read_s64(power->smc, SMC_KEY(BAAC), &vs64);
+		val->intval = vs64;
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(B0CT), &vu16);
+		val->intval = vu16;
+		break;
+	case POWER_SUPPLY_PROP_SCOPE:
+		val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		ret = apple_smc_read_flag(power->smc, SMC_KEY(BBAD));
+		val->intval = ret == 1 ? POWER_SUPPLY_HEALTH_DEAD : POWER_SUPPLY_HEALTH_GOOD;
+		ret = ret < 0 ? ret : 0;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = power->model_name;
+		break;
+	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+		val->strval = power->serial_number;
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+		ret = macsmc_battery_get_date(&power->mfg_date[0], &val->intval);
+		val->intval += 2000 - 8; /* -8 is a fixup for a firmware bug... */
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+		ret = macsmc_battery_get_date(&power->mfg_date[2], &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+		ret = macsmc_battery_get_date(&power->mfg_date[4], &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+		if (power->has_chls) {
+			ret = apple_smc_read_u16(power->smc, SMC_KEY(CHLS), &vu16);
+			val->intval = vu16 & 0xff;
+			if (val->intval < CHLS_MIN_END_THRESHOLD || val->intval >= 100)
+				val->intval = 100;
+		} else if (power->has_chwa) {
+			ret = apple_smc_read_flag(power->smc, SMC_KEY(CHWA));
+			val->intval = ret == 1 ? CHWA_FIXED_END_THRESHOLD : 100;
+			ret = ret < 0 ? ret : 0;
+		} else {
+			return -EINVAL;
+		}
+		if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD &&
+		    ret >= 0 && val->intval < 100 && val->intval >= CHLS_MIN_END_THRESHOLD)
+			val->intval -= CHWA_CHLS_FIXED_START_OFFSET;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int macsmc_battery_set_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       const union power_supply_propval *val)
+{
+	struct macsmc_power *power = power_supply_get_drvdata(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+		return macsmc_battery_set_charge_behaviour(power, val->intval);
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+		/*
+		 * Ignore this write. We allow it so userspace isn't confused,
+		 * but the start threshold is not independently configurable:
+		 * it is always end - 5, or 100, depending on the end_threshold
+		 * setting.
+		 */
+		return 0;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+		if (power->has_chls) {
+			u16 kval = 0;
+
+			/* TODO: Make CHLS_FORCE_DISCHARGE configurable */
+			if (val->intval < CHLS_MIN_END_THRESHOLD)
+				kval = CHLS_FORCE_DISCHARGE | CHLS_MIN_END_THRESHOLD;
+			else if (val->intval < 100)
+				kval = CHLS_FORCE_DISCHARGE | (val->intval & 0xff);
+			return apple_smc_write_u16(power->smc, SMC_KEY(CHLS), kval);
+		} else if (power->has_chwa) {
+			return apple_smc_write_flag(power->smc, SMC_KEY(CHWA),
+						    val->intval <= CHWA_PROP_WRITE_THRESHOLD);
+		} else {
+			return -EINVAL;
+		}
+	default:
+		return -EINVAL;
+	}
+}
+
+static int macsmc_battery_property_is_writeable(struct power_supply *psy,
+						enum power_supply_property psp)
+{
+	struct macsmc_power *power = power_supply_get_drvdata(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+		return true;
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+	case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+		return power->has_chwa || power->has_chls;
+	default:
+		return false;
+	}
+}
+
+static const enum power_supply_property macsmc_battery_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_POWER_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+	POWER_SUPPLY_PROP_ENERGY_FULL,
+	POWER_SUPPLY_PROP_ENERGY_NOW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_SCOPE,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+	POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+	POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+	POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD
+};
+
+static const struct power_supply_desc macsmc_battery_desc = {
+	.name			= "macsmc-battery",
+	.type			= POWER_SUPPLY_TYPE_BATTERY,
+	.get_property		= macsmc_battery_get_property,
+	.set_property		= macsmc_battery_set_property,
+	.property_is_writeable	= macsmc_battery_property_is_writeable,
+	.properties		= macsmc_battery_props,
+	.num_properties		= ARRAY_SIZE(macsmc_battery_props),
+	.charge_behaviours	= BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO)
+				| BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE)
+				| BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE),
+};
+
+static int macsmc_ac_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *val)
+{
+	struct macsmc_power *power = power_supply_get_drvdata(psy);
+	int ret = 0;
+	u16 vu16;
+	u32 vu32;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		ret = apple_smc_read_u32(power->smc, SMC_KEY(CHIS), &vu32);
+		val->intval = !!vu32;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-n), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+		ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-i), &vu16);
+		val->intval = vu16 * 1000;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
+		ret = apple_smc_read_u32(power->smc, SMC_KEY(ACPW), &vu32);
+		val->intval = vu32 * 1000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static enum power_supply_property macsmc_ac_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_INPUT_POWER_LIMIT,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static const struct power_supply_desc macsmc_ac_desc = {
+	.name			= "macsmc-ac",
+	.type			= POWER_SUPPLY_TYPE_MAINS,
+	.get_property		= macsmc_ac_get_property,
+	.properties		= macsmc_ac_props,
+	.num_properties		= ARRAY_SIZE(macsmc_ac_props),
+};
+
+static int macsmc_log_power_set(const char *val, const struct kernel_param *kp)
+{
+	int ret = param_set_bool(val, kp);
+
+	if (ret < 0)
+		return ret;
+
+	if (log_power && g_power)
+		schedule_delayed_work(&g_power->dbg_log_work, 0);
+
+	return 0;
+}
+
+static void macsmc_dbg_work(struct work_struct *wrk)
+{
+	struct macsmc_power *power = container_of(to_delayed_work(wrk),
+						  struct macsmc_power, dbg_log_work);
+
+	macsmc_do_dbg(power);
+
+	if (log_power)
+		schedule_delayed_work(&power->dbg_log_work, POWER_LOG_INTERVAL);
+}
+
+static void macsmc_power_critical_work(struct work_struct *wrk)
+{
+	struct macsmc_power *power = container_of(wrk, struct macsmc_power, critical_work);
+	int ret;
+	u32 bcf0;
+	u16 bitv, b0av;
+
+	/*
+	 * Check if the battery voltage is below the design voltage. If it is,
+	 * we have a few seconds until the machine dies. Explicitly shut down,
+	 * which at least gets the NVMe controller to flush its cache.
+	 */
+	if (apple_smc_read_u16(power->smc, SMC_KEY(BITV), &bitv) >= 0 &&
+	    apple_smc_read_u16(power->smc, SMC_KEY(B0AV), &b0av) >= 0 &&
+	    b0av < bitv) {
+		dev_crit(power->dev, "Emergency notification: Battery is critical\n");
+		if (kernel_can_power_off())
+			kernel_power_off();
+		else /* Missing macsmc-reboot driver? In this state, this will not boot anyway. */
+			kernel_restart("Battery is critical");
+	}
+
+	/* This spams once per second, so make sure we only trigger shutdown once. */
+	if (power->shutdown_started)
+		return;
+
+	/* Check for battery empty condition */
+	ret = apple_smc_read_u32(power->smc, SMC_KEY(BCF0), &bcf0);
+	if (ret < 0) {
+		dev_err(power->dev,
+			"Emergency notification: Failed to read battery status\n");
+	} else if (bcf0 == 0) {
+		dev_warn(power->dev, "Emergency notification: Battery status is OK?\n");
+		return;
+	} else {
+		dev_warn(power->dev, "Emergency notification: Battery is empty\n");
+	}
+
+	power->shutdown_started = true;
+
+	/*
+	 * Attempt to trigger an orderly shutdown. At this point, we should have a few
+	 * minutes of reserve capacity left, enough to do a clean shutdown.
+	 */
+	dev_warn(power->dev, "Shutting down in 10 seconds\n");
+	ssleep(10);
+
+	/*
+	 * Don't force it; if this stalls or fails, the last-resort check above will
+	 * trigger a hard shutdown when shutdown is truly imminent.
+	 */
+	orderly_poweroff(false);
+}
+
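+/*
+ * SMC notification events arrive as packed words: a top byte of 0x71
+ * indicates charger/power events and 0x72 button events, as decoded below.
+ */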
+static int macsmc_power_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct macsmc_power *power = container_of(nb, struct macsmc_power, nb);
+
+	if ((event & 0xffffff00) == 0x71010100) {
+		bool charging = (event & 0xff) != 0;
+
+		dev_info(power->dev, "Charging: %d\n", charging);
+		power_supply_changed(power->batt);
+		power_supply_changed(power->ac);
+
+		return NOTIFY_OK;
+	} else if (event == 0x71020000) {
+		schedule_work(&power->critical_work);
+
+		return NOTIFY_OK;
+	} else if ((event & 0xffff0000) == 0x71060000) {
+		u8 changed_port = event >> 8;
+		u8 cur_port;
+
+		/* Port charging state change? */
+		if (apple_smc_read_u8(power->smc, SMC_KEY(AC-W), &cur_port) >= 0) {
+			dev_info(power->dev, "Port %d state change (charge port: %d)\n",
+				 changed_port + 1, cur_port);
+		}
+
+		power_supply_changed(power->batt);
+		power_supply_changed(power->ac);
+
+		return NOTIFY_OK;
+	} else if ((event & 0xff000000) == 0x71000000) {
+		dev_info(power->dev, "Unknown charger event 0x%lx\n", event);
+
+		return NOTIFY_OK;
+	} else if ((event & 0xffff0000) == 0x72010000) {
+		/* Button event handled by macsmc-hid, but let's do a debug print */
+		if (log_power)
+			macsmc_do_dbg(power);
+
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int macsmc_power_probe(struct platform_device *pdev)
+{
+	struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+	struct power_supply_config psy_cfg = {};
+	struct macsmc_power *power;
+	u32 val;
+	u16 vu16;
+	int ret;
+
+	power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+	if (!power)
+		return -ENOMEM;
+
+	power->dev = &pdev->dev;
+	power->smc = smc;
+	power->ac_desc = macsmc_ac_desc;
+	power->batt_desc = macsmc_battery_desc;
+	dev_set_drvdata(&pdev->dev, power);
+
+	/* Ignore devices without a charger/battery */
+	if (macsmc_battery_get_status(power) <= POWER_SUPPLY_STATUS_UNKNOWN)
+		return -ENODEV;
+
+	/* Fetch string properties */
+	apple_smc_read(smc, SMC_KEY(BMDN), power->model_name, sizeof(power->model_name) - 1);
+	apple_smc_read(smc, SMC_KEY(BMSN), power->serial_number, sizeof(power->serial_number) - 1);
+	apple_smc_read(smc, SMC_KEY(BMDT), power->mfg_date, sizeof(power->mfg_date) - 1);
+
+	/* Turn off the "optimized battery charging" flags, in case macOS left them on */
+	apple_smc_write_u8(power->smc, SMC_KEY(CH0K), 0);
+	apple_smc_write_u8(power->smc, SMC_KEY(CH0B), 0);
+
+	/*
+	 * Prefer CHWA as the SMC firmware from iBoot-10151.1.1 is not compatible with
+	 * this CHLS usage.
+	 */
+	if (apple_smc_read_flag(power->smc, SMC_KEY(CHWA)) >= 0) {
+		power->has_chwa = true;
+	} else if (apple_smc_read_u16(power->smc, SMC_KEY(CHLS), &vu16) >= 0) {
+		power->has_chls = true;
+	} else {
+		/* Remove the last 2 properties that control the charge threshold */
+		power->batt_desc.num_properties -= 2;
+	}
+
+	apple_smc_read_u8(power->smc, SMC_KEY(BNCB), &power->num_cells);
+	power->nominal_voltage_mv = MACSMC_NOMINAL_CELL_VOLTAGE_MV * power->num_cells;
+
+	/* Doing one read of this flag enables critical shutdown notifications */
+	apple_smc_read_u32(power->smc, SMC_KEY(BCF0), &val);
+
+	psy_cfg.drv_data = power;
+	power->batt = devm_power_supply_register(&pdev->dev, &power->batt_desc, &psy_cfg);
+	if (IS_ERR(power->batt)) {
+		dev_err(&pdev->dev, "Failed to register battery\n");
+		ret = PTR_ERR(power->batt);
+		return ret;
+	}
+
+	/*
+	 * SMC firmware in macOS 15.4 dropped "AC-i" and "AC-n" (and all keys
+	 * with a lower-case last letter) without an obvious replacement.
+	 */
+	if (apple_smc_read_u16(power->smc, SMC_KEY(AC-n), &vu16) < 0)
+		power->ac_desc.num_properties -= 2;
+
+	power->ac = devm_power_supply_register(&pdev->dev, &power->ac_desc, &psy_cfg);
+	if (IS_ERR(power->ac)) {
+		dev_err(&pdev->dev, "Failed to register AC adapter\n");
+		ret = PTR_ERR(power->ac);
+		return ret;
+	}
+
+	power->nb.notifier_call = macsmc_power_event;
+	apple_smc_register_notifier(power->smc, &power->nb);
+
+	INIT_WORK(&power->critical_work, macsmc_power_critical_work);
+	INIT_DELAYED_WORK(&power->dbg_log_work, macsmc_dbg_work);
+
+	g_power = power;
+
+	if (log_power)
+		schedule_delayed_work(&power->dbg_log_work, 0);
+
+	return 0;
+}
+
+static void macsmc_power_remove(struct platform_device *pdev)
+{
+	struct macsmc_power *power = dev_get_drvdata(&pdev->dev);
+
+	cancel_work(&power->critical_work);
+	cancel_delayed_work(&power->dbg_log_work);
+
+	g_power = NULL;
+
+	apple_smc_unregister_notifier(power->smc, &power->nb);
+}
+
+static struct platform_driver macsmc_power_driver = {
+	.driver = {
+		.name = "macsmc-power",
+		.owner = THIS_MODULE,
+	},
+	.probe = macsmc_power_probe,
+	.remove = macsmc_power_remove,
+};
+module_platform_driver(macsmc_power_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC battery and power management driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_ALIAS("platform:macsmc-power");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0bbbf778ecfa3e..e12f661046e8b0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -2060,6 +2060,19 @@ config RTC_DRV_WILCO_EC
 	  This can also be built as a module. If so, the module will
 	  be named "rtc_wilco_ec".
 
+config RTC_DRV_MACSMC
+	tristate "Apple Mac SMC RTC"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on APPLE_SMC
+	depends on OF
+	default ARCH_APPLE
+	help
+	  If you say yes here you get support for the RTC functions provided
+	  by the SMC on Apple Silicon Macs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rtc-macsmc.
+
 config RTC_DRV_MSC313
 	tristate "MStar MSC313 RTC"
         depends on ARCH_MSTARV7 || COMPILE_TEST
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 489b4ab07068c7..661389abe0e2fd 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_RTC_DRV_M48T35)	+= rtc-m48t35.o
 obj-$(CONFIG_RTC_DRV_M48T59)	+= rtc-m48t59.o
 obj-$(CONFIG_RTC_DRV_M48T86)	+= rtc-m48t86.o
 obj-$(CONFIG_RTC_DRV_MA35D1)	+= rtc-ma35d1.o
+obj-$(CONFIG_RTC_DRV_MACSMC)	+= rtc-macsmc.o
 obj-$(CONFIG_RTC_DRV_MAX31335)	+= rtc-max31335.o
 obj-$(CONFIG_RTC_DRV_MAX6900)	+= rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX6902)	+= rtc-max6902.o
diff --git a/drivers/rtc/rtc-macsmc.c b/drivers/rtc/rtc-macsmc.c
new file mode 100644
index 00000000000000..2f377a643c19e3
--- /dev/null
+++ b/drivers/rtc/rtc-macsmc.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC RTC driver
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitops.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+/* 48-bit RTC */
+#define RTC_BYTES 6
+#define RTC_BITS (8 * RTC_BYTES)
+
+/* 32768 Hz clock */
+#define RTC_SEC_SHIFT 15
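+/* seconds = (counter + offset) >> RTC_SEC_SHIFT: 2^15 ticks per second */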
+
+struct macsmc_rtc {
+	struct device *dev;
+	struct apple_smc *smc;
+	struct rtc_device *rtc_dev;
+	struct nvmem_cell *rtc_offset;
+};
+
+static int macsmc_rtc_get_time(struct device *dev, struct rtc_time *tm)
+{
+	struct macsmc_rtc *rtc = dev_get_drvdata(dev);
+	u64 ctr = 0, off = 0;
+	time64_t now;
+	void *p_off;
+	size_t len;
+	int ret;
+
+	ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES);
+	if (ret != RTC_BYTES)
+		return ret < 0 ? ret : -EIO;
+
+	p_off = nvmem_cell_read(rtc->rtc_offset, &len);
+	if (IS_ERR(p_off))
+		return PTR_ERR(p_off);
+	if (len < RTC_BYTES) {
+		kfree(p_off);
+		return -EIO;
+	}
+
+	memcpy(&off, p_off, RTC_BYTES);
+	kfree(p_off);
+
+	/* Sign extend from 48 to 64 bits, then arithmetic shift right 15 bits to get seconds */
+	now = sign_extend64(ctr + off, RTC_BITS - 1) >> RTC_SEC_SHIFT;
+	rtc_time64_to_tm(now, tm);
+
+	return ret;
+}
+
+static int macsmc_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct macsmc_rtc *rtc = dev_get_drvdata(dev);
+	u64 ctr = 0, off = 0;
+	int ret;
+
+	ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES);
+	if (ret != RTC_BYTES)
+		return ret < 0 ? ret : -EIO;
+
+	/* This sets the offset such that the set second begins now */
+	off = (rtc_tm_to_time64(tm) << RTC_SEC_SHIFT) - ctr;
+	return nvmem_cell_write(rtc->rtc_offset, &off, RTC_BYTES);
+}
+
+static const struct rtc_class_ops macsmc_rtc_ops = {
+	.read_time = macsmc_rtc_get_time,
+	.set_time = macsmc_rtc_set_time,
+};
+
+static int macsmc_rtc_probe(struct platform_device *pdev)
+{
+	struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+	struct macsmc_rtc *rtc;
+
+	/* Ignore devices without this functionality */
+	if (!apple_smc_key_exists(smc, SMC_KEY(CLKM)))
+		return -ENODEV;
+
+	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+	if (!rtc)
+		return -ENOMEM;
+
+	rtc->dev = &pdev->dev;
+	rtc->smc = smc;
+
+	pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "rtc");
+
+	rtc->rtc_offset = devm_nvmem_cell_get(&pdev->dev, "rtc_offset");
+	if (IS_ERR(rtc->rtc_offset))
+		return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_offset),
+				     "Failed to get rtc_offset NVMEM cell\n");
+
+	rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+	if (IS_ERR(rtc->rtc_dev))
+		return PTR_ERR(rtc->rtc_dev);
+
+	rtc->rtc_dev->ops = &macsmc_rtc_ops;
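+	/* The signed 48-bit tick range spans about +/- 2^32 seconds (~136 years) */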
+	rtc->rtc_dev->range_min = S64_MIN >> (RTC_SEC_SHIFT + (64 - RTC_BITS));
+	rtc->rtc_dev->range_max = S64_MAX >> (RTC_SEC_SHIFT + (64 - RTC_BITS));
+
+	platform_set_drvdata(pdev, rtc);
+
+	return devm_rtc_register_device(rtc->rtc_dev);
+}
+
+static struct platform_driver macsmc_rtc_driver = {
+	.driver = {
+		.name = "macsmc-rtc",
+	},
+	.probe = macsmc_rtc_probe,
+};
+module_platform_driver(macsmc_rtc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC RTC driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_ALIAS("platform:macsmc-rtc");
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index 6388cbe1e56b5a..4b8cbf10a9f2a9 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -4,6 +4,16 @@ if ARCH_APPLE || COMPILE_TEST
 
 menu "Apple SoC drivers"
 
+config APPLE_DOCKCHANNEL
+	tristate "Apple DockChannel FIFO"
+	depends on ARCH_APPLE || COMPILE_TEST
+	default ARCH_APPLE
+	help
+	  DockChannel is a simple FIFO used on Apple SoCs for debug and inter-processor
+	  communications.
+
+	  Say 'y' here if you have an Apple SoC.
+
 config APPLE_MAILBOX
 	tristate "Apple SoC mailboxes"
 	depends on PM
@@ -17,6 +27,15 @@ config APPLE_MAILBOX
 
 	  Say Y here if you have an Apple SoC.
 
+config APPLE_PMGR_MISC
+	bool "Apple SoC PMGR miscellaneous support"
+	depends on PM
+	default ARCH_APPLE
+	help
+	  The PMGR block in Apple SoCs provides high-level power state
+	  controls for SoC devices. This driver manages miscellaneous
+	  power controls.
+
 config APPLE_RTKIT
 	tristate "Apple RTKit co-processor IPC protocol"
 	depends on APPLE_MAILBOX
@@ -30,6 +49,20 @@ config APPLE_RTKIT
 
 	  Say 'y' here if you have an Apple SoC.
 
+config APPLE_RTKIT_HELPER
+	tristate "Apple Generic RTKit helper co-processor"
+	depends on APPLE_RTKIT
+	depends on ARCH_APPLE || COMPILE_TEST
+	default ARCH_APPLE
+	help
+	  Apple SoCs such as the M1 come with various co-processors running
+	  their proprietary RTKit operating system. This option enables support
+	  for a generic co-processor that does not implement any additional
+	  in-band communications. It can be used for testing purposes, or for
+	  coprocessors such as MTP that communicate over a different interface.
+
+	  Say 'y' here if you have an Apple SoC.
+
 config APPLE_SART
 	tristate "Apple SART DMA address filter"
 	depends on ARCH_APPLE || COMPILE_TEST
@@ -41,6 +74,35 @@ config APPLE_SART
 
 	  Say 'y' here if you have an Apple SoC.
 
+config RUST_APPLE_RTKIT
+	bool
+	depends on RUST
+	depends on APPLE_RTKIT
+
+config APPLE_AOP
+	tristate "Apple \"Always-on\" Processor"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on RUST
+	select RUST_APPLE_RTKIT
+	default m if ARCH_APPLE
+	help
+	  A co-processor present on certain Apple SoCs that controls
+	  accelerometers, gyroscopes, ambient light sensors and microphones.
+	  Despite the name, it is not actually always on.
+
+	  Say 'y' here if you have an Apple laptop.
+
+config APPLE_SEP
+	tristate "Apple Secure Element Processor"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on RUST
+	select RUST_APPLE_RTKIT
+	default y if ARCH_APPLE
+	help
+	  A security co-processor present on Apple SoCs, controlling transparent
+	  disk encryption, secure boot, HDCP, biometric auth and probably more.
+
+	  Say 'y' here if you have an Apple SoC.
+
 endmenu
 
 endif
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index 4d9ab8f3037b71..fc9d4f4401b7c4 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -1,10 +1,22 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
+obj-$(CONFIG_APPLE_DOCKCHANNEL) += apple-dockchannel.o
+apple-dockchannel-y = dockchannel.o
+
 obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
 apple-mailbox-y = mailbox.o
 
+obj-$(CONFIG_APPLE_PMGR_MISC)	+= apple-pmgr-misc.o
+
 obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
 apple-rtkit-y = rtkit.o rtkit-crashlog.o
 
+obj-$(CONFIG_APPLE_RTKIT_HELPER) += apple-rtkit-helper.o
+apple-rtkit-helper-y = rtkit-helper.o
+
 obj-$(CONFIG_APPLE_SART) += apple-sart.o
 apple-sart-y = sart.o
+
+obj-$(CONFIG_APPLE_AOP) += aop.o
+
+obj-$(CONFIG_APPLE_SEP) += sep.o
diff --git a/drivers/soc/apple/aop.rs b/drivers/soc/apple/aop.rs
new file mode 100644
index 00000000000000..428870fabeb68b
--- /dev/null
+++ b/drivers/soc/apple/aop.rs
@@ -0,0 +1,943 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![recursion_limit = "2048"]
+
+//! Apple AOP driver
+//!
+//! Copyright (C) The Asahi Linux Contributors
+
+use core::{arch::asm, mem, ptr, slice};
+
+use kernel::{
+    bindings, c_str,
+    devres::Devres,
+    dma::{dma_bit_mask, CoherentAllocation, Device},
+    error::from_err_ptr,
+    io::mem::IoMem,
+    module_platform_driver, new_condvar, new_mutex, of, platform,
+    prelude::*,
+    soc::apple::aop::{from_fourcc, EPICService, FakehidListener, AOP},
+    soc::apple::rtkit,
+    sync::{Arc, ArcBorrow, CondVar, Mutex},
+    types::ForeignOwnable,
+    workqueue::{self, impl_has_work, new_work, Work, WorkItem},
+};
+
+const AOP_MMIO_SIZE: usize = 0x1e0000;
+const ASC_MMIO_SIZE: usize = 0x4000;
+const BOOTARGS_OFFSET: usize = 0x22c;
+const BOOTARGS_SIZE: usize = 0x230;
+const CPU_CONTROL: usize = 0x44;
+const CPU_RUN: u32 = 0x1 << 4;
+const AFK_ENDPOINT_START: u8 = 0x20;
+const AFK_ENDPOINT_COUNT: u8 = 0xf;
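+// AFK mailbox messages carry their opcode in bits 63:48 (see recv_message()).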
+const AFK_OPC_GET_BUF: u64 = 0x89;
+const AFK_OPC_INIT: u64 = 0x80;
+const AFK_OPC_INIT_RX: u64 = 0x8b;
+const AFK_OPC_INIT_TX: u64 = 0x8a;
+const AFK_OPC_INIT_UNK: u64 = 0x8c;
+const AFK_OPC_SEND: u64 = 0xa2;
+const AFK_OPC_START_ACK: u64 = 0x86;
+const AFK_OPC_SHUTDOWN_ACK: u64 = 0xc1;
+const AFK_OPC_RECV: u64 = 0x85;
+const AFK_MSG_GET_BUF_ACK: u64 = 0xa1 << 48;
+const AFK_MSG_INIT: u64 = AFK_OPC_INIT << 48;
+const AFK_MSG_INIT_ACK: u64 = 0xa0 << 48;
+const AFK_MSG_START: u64 = 0xa3 << 48;
+const AFK_MSG_SHUTDOWN: u64 = 0xc0 << 48;
+const AFK_RB_BLOCK_STEP: usize = 0x40;
+const EPIC_TYPE_NOTIFY: u32 = 0;
+const EPIC_CATEGORY_REPORT: u8 = 0x00;
+const EPIC_CATEGORY_NOTIFY: u8 = 0x10;
+const EPIC_CATEGORY_REPLY: u8 = 0x20;
+const EPIC_SUBTYPE_STD_SERVICE: u16 = 0xc0;
+const EPIC_SUBTYPE_FAKEHID_REPORT: u16 = 0xc4;
+const EPIC_SUBTYPE_RETCODE: u16 = 0x84;
+const EPIC_SUBTYPE_RETCODE_PAYLOAD: u16 = 0xa0;
+const QE_MAGIC1: u32 = from_fourcc(b" POI");
+const QE_MAGIC2: u32 = from_fourcc(b" POA");
+
+fn align_up(v: usize, a: usize) -> usize {
+    (v + a - 1) & !(a - 1)
+}
+
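+/// Full data synchronization barrier ("dsb sy"), used to order shared-memory
+/// ring-buffer accesses against the read/write pointer updates around them.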
+#[inline(always)]
+fn mem_sync() {
+    unsafe {
+        asm!("dsb sy");
+    }
+}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default)]
+struct QEHeader {
+    magic: u32,
+    size: u32,
+    channel: u32,
+    ty: u32,
+}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default)]
+struct EPICHeader {
+    version: u8,
+    seq: u16,
+    _pad0: u8,
+    _unk0: u32,
+    timestamp: u64,
+    // Subheader
+    length: u32,
+    sub_version: u8,
+    category: u8,
+    subtype: u16,
+    tag: u16,
+    _unk1: u16,
+    _pad1: u64,
+    inline_len: u32,
+}
+
+#[repr(C, packed)]
+struct EPICServiceAnnounce {
+    name: [u8; 20],
+    _unk0: u32,
+    retcode: u32,
+    _unk1: u32,
+    channel: u32,
+    _unk2: u32,
+    _unk3: u32,
+}
+
+#[pin_data]
+struct FutureValue<T> {
+    #[pin]
+    val: Mutex<Option<T>>,
+    #[pin]
+    completion: CondVar,
+}
+
+impl<T: Clone> FutureValue<T> {
+    fn pin_init() -> impl PinInit<FutureValue<T>> {
+        pin_init!(
+            FutureValue {
+                val <- new_mutex!(None),
+                completion <- new_condvar!()
+            }
+        )
+    }
+    fn complete(&self, val: T) {
+        *self.val.lock() = Some(val);
+        self.completion.notify_all();
+    }
+    fn wait(&self) -> T {
+        let mut ret_guard = self.val.lock();
+        while ret_guard.is_none() {
+            self.completion.wait(&mut ret_guard);
+        }
+        ret_guard.as_ref().unwrap().clone()
+    }
+    fn reset(&self) {
+        *self.val.lock() = None;
+    }
+}
+
+struct AFKRingBuffer {
+    offset: usize,
+    block_size: usize,
+    buf_size: usize,
+}
+
+struct AFKEndpoint {
+    index: u8,
+    iomem: Option<CoherentAllocation<u8>>,
+    txbuf: Option<AFKRingBuffer>,
+    rxbuf: Option<AFKRingBuffer>,
+    seq: u16,
+    calls: [Option<Arc<FutureValue<u32>>>; 8],
+}
+
+unsafe impl Send for AFKEndpoint {}
+
+impl AFKEndpoint {
+    fn new(index: u8) -> AFKEndpoint {
+        AFKEndpoint {
+            index,
+            iomem: None,
+            txbuf: None,
+            rxbuf: None,
+            seq: 0,
+            calls: [const { None }; 8],
+        }
+    }
+
+    fn start(&self, rtkit: &mut rtkit::RtKit<AopData>) -> Result<()> {
+        rtkit.send_message(self.index, AFK_MSG_INIT)
+    }
+
+    fn stop(&self, rtkit: &mut rtkit::RtKit<AopData>) -> Result<()> {
+        rtkit.send_message(self.index, AFK_MSG_SHUTDOWN)
+    }
+
+    fn recv_message(
+        &mut self,
+        client: ArcBorrow<'_, AopData>,
+        rtkit: &mut rtkit::RtKit<AopData>,
+        msg: u64,
+    ) -> Result<()> {
+        let opc = msg >> 48;
+        match opc {
+            AFK_OPC_INIT => {
+                rtkit.send_message(self.index, AFK_MSG_INIT_ACK)?;
+            }
+            AFK_OPC_GET_BUF => {
+                self.recv_get_buf(&client.dev, rtkit, msg)?;
+            }
+            AFK_OPC_INIT_UNK => {} // no-op
+            AFK_OPC_START_ACK => {}
+            AFK_OPC_INIT_RX => {
+                if self.rxbuf.is_some() {
+                    dev_err!(
+                        client.dev.as_ref(),
+                        "Got InitRX message with existing rxbuf at endpoint {}",
+                        self.index
+                    );
+                    return Err(EIO);
+                }
+                self.rxbuf = Some(self.parse_ring_buf(msg)?);
+                if self.txbuf.is_some() {
+                    rtkit.send_message(self.index, AFK_MSG_START)?;
+                }
+            }
+            AFK_OPC_INIT_TX => {
+                if self.txbuf.is_some() {
+                    dev_err!(
+                        client.dev.as_ref(),
+                        "Got InitTX message with existing txbuf at endpoint {}",
+                        self.index
+                    );
+                    return Err(EIO);
+                }
+                self.txbuf = Some(self.parse_ring_buf(msg)?);
+                if self.rxbuf.is_some() {
+                    rtkit.send_message(self.index, AFK_MSG_START)?;
+                }
+            }
+            AFK_OPC_RECV => {
+                self.recv_rb(client)?;
+            }
+            AFK_OPC_SHUTDOWN_ACK => {
+                client.shutdown_complete();
+            }
+            _ => dev_err!(
+                client.dev.as_ref(),
+                "AFK endpoint {} got unknown message {}",
+                self.index,
+                msg
+            ),
+        }
+        Ok(())
+    }
+
+    fn parse_ring_buf(&self, msg: u64) -> Result<AFKRingBuffer> {
+        let msg = msg as usize;
+        let size = ((msg >> 16) & 0xFFFF) * AFK_RB_BLOCK_STEP;
+        let offset = ((msg >> 32) & 0xFFFF) * AFK_RB_BLOCK_STEP;
+        let buf_size = self.iomem_read32(offset)? as usize;
+        let block_size = (size - buf_size) / 3;
+        Ok(AFKRingBuffer {
+            offset,
+            block_size,
+            buf_size,
+        })
+    }
+    fn iomem_write32(&mut self, off: usize, data: u32) -> Result<()> {
+        let size = core::mem::size_of::<u32>();
+        let data = data.to_le_bytes();
+        let iomem = self.iomem.as_ref().unwrap();
+        let buf = unsafe { iomem.as_slice_mut(off, size)? };
+        buf.copy_from_slice(&data);
+        Ok(())
+    }
+    fn iomem_read32(&self, off: usize) -> Result<u32> {
+        let size = core::mem::size_of::<u32>();
+        let iomem = self.iomem.as_ref().unwrap();
+        let buf = unsafe { iomem.as_slice(off, size)? };
+        Ok(u32::from_le_bytes(buf.try_into().unwrap()))
+    }
+    fn memcpy_from_iomem(&self, off: usize, target: &mut [u8]) -> Result<()> {
+        let iomem = self.iomem.as_ref().unwrap();
+        // SAFETY:
+        // as_slice() checks that off and target.len() are within iomem's limits.
+        unsafe {
+            let src = iomem.as_slice(off, target.len())?;
+            target.copy_from_slice(src);
+        }
+        Ok(())
+    }
+
+    fn memcpy_to_iomem(&self, off: usize, src: &[u8]) -> Result<()> {
+        let iomem = self.iomem.as_ref().unwrap();
+        // SAFETY:
+        // as_slice_mut() checks that off and src.len() are within iomem's limits.
+        unsafe {
+            let target = iomem.as_slice_mut(off, src.len())?;
+            target.copy_from_slice(src);
+        }
+        Ok(())
+    }
+
+    fn recv_get_buf(
+        &mut self,
+        dev: &platform::Device,
+        rtkit: &mut rtkit::RtKit<AopData>,
+        msg: u64,
+    ) -> Result<()> {
+        let size = ((msg & 0xFFFF0000) >> 16) as usize * AFK_RB_BLOCK_STEP;
+        if self.iomem.is_some() {
+            dev_err!(
+                dev.as_ref(),
+                "Got GetBuf message with existing buffer on endpoint {}",
+                self.index
+            );
+            return Err(EIO);
+        }
+        let iomem = CoherentAllocation::<u8>::alloc_coherent(dev, size, GFP_KERNEL)?;
+        rtkit.send_message(self.index, AFK_MSG_GET_BUF_ACK | iomem.dma_handle())?;
+        self.iomem = Some(iomem);
+        Ok(())
+    }
+
+    fn recv_rb(&mut self, client: ArcBorrow<'_, AopData>) -> Result<()> {
+        let (buf_offset, block_size, buf_size) = match self.rxbuf.as_ref() {
+            Some(b) => (b.offset, b.block_size, b.buf_size),
+            None => {
+                dev_err!(
+                    client.dev.as_ref(),
+                    "Got Recv message with no rxbuf at endpoint {}",
+                    self.index
+                );
+                return Err(EIO);
+            }
+        };
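+        // Ring layout: the first block holds the buffer size, the second the
+        // read pointer, the third the write pointer; payload data starts at
+        // block_size * 3.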
+        let mut rptr = self.iomem_read32(buf_offset + block_size)? as usize;
+        let mut wptr = self.iomem_read32(buf_offset + block_size * 2)?;
+        mem_sync();
+        let base = buf_offset + block_size * 3;
+        let mut msg_buf = KVec::new();
+        const QEH_SIZE: usize = mem::size_of::<QEHeader>();
+        while wptr as usize != rptr {
+            let mut qeh_bytes = [0; QEH_SIZE];
+            self.memcpy_from_iomem(base + rptr, &mut qeh_bytes)?;
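+            // Reinterpret the bytes just copied out of the ring buffer as a queue
+            // entry header; qeh_bytes is exactly QEH_SIZE bytes long.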
+            let mut qeh = unsafe { &*(qeh_bytes.as_ptr() as *const QEHeader) };
+            if qeh.magic != QE_MAGIC1 && qeh.magic != QE_MAGIC2 {
+                let magic = qeh.magic;
+                dev_err!(
+                    client.dev.as_ref(),
+                    "Invalid magic on ep {}, got {:x}",
+                    self.index,
+                    magic
+                );
+                return Err(EIO);
+            }
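+            // An entry that does not fit before the end of the data area means the
+            // writer wrapped; the real entry starts again at offset 0.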
+            if qeh.size as usize > (buf_size - rptr - QEH_SIZE) {
+                rptr = 0;
+                self.memcpy_from_iomem(base + rptr, &mut qeh_bytes)?;
+                qeh = unsafe { &*(qeh_bytes.as_ptr() as *const QEHeader) };
+
+                if qeh.magic != QE_MAGIC1 && qeh.magic != QE_MAGIC2 {
+                    let magic = qeh.magic;
+                    dev_err!(
+                        client.dev.as_ref(),
+                        "Invalid magic on ep {}, got {:x}",
+                        self.index,
+                        magic
+                    );
+                    return Err(EIO);
+                }
+            }
+            msg_buf.resize(qeh.size as usize, 0, GFP_KERNEL)?;
+            self.memcpy_from_iomem(base + rptr + QEH_SIZE, &mut msg_buf)?;
+            let (hdr_bytes, msg) = msg_buf.split_at(mem::size_of::<EPICHeader>());
+            let header = unsafe { &*(hdr_bytes.as_ptr() as *const EPICHeader) };
+            self.handle_ipc(client, qeh, header, msg)?;
+            rptr = align_up(rptr + QEH_SIZE + qeh.size as usize, block_size) % buf_size;
+            mem_sync();
+            self.iomem_write32(buf_offset + block_size, rptr as u32)?;
+            wptr = self.iomem_read32(buf_offset + block_size * 2)?;
+            mem_sync();
+        }
+        Ok(())
+    }
+    fn handle_ipc(
+        &mut self,
+        client: ArcBorrow<'_, AopData>,
+        qhdr: &QEHeader,
+        ehdr: &EPICHeader,
+        data: &[u8],
+    ) -> Result<()> {
+        let subtype = ehdr.subtype;
+        if ehdr.category == EPIC_CATEGORY_REPORT {
+            if subtype == EPIC_SUBTYPE_STD_SERVICE {
+                let announce = unsafe { &*(data.as_ptr() as *const EPICServiceAnnounce) };
+                let chan = announce.channel;
+                let name_len = announce
+                    .name
+                    .iter()
+                    .position(|x| *x == 0)
+                    .unwrap_or(announce.name.len());
+                return Into::<Arc<_>>::into(client).register_service(
+                    self,
+                    chan,
+                    &announce.name[..name_len],
+                );
+            } else if subtype == EPIC_SUBTYPE_FAKEHID_REPORT {
+                return client.process_fakehid_report(self, qhdr.channel, data);
+            } else {
+                dev_err!(
+                    client.dev.as_ref(),
+                    "Unexpected EPIC report subtype {:x} on endpoint {}",
+                    subtype,
+                    self.index
+                );
+                return Err(EIO);
+            }
+        } else if ehdr.category == EPIC_CATEGORY_REPLY {
+            if subtype == EPIC_SUBTYPE_RETCODE_PAYLOAD || subtype == EPIC_SUBTYPE_RETCODE {
+                if data.len() < mem::size_of::<u32>() {
+                    dev_err!(
+                        client.dev.as_ref(),
+                        "Retcode data too short on endpoint {}",
+                        self.index
+                    );
+                    return Err(EIO);
+                }
+                let retcode = u32::from_ne_bytes(data[..4].try_into().unwrap());
+                let tag = ehdr.tag as usize;
+                if tag == 0 || tag - 1 >= self.calls.len() || self.calls[tag - 1].is_none() {
+                    dev_err!(
+                        client.dev.as_ref(),
+                        "Got a retcode with invalid tag {:?} on endpoint {}",
+                        tag,
+                        self.index
+                    );
+                    return Err(EIO);
+                }
+                self.calls[tag - 1].take().unwrap().complete(retcode);
+                return Ok(());
+            } else {
+                dev_err!(
+                    client.dev.as_ref(),
+                    "Unexpected EPIC reply subtype {:x} on endpoint {}",
+                    subtype,
+                    self.index
+                );
+                return Err(EIO);
+            }
+        }
+        dev_err!(
+            client.dev.as_ref(),
+            "Unexpected EPIC category {:x} on endpoint {}",
+            ehdr.category,
+            self.index
+        );
+        Err(EIO)
+    }
+    fn send_rb(
+        &mut self,
+        client: &AopData,
+        rtkit: &mut rtkit::RtKit<AopData>,
+        channel: u32,
+        ty: u32,
+        header: &[u8],
+        data: &[u8],
+    ) -> Result<()> {
+        let (buf_offset, block_size, buf_size) = match self.txbuf.as_ref() {
+            Some(b) => (b.offset, b.block_size, b.buf_size),
+            None => {
+                dev_err!(
+                    client.dev.as_ref(),
+                    "Attempting to send message with no txbuf at endpoint {}",
+                    self.index
+                );
+                return Err(EIO);
+            }
+        };
+        let base = buf_offset + block_size * 3;
+        mem_sync();
+        let rptr = self.iomem_read32(buf_offset + block_size)? as usize;
+        let mut wptr = self.iomem_read32(buf_offset + block_size * 2)? as usize;
+        const QEH_SIZE: usize = mem::size_of::<QEHeader>();
+        if wptr < rptr && wptr + QEH_SIZE >= rptr {
+            dev_err!(
+                client.dev.as_ref(),
+                "Tx buffer full at endpoint {}",
+                self.index
+            );
+            return Err(EIO);
+        }
+        let payload_len = header.len() + data.len();
+        let qeh = QEHeader {
+            magic: QE_MAGIC1,
+            size: payload_len as u32,
+            channel,
+            ty,
+        };
+        let qeh_bytes = unsafe {
+            slice::from_raw_parts(
+                &qeh as *const QEHeader as *const u8,
+                mem::size_of::<QEHeader>(),
+            )
+        };
+        self.memcpy_to_iomem(base + wptr, qeh_bytes)?;
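+        // If the payload does not fit before the end of the data area, the header
+        // written above serves as a wrap marker for the reader and the whole entry
+        // is rewritten from the start of the buffer.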
+        if payload_len > buf_size - wptr - QEH_SIZE {
+            wptr = 0;
+            self.memcpy_to_iomem(base + wptr, qeh_bytes)?;
+        }
+        self.memcpy_to_iomem(base + wptr + QEH_SIZE, header)?;
+        self.memcpy_to_iomem(base + wptr + QEH_SIZE + header.len(), data)?;
+        wptr = align_up(wptr + QEH_SIZE + payload_len, block_size) % buf_size;
+        self.iomem_write32(buf_offset + block_size * 2, wptr as u32)?;
+        let msg = wptr as u64 | (AFK_OPC_SEND << 48);
+        rtkit.send_message(self.index, msg)
+    }
+    fn epic_notify(
+        &mut self,
+        client: &AopData,
+        rtkit: &mut rtkit::RtKit<AopData>,
+        channel: u32,
+        subtype: u16,
+        data: &[u8],
+    ) -> Result<Arc<FutureValue<u32>>> {
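+        // Call tags are 1-based; a tag of 0 means no free call slot was found.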
+        let mut tag = 0;
+        for i in 0..self.calls.len() {
+            if self.calls[i].is_none() {
+                tag = i + 1;
+                break;
+            }
+        }
+        if tag == 0 {
+            dev_err!(
+                client.dev.as_ref(),
+                "Too many inflight calls on endpoint {}",
+                self.index
+            );
+            return Err(EIO);
+        }
+        let call = Arc::pin_init(FutureValue::pin_init(), GFP_KERNEL)?;
+        let hdr = EPICHeader {
+            version: 2,
+            seq: self.seq,
+            length: data.len() as u32,
+            sub_version: 2,
+            category: EPIC_CATEGORY_NOTIFY,
+            subtype,
+            tag: tag as u16,
+            ..EPICHeader::default()
+        };
+        self.send_rb(
+            client,
+            rtkit,
+            channel,
+            EPIC_TYPE_NOTIFY,
+            unsafe {
+                slice::from_raw_parts(
+                    &hdr as *const EPICHeader as *const u8,
+                    mem::size_of::<EPICHeader>(),
+                )
+            },
+            data,
+        )?;
+        self.seq = self.seq.wrapping_add(1);
+        self.calls[tag - 1] = Some(call.clone());
+        Ok(call)
+    }
+}
+
+struct ListenerEntry {
+    svc: EPICService,
+    listener: Arc<dyn FakehidListener>,
+}
+
+unsafe impl Send for ListenerEntry {}
+
+#[pin_data]
+struct AopData {
+    dev: platform::Device,
+    aop_mmio: Devres<IoMem<AOP_MMIO_SIZE>>,
+    asc_mmio: Devres<IoMem<ASC_MMIO_SIZE>>,
+    #[pin]
+    rtkit: Mutex<Option<rtkit::RtKit<AopData>>>,
+    #[pin]
+    endpoints: [Mutex<AFKEndpoint>; AFK_ENDPOINT_COUNT as usize],
+    #[pin]
+    ep_shutdown: FutureValue<()>,
+    #[pin]
+    hid_listeners: Mutex<KVec<ListenerEntry>>,
+    #[pin]
+    subdevices: Mutex<KVec<*mut bindings::platform_device>>,
+}
+
+unsafe impl Send for AopData {}
+unsafe impl Sync for AopData {}
+
+#[pin_data]
+struct AopServiceRegisterWork {
+    name: &'static CStr,
+    data: Arc<AopData>,
+    service: EPICService,
+    #[pin]
+    work: Work<AopServiceRegisterWork>,
+}
+
+impl_has_work! {
+    impl HasWork<Self, 0> for AopServiceRegisterWork { self.work }
+}
+
+impl AopServiceRegisterWork {
+    fn new(name: &'static CStr, data: Arc<AopData>, service: EPICService) -> Result<Arc<Self>> {
+        Arc::pin_init(
+            pin_init!(AopServiceRegisterWork {
+                name, data, service,
+                work <- new_work!("AopServiceRegisterWork::work"),
+            }),
+            GFP_KERNEL,
+        )
+    }
+}
+
+impl WorkItem for AopServiceRegisterWork {
+    type Pointer = Arc<AopServiceRegisterWork>;
+
+    fn run(this: Arc<AopServiceRegisterWork>) {
+        let info = bindings::platform_device_info {
+            parent: this.data.dev.as_ref().as_raw(),
+            name: this.name.as_ptr() as *const _,
+            id: bindings::PLATFORM_DEVID_AUTO,
+            res: ptr::null_mut(),
+            num_res: 0,
+            data: &this.service as *const EPICService as *const _,
+            size_data: mem::size_of::<EPICService>(),
+            dma_mask: 0,
+            fwnode: ptr::null_mut(),
+            properties: ptr::null_mut(),
+            of_node_reused: false,
+        };
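+        // SAFETY: `info` and the name/data it points to outlive this call, and
+        // platform_device_register_full() copies the name and data it keeps.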
+        let pdev = unsafe { from_err_ptr(bindings::platform_device_register_full(&info)) };
+        match pdev {
+            Err(e) => {
+                dev_err!(
+                    this.data.dev.as_ref(),
+                    "Failed to create device for service {:?}: {:?}",
+                    this.name,
+                    e
+                );
+            }
+            Ok(pdev) => {
+                let res = this.data.subdevices.lock().push(pdev, GFP_KERNEL);
+                if res.is_err() {
+                    dev_err!(this.data.dev.as_ref(), "Failed to store subdevice");
+                }
+            }
+        }
+    }
+}
+
+impl AopData {
+    fn new(dev: &platform::Device) -> Result<Arc<AopData>> {
+        let aop_res = dev.resource(0).ok_or(EINVAL)?;
+        let asc_res = dev.resource(1).ok_or(EINVAL)?;
+        let aop_mmio = dev.ioremap_resource_sized::<AOP_MMIO_SIZE>(aop_res)?;
+        let asc_mmio = dev.ioremap_resource_sized::<ASC_MMIO_SIZE>(asc_res)?;
+        Arc::pin_init(
+            pin_init!(
+                AopData {
+                    dev: dev.clone(),
+                    aop_mmio,
+                    asc_mmio,
+                    rtkit <- new_mutex!(None),
+                    endpoints <- init::pin_init_array_from_fn(|i| {
+                        new_mutex!(AFKEndpoint::new(AFK_ENDPOINT_START + i as u8))
+                    }),
+                    ep_shutdown <- FutureValue::pin_init(),
+                    hid_listeners <- new_mutex!(KVec::new()),
+                    subdevices <- new_mutex!(KVec::new()),
+                }
+            ),
+            GFP_KERNEL,
+        )
+    }
+    fn start(&self) -> Result<()> {
+        {
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            rtk.wake()?;
+        }
+        for ep in 0..AFK_ENDPOINT_COUNT {
+            let rtk_ep_num = AFK_ENDPOINT_START + ep;
+            let mut guard = self.rtkit.lock();
+            let rtk = guard.as_mut().unwrap();
+            if !rtk.has_endpoint(rtk_ep_num) {
+                continue;
+            }
+            rtk.start_endpoint(rtk_ep_num)?;
+            let ep_guard = self.endpoints[ep as usize].lock();
+            ep_guard.start(rtk)?;
+        }
+        Ok(())
+    }
+    fn register_service(
+        self: Arc<Self>,
+        ep: &mut AFKEndpoint,
+        channel: u32,
+        name: &[u8],
+    ) -> Result<()> {
+        let svc = EPICService {
+            channel,
+            endpoint: ep.index,
+        };
+        let dev_name = match name {
+            b"aop-audio" => c_str!("snd_soc_apple_aop"),
+            b"las" => c_str!("iio_aop_las"),
+            b"als" => c_str!("iio_aop_als"),
+            _ => {
+                return Ok(());
+            }
+        };
+        // The subdevice's probe can call back into us, so register it from a
+        // workqueue with our locks dropped.
+        let work = AopServiceRegisterWork::new(dev_name, self, svc)?;
+        workqueue::system().enqueue(work).map_err(|_| ENOMEM)
+    }
+
+    fn process_fakehid_report(&self, ep: &AFKEndpoint, ch: u32, data: &[u8]) -> Result<()> {
+        let guard = self.hid_listeners.lock();
+        for entry in &*guard {
+            if entry.svc.endpoint == ep.index && entry.svc.channel == ch {
+                return entry.listener.process_fakehid_report(data);
+            }
+        }
+        Ok(())
+    }
+
+    fn shutdown_complete(&self) {
+        self.ep_shutdown.complete(());
+    }
+
+    fn stop(&self) -> Result<()> {
+        for ep in 0..AFK_ENDPOINT_COUNT {
+            {
+                let rtk_ep_num = AFK_ENDPOINT_START + ep;
+                let mut guard = self.rtkit.lock();
+                let rtk = guard.as_mut().unwrap();
+                if !rtk.has_endpoint(rtk_ep_num) {
+                    continue;
+                }
+                let ep_guard = self.endpoints[ep as usize].lock();
+                ep_guard.stop(rtk)?;
+            }
+            self.ep_shutdown.wait();
+            self.ep_shutdown.reset();
+        }
+        Ok(())
+    }
+
+    fn aop_read32(&self, off: usize) -> u32 {
+        if let Some(aop_mmio) = self.aop_mmio.try_access() {
+            aop_mmio.readl_relaxed(off)
+        } else {
+            0
+        }
+    }
+
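+    // The AOP bootargs region is a packed list of (FourCC key, u32 length, value)
+    // entries; rewrite the values of matching keys in place before the coprocessor
+    // is started.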
+    fn patch_bootargs(&self, patches: &[(u32, u64)]) -> Result<()> {
+        let offset = self.aop_read32(BOOTARGS_OFFSET) as usize;
+        let size = self.aop_read32(BOOTARGS_SIZE) as usize;
+        let mut arg_bytes = KVec::with_capacity(size, GFP_KERNEL)?;
+        arg_bytes.resize(size, 0, GFP_KERNEL)?;
+        {
+            let aop_mmio = self.aop_mmio.try_access().ok_or(ENXIO)?;
+            aop_mmio.try_memcpy_fromio(&mut arg_bytes, offset)?;
+        }
+        let mut idx = 0;
+        while idx < size {
+            let key = u32::from_le_bytes(arg_bytes[idx..idx + 4].try_into().unwrap());
+            let size = u32::from_le_bytes(arg_bytes[idx + 4..idx + 8].try_into().unwrap()) as usize;
+            idx += 8;
+            for (k, v) in patches.iter() {
+                if *k != key {
+                    continue;
+                }
+                arg_bytes[idx..idx + size].copy_from_slice(&(*v as u64).to_le_bytes()[..size]);
+                break;
+            }
+            idx += size;
+        }
+        {
+            let aop_mmio = self.aop_mmio.try_access().ok_or(ENXIO)?;
+            aop_mmio.try_memcpy_toio(offset, &arg_bytes)
+        }
+    }
+
+    fn start_cpu(&self) -> Result<()> {
+        let asc_mmio = self.asc_mmio.try_access().ok_or(ENXIO)?;
+        let val = asc_mmio.readl_relaxed(CPU_CONTROL);
+        asc_mmio.writel_relaxed(val | CPU_RUN, CPU_CONTROL);
+        Ok(())
+    }
+}
+
+impl AOP for AopData {
+    fn epic_call(&self, svc: &EPICService, subtype: u16, msg_bytes: &[u8]) -> Result<u32> {
+        let ep_idx = svc.endpoint - AFK_ENDPOINT_START;
+        let call = {
+            let mut rtk_guard = self.rtkit.lock();
+            let rtk = rtk_guard.as_mut().unwrap();
+            let mut ep_guard = self.endpoints[ep_idx as usize].lock();
+            ep_guard.epic_notify(self, rtk, svc.channel, subtype, msg_bytes)?
+        };
+        Ok(call.wait())
+    }
+    fn add_fakehid_listener(
+        &self,
+        svc: EPICService,
+        listener: Arc<dyn FakehidListener>,
+    ) -> Result<()> {
+        let mut guard = self.hid_listeners.lock();
+        Ok(guard.push(ListenerEntry { svc, listener }, GFP_KERNEL)?)
+    }
+    fn remove_fakehid_listener(&self, svc: &EPICService) -> bool {
+        let mut guard = self.hid_listeners.lock();
+        for i in 0..guard.len() {
+            if guard[i].svc == *svc {
+                guard.swap_remove(i);
+                return true;
+            }
+        }
+        false
+    }
+    fn remove(&self) {
+        if let Err(e) = self.stop() {
+            dev_err!(self.dev.as_ref(), "Failed to stop AOP: {:?}", e);
+        }
+        *self.rtkit.lock() = None;
+        let guard = self.subdevices.lock();
+        for pdev in &*guard {
+            unsafe {
+                bindings::platform_device_unregister(*pdev);
+            }
+        }
+    }
+}
+
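+// The AOP firmware is not expected to ask the driver for RTKit shared buffers,
+// so these hooks should never be reached.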
+struct NoBuffer;
+impl rtkit::Buffer for NoBuffer {
+    fn iova(&self) -> Result<usize> {
+        unreachable!()
+    }
+    fn buf(&mut self) -> Result<&mut [u8]> {
+        unreachable!()
+    }
+}
+
+#[vtable]
+impl rtkit::Operations for AopData {
+    type Data = Arc<AopData>;
+    type Buffer = NoBuffer;
+
+    fn recv_message(data: <Self::Data as ForeignOwnable>::Borrowed<'_>, ep: u8, msg: u64) {
+        let mut rtk = data.rtkit.lock();
+        let mut ep_guard = data.endpoints[(ep - AFK_ENDPOINT_START) as usize].lock();
+        let ret = ep_guard.recv_message(data, rtk.as_mut().unwrap(), msg);
+        if let Err(e) = ret {
+            dev_err!(
+                data.dev.as_ref(),
+                "Failed to handle rtkit message, error: {:?}",
+                e
+            );
+        }
+    }
+
+    fn crashed(data: <Self::Data as ForeignOwnable>::Borrowed<'_>, _crashlog: Option<&[u8]>) {
+        dev_err!(data.dev.as_ref(), "AOP firmware crashed");
+    }
+}
+
+#[repr(transparent)]
+struct AopDriver(Arc<dyn AOP>);
+
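+// Per-SoC values patched into the "EC0p", "alig" and "AOPt" bootargs before
+// booting the coprocessor; see probe().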
+struct AopHwConfig {
+    ec0p: u64,
+    alig: u64,
+    aopt: u64,
+}
+
+const HW_CFG_T8103: AopHwConfig = AopHwConfig {
+    ec0p: 0x020000,
+    aopt: 1,
+    alig: 128,
+};
+const HW_CFG_T8112: AopHwConfig = AopHwConfig {
+    ec0p: 0x020000,
+    aopt: 0,
+    alig: 128,
+};
+const HW_CFG_T6000: AopHwConfig = AopHwConfig {
+    ec0p: 0x020000,
+    aopt: 0,
+    alig: 64,
+};
+const HW_CFG_T6020: AopHwConfig = AopHwConfig {
+    ec0p: 0x0100_00000000,
+    aopt: 0,
+    alig: 64,
+};
+
+kernel::of_device_table!(
+    OF_TABLE,
+    MODULE_OF_TABLE,
+    <AopDriver as platform::Driver>::IdInfo,
+    [
+        (of::DeviceId::new(c_str!("apple,t8103-aop")), &HW_CFG_T8103),
+        (of::DeviceId::new(c_str!("apple,t8112-aop")), &HW_CFG_T8112),
+        (of::DeviceId::new(c_str!("apple,t6000-aop")), &HW_CFG_T6000),
+        (of::DeviceId::new(c_str!("apple,t6020-aop")), &HW_CFG_T6020),
+    ]
+);
+
+impl platform::Driver for AopDriver {
+    type IdInfo = &'static AopHwConfig;
+
+    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+    fn probe(
+        pdev: &mut platform::Device,
+        info: Option<&Self::IdInfo>,
+    ) -> Result<Pin<KBox<AopDriver>>> {
+        let cfg = info.ok_or(ENODEV)?;
+        pdev.dma_set_mask_and_coherent(dma_bit_mask(42))?;
+        let data = AopData::new(pdev)?;
+        data.patch_bootargs(&[
+            (from_fourcc(b"EC0p"), cfg.ec0p),
+            (from_fourcc(b"nCal"), 0x0),
+            (from_fourcc(b"alig"), cfg.alig),
+            (from_fourcc(b"AOPt"), cfg.aopt),
+        ])?;
+        let rtkit = rtkit::RtKit::<AopData>::new(pdev.as_ref(), None, 0, data.clone())?;
+        *data.rtkit.lock() = Some(rtkit);
+        let _ = data.start_cpu();
+        data.start()?;
+        let data = data as Arc<dyn AOP>;
+        Ok(KBox::pin(AopDriver(data), GFP_KERNEL)?)
+    }
+}
+
+impl Drop for AopDriver {
+    fn drop(&mut self) {
+        self.0.remove();
+    }
+}
+
+module_platform_driver! {
+    type: AopDriver,
+    name: "apple_aop",
+    license: "Dual MIT/GPL",
+}
diff --git a/drivers/soc/apple/apple-pmgr-misc.c b/drivers/soc/apple/apple-pmgr-misc.c
new file mode 100644
index 00000000000000..e768f34aacc586
--- /dev/null
+++ b/drivers/soc/apple/apple-pmgr-misc.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SoC PMGR device power state driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#define APPLE_CLKGEN_PSTATE 0
+#define APPLE_CLKGEN_PSTATE_DESIRED GENMASK(3, 0)
+
+#define SYS_DEV_PSTATE_SUSPEND 1
+
+enum sys_device {
+	DEV_FABRIC,
+	DEV_DCS,
+	DEV_MAX,
+};
+
+struct apple_pmgr_sys_device {
+	void __iomem *base;
+	u32 active_state;
+	u32 suspend_state;
+};
+
+struct apple_pmgr_misc {
+	struct device *dev;
+	struct apple_pmgr_sys_device devices[DEV_MAX];
+};
+
+static void apple_pmgr_sys_dev_set_pstate(struct apple_pmgr_misc *misc,
+					  enum sys_device dev, bool active)
+{
+	u32 pstate;
+	u32 val;
+
+	if (!misc->devices[dev].base)
+		return;
+
+	if (active)
+		pstate = misc->devices[dev].active_state;
+	else
+		pstate = misc->devices[dev].suspend_state;
+
+	dev_dbg(misc->dev, "set %d ps to pstate %d\n", dev, pstate);
+
+	val = readl_relaxed(misc->devices[dev].base + APPLE_CLKGEN_PSTATE);
+	val &= ~APPLE_CLKGEN_PSTATE_DESIRED;
+	val |= FIELD_PREP(APPLE_CLKGEN_PSTATE_DESIRED, pstate);
+	writel_relaxed(val, misc->devices[dev].base + APPLE_CLKGEN_PSTATE);
+}
+
+static int __maybe_unused apple_pmgr_misc_suspend_noirq(struct device *dev)
+{
+	struct apple_pmgr_misc *misc = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < DEV_MAX; i++)
+		apple_pmgr_sys_dev_set_pstate(misc, i, false);
+
+	return 0;
+}
+
+static int __maybe_unused apple_pmgr_misc_resume_noirq(struct device *dev)
+{
+	struct apple_pmgr_misc *misc = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < DEV_MAX; i++)
+		apple_pmgr_sys_dev_set_pstate(misc, i, true);
+
+	return 0;
+}
+
+static bool apple_pmgr_init_device(struct apple_pmgr_misc *misc,
+				   enum sys_device dev, const char *device_name)
+{
+	void __iomem *base;
+	char name[32];
+	u32 val;
+
+	snprintf(name, sizeof(name), "%s-ps", device_name);
+
+	base = devm_platform_ioremap_resource_byname(
+		to_platform_device(misc->dev), name);
+	if (IS_ERR(base))
+		return false;
+
+	val = readl_relaxed(base + APPLE_CLKGEN_PSTATE);
+
+	misc->devices[dev].base = base;
+	misc->devices[dev].active_state =
+		FIELD_GET(APPLE_CLKGEN_PSTATE_DESIRED, val);
+	misc->devices[dev].suspend_state = SYS_DEV_PSTATE_SUSPEND;
+
+	snprintf(name, sizeof(name), "apple,%s-min-ps", device_name);
+	of_property_read_u32(misc->dev->of_node, name,
+			     &misc->devices[dev].suspend_state);
+
+	return true;
+}
+
+static int apple_pmgr_misc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct apple_pmgr_misc *misc;
+	int ret = -ENODEV;
+
+	misc = devm_kzalloc(dev, sizeof(*misc), GFP_KERNEL);
+	if (!misc)
+		return -ENOMEM;
+
+	misc->dev = dev;
+
+	if (apple_pmgr_init_device(misc, DEV_FABRIC, "fabric"))
+		ret = 0;
+
+	if (apple_pmgr_init_device(misc, DEV_DCS, "dcs"))
+		ret = 0;
+
+	platform_set_drvdata(pdev, misc);
+
+	return ret;
+}
+
+static const struct of_device_id apple_pmgr_misc_of_match[] = {
+	{ .compatible = "apple,t6000-pmgr-misc" },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, apple_pmgr_misc_of_match);
+
+static const struct dev_pm_ops apple_pmgr_misc_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(apple_pmgr_misc_suspend_noirq,
+				      apple_pmgr_misc_resume_noirq)
+};
+
+static struct platform_driver apple_pmgr_misc_driver = {
+	.probe = apple_pmgr_misc_probe,
+	.driver = {
+		.name = "apple-pmgr-misc",
+		.of_match_table = apple_pmgr_misc_of_match,
+		.pm = pm_ptr(&apple_pmgr_misc_pm_ops),
+	},
+};
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("PMGR misc driver for Apple SoCs");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(apple_pmgr_misc_driver);
diff --git a/drivers/soc/apple/dockchannel.c b/drivers/soc/apple/dockchannel.c
new file mode 100644
index 00000000000000..3a0d7964007c95
--- /dev/null
+++ b/drivers/soc/apple/dockchannel.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple DockChannel FIFO driver
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+#include <linux/soc/apple/dockchannel.h>
+#include <linux/unaligned.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define DOCKCHANNEL_MAX_IRQ	32
+
+#define DOCKCHANNEL_TX_TIMEOUT_MS 1000
+#define DOCKCHANNEL_RX_TIMEOUT_MS 1000
+
+#define IRQ_MASK		0x0
+#define IRQ_FLAG		0x4
+
+#define IRQ_TX			BIT(0)
+#define IRQ_RX			BIT(1)
+
+#define CONFIG_TX_THRESH	0x0
+#define CONFIG_RX_THRESH	0x4
+
+#define DATA_TX8		0x4
+#define DATA_TX16		0x8
+#define DATA_TX24		0xc
+#define DATA_TX32		0x10
+#define DATA_TX_FREE		0x14
+#define DATA_RX8		0x1c
+#define DATA_RX16		0x20
+#define DATA_RX24		0x24
+#define DATA_RX32		0x28
+#define DATA_RX_COUNT		0x2c
+
+struct dockchannel {
+	struct device *dev;
+	int tx_irq;
+	int rx_irq;
+
+	void __iomem *config_base;
+	void __iomem *data_base;
+
+	u32 fifo_size;
+	bool awaiting;
+	struct completion tx_comp;
+	struct completion rx_comp;
+
+	void *cookie;
+	void (*data_available)(void *cookie, size_t avail);
+};
+
+struct dockchannel_common {
+	struct device *dev;
+	struct irq_domain *domain;
+	int irq;
+
+	void __iomem *irq_base;
+};
+
+/* Dockchannel FIFO functions */
+
+static irqreturn_t dockchannel_tx_irq(int irq, void *data)
+{
+	struct dockchannel *dockchannel = data;
+
+	disable_irq_nosync(irq);
+	complete(&dockchannel->tx_comp);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dockchannel_rx_irq(int irq, void *data)
+{
+	struct dockchannel *dockchannel = data;
+
+	disable_irq_nosync(irq);
+
+	if (dockchannel->awaiting) {
+		return IRQ_WAKE_THREAD;
+	} else {
+		complete(&dockchannel->rx_comp);
+		return IRQ_HANDLED;
+	}
+}
+
+static irqreturn_t dockchannel_rx_irq_thread(int irq, void *data)
+{
+	struct dockchannel *dockchannel = data;
+	size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT);
+
+	dockchannel->awaiting = false;
+	dockchannel->data_available(dockchannel->cookie, avail);
+
+	return IRQ_HANDLED;
+}
+
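+/*
+ * Blocking write: when the TX FIFO reports no free space, arm the TX threshold
+ * interrupt and sleep on the completion until space frees up or the transfer
+ * times out.
+ */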
+int dockchannel_send(struct dockchannel *dockchannel, const void *buf, size_t count)
+{
+	size_t left = count;
+	const u8 *p = buf;
+
+	while (left > 0) {
+		size_t avail = readl_relaxed(dockchannel->data_base + DATA_TX_FREE);
+		size_t block = min(left, avail);
+
+		if (avail == 0) {
+			size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left);
+
+			writel_relaxed(threshold, dockchannel->config_base + CONFIG_TX_THRESH);
+			reinit_completion(&dockchannel->tx_comp);
+			enable_irq(dockchannel->tx_irq);
+
+			if (!wait_for_completion_timeout(&dockchannel->tx_comp,
+                                                 msecs_to_jiffies(DOCKCHANNEL_TX_TIMEOUT_MS))) {
+				disable_irq(dockchannel->tx_irq);
+				return -ETIMEDOUT;
+			}
+
+			continue;
+		}
+
+		while (block >= 4) {
+			writel_relaxed(get_unaligned_le32(p), dockchannel->data_base + DATA_TX32);
+			p += 4;
+			left -= 4;
+			block -= 4;
+		}
+		while (block > 0) {
+			writeb_relaxed(*p++, dockchannel->data_base + DATA_TX8);
+			left--;
+			block--;
+		}
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(dockchannel_send);
+
+int dockchannel_recv(struct dockchannel *dockchannel, void *buf, size_t count)
+{
+	size_t left = count;
+	u8 *p = buf;
+
+	while (left > 0) {
+		size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT);
+		size_t block = min(left, avail);
+
+		if (avail == 0) {
+			size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left);
+
+			writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH);
+			reinit_completion(&dockchannel->rx_comp);
+			enable_irq(dockchannel->rx_irq);
+
+			if (!wait_for_completion_timeout(&dockchannel->rx_comp,
+                                                 msecs_to_jiffies(DOCKCHANNEL_RX_TIMEOUT_MS))) {
+				disable_irq(dockchannel->rx_irq);
+				return -ETIMEDOUT;
+			}
+
+			continue;
+		}
+
+		while (block >= 4) {
+			put_unaligned_le32(readl_relaxed(dockchannel->data_base + DATA_RX32), p);
+			p += 4;
+			left -= 4;
+			block -= 4;
+		}
+		while (block > 0) {
+			*p++ = readl_relaxed(dockchannel->data_base + DATA_RX8) >> 8;
+			left--;
+			block--;
+		}
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(dockchannel_recv);
+
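+/*
+ * Arm the RX threshold interrupt and have the callback invoked from the IRQ
+ * thread once at least min(count, FIFO size) bytes are available. A count of
+ * zero cancels a pending await.
+ */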
+int dockchannel_await(struct dockchannel *dockchannel,
+			    void (*callback)(void *cookie, size_t avail),
+			    void *cookie, size_t count)
+{
+	size_t threshold = min((size_t)dockchannel->fifo_size, count);
+
+	if (!count) {
+		dockchannel->awaiting = false;
+		disable_irq(dockchannel->rx_irq);
+		return 0;
+	}
+
+	dockchannel->data_available = callback;
+	dockchannel->cookie = cookie;
+	dockchannel->awaiting = true;
+	writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH);
+	enable_irq(dockchannel->rx_irq);
+
+	return threshold;
+}
+EXPORT_SYMBOL(dockchannel_await);
+
+struct dockchannel *dockchannel_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dockchannel *dockchannel;
+	int ret;
+
+	dockchannel = devm_kzalloc(dev, sizeof(*dockchannel), GFP_KERNEL);
+	if (!dockchannel)
+		return ERR_PTR(-ENOMEM);
+
+	dockchannel->dev = dev;
+	dockchannel->config_base = devm_platform_ioremap_resource_byname(pdev, "config");
+	if (IS_ERR(dockchannel->config_base))
+		return (__force void *)dockchannel->config_base;
+
+	dockchannel->data_base = devm_platform_ioremap_resource_byname(pdev, "data");
+	if (IS_ERR(dockchannel->data_base))
+		return (__force void *)dockchannel->data_base;
+
+	ret = of_property_read_u32(dev->of_node, "apple,fifo-size", &dockchannel->fifo_size);
+	if (ret)
+		return ERR_PTR(dev_err_probe(dev, ret, "Missing apple,fifo-size property"));
+
+	init_completion(&dockchannel->tx_comp);
+	init_completion(&dockchannel->rx_comp);
+
+	dockchannel->tx_irq = platform_get_irq_byname(pdev, "tx");
+	if (dockchannel->tx_irq <= 0) {
+		return ERR_PTR(dev_err_probe(dev, dockchannel->tx_irq,
+				     "Failed to get TX IRQ"));
+	}
+
+	dockchannel->rx_irq = platform_get_irq_byname(pdev, "rx");
+	if (dockchannel->rx_irq <= 0) {
+		return ERR_PTR(dev_err_probe(dev, dockchannel->rx_irq,
+				     "Failed to get RX IRQ"));
+	}
+
+	ret = devm_request_irq(dev, dockchannel->tx_irq, dockchannel_tx_irq, IRQF_NO_AUTOEN,
+			       "apple-dockchannel-tx", dockchannel);
+	if (ret)
+		return ERR_PTR(dev_err_probe(dev, ret, "Failed to request TX IRQ"));
+
+	ret = devm_request_threaded_irq(dev, dockchannel->rx_irq, dockchannel_rx_irq,
+					dockchannel_rx_irq_thread, IRQF_NO_AUTOEN,
+					"apple-dockchannel-rx", dockchannel);
+	if (ret)
+		return ERR_PTR(dev_err_probe(dev, ret, "Failed to request RX IRQ"));
+
+	return dockchannel;
+}
+EXPORT_SYMBOL(dockchannel_init);
+
+
+/* Dockchannel IRQchip */
+
+static void dockchannel_irq(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct dockchannel_common *dcc = irq_get_handler_data(irq);
+	unsigned long flags = readl_relaxed(dcc->irq_base + IRQ_FLAG);
+	int bit;
+
+	chained_irq_enter(chip, desc);
+
+	for_each_set_bit(bit, &flags, DOCKCHANNEL_MAX_IRQ)
+		generic_handle_domain_irq(dcc->domain, bit);
+
+	chained_irq_exit(chip, desc);
+}
+
+static void dockchannel_irq_ack(struct irq_data *data)
+{
+	struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data);
+	unsigned int hwirq = data->hwirq;
+
+	writel_relaxed(BIT(hwirq), dcc->irq_base + IRQ_FLAG);
+}
+
+static void dockchannel_irq_mask(struct irq_data *data)
+{
+	struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data);
+	unsigned int hwirq = data->hwirq;
+	u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK);
+
+	writel_relaxed(val & ~BIT(hwirq), dcc->irq_base + IRQ_MASK);
+}
+
+static void dockchannel_irq_unmask(struct irq_data *data)
+{
+	struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data);
+	unsigned int hwirq = data->hwirq;
+	u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK);
+
+	writel_relaxed(val | BIT(hwirq), dcc->irq_base + IRQ_MASK);
+}
+
+static const struct irq_chip dockchannel_irqchip = {
+	.name = "dockchannel-irqc",
+	.irq_ack = dockchannel_irq_ack,
+	.irq_mask = dockchannel_irq_mask,
+	.irq_unmask = dockchannel_irq_unmask,
+};
+
+static int dockchannel_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				      irq_hw_number_t hw)
+{
+	irq_set_chip_data(virq, d->host_data);
+	irq_set_chip_and_handler(virq, &dockchannel_irqchip, handle_level_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops dockchannel_irq_domain_ops = {
+	.xlate	= irq_domain_xlate_twocell,
+	.map	= dockchannel_irq_domain_map,
+};
+
+static int dockchannel_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dockchannel_common *dcc;
+	struct device_node *child;
+
+	dcc = devm_kzalloc(dev, sizeof(*dcc), GFP_KERNEL);
+	if (!dcc)
+		return -ENOMEM;
+
+	dcc->dev = dev;
+	platform_set_drvdata(pdev, dcc);
+
+	dcc->irq_base = devm_platform_ioremap_resource_byname(pdev, "irq");
+	if (IS_ERR(dcc->irq_base))
+		return PTR_ERR(dcc->irq_base);
+
+	writel_relaxed(0, dcc->irq_base + IRQ_MASK);
+	writel_relaxed(~0, dcc->irq_base + IRQ_FLAG);
+
+	dcc->domain = irq_domain_add_linear(dev->of_node, DOCKCHANNEL_MAX_IRQ,
+					    &dockchannel_irq_domain_ops, dcc);
+	if (!dcc->domain)
+		return -ENOMEM;
+
+	dcc->irq = platform_get_irq(pdev, 0);
+	if (dcc->irq <= 0)
+		return dev_err_probe(dev, dcc->irq, "Failed to get IRQ");
+
+	irq_set_handler_data(dcc->irq, dcc);
+	irq_set_chained_handler(dcc->irq, dockchannel_irq);
+
+	for_each_child_of_node(dev->of_node, child)
+		of_platform_device_create(child, NULL, dev);
+
+	return 0;
+}
+
+static void dockchannel_remove(struct platform_device *pdev)
+{
+	struct dockchannel_common *dcc = platform_get_drvdata(pdev);
+	int hwirq;
+
+	device_for_each_child(&pdev->dev, NULL, of_platform_device_destroy);
+
+	irq_set_chained_handler_and_data(dcc->irq, NULL, NULL);
+
+	for (hwirq = 0; hwirq < DOCKCHANNEL_MAX_IRQ; hwirq++)
+		irq_dispose_mapping(irq_find_mapping(dcc->domain, hwirq));
+
+	irq_domain_remove(dcc->domain);
+
+	writel_relaxed(0, dcc->irq_base + IRQ_MASK);
+	writel_relaxed(~0, dcc->irq_base + IRQ_FLAG);
+}
+
+static const struct of_device_id dockchannel_of_match[] = {
+	{ .compatible = "apple,dockchannel" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dockchannel_of_match);
+
+static struct platform_driver dockchannel_driver = {
+	.driver = {
+		.name = "dockchannel",
+		.of_match_table = dockchannel_of_match,
+	},
+	.probe = dockchannel_probe,
+	.remove = dockchannel_remove,
+};
+module_platform_driver(dockchannel_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple DockChannel driver");
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
index 49a0955e82d6cf..00a88c3d148ffc 100644
--- a/drivers/soc/apple/mailbox.c
+++ b/drivers/soc/apple/mailbox.c
@@ -28,9 +28,9 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/soc/apple/mailbox.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-#include "mailbox.h"
 
 #define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
 #define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
diff --git a/drivers/soc/apple/rtkit-helper.c b/drivers/soc/apple/rtkit-helper.c
new file mode 100644
index 00000000000000..080d083ed9bd2f
--- /dev/null
+++ b/drivers/soc/apple/rtkit-helper.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple Generic RTKit helper coprocessor
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/apple/rtkit.h>
+
+#define APPLE_ASC_CPU_CONTROL		0x44
+#define APPLE_ASC_CPU_CONTROL_RUN	BIT(4)
+
+struct apple_rtkit_helper {
+	struct device *dev;
+	struct apple_rtkit *rtk;
+
+	void __iomem *asc_base;
+
+	struct resource *sram;
+	void __iomem *sram_base;
+};
+
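+/*
+ * Buffers requested with a preset IOVA must fall inside the optional SRAM
+ * carveout and are mapped directly; an IOVA of zero means the coprocessor
+ * wants the driver to allocate coherent DMA memory for it.
+ */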
+static int apple_rtkit_helper_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	struct apple_rtkit_helper *helper = cookie;
+	struct resource res = {
+		.start = bfr->iova,
+		.end = bfr->iova + bfr->size - 1,
+		.name = "rtkit_map",
+	};
+
+	if (!bfr->iova) {
+		bfr->buffer = dma_alloc_coherent(helper->dev, bfr->size,
+						    &bfr->iova, GFP_KERNEL);
+		if (!bfr->buffer)
+			return -ENOMEM;
+		return 0;
+	}
+
+	if (!helper->sram) {
+		dev_err(helper->dev,
+			"RTKit buffer request with no SRAM region: %pR", &res);
+		return -EFAULT;
+	}
+
+	res.flags = helper->sram->flags;
+
+	if (res.end < res.start || !resource_contains(helper->sram, &res)) {
+		dev_err(helper->dev,
+			"RTKit buffer request outside SRAM region: %pR", &res);
+		return -EFAULT;
+	}
+
+	bfr->iomem = helper->sram_base + (res.start - helper->sram->start);
+	bfr->is_mapped = true;
+
+	return 0;
+}
+
+static void apple_rtkit_helper_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	// no-op
+}
+
+static const struct apple_rtkit_ops apple_rtkit_helper_ops = {
+	.shmem_setup = apple_rtkit_helper_shmem_setup,
+	.shmem_destroy = apple_rtkit_helper_shmem_destroy,
+};
+
+static int apple_rtkit_helper_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct apple_rtkit_helper *helper;
+	int ret;
+
+	/* 44 bits for addresses in standard RTKit requests */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+	if (ret)
+		return ret;
+
+	helper = devm_kzalloc(dev, sizeof(*helper), GFP_KERNEL);
+	if (!helper)
+		return -ENOMEM;
+
+	helper->dev = dev;
+	platform_set_drvdata(pdev, helper);
+
+	helper->asc_base = devm_platform_ioremap_resource_byname(pdev, "asc");
+	if (IS_ERR(helper->asc_base))
+		return PTR_ERR(helper->asc_base);
+
+	helper->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	if (helper->sram) {
+		helper->sram_base = devm_ioremap_resource(dev, helper->sram);
+		if (IS_ERR(helper->sram_base))
+			return dev_err_probe(dev, PTR_ERR(helper->sram_base),
+					"Failed to map SRAM region");
+	}
+
+	helper->rtk =
+		devm_apple_rtkit_init(dev, helper, NULL, 0, &apple_rtkit_helper_ops);
+	if (IS_ERR(helper->rtk))
+		return dev_err_probe(dev, PTR_ERR(helper->rtk),
+				     "Failed to initialize RTKit");
+
+	writel_relaxed(APPLE_ASC_CPU_CONTROL_RUN,
+		       helper->asc_base + APPLE_ASC_CPU_CONTROL);
+
+	/* Works for both wake and boot */
+	ret = apple_rtkit_wake(helper->rtk);
+	if (ret != 0)
+		return dev_err_probe(dev, ret, "Failed to wake up coprocessor");
+
+	return 0;
+}
+
+static void apple_rtkit_helper_remove(struct platform_device *pdev)
+{
+	struct apple_rtkit_helper *helper = platform_get_drvdata(pdev);
+
+	if (apple_rtkit_is_running(helper->rtk))
+		apple_rtkit_quiesce(helper->rtk);
+
+	writel_relaxed(0, helper->asc_base + APPLE_ASC_CPU_CONTROL);
+}
+
+static const struct of_device_id apple_rtkit_helper_of_match[] = {
+	{ .compatible = "apple,rtk-helper-asc4" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, apple_rtkit_helper_of_match);
+
+static struct platform_driver apple_rtkit_helper_driver = {
+	.driver = {
+		.name = "rtkit-helper",
+		.of_match_table = apple_rtkit_helper_of_match,
+	},
+	.probe = apple_rtkit_helper_probe,
+	.remove = apple_rtkit_helper_remove,
+};
+module_platform_driver(apple_rtkit_helper_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple RTKit helper driver");
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 27c9fa745fd528..c82065a8bf7b03 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -15,9 +15,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/soc/apple/mailbox.h>
 #include <linux/soc/apple/rtkit.h>
 #include <linux/workqueue.h>
-#include "mailbox.h"
 
 #define APPLE_RTKIT_APP_ENDPOINT_START 0x20
 #define APPLE_RTKIT_MAX_ENDPOINTS 0x100
@@ -44,6 +44,7 @@ struct apple_rtkit {
 
 	struct apple_rtkit_shmem ioreport_buffer;
 	struct apple_rtkit_shmem crashlog_buffer;
+	struct apple_rtkit_shmem oslog_buffer;
 
 	struct apple_rtkit_shmem syslog_buffer;
 	char *syslog_msg_buffer;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index e6d940292c9fbd..120b922ba03ef9 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -12,6 +12,7 @@ enum {
 	APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
 	APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
 	APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+	APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
 };
 
 enum {
@@ -21,6 +22,7 @@ enum {
 	APPLE_RTKIT_EP_DEBUG = 3,
 	APPLE_RTKIT_EP_IOREPORT = 4,
 	APPLE_RTKIT_EP_OSLOG = 8,
+	APPLE_RTKIT_EP_TRACEKIT = 0xa,
 };
 
 #define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52)
@@ -66,8 +68,9 @@ enum {
 #define APPLE_RTKIT_SYSLOG_MSG_SIZE  GENMASK_ULL(31, 24)
 
 #define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
-#define APPLE_RTKIT_OSLOG_INIT	1
-#define APPLE_RTKIT_OSLOG_ACK	3
+#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
+#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
+#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
 
 #define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
 #define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
@@ -97,12 +100,20 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
 }
 EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
 
-static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
 					u64 msg)
 {
+	int ret;
+
 	msg &= ~APPLE_RTKIT_MGMT_TYPE;
 	msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
-	apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+	ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+
+	if (ret)
+		dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
+
+	return ret;
 }
 
 static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -182,6 +193,7 @@ static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg)
 		case APPLE_RTKIT_EP_DEBUG:
 		case APPLE_RTKIT_EP_IOREPORT:
 		case APPLE_RTKIT_EP_OSLOG:
+		case APPLE_RTKIT_EP_TRACEKIT:
 			dev_dbg(rtk->dev,
 				"RTKit: Starting system endpoint 0x%02x\n", ep);
 			apple_rtkit_start_ep(rtk, ep);
@@ -251,15 +263,20 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
 					    struct apple_rtkit_shmem *buffer,
 					    u8 ep, u64 msg)
 {
-	size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
 	u64 reply;
 	int err;
 
+	if (ep == APPLE_RTKIT_EP_OSLOG) {
+		buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
+		buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
+	} else {
+		buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
+		buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+	}
+
 	buffer->buffer = NULL;
 	buffer->iomem = NULL;
 	buffer->is_mapped = false;
-	buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
-	buffer->size = n_4kpages << 12;
 
 	dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
 		buffer->size, &buffer->iova);
@@ -284,17 +301,30 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
 	}
 
 	if (!buffer->is_mapped) {
-		reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
-				   APPLE_RTKIT_BUFFER_REQUEST);
-		reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
-		reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
-				    buffer->iova);
+		/* oslog uses different fields */
+		if (ep == APPLE_RTKIT_EP_OSLOG) {
+			reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
+					   APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
+			reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
+			reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
+					    buffer->iova >> 12);
+		} else {
+			reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+					   APPLE_RTKIT_BUFFER_REQUEST);
+			reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
+					    buffer->size >> 12);
+			reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+					    buffer->iova);
+		}
 		apple_rtkit_send_message(rtk, ep, reply, NULL, false);
 	}
 
 	return 0;
 
 error:
+	dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
+		buffer->size, err);
+
 	buffer->buffer = NULL;
 	buffer->iomem = NULL;
 	buffer->iova = 0;
@@ -334,7 +364,7 @@ static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst,
 static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
 {
 	u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
-	u8 *bfr;
+	u8 *bfr __free(kfree) = NULL;
 
 	if (type != APPLE_RTKIT_CRASHLOG_CRASH) {
 		dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n",
@@ -360,7 +390,6 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
 		apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
 				   rtk->crashlog_buffer.size);
 		apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
-		kfree(bfr);
 	} else {
 		dev_err(rtk->dev,
 			"RTKit: Couldn't allocate crashlog shadow buffer\n");
@@ -368,7 +397,7 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
 
 	rtk->crashed = true;
 	if (rtk->ops->crashed)
-		rtk->ops->crashed(rtk->cookie);
+		rtk->ops->crashed(rtk->cookie, bfr, bfr ? rtk->crashlog_buffer.size : 0);
 }
 
 static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
@@ -448,7 +477,7 @@ static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
 
 	log_context[sizeof(log_context) - 1] = 0;
 
-	msglen = rtk->syslog_msg_size - 1;
+	msglen = strnlen(rtk->syslog_msg_buffer, rtk->syslog_msg_size - 1);
 	while (msglen > 0 &&
 		   should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
 		msglen--;
@@ -482,25 +511,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
 	}
 }
 
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
-{
-	u64 ack;
-
-	dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
-	ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
-	apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
-}
-
 static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
 {
 	u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
 
 	switch (type) {
-	case APPLE_RTKIT_OSLOG_INIT:
-		apple_rtkit_oslog_rx_init(rtk, msg);
+	case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
+		apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
+						 APPLE_RTKIT_EP_OSLOG, msg);
 		break;
 	default:
-		dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+		dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
+			 msg);
 	}
 }
 
@@ -588,11 +610,18 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
 		.msg1 = ep,
 	};
 
-	if (rtk->crashed)
+	if (rtk->crashed) {
+		dev_warn(rtk->dev,
+			 "RTKit: Device is crashed, cannot send message\n");
 		return -EINVAL;
+	}
+
 	if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
-	    !apple_rtkit_is_running(rtk))
+	    !apple_rtkit_is_running(rtk)) {
+		dev_warn(rtk->dev,
+			 "RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
 		return -EINVAL;
+	}
 
 	/*
 	 * The message will be sent with a MMIO write. We need the barrier
@@ -611,6 +640,12 @@ int apple_rtkit_poll(struct apple_rtkit *rtk)
 }
 EXPORT_SYMBOL_GPL(apple_rtkit_poll);
 
+bool apple_rtkit_has_endpoint(struct apple_rtkit *rtk, u8 ep)
+{
+	return test_bit(ep, rtk->endpoints);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_has_endpoint);
+
 int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
 {
 	u64 msg;
@@ -667,7 +702,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
 	rtk->mbox->rx = apple_rtkit_rx;
 	rtk->mbox->cookie = rtk;
 
-	rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+	rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
 					  dev_name(rtk->dev));
 	if (!rtk->wq) {
 		ret = -ENOMEM;
@@ -710,6 +745,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
 
 	apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
 	apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+	apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
 	apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
 
 	kfree(rtk->syslog_msg_buffer);
@@ -742,8 +778,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
 	reinit_completion(&rtk->ap_pwr_ack_completion);
 
 	msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
-	apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
-				    msg);
+	ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+					  msg);
+	if (ret)
+		return ret;
 
 	ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
 	if (ret)
@@ -763,8 +801,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
 	reinit_completion(&rtk->iop_pwr_ack_completion);
 
 	msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
-	apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
-				    msg);
+	ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+					  msg);
+	if (ret)
+		return ret;
 
 	ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
 	if (ret)
@@ -865,6 +905,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
 int apple_rtkit_wake(struct apple_rtkit *rtk)
 {
 	u64 msg;
+	int ret;
 
 	if (apple_rtkit_is_running(rtk))
 		return -EINVAL;
@@ -875,9 +916,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
 	 * Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
 	 * will wait for the completion anyway.
 	 */
-	msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
-	apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
-				    msg);
+	msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
+	ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+					  msg);
+	if (ret)
+		return ret;
 
 	return apple_rtkit_boot(rtk);
 }
@@ -890,6 +933,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
 
 	apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
 	apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+	apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
 	apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
 
 	kfree(rtk->syslog_msg_buffer);
@@ -921,6 +965,12 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
 }
 EXPORT_SYMBOL_GPL(devm_apple_rtkit_init);
 
+void devm_apple_rtkit_free(struct device *dev, struct apple_rtkit *rtk)
+{
+	devm_release_action(dev, apple_rtkit_free_wrapper, rtk);
+}
+EXPORT_SYMBOL_GPL(devm_apple_rtkit_free);
+
 MODULE_LICENSE("Dual MIT/GPL");
 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
 MODULE_DESCRIPTION("Apple RTKit driver");
diff --git a/drivers/soc/apple/sep.rs b/drivers/soc/apple/sep.rs
new file mode 100644
index 00000000000000..f9e2a7e6f65ee8
--- /dev/null
+++ b/drivers/soc/apple/sep.rs
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![recursion_limit = "2048"]
+
+//! Apple SEP driver
+//!
+//! Copyright (C) The Asahi Linux Contributors
+
+use core::sync::atomic::{AtomicBool, Ordering};
+
+use kernel::{
+    bindings, c_str, device, dma, module_platform_driver, new_mutex, of, platform,
+    prelude::*,
+    soc::apple::mailbox::{MailCallback, Mailbox, Message},
+    sync::{Arc, Mutex},
+    types::{ARef, ForeignOwnable},
+    workqueue::{self, impl_has_work, new_work, Work, WorkItem},
+};
+
+const SHMEM_SIZE: usize = 0x30000;
+const MSG_BOOT_TZ0: u64 = 0x5;
+const MSG_BOOT_IMG4: u64 = 0x6;
+const MSG_SET_SHMEM: u64 = 0x18;
+const MSG_BOOT_TZ0_ACK1: u64 = 0x69;
+const MSG_BOOT_TZ0_ACK2: u64 = 0xD2;
+const MSG_BOOT_IMG4_ACK: u64 = 0x6A;
+const MSG_ADVERTISE_EP: u64 = 0;
+const EP_DISCOVER: u64 = 0xFD;
+const EP_SHMEM: u64 = 0xFE;
+const EP_BOOT: u64 = 0xFF;
+
+const MSG_TYPE_SHIFT: u32 = 16;
+const MSG_TYPE_MASK: u64 = 0xFF;
+//const MSG_PARAM_SHIFT: u32 = 24;
+//const MSG_PARAM_MASK: u64 = 0xFF;
+
+const MSG_EP_MASK: u64 = 0xFF;
+const MSG_DATA_SHIFT: u32 = 32;
+
+const IOVA_SHIFT: u32 = 0xC;
+
+type ShMem = dma::CoherentAllocation<u8>;
+
+fn align_up(v: usize, a: usize) -> usize {
+    (v + a - 1) & !(a - 1)
+}
+
+fn memcpy_to_iomem(iomem: &ShMem, off: usize, src: &[u8]) -> Result<()> {
+    // SAFETY:
+    // as_slice_mut() checks that off and src.len() are within iomem's limits.
+    // memcpy_to_iomem is only called from within probe(), ensuring there are no
+    // concurrent read and write accesses to the same region while the slice is
+    // alive, per as_slice_mut()'s requirements.
+    unsafe {
+        let target = iomem.as_slice_mut(off, src.len())?;
+        target.copy_from_slice(src);
+    }
+    Ok(())
+}
+
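+// Build the shared memory region advertised to SEP: a table of tagged entries
+// ("CNIP" panic buffer, "OPLA" local-policy manifest, "IPIS" iBoot manifest),
+// each tag followed by a little-endian size and offset, with "llun" apparently
+// acting as the terminator. The manifests are copied from device tree properties.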
+fn build_shmem(dev: &platform::Device) -> Result<ShMem> {
+    let of = dev.as_ref().of_node().ok_or(EIO)?;
+    let iomem = dma::CoherentAllocation::<u8>::alloc_coherent(dev, SHMEM_SIZE, GFP_KERNEL)?;
+
+    let panic_offset = 0x4000;
+    let panic_size = 0x8000;
+    memcpy_to_iomem(&iomem, panic_offset, &1u32.to_le_bytes())?;
+
+    let lpol_offset = panic_offset + panic_size;
+    let lpol = of
+        .find_property(c_str!("local-policy-manifest"))
+        .ok_or(EIO)?;
+    memcpy_to_iomem(
+        &iomem,
+        lpol_offset,
+        &(lpol.value().len() as u32).to_le_bytes(),
+    )?;
+    memcpy_to_iomem(&iomem, lpol_offset + 4, lpol.value())?;
+    let lpol_size = align_up(lpol.value().len() + 4, 0x4000);
+
+    let ibot_offset = lpol_offset + lpol_size;
+    let ibot = of.find_property(c_str!("iboot-manifest")).ok_or(EIO)?;
+    memcpy_to_iomem(
+        &iomem,
+        ibot_offset,
+        &(ibot.value().len() as u32).to_le_bytes(),
+    )?;
+    memcpy_to_iomem(&iomem, ibot_offset + 4, ibot.value())?;
+    let ibot_size = align_up(ibot.value().len() + 4, 0x4000);
+
+    memcpy_to_iomem(&iomem, 0, b"CNIP")?;
+    memcpy_to_iomem(&iomem, 4, &(panic_size as u32).to_le_bytes())?;
+    memcpy_to_iomem(&iomem, 8, &(panic_offset as u32).to_le_bytes())?;
+
+    memcpy_to_iomem(&iomem, 16, b"OPLA")?;
+    memcpy_to_iomem(&iomem, 16 + 4, &(lpol_size as u32).to_le_bytes())?;
+    memcpy_to_iomem(&iomem, 16 + 8, &(lpol_offset as u32).to_le_bytes())?;
+
+    memcpy_to_iomem(&iomem, 32, b"IPIS")?;
+    memcpy_to_iomem(&iomem, 32 + 4, &(ibot_size as u32).to_le_bytes())?;
+    memcpy_to_iomem(&iomem, 32 + 8, &(ibot_offset as u32).to_le_bytes())?;
+
+    memcpy_to_iomem(&iomem, 48, b"llun")?;
+    Ok(iomem)
+}
+
+#[pin_data]
+struct SepReceiveWork {
+    data: Arc<SepData>,
+    msg: Message,
+    #[pin]
+    work: Work<SepReceiveWork>,
+}
+
+impl_has_work! {
+    impl HasWork<Self, 0> for SepReceiveWork { self.work }
+}
+
+impl SepReceiveWork {
+    fn new(data: Arc<SepData>, msg: Message) -> Result<Arc<Self>> {
+        Arc::pin_init(
+            pin_init!(SepReceiveWork {
+                data,
+                msg,
+                work <- new_work!("SepReceiveWork::work"),
+            }),
+            GFP_ATOMIC,
+        )
+    }
+}
+
+impl WorkItem for SepReceiveWork {
+    type Pointer = Arc<SepReceiveWork>;
+
+    fn run(this: Arc<SepReceiveWork>) {
+        this.data.process_message(this.msg);
+    }
+}
+
+struct FwRegionParams {
+    addr: u64,
+    size: usize,
+}
+
+#[pin_data]
+struct SepData {
+    dev: ARef<device::Device>,
+    #[pin]
+    mbox: Mutex<Option<Mailbox<SepData>>>,
+    shmem: ShMem,
+    region_params: FwRegionParams,
+    fw_mapped: AtomicBool,
+}
+
+impl SepData {
+    fn new(dev: &platform::Device, region_params: FwRegionParams) -> Result<Arc<SepData>> {
+        Arc::pin_init(
+            try_pin_init!(SepData {
+                shmem: build_shmem(dev)?,
+                dev: ARef::<device::Device>::from(dev.as_ref()),
+                mbox <- new_mutex!(None),
+                region_params,
+                fw_mapped: AtomicBool::new(false),
+            }),
+            GFP_KERNEL,
+        )
+    }
+    fn start(&self) -> Result<()> {
+        self.mbox.lock().as_ref().unwrap().send(
+            Message {
+                msg0: EP_BOOT | (MSG_BOOT_TZ0 << MSG_TYPE_SHIFT),
+                msg1: 0,
+            },
+            false,
+        )
+    }
+    fn load_fw_and_shmem(&self) -> Result<()> {
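+        // SAFETY: `dev` is a valid, bound device and `region_params` describes
+        // the firmware carve-out taken from the device tree; the mapping
+        // created here is released again in remove().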
+        let fw_addr = unsafe {
+            let res = bindings::dma_map_resource(
+                self.dev.as_raw(),
+                self.region_params.addr,
+                self.region_params.size,
+                bindings::dma_data_direction_DMA_TO_DEVICE,
+                0,
+            );
+            if bindings::dma_mapping_error(self.dev.as_raw(), res) != 0 {
+                dev_err!(self.dev, "Failed to map firmware");
+                return Err(ENOMEM);
+            }
+            self.fw_mapped.store(true, Ordering::Relaxed);
+            res >> IOVA_SHIFT
+        };
+        let guard = self.mbox.lock();
+        let mbox = guard.as_ref().unwrap();
+        mbox.send(
+            Message {
+                msg0: EP_BOOT | (MSG_BOOT_IMG4 << MSG_TYPE_SHIFT) | (fw_addr << MSG_DATA_SHIFT),
+                msg1: 0,
+            },
+            false,
+        )?;
+        let shm_addr = self.shmem.dma_handle() >> IOVA_SHIFT;
+        mbox.send(
+            Message {
+                msg0: EP_SHMEM | (MSG_SET_SHMEM << MSG_TYPE_SHIFT) | (shm_addr << MSG_DATA_SHIFT),
+                msg1: 0,
+            },
+            false,
+        )?;
+        Ok(())
+    }
+    fn process_boot_msg(&self, msg: Message) {
+        let ty = (msg.msg0 >> MSG_TYPE_SHIFT) & MSG_TYPE_MASK;
+        match ty {
+            MSG_BOOT_TZ0_ACK1 => {}
+            MSG_BOOT_TZ0_ACK2 => {
+                let res = self.load_fw_and_shmem();
+                if let Err(e) = res {
+                    dev_err!(self.dev, "Unable to load firmware: {:?}", e);
+                }
+            }
+            MSG_BOOT_IMG4_ACK => {}
+            _ => {
+                dev_err!(self.dev, "Unknown boot message type: {}", ty);
+            }
+        }
+    }
+    fn process_discover_msg(&self, msg: Message) {
+        let ty = (msg.msg0 >> MSG_TYPE_SHIFT) & MSG_TYPE_MASK;
+        //let data = (msg.msg0 >> MSG_DATA_SHIFT) as u32;
+        //let param = (msg.msg0 >> MSG_PARAM_SHIFT) & MSG_PARAM_MASK;
+        match ty {
+            MSG_ADVERTISE_EP => {
+                /*dev_info!(
+                    self.dev,
+                    "Got endpoint {:?} at {}",
+                    core::str::from_utf8(&data.to_be_bytes()),
+                    param
+                );*/
+            }
+            _ => {
+                //dev_warn!(self.dev, "Unknown discovery message type: {}", ty);
+            }
+        }
+    }
+    fn process_message(&self, msg: Message) {
+        let ep = msg.msg0 & MSG_EP_MASK;
+        match ep {
+            EP_BOOT => self.process_boot_msg(msg),
+            EP_DISCOVER => self.process_discover_msg(msg),
+            _ => {} // dev_warn!(self.dev, "Message from unknown endpoint: {}", ep),
+        }
+    }
+    fn remove(&self) {
+        *self.mbox.lock() = None;
+        if self.fw_mapped.load(Ordering::Relaxed) {
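+            // SAFETY: the resource was mapped in load_fw_and_shmem() with the
+            // same device, address, size and direction; fw_mapped guarantees
+            // the mapping is still live.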
+            unsafe {
+                bindings::dma_unmap_resource(
+                    self.dev.as_raw(),
+                    self.region_params.addr,
+                    self.region_params.size,
+                    bindings::dma_data_direction_DMA_TO_DEVICE,
+                    0,
+                );
+            }
+        }
+    }
+}
+
+impl MailCallback for SepData {
+    type Data = Arc<SepData>;
+    fn recv_message(data: <Self::Data as ForeignOwnable>::Borrowed<'_>, msg: Message) {
+        let work = SepReceiveWork::new(data.into(), msg);
+        if let Ok(work) = work {
+            let res = workqueue::system().enqueue(work);
+            if res.is_err() {
+                dev_err!(
+                    data.dev,
+                    "Unable to schedule work item for message {}",
+                    msg.msg0
+                );
+            }
+        } else {
+            dev_err!(
+                data.dev,
+                "Unable to allocate work item for message {}",
+                msg.msg0
+            );
+        }
+    }
+}
+
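+// SAFETY: all of SepData's mutable state is protected by a Mutex or an atomic,
+// so references can be shared across threads and the value can be sent between
+// threads.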
+unsafe impl Send for SepData {}
+unsafe impl Sync for SepData {}
+
+struct SepDriver(Arc<SepData>);
+
+kernel::of_device_table!(
+    OF_TABLE,
+    MODULE_OF_TABLE,
+    (),
+    [(of::DeviceId::new(c_str!("apple,sep")), ())]
+);
+
+impl platform::Driver for SepDriver {
+    type IdInfo = ();
+
+    const OF_ID_TABLE: Option<of::IdTable<()>> = Some(&OF_TABLE);
+
+    fn probe(pdev: &mut platform::Device, _info: Option<&()>) -> Result<Pin<KBox<SepDriver>>> {
+        let of = pdev.as_ref().of_node().ok_or(EIO)?;
+        let fw_node = of.parse_phandle(c_str!("memory-region"), 0).ok_or(EIO)?;
+        let mut reg = [0u64, 0u64];
+        fw_node
+            .find_property(c_str!("reg"))
+            .ok_or(EIO)?
+            .copy_to_slice(&mut reg)?;
+        let data = SepData::new(
+            pdev,
+            FwRegionParams {
+                addr: reg[0],
+                size: reg[1] as usize,
+            },
+        )?;
+        *data.mbox.lock() = Some(Mailbox::new_byname(
+            pdev.as_ref(),
+            c_str!("mbox"),
+            data.clone(),
+        )?);
+        data.start()?;
+        Ok(KBox::pin(SepDriver(data), GFP_KERNEL)?)
+    }
+}
+
+impl Drop for SepDriver {
+    fn drop(&mut self) {
+        self.0.remove();
+    }
+}
+
+module_platform_driver! {
+    type: SepDriver,
+    name: "apple_sep",
+    license: "Dual MIT/GPL",
+}
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 73780204631463..96c73c5b572022 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -45,4 +45,12 @@ config SPMI_MTK_PMIF
 	  This is required for communicating with Mediatek PMICs and
 	  other devices that have the SPMI interface.
 
+config SPMI_APPLE
+	tristate "Apple SoC SPMI Controller platform driver"
+	depends on ARCH_APPLE || COMPILE_TEST
+	help
+	  This enables basic support for the SPMI controller present on
+	  many Apple SoCs, including the t8103 (M1) and t600x
+	  (M1 Pro/Max).
+
 endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 7f152167bb05b2..8c80236dfac41b 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SPMI)	+= spmi.o spmi-devres.o
 obj-$(CONFIG_SPMI_HISI3670)	+= hisi-spmi-controller.o
 obj-$(CONFIG_SPMI_MSM_PMIC_ARB)	+= spmi-pmic-arb.o
 obj-$(CONFIG_SPMI_MTK_PMIF)	+= spmi-mtk-pmif.o
+obj-$(CONFIG_SPMI_APPLE)	+= spmi-apple-controller.o
diff --git a/drivers/spmi/spmi-apple-controller.c b/drivers/spmi/spmi-apple-controller.c
new file mode 100644
index 00000000000000..5a9acc642c1fd0
--- /dev/null
+++ b/drivers/spmi/spmi-apple-controller.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple SoC SPMI device driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Inspired by:
+ *		OpenBSD support Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
+ *		Corellium support Copyright (C) 2021 Corellium LLC
+ *		hisi-spmi-controller.c
+ *		spmi-pmic-arb.c Copyright (c) 2021, The Linux Foundation.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spmi.h>
+
+/* SPMI Controller Registers */
+#define SPMI_STATUS_REG 0
+#define SPMI_CMD_REG 0x4
+#define SPMI_RSP_REG 0x8
+
+#define SPMI_RX_FIFO_EMPTY BIT(24)
+#define SPMI_TX_FIFO_EMPTY BIT(8)
+
+/* Apple SPMI controller */
+struct apple_spmi {
+	void __iomem *regs;
+	struct spmi_controller *ctrl;
+};
+
+static inline u32 read_reg(struct apple_spmi *spmi, int offset)
+{
+	return (readl_relaxed(spmi->regs + offset));
+}
+
+static inline void write_reg(u32 value, struct apple_spmi *spmi, int offset)
+{
+	writel_relaxed(value, spmi->regs + offset);
+}
+
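+/*
+ * Both transfer paths below push a command word into the command FIFO. It
+ * packs the opcode and byte count in the low byte, the slave id at bit 8, the
+ * register address at bit 16 and what appears to be a "start" flag at bit 15.
+ */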
+static int spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 slave_id,
+			 u16 slave_addr, u8 *__buf, size_t bc)
+{
+	struct apple_spmi *spmi;
+	u32 spmi_cmd = opc | slave_id << 8 | slave_addr << 16 | (bc - 1) |
+		       (1 << 15);
+	u32 rsp;
+	volatile u32 status;
+	size_t len_to_read;
+	u8 i;
+
+	spmi = spmi_controller_get_drvdata(ctrl);
+
+	write_reg(spmi_cmd, spmi, SPMI_CMD_REG);
+
+	/* Wait for Rx FIFO to have something */
+	/* Quite ugly msleep, need to find a better way to do it */
+	i = 0;
+	do {
+		status = read_reg(spmi, SPMI_STATUS_REG);
+		msleep(10);
+		i += 1;
+	} while ((status & SPMI_RX_FIFO_EMPTY) && i < 5);
+
+	if (i >= 5) {
+		dev_err(&ctrl->dev,
+			"spmi_read_cmd: took too long to get the status\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Read SPMI reply status */
+	rsp = read_reg(spmi, SPMI_RSP_REG);
+
+	len_to_read = 0;
+	/* Read SPMI data reply */
+	while (!(status & SPMI_RX_FIFO_EMPTY) && (len_to_read < bc)) {
+		rsp = read_reg(spmi, SPMI_RSP_REG);
+		i = 0;
+		while ((len_to_read < bc) && (i < 4)) {
+			__buf[len_to_read++] = ((0xff << (8 * i)) & rsp) >>
+					       (8 * i);
+			i += 1;
+		}
+	}
+
+	return 0;
+}
+
+static int spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 slave_id,
+			  u16 slave_addr, const u8 *__buf, size_t bc)
+{
+	struct apple_spmi *spmi;
+	u32 spmi_cmd = opc | slave_id << 8 | slave_addr << 16 | (bc - 1) |
+		       (1 << 15);
+	volatile u32 rsp;
+	volatile u32 status;
+	size_t i = 0, j;
+
+	spmi = spmi_controller_get_drvdata(ctrl);
+
+	write_reg(spmi_cmd, spmi, SPMI_CMD_REG);
+
+	while (i < bc) {
+		j = 0;
+		spmi_cmd = 0;
+		while ((j < 4) && (i < bc)) {
+			spmi_cmd |= __buf[i++] << (j++ * 8);
+		}
+		write_reg(spmi_cmd, spmi, SPMI_CMD_REG);
+	}
+
+	/* Wait for Rx FIFO to have something */
+	/* Quite ugly msleep, need to find a better way to do it */
+	i = 0;
+	do {
+		status = read_reg(spmi, SPMI_STATUS_REG);
+		msleep(10);
+		i += 1;
+	} while ((status & SPMI_RX_FIFO_EMPTY) && i < 5);
+
+	if (i >= 5) {
+		dev_err(&ctrl->dev,
+			"spmi_write_cmd: took too long to get the status\n");
+		return -ETIMEDOUT;
+	}
+
+	rsp = read_reg(spmi, SPMI_RSP_REG);
+	(void)rsp; // TODO: check stuff here
+
+	return 0;
+}
+
+static int spmi_controller_probe(struct platform_device *pdev)
+{
+	struct apple_spmi *spmi;
+	struct spmi_controller *ctrl;
+	int ret;
+
+	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(struct apple_spmi));
+	if (IS_ERR(ctrl)) {
+		dev_err_probe(&pdev->dev, PTR_ERR(ctrl),
+			      "Can't allocate spmi_controller data\n");
+		return PTR_ERR(ctrl);
+	}
+
+	spmi = spmi_controller_get_drvdata(ctrl);
+	spmi->ctrl = ctrl;
+	platform_set_drvdata(pdev, ctrl);
+
+	spmi->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(spmi->regs)) {
+		dev_err_probe(&pdev->dev, PTR_ERR(spmi->regs),
+			      "Can't get ioremap regs.\n");
+		return PTR_ERR(spmi->regs);
+	}
+
+	ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
+
+	/* Callbacks */
+	ctrl->read_cmd = spmi_read_cmd;
+	ctrl->write_cmd = spmi_write_cmd;
+
+	ret = spmi_controller_add(ctrl);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"spmi_controller_add failed with error %d!\n", ret);
+		goto err_put_controller;
+	}
+
+	/* Let's look for other nodes in device tree like the rtc */
+	ret = devm_of_platform_populate(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"spmi_controller_probe: devm_of_platform_populate failed with error %d!\n",
+			ret);
+		goto err_devm_of_platform_populate;
+	}
+
+	return 0;
+
+err_put_controller:
+	spmi_controller_put(ctrl);
+err_devm_of_platform_populate:
+	return ret;
+}
+
+static void spmi_del_controller(struct platform_device *pdev)
+{
+	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+
+	spmi_controller_remove(ctrl);
+	spmi_controller_put(ctrl);
+}
+
+static const struct of_device_id spmi_controller_match_table[] = {
+	{
+		.compatible = "apple,spmi",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, spmi_controller_match_table);
+
+static struct platform_driver spmi_controller_driver = {
+	.probe		= spmi_controller_probe,
+	.remove		= spmi_del_controller,
+	.driver		= {
+		.name	= "apple-spmi",
+		.of_match_table = spmi_controller_match_table,
+	},
+};
+module_platform_driver(spmi_controller_driver);
+
+MODULE_AUTHOR("Jean-Francois Bortolotti <jeff@borto.fr>");
+MODULE_DESCRIPTION("Apple SoC SPMI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 210fff7164c138..8af26641081107 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 #include <linux/serial_s3c.h>
@@ -1296,30 +1297,49 @@ static int apple_s5l_serial_startup(struct uart_port *port)
 	return ret;
 }
 
+static int __maybe_unused s3c24xx_serial_runtime_suspend(struct device *dev)
+{
+	struct uart_port *port = dev_get_drvdata(dev);
+	struct s3c24xx_uart_port *ourport = to_ourport(port);
+	int timeout = 10000;
+
+	while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
+		udelay(100);
+
+	if (!IS_ERR(ourport->baudclk))
+		clk_disable_unprepare(ourport->baudclk);
+
+	clk_disable_unprepare(ourport->clk);
+	return 0;
+}
+
+static int __maybe_unused s3c24xx_serial_runtime_resume(struct device *dev)
+{
+	struct uart_port *port = dev_get_drvdata(dev);
+	struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+	clk_prepare_enable(ourport->clk);
+
+	if (!IS_ERR(ourport->baudclk))
+		clk_prepare_enable(ourport->baudclk);
+	return 0;
+}
+
 static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
 			      unsigned int old)
 {
 	struct s3c24xx_uart_port *ourport = to_ourport(port);
-	int timeout = 10000;
 
 	ourport->pm_level = level;
 
 	switch (level) {
-	case 3:
-		while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
-			udelay(100);
-
-		if (!IS_ERR(ourport->baudclk))
-			clk_disable_unprepare(ourport->baudclk);
-
-		clk_disable_unprepare(ourport->clk);
+	case UART_PM_STATE_OFF:
+		pm_runtime_mark_last_busy(port->dev);
+		pm_runtime_put_sync(port->dev);
 		break;
 
-	case 0:
-		clk_prepare_enable(ourport->clk);
-
-		if (!IS_ERR(ourport->baudclk))
-			clk_prepare_enable(ourport->baudclk);
+	case UART_PM_STATE_ON:
+		pm_runtime_get_sync(port->dev);
 		break;
 	default:
 		dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level);
@@ -2042,18 +2062,15 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
 		}
 	}
 
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	dev_dbg(&pdev->dev, "%s: adding port\n", __func__);
 	uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
 	platform_set_drvdata(pdev, &ourport->port);
 
-	/*
-	 * Deactivate the clock enabled in s3c24xx_serial_init_port here,
-	 * so that a potential re-enablement through the pm-callback overlaps
-	 * and keeps the clock enabled in this case.
-	 */
-	clk_disable_unprepare(ourport->clk);
-	if (!IS_ERR(ourport->baudclk))
-		clk_disable_unprepare(ourport->baudclk);
+	pm_runtime_put_sync(&pdev->dev);
 
 	probe_index++;
 
@@ -2063,26 +2080,40 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
 static void s3c24xx_serial_remove(struct platform_device *dev)
 {
 	struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+	struct s3c24xx_uart_port *ourport = to_ourport(port);
 
-	if (port)
+	if (port) {
+		pm_runtime_get_sync(&dev->dev);
 		uart_remove_one_port(&s3c24xx_uart_drv, port);
 
+		clk_disable_unprepare(ourport->clk);
+		if (!IS_ERR(ourport->baudclk))
+			clk_disable_unprepare(ourport->baudclk);
+
+		pm_runtime_disable(&dev->dev);
+		pm_runtime_set_suspended(&dev->dev);
+		pm_runtime_put_noidle(&dev->dev);
+	}
+
 	uart_unregister_driver(&s3c24xx_uart_drv);
 }
 
 /* UART power management code */
-#ifdef CONFIG_PM_SLEEP
-static int s3c24xx_serial_suspend(struct device *dev)
+
+static int __maybe_unused s3c24xx_serial_suspend(struct device *dev)
 {
 	struct uart_port *port = s3c24xx_dev_to_port(dev);
 
+	if (port && !console_suspend_enabled && uart_console(port))
+		device_set_wakeup_path(dev);
+
 	if (port)
 		uart_suspend_port(&s3c24xx_uart_drv, port);
 
 	return 0;
 }
 
-static int s3c24xx_serial_resume(struct device *dev)
+static int __maybe_unused s3c24xx_serial_resume(struct device *dev)
 {
 	struct uart_port *port = s3c24xx_dev_to_port(dev);
 	struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -2102,7 +2133,7 @@ static int s3c24xx_serial_resume(struct device *dev)
 	return 0;
 }
 
-static int s3c24xx_serial_resume_noirq(struct device *dev)
+static int __maybe_unused s3c24xx_serial_resume_noirq(struct device *dev)
 {
 	struct uart_port *port = s3c24xx_dev_to_port(dev);
 	struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -2176,13 +2207,9 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
 static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(s3c24xx_serial_suspend, s3c24xx_serial_resume)
 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, s3c24xx_serial_resume_noirq)
+	SET_RUNTIME_PM_OPS(s3c24xx_serial_runtime_suspend,
+			   s3c24xx_serial_runtime_resume, NULL)
 };
-#define SERIAL_SAMSUNG_PM_OPS	(&s3c24xx_serial_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define SERIAL_SAMSUNG_PM_OPS	NULL
-#endif /* CONFIG_PM_SLEEP */
 
 /* Console code */
 
@@ -2670,7 +2697,7 @@ static struct platform_driver samsung_serial_driver = {
 	.id_table	= s3c24xx_serial_driver_ids,
 	.driver		= {
 		.name	= "samsung-uart",
-		.pm	= SERIAL_SAMSUNG_PM_OPS,
+		.pm	= &s3c24xx_serial_pm_ops,
 		.of_match_table	= of_match_ptr(s3c24xx_uart_dt_match),
 	},
 };
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 66a08b5271653a..86e4ba4148f45d 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -156,6 +156,9 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
 	dwc->current_dr_role = mode;
 }
 
+static void dwc3_core_exit(struct dwc3 *dwc);
+static int dwc3_core_init_for_resume(struct dwc3 *dwc);
+
 static void __dwc3_set_mode(struct work_struct *work)
 {
 	struct dwc3 *dwc = work_to_dwc(work);
@@ -175,7 +178,7 @@ static void __dwc3_set_mode(struct work_struct *work)
 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
 		dwc3_otg_update(dwc, 0);
 
-	if (!desired_dr_role)
+	if (!desired_dr_role && !dwc->role_switch_reset_quirk)
 		goto out;
 
 	if (desired_dr_role == dwc->current_dr_role)
@@ -203,13 +206,32 @@ static void __dwc3_set_mode(struct work_struct *work)
 		break;
 	}
 
+	if (dwc->role_switch_reset_quirk) {
+		if (dwc->current_dr_role) {
+			dwc->current_dr_role = 0;
+			dwc3_core_exit(dwc);
+		}
+
+		if (desired_dr_role) {
+			ret = dwc3_core_init_for_resume(dwc);
+			if (ret) {
+				dev_err(dwc->dev,
+				    "failed to reinitialize core\n");
+				goto out;
+			}
+		} else {
+			goto out;
+		}
+	}
+
 	/*
 	 * When current_dr_role is not set, there's no role switching.
 	 * Only perform GCTL.CoreSoftReset when there's DRD role switching.
 	 */
-	if (dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
+	if (dwc->role_switch_reset_quirk ||
+		(dwc->current_dr_role && ((DWC3_IP_IS(DWC3) ||
 			DWC3_VER_IS_PRIOR(DWC31, 190A)) &&
-			desired_dr_role != DWC3_GCTL_PRTCAP_OTG)) {
+			desired_dr_role != DWC3_GCTL_PRTCAP_OTG))) {
 		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 		reg |= DWC3_GCTL_CORESOFTRESET;
 		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -1370,6 +1392,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
 	if (ret)
 		goto err_exit_phy;
 
+	if (dwc->role_switch_reset_quirk)
+		dwc3_enable_susphy(dwc, true);
+
 	dwc3_core_setup_global_control(dwc);
 	dwc3_core_num_eps(dwc);
 
@@ -1633,6 +1658,18 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
 		ret = dwc3_drd_init(dwc);
 		if (ret)
 			return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
+
+		/*
+		 * If the role switch reset quirk is required the first role
+		 * switch notification will initialize the core such that we
+		 * have to shut it down here. Make sure that the __dwc3_set_mode
+		 * queued by dwc3_drd_init has completed before since it
+		 * may still try to access MMIO.
+		 */
+		if (dwc->role_switch_reset_quirk) {
+			flush_work(&dwc->drd_work);
+			dwc3_core_exit(dwc);
+		}
 		break;
 	default:
 		dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -2218,6 +2255,23 @@ static int dwc3_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_put_psy;
 
+	if (dev->of_node) {
+		if (of_device_is_compatible(dev->of_node, "apple,dwc3")) {
+			if (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
+			    !IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
+				dev_err(dev,
+					"Apple DWC3 requires role switch support.\n");
+				ret = -EINVAL;
+				goto err_put_psy;
+			}
+
+			dwc->dr_mode = USB_DR_MODE_OTG;
+			dwc->role_switch_reset_quirk = true;
+			dwc->no_early_roothub_poweroff = true;
+		}
+	}
+
 	ret = reset_control_deassert(dwc->reset);
 	if (ret)
 		goto err_put_psy;
@@ -2357,7 +2411,6 @@ static void dwc3_remove(struct platform_device *pdev)
 		power_supply_put(dwc->usb_psy);
 }
 
-#ifdef CONFIG_PM
 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
 {
 	int ret;
@@ -2384,6 +2437,7 @@ static int dwc3_core_init_for_resume(struct dwc3 *dwc)
 	return ret;
 }
 
+#ifdef CONFIG_PM
 static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 {
 	u32 reg;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 27eae4cf223dfd..83a276ddecf302 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1153,6 +1153,8 @@ struct dwc3_scratchpad_array {
  * @suspended: set to track suspend event due to U3/L2.
  * @susphy_state: state of DWC3_GUSB2PHYCFG_SUSPHY + DWC3_GUSB3PIPECTL_SUSPHY
  *		  before PM suspend.
+ * @role_switch_reset_quirk: set to force reinitialization after any role switch
+ * @no_early_roothub_poweroff: set to skip early root hub port power off
  * @imod_interval: set the interrupt moderation interval in 250ns
  *			increments or 0 to disable.
  * @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1390,6 +1392,9 @@ struct dwc3 {
 	unsigned		suspended:1;
 	unsigned		susphy_state:1;
 
+	unsigned		role_switch_reset_quirk:1;
+	unsigned		no_early_roothub_poweroff:1;
+
 	u16			imod_interval;
 
 	int			max_cfg_eps;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 7977860932b142..65450db91bdea0 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -464,6 +464,9 @@ static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
 		break;
 	}
 
+	if (dwc->role_switch_reset_quirk && role == USB_ROLE_NONE)
+		mode = 0;
+
 	dwc3_set_mode(dwc, mode);
 	return 0;
 }
@@ -492,6 +495,10 @@ static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
 			role = USB_ROLE_DEVICE;
 		break;
 	}
+
+	if (dwc->role_switch_reset_quirk && !dwc->current_dr_role)
+		role = USB_ROLE_NONE;
+
 	spin_unlock_irqrestore(&dwc->lock, flags);
 	return role;
 }
@@ -502,7 +509,9 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
 	u32 mode;
 
 	dwc->role_switch_default_mode = usb_get_role_switch_default_mode(dwc->dev);
-	if (dwc->role_switch_default_mode == USB_DR_MODE_HOST) {
+	if (dwc->role_switch_reset_quirk) {
+		mode = 0;
+	} else if (dwc->role_switch_default_mode == USB_DR_MODE_HOST) {
 		mode = DWC3_GCTL_PRTCAP_HOST;
 	} else {
 		dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index b48e108fc8fe73..16a98bedd85090 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -134,8 +134,11 @@ int dwc3_host_init(struct dwc3 *dwc)
 	/*
 	 * Some platforms need to power off all Root hub ports immediately after DWC3 set to host
 	 * mode to avoid VBUS glitch happen when xhci get reset later.
+	 * On Apple platforms we must not touch any MMIO yet because dwc3
+	 * will not work correctly before its PHY has been initialized.
 	 */
-	dwc3_power_off_all_roothub_ports(dwc);
+	if (!dwc->no_early_roothub_poweroff)
+		dwc3_power_off_all_roothub_ports(dwc);
 
 	irq = dwc3_host_get_irq(dwc);
 	if (irq < 0)
@@ -220,7 +223,8 @@ void dwc3_host_exit(struct dwc3 *dwc)
 	if (dwc->sys_wakeup)
 		device_init_wakeup(&dwc->xhci->dev, false);
 
-	dwc3_enable_susphy(dwc, false);
+	if (!dwc->role_switch_reset_quirk)
+		dwc3_enable_susphy(dwc, false);
 	platform_device_unregister(dwc->xhci);
 	dwc->xhci = NULL;
 }
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d011d6c753edfc..2540f26bc68006 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -51,6 +51,15 @@ config USB_XHCI_PCI_RENESAS
 	  installed on your system for this device to work.
 	  If unsure, say 'N'.
 
+config USB_XHCI_PCI_ASMEDIA
+	tristate "Support for ASMedia xHCI controller with firmware"
+	default USB_XHCI_PCI if ARCH_APPLE
+	depends on USB_XHCI_PCI
+	help
+	  Say 'Y' to enable support for ASMedia xHCI controllers with
+	  host-supplied firmware. These are usually present on Apple devices.
+	  If unsure, say 'N'.
+
 config USB_XHCI_PLATFORM
 	tristate "Generic xHCI driver for a platform device"
 	help
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index be4e5245c52fe9..96f408c562cfa3 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -68,6 +68,8 @@ obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
 obj-$(CONFIG_USB_XHCI_HCD)	+= xhci-hcd.o
 obj-$(CONFIG_USB_XHCI_PCI)	+= xhci-pci.o
+xhci-pci-y                     += xhci-pci-core.o
+xhci-pci-$(CONFIG_USB_XHCI_PCI_ASMEDIA)	+= xhci-pci-asmedia.o
 obj-$(CONFIG_USB_XHCI_PCI_RENESAS)	+= xhci-pci-renesas.o
 obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
 obj-$(CONFIG_USB_XHCI_HISTB)	+= xhci-histb.o
diff --git a/drivers/usb/host/xhci-pci-asmedia.c b/drivers/usb/host/xhci-pci-asmedia.c
new file mode 100644
index 00000000000000..09e884573bc532
--- /dev/null
+++ b/drivers/usb/host/xhci-pci-asmedia.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ASMedia xHCI firmware loader
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/unaligned.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-pci.h"
+
+/* Configuration space registers */
+#define ASMT_CFG_CONTROL		0xe0
+#define ASMT_CFG_CONTROL_WRITE		BIT(1)
+#define ASMT_CFG_CONTROL_READ		BIT(0)
+
+#define ASMT_CFG_SRAM_ADDR		0xe2
+
+#define ASMT_CFG_SRAM_ACCESS		0xef
+#define ASMT_CFG_SRAM_ACCESS_READ	BIT(6)
+#define ASMT_CFG_SRAM_ACCESS_ENABLE	BIT(7)
+
+#define ASMT_CFG_DATA_READ0		0xf0
+#define ASMT_CFG_DATA_READ1		0xf4
+
+#define ASMT_CFG_DATA_WRITE0		0xf8
+#define ASMT_CFG_DATA_WRITE1		0xfc
+
+#define ASMT_CMD_GET_FWVER		0x8000060840
+#define ASMT_FWVER_ROM			0x010250090816
+
+/* BAR0 registers */
+#define ASMT_REG_ADDR			0x3000
+
+#define ASMT_REG_WDATA			0x3004
+#define ASMT_REG_RDATA			0x3008
+
+#define ASMT_REG_STATUS			0x3009
+#define ASMT_REG_STATUS_BUSY		BIT(7)
+
+#define ASMT_REG_CODE_WDATA		0x3010
+#define ASMT_REG_CODE_RDATA		0x3018
+
+#define ASMT_MMIO_CPU_MISC		0x500e
+#define ASMT_MMIO_CPU_MISC_CODE_RAM_WR	BIT(0)
+
+#define ASMT_MMIO_CPU_MODE_NEXT		0x5040
+#define ASMT_MMIO_CPU_MODE_CUR		0x5041
+
+#define ASMT_MMIO_CPU_MODE_RAM		BIT(0)
+#define ASMT_MMIO_CPU_MODE_HALFSPEED	BIT(1)
+
+#define ASMT_MMIO_CPU_EXEC_CTRL		0x5042
+#define ASMT_MMIO_CPU_EXEC_CTRL_RESET	BIT(0)
+#define ASMT_MMIO_CPU_EXEC_CTRL_HALT	BIT(1)
+
+#define TIMEOUT_USEC			10000
+#define RESET_TIMEOUT_USEC		500000
+
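+/*
+ * The device exposes a simple mailbox in PCI config space: 64-bit words are
+ * exchanged through the DATA_WRITE0/1 and DATA_READ0/1 registers, with the
+ * CONTROL register providing a per-direction busy/ready handshake.
+ */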
+static int asmedia_mbox_tx(struct pci_dev *pdev, u64 data)
+{
+	u8 op;
+	int i;
+
+	for (i = 0; i < TIMEOUT_USEC; i++) {
+		pci_read_config_byte(pdev, ASMT_CFG_CONTROL, &op);
+		if (!(op & ASMT_CFG_CONTROL_WRITE))
+			break;
+		udelay(1);
+	}
+
+	if (op & ASMT_CFG_CONTROL_WRITE) {
+		dev_err(&pdev->dev,
+			"Timed out on mailbox tx: 0x%llx\n",
+			data);
+		return -ETIMEDOUT;
+	}
+
+	pci_write_config_dword(pdev, ASMT_CFG_DATA_WRITE0, data);
+	pci_write_config_dword(pdev, ASMT_CFG_DATA_WRITE1, data >> 32);
+	pci_write_config_byte(pdev, ASMT_CFG_CONTROL,
+			      ASMT_CFG_CONTROL_WRITE);
+
+	return 0;
+}
+
+static int asmedia_mbox_rx(struct pci_dev *pdev, u64 *data)
+{
+	u8 op;
+	u32 low, high;
+	int i;
+
+	for (i = 0; i < TIMEOUT_USEC; i++) {
+		pci_read_config_byte(pdev, ASMT_CFG_CONTROL, &op);
+		if (op & ASMT_CFG_CONTROL_READ)
+			break;
+		udelay(1);
+	}
+
+	if (!(op & ASMT_CFG_CONTROL_READ)) {
+		dev_err(&pdev->dev, "Timed out on mailbox rx\n");
+		return -ETIMEDOUT;
+	}
+
+	pci_read_config_dword(pdev, ASMT_CFG_DATA_READ0, &low);
+	pci_read_config_dword(pdev, ASMT_CFG_DATA_READ1, &high);
+	pci_write_config_byte(pdev, ASMT_CFG_CONTROL,
+			      ASMT_CFG_CONTROL_READ);
+
+	*data = ((u64)high << 32) | low;
+	return 0;
+}
+
+static int asmedia_get_fw_version(struct pci_dev *pdev, u64 *version)
+{
+	int err = 0;
+	u64 cmd;
+
+	err = asmedia_mbox_tx(pdev, ASMT_CMD_GET_FWVER);
+	if (err)
+		return err;
+	err = asmedia_mbox_tx(pdev, 0);
+	if (err)
+		return err;
+
+	err = asmedia_mbox_rx(pdev, &cmd);
+	if (err)
+		return err;
+	err = asmedia_mbox_rx(pdev, version);
+	if (err)
+		return err;
+
+	if (cmd != ASMT_CMD_GET_FWVER) {
+		dev_err(&pdev->dev, "Unexpected reply command 0x%llx\n", cmd);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int asmedia_check_firmware(struct pci_dev *pdev)
+{
+	u64 fwver;
+	int ret;
+
+	ret = asmedia_get_fw_version(pdev, &fwver);
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "Firmware version: 0x%llx\n", fwver);
+
+	return fwver != ASMT_FWVER_ROM;
+}
+
+static int asmedia_wait_reset(struct pci_dev *pdev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
+	struct xhci_cap_regs __iomem *cap = hcd->regs;
+	struct xhci_op_regs __iomem *op;
+	u32 val;
+	int ret;
+
+	op = hcd->regs + HC_LENGTH(readl(&cap->hc_capbase));
+
+	ret = readl_poll_timeout(&op->command,
+				 val, !(val & CMD_RESET),
+				 1000, RESET_TIMEOUT_USEC);
+
+	if (!ret)
+		return 0;
+
+	dev_err(hcd->self.controller, "Reset timed out, trying to kick it\n");
+
+	pci_write_config_byte(pdev, ASMT_CFG_SRAM_ACCESS,
+			      ASMT_CFG_SRAM_ACCESS_ENABLE);
+
+	pci_write_config_byte(pdev, ASMT_CFG_SRAM_ACCESS, 0);
+
+	ret = readl_poll_timeout(&op->command,
+				 val, !(val & CMD_RESET),
+				 1000, RESET_TIMEOUT_USEC);
+
+	if (ret)
+		dev_err(hcd->self.controller, "Reset timed out, giving up\n");
+
+	return ret;
+}
+
+static u8 asmedia_read_reg(struct usb_hcd *hcd, u16 addr)
+{
+	void __iomem *regs = hcd->regs;
+	u8 status;
+	int ret;
+
+	ret = readb_poll_timeout(regs + ASMT_REG_STATUS,
+				 status, !(status & ASMT_REG_STATUS_BUSY),
+				 1000, TIMEOUT_USEC);
+
+	if (ret) {
+		dev_err(hcd->self.controller,
+			"Read reg wait timed out ([%04x])\n", addr);
+		return ~0;
+	}
+
+	writew_relaxed(addr, regs + ASMT_REG_ADDR);
+
+	ret = readb_poll_timeout(regs + ASMT_REG_STATUS,
+				 status, !(status & ASMT_REG_STATUS_BUSY),
+				 1000, TIMEOUT_USEC);
+
+	if (ret) {
+		dev_err(hcd->self.controller,
+			"Read reg addr timed out ([%04x])\n", addr);
+		return ~0;
+	}
+
+	return readb_relaxed(regs + ASMT_REG_RDATA);
+}
+
+static void asmedia_write_reg(struct usb_hcd *hcd, u16 addr, u8 data, bool wait)
+{
+	void __iomem *regs = hcd->regs;
+	u8 status;
+	int ret, i;
+
+	writew_relaxed(addr, regs + ASMT_REG_ADDR);
+
+	ret = readb_poll_timeout(regs + ASMT_REG_STATUS,
+				 status, !(status & ASMT_REG_STATUS_BUSY),
+				 1000, TIMEOUT_USEC);
+
+	if (ret)
+		dev_err(hcd->self.controller,
+			"Write reg addr timed out ([%04x] = %02x)\n",
+			addr, data);
+
+	writeb_relaxed(data, regs + ASMT_REG_WDATA);
+
+	ret = readb_poll_timeout(regs + ASMT_REG_STATUS,
+				 status, !(status & ASMT_REG_STATUS_BUSY),
+				 1000, TIMEOUT_USEC);
+
+	if (ret)
+		dev_err(hcd->self.controller,
+			"Write reg data timed out ([%04x] = %02x)\n",
+			addr, data);
+
+	if (!wait)
+		return;
+
+	for (i = 0; i < TIMEOUT_USEC; i++) {
+		if (asmedia_read_reg(hcd, addr) == data)
+			break;
+	}
+
+	if (i >= TIMEOUT_USEC) {
+		dev_err(hcd->self.controller,
+			"Verify register timed out ([%04x] = %02x)\n",
+			addr, data);
+	}
+}
+
+static int asmedia_load_fw(struct pci_dev *pdev, const struct firmware *fw)
+{
+	struct usb_hcd *hcd;
+	void __iomem *regs;
+	const u16 *fw_data = (const u16 *)fw->data;
+	u16 raddr;
+	u32 data;
+	size_t index = 0, addr = 0;
+	size_t words = fw->size >> 1;
+	int ret, i;
+
+	hcd = dev_get_drvdata(&pdev->dev);
+	regs = hcd->regs;
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_MODE_NEXT,
+			  ASMT_MMIO_CPU_MODE_HALFSPEED, false);
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_EXEC_CTRL,
+			  ASMT_MMIO_CPU_EXEC_CTRL_RESET, false);
+
+	ret = asmedia_wait_reset(pdev);
+	if (ret) {
+		dev_err(hcd->self.controller, "Failed pre-upload reset\n");
+		return ret;
+	}
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_EXEC_CTRL,
+			  ASMT_MMIO_CPU_EXEC_CTRL_HALT, false);
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_MISC,
+			  ASMT_MMIO_CPU_MISC_CODE_RAM_WR, true);
+
+	pci_write_config_byte(pdev, ASMT_CFG_SRAM_ACCESS,
+			      ASMT_CFG_SRAM_ACCESS_ENABLE);
+
+	/* The firmware upload is interleaved in 0x4000 word blocks */
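+	/*
+	 * Each 32-bit write combines fw_data[index] in the low half with
+	 * fw_data[index | 0x4000] (when present) in the high half.
+	 */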
+	addr = index = 0;
+	while (index < words) {
+		data = fw_data[index];
+		if ((index | 0x4000) < words)
+			data |= fw_data[index | 0x4000] << 16;
+
+		pci_write_config_word(pdev, ASMT_CFG_SRAM_ADDR,
+				      addr);
+
+		writel_relaxed(data, regs + ASMT_REG_CODE_WDATA);
+
+		for (i = 0; i < TIMEOUT_USEC; i++) {
+			pci_read_config_word(pdev, ASMT_CFG_SRAM_ADDR, &raddr);
+			if (raddr != addr)
+				break;
+			udelay(1);
+		}
+
+		if (raddr == addr) {
+			dev_err(hcd->self.controller, "Word write timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		if (++index & 0x4000)
+			index += 0x4000;
+		addr += 2;
+	}
+
+	pci_write_config_byte(pdev, ASMT_CFG_SRAM_ACCESS, 0);
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_MISC, 0, true);
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_MODE_NEXT,
+			  ASMT_MMIO_CPU_MODE_RAM |
+			  ASMT_MMIO_CPU_MODE_HALFSPEED, false);
+
+	asmedia_write_reg(hcd, ASMT_MMIO_CPU_EXEC_CTRL, 0, false);
+
+	ret = asmedia_wait_reset(pdev);
+	if (ret) {
+		dev_err(hcd->self.controller, "Failed post-upload reset\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int asmedia_xhci_check_request_fw(struct pci_dev *pdev,
+				  const struct pci_device_id *id)
+{
+	struct xhci_driver_data *driver_data =
+		(struct xhci_driver_data *)id->driver_data;
+	const char *fw_name = driver_data->firmware;
+	const struct firmware *fw;
+	int ret;
+
+	/* Check if device has firmware, if so skip everything */
+	ret = asmedia_check_firmware(pdev);
+	if (ret < 0)
+		return ret;
+	else if (ret == 1)
+		return 0;
+
+	pci_dev_get(pdev);
+	ret = request_firmware(&fw, fw_name, &pdev->dev);
+	pci_dev_put(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Could not load firmware %s: %d\n",
+			fw_name, ret);
+		return ret;
+	}
+
+	ret = asmedia_load_fw(pdev, fw);
+	if (ret) {
+		dev_err(&pdev->dev, "Firmware upload failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = asmedia_check_firmware(pdev);
+	if (ret < 0) {
+		goto err;
+	} else if (ret != 1) {
+		dev_err(&pdev->dev, "Firmware version is too old after upload\n");
+		ret = -EIO;
+	} else {
+		ret = 0;
+	}
+
+err:
+	release_firmware(fw);
+	return ret;
+}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci-core.c
similarity index 97%
rename from drivers/usb/host/xhci-pci.c
rename to drivers/usb/host/xhci-pci-core.c
index 54460d11f7ee81..200316fc92aa21 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci-core.c
@@ -544,6 +544,18 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 	int			retval;
 	u8			sbrn;
+	struct xhci_driver_data *driver_data;
+	const struct pci_device_id *id;
+
+	id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev);
+	if (id && id->driver_data && usb_hcd_is_primary_hcd(hcd)) {
+		driver_data = (struct xhci_driver_data *)id->driver_data;
+		if (driver_data->quirks & XHCI_ASMEDIA_FW_QUIRK) {
+			retval = asmedia_xhci_check_request_fw(pdev, id);
+			if (retval < 0)
+				return retval;
+		}
+	}
 
 	xhci = hcd_to_xhci(hcd);
 
@@ -904,10 +916,19 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
 		pci_set_power_state(pdev, PCI_D3hot);
 }
 
+#define ASMEDIA_APPLE_FW_NAME	"asmedia/asm2214a-apple.bin"
+
 /*-------------------------------------------------------------------------*/
+static const struct xhci_driver_data asmedia_data = {
+	.quirks  = XHCI_ASMEDIA_FW_QUIRK,
+	.firmware = ASMEDIA_APPLE_FW_NAME,
+};
 
 /* PCI driver selection metadata; PCI hotplugging uses this */
 static const struct pci_device_id pci_ids[] = {
+	{ PCI_DEVICE(0x1b21, 0x2142),
+		.driver_data = (unsigned long)&asmedia_data,
+	},
 	/* handle any USB 3.0 xHCI controller */
 	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
 	},
@@ -915,6 +936,10 @@ static const struct pci_device_id pci_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
 
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_ASMEDIA)
+MODULE_FIRMWARE(ASMEDIA_APPLE_FW_NAME);
+#endif
+
 /* pci driver glue; this is a "new style" PCI driver module */
 static struct pci_driver xhci_pci_driver = {
 	.name =		hcd_name,
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
index e87c7d9d76b8e2..452908d1c069ba 100644
--- a/drivers/usb/host/xhci-pci.h
+++ b/drivers/usb/host/xhci-pci.h
@@ -7,4 +7,22 @@
 int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id);
 void xhci_pci_remove(struct pci_dev *dev);
 
+struct xhci_driver_data {
+	u64 quirks;
+	const char *firmware;
+};
+
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_ASMEDIA)
+int asmedia_xhci_check_request_fw(struct pci_dev *dev,
+				  const struct pci_device_id *id);
+
+#else
+static inline int asmedia_xhci_check_request_fw(struct pci_dev *dev,
+						const struct pci_device_id *id)
+{
+	return 0;
+}
+
+#endif
+
 #endif
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2c394cba120f15..8009e43563fa0b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1637,6 +1637,7 @@ struct xhci_hcd {
 #define XHCI_WRITE_64_HI_LO	BIT_ULL(47)
 #define XHCI_CDNS_SCTX_QUIRK	BIT_ULL(48)
 #define XHCI_ETRON_HOST	BIT_ULL(49)
+#define XHCI_ASMEDIA_FW_QUIRK	BIT_ULL(50)
 
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 7ee721a877c12d..9ffb54cfbac807 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -171,11 +171,15 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
 		return regmap_raw_read(tps->regmap, reg, val, len);
 
 	ret = regmap_raw_read(tps->regmap, reg, data, len + 1);
-	if (ret)
+	if (ret) {
+		dev_err(tps->dev, "regmap_raw_read returned %d\n", ret);
 		return ret;
+	}
 
-	if (data[0] < len)
+	if (data[0] < len) {
+		dev_err(tps->dev, "expected %zu bytes, got %d\n", len, data[0]);
 		return -EIO;
+	}
 
 	memcpy(val, &data[1], len);
 	return 0;
@@ -470,7 +474,7 @@ static bool tps6598x_read_status(struct tps6598x *tps, u32 *status)
 
 	ret = tps6598x_read32(tps, TPS_REG_STATUS, status);
 	if (ret) {
-		dev_err(tps->dev, "%s: failed to read status\n", __func__);
+		dev_err(tps->dev, "%s: failed to read status: %d\n", __func__, ret);
 		return false;
 	}
 
@@ -545,24 +549,23 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
 	if (!event)
 		goto err_unlock;
 
+	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
+
 	if (!tps6598x_read_status(tps, &status))
-		goto err_clear_ints;
+		goto err_unlock;
 
 	if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE)
 		if (!tps6598x_read_power_status(tps))
-			goto err_clear_ints;
+			goto err_unlock;
 
 	if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE)
 		if (!tps6598x_read_data_status(tps))
-			goto err_clear_ints;
+			goto err_unlock;
 
 	/* Handle plug insert or removal */
 	if (event & APPLE_CD_REG_INT_PLUG_EVENT)
 		tps6598x_handle_plug_event(tps, status);
 
-err_clear_ints:
-	tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
-
 err_unlock:
 	mutex_unlock(&tps->lock);
 
@@ -668,25 +671,24 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data)
 	if (!(event1[0] | event1[1] | event2[0] | event2[1]))
 		goto err_unlock;
 
+	tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
+	tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
+
 	if (!tps6598x_read_status(tps, &status))
-		goto err_clear_ints;
+		goto err_unlock;
 
 	if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE)
 		if (!tps6598x_read_power_status(tps))
-			goto err_clear_ints;
+			goto err_unlock;
 
 	if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE)
 		if (!tps6598x_read_data_status(tps))
-			goto err_clear_ints;
+			goto err_unlock;
 
 	/* Handle plug insert or removal */
 	if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT)
 		tps6598x_handle_plug_event(tps, status);
 
-err_clear_ints:
-	tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len);
-	tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len);
-
 err_unlock:
 	mutex_unlock(&tps->lock);
 
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index 95d9e37df41cd3..66a158f67a712b 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -95,9 +95,12 @@ static int apple_wdt_ping(struct watchdog_device *wdd)
 static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s)
 {
 	struct apple_wdt *wdt = to_apple_wdt(wdd);
+	u32 actual;
 
 	writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
-	writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
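+	/*
+	 * Clamp the programmed value to the longest interval the hardware
+	 * counter can represent; the watchdog core compensates for longer
+	 * timeouts via max_hw_heartbeat_ms and periodic pings.
+	 */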
+	actual = min(s, wdd->max_hw_heartbeat_ms / 1000);
+	writel_relaxed(wdt->clk_rate * actual, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
 
 	wdd->timeout = s;
 
@@ -177,7 +180,7 @@ static int apple_wdt_probe(struct platform_device *pdev)
 
 	wdt->wdd.ops = &apple_wdt_ops;
 	wdt->wdd.info = &apple_wdt_info;
-	wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
+	wdt->wdd.max_hw_heartbeat_ms = U32_MAX / wdt->clk_rate * 1000;
 	wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT;
 
 	wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fdae947682cd0b..f700e4996eccb9 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -432,6 +432,14 @@ struct drm_gem_object {
 	 * The current LRU list that the GEM object is on.
 	 */
 	struct drm_gem_lru *lru;
+
+	/**
+	 * @exportable:
+	 *
+	 * Whether this GEM object can be exported via the drm_gem_object_funcs->export
+	 * callback. Defaults to true.
+	 */
+	bool exportable;
 };
 
 /**
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index d22e3fb53631ab..b70d3cc35bd194 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -132,6 +132,9 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent);
 
 extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf);
+void drm_gem_shmem_vm_open(struct vm_area_struct *vma);
+void drm_gem_shmem_vm_close(struct vm_area_struct *vma);
 
 /*
  * GEM object functions
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 00d4e43b76b6c1..6635379daf4d02 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -56,10 +56,17 @@ enum drm_gpuva_flags {
 	 */
 	DRM_GPUVA_SPARSE = (1 << 1),
 
+	/**
+	 * @DRM_GPUVA_SINGLE_PAGE:
+	 *
+	 * Flag indicating that the &drm_gpuva is a single-page mapping.
+	 */
+	DRM_GPUVA_SINGLE_PAGE = (1 << 2),
+
 	/**
 	 * @DRM_GPUVA_USERBITS: user defined bits
 	 */
-	DRM_GPUVA_USERBITS = (1 << 2),
+	DRM_GPUVA_USERBITS = (1 << 3),
 };
 
 /**
@@ -161,12 +168,14 @@ struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
 struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
 
 static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
-				  struct drm_gem_object *obj, u64 offset)
+				  struct drm_gem_object *obj, u64 offset,
+				  enum drm_gpuva_flags flags)
 {
 	va->va.addr = addr;
 	va->va.range = range;
 	va->gem.obj = obj;
 	va->gem.offset = offset;
+	va->flags = flags;
 }
 
 /**
@@ -851,6 +860,11 @@ struct drm_gpuva_op_map {
 		 */
 		struct drm_gem_object *obj;
 	} gem;
+
+	/**
+	 * @flags: requested flags for the &drm_gpuva for this mapping
+	 */
+	enum drm_gpuva_flags flags;
 };
 
 /**
@@ -1056,7 +1070,8 @@ struct drm_gpuva_ops {
 struct drm_gpuva_ops *
 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
 			    u64 addr, u64 range,
-			    struct drm_gem_object *obj, u64 offset);
+			    struct drm_gem_object *obj, u64 offset,
+			    enum drm_gpuva_flags flags);
 struct drm_gpuva_ops *
 drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
 			      u64 addr, u64 range);
@@ -1075,7 +1090,7 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
 					  struct drm_gpuva_op_map *op)
 {
 	drm_gpuva_init(va, op->va.addr, op->va.range,
-		       op->gem.obj, op->gem.offset);
+		       op->gem.obj, op->gem.offset, op->flags);
 }
 
 /**
@@ -1201,10 +1216,12 @@ struct drm_gpuvm_ops {
 
 int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
 		     u64 addr, u64 range,
-		     struct drm_gem_object *obj, u64 offset);
+		     struct drm_gem_object *obj, u64 offset,
+		     enum drm_gpuva_flags flags);
 
 int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
 		       u64 addr, u64 range);
+int drm_gpuvm_bo_unmap(struct drm_gpuvm_bo *bo, void *priv);
 
 void drm_gpuva_map(struct drm_gpuvm *gpuvm,
 		   struct drm_gpuva *va,
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index f4e1fa9ae607a8..ff78d00c3da50b 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -163,4 +163,11 @@ static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags)
 
 #endif
 
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
+
+u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
+			 u8 *tmp, size_t tmp_size);
+#endif
+
 #endif /* __DRM_PANIC_H__ */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 95e17504e46a38..9fc2b51f129a4e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -307,6 +307,11 @@ struct drm_sched_fence {
          * @lock: the lock used by the scheduled and the finished fences.
          */
 	spinlock_t			lock;
+        /**
+         * @sched_name: the name of the scheduler that owns this fence. We
+         * keep a copy here since fences can outlive their scheduler.
+         */
+	char sched_name[16];
         /**
          * @owner: job owner for debugging
          */
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 147bd3ee4f7bae..58fc7f932b3fc5 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -37,13 +37,7 @@ struct arm_pmu_entry {
 	struct arm_pmu *arm_pmu;
 };
 
-DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static __always_inline bool kvm_arm_support_pmu_v3(void)
-{
-	return static_branch_likely(&kvm_arm_pmu_available);
-}
-
+bool kvm_supports_guest_pmuv3(void);
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
@@ -86,7 +80,7 @@ void kvm_vcpu_pmu_resync_el0(void);
  */
 #define kvm_pmu_update_vcpu_events(vcpu)				\
 	do {								\
-		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
+		if (!has_vhe() && system_supports_pmuv3())		\
 			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
 
@@ -102,7 +96,7 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
 struct kvm_pmu {
 };
 
-static inline bool kvm_arm_support_pmu_v3(void)
+static inline bool kvm_supports_guest_pmuv3(void)
 {
 	return false;
 }
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 6fa0a268d53827..8c499bfac9658d 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -224,5 +224,6 @@ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
 void fwnode_links_purge(struct fwnode_handle *fwnode);
 void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
 bool fw_devlink_is_strict(void);
+int fw_devlink_count_absent_consumers(struct fwnode_handle *fwnode);
 
 #endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9ca7e26ac4e925..593b21bd64ff31 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -590,7 +590,9 @@ struct hid_input {
 enum hid_type {
 	HID_TYPE_OTHER = 0,
 	HID_TYPE_USBMOUSE,
-	HID_TYPE_USBNONE
+	HID_TYPE_USBNONE,
+	HID_TYPE_SPI_KEYBOARD,
+	HID_TYPE_SPI_MOUSE,
 };
 
 enum hid_battery_status {
@@ -750,6 +752,8 @@ struct hid_descriptor {
 	.bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod)
 #define HID_I2C_DEVICE(ven, prod)				\
 	.bus = BUS_I2C, .vendor = (ven), .product = (prod)
+#define HID_SPI_DEVICE(ven, prod)				\
+	.bus = BUS_SPI, .vendor = (ven), .product = (prod)
 
 #define HID_REPORT_ID(rep) \
 	.report_type = (rep)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index bba2a51c87d26f..fdf93e25b3189d 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -170,8 +170,9 @@ struct io_pgtable_cfg {
 		} arm_mali_lpae_cfg;
 
 		struct {
-			u64 ttbr[4];
+			void *ttbr[4];
 			u32 n_ttbrs;
+			u32 n_levels;
 		} apple_dart_cfg;
 
 		struct {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 87cbe47b323e68..8ce98f35ee2223 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -273,12 +273,18 @@ enum iommu_resv_type {
 	IOMMU_RESV_MSI,
 	/* Software-managed MSI translation window */
 	IOMMU_RESV_SW_MSI,
+	/*
+	 * Memory regions which must be mapped with the specified mapping
+	 * at all times.
+	 */
+	IOMMU_RESV_TRANSLATED,
 };
 
 /**
  * struct iommu_resv_region - descriptor for a reserved memory region
  * @list: Linked list pointers
  * @start: System physical start address of the region
+ * @dva: Device virtual start address of the region for IOMMU_RESV_TRANSLATED
  * @length: Length of the region in bytes
  * @prot: IOMMU Protection flags (READ/WRITE/...)
  * @type: Type of the reserved region
@@ -287,6 +293,7 @@ enum iommu_resv_type {
 struct iommu_resv_region {
 	struct list_head	list;
 	phys_addr_t		start;
+	dma_addr_t		dva;
 	size_t			length;
 	int			prot;
 	enum iommu_resv_type	type;
@@ -782,6 +789,7 @@ struct iommu_fault_param {
  * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
  * @require_direct: device requires IOMMU_RESV_DIRECT regions
  * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
+ * @require_translated: device requires IOMMU_RESV_TRANSLATED regions
  *
  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  *	struct iommu_group	*iommu_group;
@@ -797,6 +805,7 @@ struct dev_iommu {
 	u32				pci_32bit_workaround:1;
 	u32				require_direct:1;
 	u32				shadow_on_flush:1;
+	u32				require_translated:1;
 };
 
 int iommu_device_register(struct iommu_device *iommu,
@@ -877,6 +886,9 @@ extern bool iommu_default_passthrough(void);
 extern struct iommu_resv_region *
 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
 			enum iommu_resv_type type, gfp_t gfp);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region_tr(phys_addr_t start, dma_addr_t dva_start, size_t length,
+			   int prot, enum iommu_resv_type type, gfp_t gfp);
 extern int iommu_get_group_resv_regions(struct iommu_group *group,
 					struct list_head *head);
 
diff --git a/include/linux/memory_ordering_model.h b/include/linux/memory_ordering_model.h
new file mode 100644
index 00000000000000..267a12ca66307e
--- /dev/null
+++ b/include/linux/memory_ordering_model.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MEMORY_ORDERING_MODEL_H
+#define __ASM_MEMORY_ORDERING_MODEL_H
+
+/* Arch hooks to implement the PR_{GET,SET}_MEM_MODEL prctls */
+
+struct task_struct;
+int arch_prctl_mem_model_get(struct task_struct *t);
+int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val);
+
+#endif
diff --git a/include/linux/mfd/macsmc.h b/include/linux/mfd/macsmc.h
new file mode 100644
index 00000000000000..b4efba685d8cff
--- /dev/null
+++ b/include/linux/mfd/macsmc.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SMC core definitions
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_MFD_MACSMC_H
+#define _LINUX_MFD_MACSMC_H
+
+struct apple_smc;
+
+typedef u32 smc_key;
+
+#define SMC_KEY(s) (smc_key)(_SMC_KEY(#s))
+#define _SMC_KEY(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3])
+#define __SMC_KEY(a, b, c, d) (((u32)(a) << 24) | ((u32)(b) << 16) | \
+                               ((u32)(c) <<  8) |  (u32)(d))
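+/* e.g. SMC_KEY(ABCD) evaluates to 0x41424344 */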
+
+#define APPLE_SMC_READABLE BIT(7)
+#define APPLE_SMC_WRITABLE BIT(6)
+#define APPLE_SMC_FUNCTION BIT(4)
+
+struct apple_smc_key_info {
+	u8 size;
+	u32 type_code;
+	u8 flags;
+};
+
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize,
+		 void *rbuf, size_t rsize);
+
+int apple_smc_get_key_count(struct apple_smc *smc);
+int apple_smc_find_first_key_index(struct apple_smc *smc, smc_key key);
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key);
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info);
+
+static inline bool apple_smc_key_exists(struct apple_smc *smc, smc_key key)
+{
+	return apple_smc_get_key_info(smc, key, NULL) >= 0;
+}
+
+#define APPLE_SMC_TYPE_OPS(type) \
+	static inline int apple_smc_read_##type(struct apple_smc *smc, smc_key key, type *p) \
+	{ \
+		int ret = apple_smc_read(smc, key, p, sizeof(*p)); \
+		return (ret < 0) ? ret : ((ret != sizeof(*p)) ? -EINVAL : 0); \
+	} \
+	static inline int apple_smc_write_##type(struct apple_smc *smc, smc_key key, type p) \
+	{ \
+		return apple_smc_write(smc, key, &p, sizeof(p)); \
+	} \
+	static inline int apple_smc_write_##type##_atomic(struct apple_smc *smc, smc_key key, type p) \
+	{ \
+		return apple_smc_write_atomic(smc, key, &p, sizeof(p)); \
+	} \
+	static inline int apple_smc_rw_##type(struct apple_smc *smc, smc_key key, \
+					      type w, type *r) \
+	{ \
+		int ret = apple_smc_rw(smc, key, &w, sizeof(w), r, sizeof(*r)); \
+		return (ret < 0) ? ret : ((ret != sizeof(*r)) ? -EINVAL : 0); \
+	}
+
+APPLE_SMC_TYPE_OPS(u64)
+APPLE_SMC_TYPE_OPS(u32)
+APPLE_SMC_TYPE_OPS(u16)
+APPLE_SMC_TYPE_OPS(u8)
+APPLE_SMC_TYPE_OPS(s64)
+APPLE_SMC_TYPE_OPS(s32)
+APPLE_SMC_TYPE_OPS(s16)
+APPLE_SMC_TYPE_OPS(s8)
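+/*
+ * The instantiations above expand to typed helpers such as
+ * apple_smc_read_u32() and apple_smc_write_u8_atomic().
+ */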
+
+static inline int apple_smc_read_flag(struct apple_smc *smc, smc_key key)
+{
+	u8 val;
+	int ret = apple_smc_read_u8(smc, key, &val);
+	if (ret < 0)
+		return ret;
+	return val ? 1 : 0;
+}
+#define apple_smc_write_flag apple_smc_write_u8
+
+int apple_smc_read_f32_scaled(struct apple_smc *smc, smc_key key, int *p, int scale);
+int apple_smc_write_f32_scaled(struct apple_smc *smc, smc_key key, int p, int scale);
+int apple_smc_read_ioft_scaled(struct apple_smc *smc, smc_key key, u64 *p, int scale);
+
+int apple_smc_register_notifier(struct apple_smc *smc, struct notifier_block *n);
+int apple_smc_unregister_notifier(struct apple_smc *smc, struct notifier_block *n);
+
+#endif
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 3a10f8cfc3ad5c..bc2ca2c72ee23b 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -97,6 +97,8 @@ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
 #if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
 /* for DT-based PCI controllers that support ECAM */
 int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_init(struct platform_device *pdev,
+			 const struct pci_ecam_ops *ops);
 void pci_host_common_remove(struct platform_device *pdev);
 #endif
 #endif
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 4b5b83677e3f28..7ce6dea5bfa9f0 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -100,6 +100,10 @@ struct arm_pmu {
 	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
 	int		(*map_event)(struct perf_event *event);
+	/*
+	 * Called by KVM to map the PMUv3 event space onto non-PMUv3 hardware.
+	 */
+	int		(*map_pmuv3_event)(unsigned int eventsel);
 	DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS);
 	bool		secure_access; /* 32-bit ARM only */
 #define ARMV8_PMUV3_MAX_COMMON_EVENTS		0x40
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1aab31370065ca..08cad9b8483fd7 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -104,6 +104,12 @@ struct dev_pm_domain_list {
  * GENPD_FLAG_DEV_NAME_FW:	Instructs genpd to generate an unique device name
  *				using ida. It is used by genpd providers which
  *				get their genpd-names directly from FW.
+ * GENPD_FLAG_DEFER_OFF:	Defer powerdown if there are any consumer
+ *				device fwlinks indicating that some consumer
+ *				devices have not yet probed. This is useful
+ *				for power domains which are active at boot and
+ *				must not be shut down until all consumers
+ *				complete their probe sequence.
  */
 #define GENPD_FLAG_PM_CLK	 (1U << 0)
 #define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
@@ -114,6 +120,7 @@ struct dev_pm_domain_list {
 #define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
 #define GENPD_FLAG_OPP_TABLE_FW	 (1U << 7)
 #define GENPD_FLAG_DEV_NAME_FW	 (1U << 8)
+#define GENPD_FLAG_DEFER_OFF	 (1U << 9)
 
 enum gpd_status {
 	GENPD_STATE_ON = 0,	/* PM domain is on */
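
A provider-side sketch of how the new GENPD_FLAG_DEFER_OFF flag might be used; not part of the patch. The domain name and the absence of power callbacks are illustrative assumptions.

static struct generic_pm_domain example_pd = {
	.name	= "example-boot-pd",
	/* keep this boot-time-on domain up until all consumers have probed */
	.flags	= GENPD_FLAG_DEFER_OFF,
};

static int example_pd_register(void)
{
	/* third argument false: the domain starts in the powered-on state */
	return pm_genpd_init(&example_pd, NULL, false);
}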
diff --git a/include/linux/soc/apple/dockchannel.h b/include/linux/soc/apple/dockchannel.h
new file mode 100644
index 00000000000000..0b7093935ddf47
--- /dev/null
+++ b/include/linux/soc/apple/dockchannel.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple Dockchannel devices
+ * Copyright (C) The Asahi Linux Contributors
+ */
+#ifndef _LINUX_APPLE_DOCKCHANNEL_H_
+#define _LINUX_APPLE_DOCKCHANNEL_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+
+#if IS_ENABLED(CONFIG_APPLE_DOCKCHANNEL)
+
+struct dockchannel;
+
+struct dockchannel *dockchannel_init(struct platform_device *pdev);
+
+int dockchannel_send(struct dockchannel *dockchannel, const void *buf, size_t count);
+int dockchannel_recv(struct dockchannel *dockchannel, void *buf, size_t count);
+int dockchannel_await(struct dockchannel *dockchannel,
+		      void (*callback)(void *cookie, size_t avail),
+		      void *cookie, size_t count);
+
+#endif
+#endif
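
A consumer-side sketch of the new interface; not part of the patch. The one-byte command, the four-byte reply, and the assumption that dockchannel_init() returns an ERR_PTR on failure are illustrative only.

static int example_dockchannel_xfer(struct platform_device *pdev)
{
	struct dockchannel *dc;
	u8 cmd = 0x01;
	u8 reply[4];
	int ret;

	dc = dockchannel_init(pdev);
	if (IS_ERR(dc))
		return PTR_ERR(dc);

	ret = dockchannel_send(dc, &cmd, sizeof(cmd));
	if (ret < 0)
		return ret;

	ret = dockchannel_recv(dc, reply, sizeof(reply));
	return ret < 0 ? ret : 0;
}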
diff --git a/drivers/soc/apple/mailbox.h b/include/linux/soc/apple/mailbox.h
similarity index 100%
rename from drivers/soc/apple/mailbox.h
rename to include/linux/soc/apple/mailbox.h
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index c06d17599ae7e3..22e1d3bb35ef0c 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -56,7 +56,7 @@ struct apple_rtkit_shmem {
  *                 context.
  */
 struct apple_rtkit_ops {
-	void (*crashed)(void *cookie);
+	void (*crashed)(void *cookie, const void *crashlog, size_t crashlog_size);
 	void (*recv_message)(void *cookie, u8 endpoint, u64 message);
 	bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
 	int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
@@ -78,6 +78,13 @@ struct apple_rtkit;
 struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
 					  const char *mbox_name, int mbox_idx,
 					  const struct apple_rtkit_ops *ops);
+/*
+ * Frees internal RTKit state allocated by devm_apple_rtkit_init().
+ *
+ * @dev:	Pointer to the device node this coprocessor is associated with
+ * @rtk:	Internal RTKit state initialized by devm_apple_rtkit_init()
+ */
+void devm_apple_rtkit_free(struct device *dev, struct apple_rtkit *rtk);
 
 /*
  * Non-devm version of devm_apple_rtkit_init. Must be freed with
@@ -172,4 +179,12 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
  */
 int apple_rtkit_poll(struct apple_rtkit *rtk);
 
+/*
+ * Checks if an endpoint with a given index exists
+ *
+ * @rtk:            RTKit reference
+ * @ep:             endpoint to check for
+ */
+bool apple_rtkit_has_endpoint(struct apple_rtkit *rtk, u8 ep);
+
 #endif /* _LINUX_APPLE_RTKIT_H_ */
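
A client-side sketch of the updated crashed() callback and the new endpoint query; not part of the patch. The endpoint number 0x20 and using the struct device as the cookie are assumptions; a real driver would also provide message and shmem callbacks.

static void example_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
	struct device *dev = cookie;

	/* The crashlog buffer could be handed off to devcoredump here */
	dev_err(dev, "coprocessor crashed (%zu bytes of crashlog)\n", crashlog_size);
}

static const struct apple_rtkit_ops example_ops = {
	.crashed = example_crashed,
};

static int example_start(struct device *dev)
{
	struct apple_rtkit *rtk;
	int ret;

	rtk = devm_apple_rtkit_init(dev, dev, NULL, 0, &example_ops);
	if (IS_ERR(rtk))
		return PTR_ERR(rtk);

	ret = apple_rtkit_boot(rtk);
	if (ret)
		return ret;

	/* Endpoints are only known once the coprocessor is up */
	if (!apple_rtkit_has_endpoint(rtk, 0x20))
		return -ENODEV;

	return 0;
}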
diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
index 33dcbec719254a..51cab2def9ec10 100644
--- a/include/linux/sprintf.h
+++ b/include/linux/sprintf.h
@@ -24,4 +24,7 @@ __scanf(2, 0) int vsscanf(const char *, const char *, va_list);
 extern bool no_hash_pointers;
 int no_hash_pointers_enable(char *str);
 
+/* Used for Rust formatting ('%pA') */
+char *rust_fmt_argument(char *buf, char *end, const void *ptr);
+
 #endif	/* _LINUX_KERNEL_SPRINTF_H */
diff --git a/include/sound/control.h b/include/sound/control.h
index e07f6b960641ff..9be6546bf787de 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -14,9 +14,12 @@
 #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
 
 struct snd_kcontrol;
+struct snd_ctl_file;
 typedef int (snd_kcontrol_info_t) (struct snd_kcontrol * kcontrol, struct snd_ctl_elem_info * uinfo);
 typedef int (snd_kcontrol_get_t) (struct snd_kcontrol * kcontrol, struct snd_ctl_elem_value * ucontrol);
 typedef int (snd_kcontrol_put_t) (struct snd_kcontrol * kcontrol, struct snd_ctl_elem_value * ucontrol);
+typedef int (snd_kcontrol_lock_t) (struct snd_kcontrol * kcontrol, struct snd_ctl_file *owner);
+typedef void (snd_kcontrol_unlock_t) (struct snd_kcontrol * kcontrol);
 typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
 				    int op_flag, /* SNDRV_CTL_TLV_OP_XXX */
 				    unsigned int size,
@@ -55,6 +58,8 @@ struct snd_kcontrol_new {
 	snd_kcontrol_info_t *info;
 	snd_kcontrol_get_t *get;
 	snd_kcontrol_put_t *put;
+	snd_kcontrol_lock_t *lock;
+	snd_kcontrol_unlock_t *unlock;
 	union {
 		snd_kcontrol_tlv_rw_t *c;
 		const unsigned int *p;
@@ -74,6 +79,8 @@ struct snd_kcontrol {
 	snd_kcontrol_info_t *info;
 	snd_kcontrol_get_t *get;
 	snd_kcontrol_put_t *put;
+	snd_kcontrol_lock_t *lock;
+	snd_kcontrol_unlock_t *unlock;
 	union {
 		snd_kcontrol_tlv_rw_t *c;
 		const unsigned int *p;
diff --git a/include/sound/cs42l42.h b/include/sound/cs42l42.h
index 1bd8eee54f6665..b3657965d49109 100644
--- a/include/sound/cs42l42.h
+++ b/include/sound/cs42l42.h
@@ -62,6 +62,10 @@
 #define CS42L42_INTERNAL_FS_MASK	(1 << CS42L42_INTERNAL_FS_SHIFT)
 
 #define CS42L42_SFTRAMP_RATE		(CS42L42_PAGE_10 + 0x0A)
+#define CS42L42_SFTRAMP_ASR_RATE_MASK	GENMASK(7, 4)
+#define CS42L42_SFTRAMP_ASR_RATE_SHIFT	4
+#define CS42L42_SFTRAMP_DSR_RATE_MASK	GENMASK(3, 0)
+#define CS42L42_SFTRAMP_DSR_RATE_SHIFT	0
 #define CS42L42_SLOW_START_ENABLE	(CS42L42_PAGE_10 + 0x0B)
 #define CS42L42_SLOW_START_EN_MASK	GENMASK(6, 4)
 #define CS42L42_SLOW_START_EN_SHIFT	4
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8becb450488736..8c2a29a01a2e04 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1073,6 +1073,7 @@ int snd_interval_ranges(struct snd_interval *i, unsigned int count,
 int snd_interval_ratnum(struct snd_interval *i,
 			unsigned int rats_count, const struct snd_ratnum *rats,
 			unsigned int *nump, unsigned int *denp);
+int snd_interval_rate_bits(struct snd_interval *i, unsigned int rate_bits);
 
 void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params);
 void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var);
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index ecc02e955279fd..ef46cac97d9968 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -44,7 +44,7 @@ int snd_soc_card_resume_post(struct snd_soc_card *card);
 
 int snd_soc_card_probe(struct snd_soc_card *card);
 int snd_soc_card_late_probe(struct snd_soc_card *card);
-void snd_soc_card_fixup_controls(struct snd_soc_card *card);
+int snd_soc_card_fixup_controls(struct snd_soc_card *card);
 int snd_soc_card_remove(struct snd_soc_card *card);
 
 int snd_soc_card_set_bias_level(struct snd_soc_card *card,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b3e84bc47c6fdd..5d923efe648b86 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -612,8 +612,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
+bool snd_soc_control_matches(struct snd_kcontrol *kcontrol,
+	const char *pattern);
 int snd_soc_limit_volume(struct snd_soc_card *card,
 	const char *name, int max);
+int snd_soc_deactivate_kctl(struct snd_soc_card *card,
+	const char *name, int active);
+int snd_soc_set_enum_kctl(struct snd_soc_card *card,
+	const char *name, const char *strval);
 int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
 		       struct snd_ctl_elem_info *uinfo);
 int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
@@ -1040,7 +1046,7 @@ struct snd_soc_card {
 
 	int (*probe)(struct snd_soc_card *card);
 	int (*late_probe)(struct snd_soc_card *card);
-	void (*fixup_controls)(struct snd_soc_card *card);
+	int (*fixup_controls)(struct snd_soc_card *card);
 	int (*remove)(struct snd_soc_card *card);
 
 	/* the pre and post PM functions are used to do any PM work before and
diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h
new file mode 100644
index 00000000000000..109a8c2de4083c
--- /dev/null
+++ b/include/uapi/drm/asahi_drm.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) The Asahi Linux Contributors
+ * Copyright (C) 2018-2023 Collabora Ltd.
+ * Copyright (C) 2014-2018 Broadcom
+ */
+#ifndef _ASAHI_DRM_H_
+#define _ASAHI_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: Introduction to the Asahi UAPI
+ *
+ * This documentation describes the Asahi IOCTLs.
+ *
+ * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed
+ * from Panthor):
+ *
+ * - Structures must be aligned on 64-bit/8-byte. If the object is not
+ *   naturally aligned, a padding field must be added.
+ * - Fields must be explicitly aligned to their natural type alignment with
+ *   pad[0..N] fields.
+ * - All padding fields will be checked by the driver to make sure they are
+ *   zeroed.
+ * - Flags can be added, but not removed/replaced.
+ * - New fields can be added to the main structures (the structures
+ *   directly passed to the ioctl). Those fields can be added at the end of
+ *   the structure, or replace existing padding fields. Any new field being
+ *   added must preserve the behavior that existed before those fields were
+ *   added when a value of zero is passed.
+ * - New fields can be added to indirect objects (objects pointed by the
+ *   main structure), iff those objects are passed a size to reflect the
+ *   size known by the userspace driver (see
+ *   drm_asahi_cmd_header::size).
+ * - If the kernel driver is too old to know some fields, those will be
+ *   ignored if zero, and otherwise rejected (and so will be zero on output).
+ * - If userspace is too old to know some fields, those will be zeroed
+ *   (input) before the structure is parsed by the kernel driver.
+ * - Each new flag/field addition must come with a driver version update so
+ *   the userspace driver doesn't have to guess which flags are supported.
+ * - Structures should not contain unions, as this would defeat the
+ *   extensibility of such structures.
+ * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
+ *   at the end of the drm_asahi_ioctl_id enum.
+ */
+
+/**
+ * enum drm_asahi_ioctl_id - IOCTL IDs
+ *
+ * Place new ioctls at the end, don't re-order, don't replace or remove entries.
+ *
+ * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx
+ * definitions instead.
+ */
+enum drm_asahi_ioctl_id {
+	/** @DRM_ASAHI_GET_PARAMS: Query device properties. */
+	DRM_ASAHI_GET_PARAMS = 0,
+
+	/** @DRM_ASAHI_GET_TIME: Query device time. */
+	DRM_ASAHI_GET_TIME,
+
+	/** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */
+	DRM_ASAHI_VM_CREATE,
+
+	/** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */
+	DRM_ASAHI_VM_DESTROY,
+
+	/** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */
+	DRM_ASAHI_VM_BIND,
+
+	/** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */
+	DRM_ASAHI_GEM_CREATE,
+
+	/**
+	 * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a
+	 * given GEM handle.
+	 */
+	DRM_ASAHI_GEM_MMAP_OFFSET,
+
+	/** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */
+	DRM_ASAHI_GEM_BIND_OBJECT,
+
+	/** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */
+	DRM_ASAHI_QUEUE_CREATE,
+
+	/** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */
+	DRM_ASAHI_QUEUE_DESTROY,
+
+	/** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */
+	DRM_ASAHI_SUBMIT,
+};
+
+#define DRM_ASAHI_MAX_CLUSTERS	64
+
+/**
+ * struct drm_asahi_params_global - Global parameters.
+ *
+ * This struct may be queried by drm_asahi_get_params.
+ */
+struct drm_asahi_params_global {
+	/** @features: Feature bits from drm_asahi_feature */
+	__u64 features;
+
+	/** @gpu_generation: GPU generation, e.g. 13 for G13G */
+	__u32 gpu_generation;
+
+	/** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */
+	__u32 gpu_variant;
+
+	/**
+	 * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or
+	 * 0x21 for 'C1'
+	 */
+	__u32 gpu_revision;
+
+	/** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */
+	__u32 chip_id;
+
+	/** @num_dies: Number of dies in the SoC */
+	__u32 num_dies;
+
+	/** @num_clusters_total: Number of GPU clusters (across all dies) */
+	__u32 num_clusters_total;
+
+	/**
+	 * @num_cores_per_cluster: Number of logical cores per cluster
+	 * (including inactive/nonexistent)
+	 */
+	__u32 num_cores_per_cluster;
+
+	/** @max_frequency_khz: Maximum GPU core clock frequency */
+	__u32 max_frequency_khz;
+
+	/** @core_masks: Bitmask of present/enabled cores per cluster */
+	__u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];
+
+	/**
+	 * @vm_start: VM range start VMA. Together with @vm_end, this defines
+	 * the window of valid GPU VAs. Userspace is expected to subdivide VAs
+	 * out of this window.
+	 *
+	 * This window contains all virtual addresses that userspace needs to
+	 * know about. There may be kernel-internal GPU VAs outside this range,
+	 * but that detail is not relevant here.
+	 */
+	__u64 vm_start;
+
+	/** @vm_end: VM range end VMA */
+	__u64 vm_end;
+
+	/**
+	 * @vm_kernel_min_size: Minimum kernel VMA window size.
+	 *
+	 * When creating a VM, userspace is required to carve out a section of
+	 * virtual addresses (within the range given by @vm_start and
+	 * @vm_end). The kernel will allocate various internal structures
+	 * within the specified VA range.
+	 *
+	 * Allowing userspace to choose the VA range for the kernel, rather than
+	 * the kernel reserving VAs and requiring userspace to cope, can assist
+	 * in implementing SVM.
+	 */
+	__u64 vm_kernel_min_size;
+
+	/**
+	 * @max_commands_per_submission: Maximum number of supported commands
+	 * per submission. This mirrors firmware limits. Userspace must split up
+	 * larger command buffers, which may require inserting additional
+	 * synchronization.
+	 */
+	__u32 max_commands_per_submission;
+
+	/**
+	 * @max_attachments: Maximum number of drm_asahi_attachment's per
+	 * command
+	 */
+	__u32 max_attachments;
+
+	/**
+	 * @command_timestamp_frequency_hz: Timebase frequency for timestamps
+	 * written during command execution, specified via drm_asahi_timestamp
+	 * structures. As this rate is controlled by the firmware, it is a
+	 * queryable parameter.
+	 *
+	 * Userspace must divide by this frequency to convert timestamps to
+	 * seconds, rather than hardcoding a particular firmware's rate.
+	 */
+	__u64 command_timestamp_frequency_hz;
+};
+
+/**
+ * enum drm_asahi_feature - Feature bits
+ *
+ * This covers only features that userspace cannot infer from the architecture
+ * version. Most features don't need to be here.
+ */
+enum drm_asahi_feature {
+	/**
+	 * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader
+	 * loads of unmapped memory will return zero. Shader stores to unmapped
+	 * memory will be silently discarded. Note that only shader load/store
+	 * is affected. Other hardware units are not affected, notably including
+	 * texture sampling.
+	 *
+	 * Soft fault is set when initializing the GPU and cannot be runtime
+	 * toggled. Therefore, it is exposed as a feature bit and not a
+	 * userspace-settable flag on the VM. When soft fault is enabled,
+	 * userspace can speculate memory accesses more aggressively.
+	 */
+	DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0,
+};
+
+/**
+ * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS
+ */
+struct drm_asahi_get_params {
+	/** @param_group: Parameter group to fetch (MBZ) */
+	__u32 param_group;
+
+	/** @pad: MBZ */
+	__u32 pad;
+
+	/** @pointer: User pointer to write parameter struct */
+	__u64 pointer;
+
+	/**
+	 * @size: Size of the user buffer. In case of older userspace, this may
+	 * be less than sizeof(struct drm_asahi_params_global). The kernel will
+	 * not write past the length specified here, allowing extensibility.
+	 */
+	__u64 size;
+};
+
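A userspace sketch of the size-based extensibility rule in practice; not part of the patch. It assumes libdrm's drmIoctl() and an already-open render-node fd.

static int example_get_params(int fd, struct drm_asahi_params_global *params)
{
	struct drm_asahi_get_params get = {
		.param_group = 0,
		.pointer = (__u64)(uintptr_t)params,
		/* the kernel will not write past the size this build knows about */
		.size = sizeof(*params),
	};

	return drmIoctl(fd, DRM_IOCTL_ASAHI_GET_PARAMS, &get) ? -errno : 0;
}
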
+/**
+ * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE
+ */
+struct drm_asahi_vm_create {
+	/**
+	 * @kernel_start: Start of the kernel-reserved address range. See
+	 * drm_asahi_params_global::vm_kernel_min_size.
+	 *
+	 * Both @kernel_start and @kernel_end must be within the range of
+	 * valid VAs given by drm_asahi_params_global::vm_start and
+	 * drm_asahi_params_global::vm_end. The size of the kernel range
+	 * (@kernel_end - @kernel_start) must be at least
+	 * drm_asahi_params_global::vm_kernel_min_size.
+	 *
+	 * Userspace must not bind any memory on this VM into this reserved
+	 * range, it is for kernel use only.
+	 */
+	__u64 kernel_start;
+
+	/**
+	 * @kernel_end: End of the kernel-reserved address range. See
+	 * @kernel_start.
+	 */
+	__u64 kernel_end;
+
+	/** @vm_id: Returned VM ID */
+	__u32 vm_id;
+
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
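A userspace sketch of carving the kernel-reserved range out of the top of the VA window; not part of the patch. drmIoctl() and previously queried params are assumptions.

static int example_vm_create(int fd, const struct drm_asahi_params_global *p,
			     __u32 *vm_id)
{
	struct drm_asahi_vm_create vc = {
		/* reserve the minimum-sized kernel range at the top of the window */
		.kernel_start = p->vm_end - p->vm_kernel_min_size,
		.kernel_end   = p->vm_end,
	};

	if (drmIoctl(fd, DRM_IOCTL_ASAHI_VM_CREATE, &vc))
		return -errno;

	*vm_id = vc.vm_id;
	return 0;
}
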
+/**
+ * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY
+ */
+struct drm_asahi_vm_destroy {
+	/** @vm_id: VM ID to be destroyed */
+	__u32 vm_id;
+
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
+/**
+ * enum drm_asahi_gem_flags - Flags for GEM creation
+ */
+enum drm_asahi_gem_flags {
+	/**
+	 * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback.
+	 *
+	 * Map as writeback instead of write-combine. This optimizes for CPU
+	 * reads.
+	 */
+	DRM_ASAHI_GEM_WRITEBACK = (1L << 0),
+
+	/**
+	 * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports).
+	 */
+	DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1),
+};
+
+/**
+ * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE
+ */
+struct drm_asahi_gem_create {
+	/** @size: Size of the BO */
+	__u64 size;
+
+	/** @flags: Combination of drm_asahi_gem_flags flags. */
+	__u32 flags;
+
+	/**
+	 * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set
+	 */
+	__u32 vm_id;
+
+	/** @handle: Returned GEM handle for the BO */
+	__u32 handle;
+
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
+/**
+ * struct drm_asahi_gem_mmap_offset - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET
+ */
+struct drm_asahi_gem_mmap_offset {
+	/** @handle: Handle for the object being mapped. */
+	__u32 handle;
+
+	/** @flags: Must be zero */
+	__u32 flags;
+
+	/** @offset: The fake offset to use for subsequent mmap call */
+	__u64 offset;
+};
+
+/**
+ * enum drm_asahi_bind_flags - Flags for GEM binding
+ */
+enum drm_asahi_bind_flags {
+	/**
+	 * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range,
+	 * simply unbind the GPU VMA range.
+	 */
+	DRM_ASAHI_BIND_UNBIND = (1L << 0),
+
+	/** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */
+	DRM_ASAHI_BIND_READ = (1L << 1),
+
+	/** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */
+	DRM_ASAHI_BIND_WRITE = (1L << 2),
+
+	/**
+	 * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly
+	 * across the VA range.
+	 *
+	 * This is useful to fill a VA range with scratch pages or zero pages.
+	 * It is intended as a mechanism to accelerate sparse.
+	 */
+	DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3),
+};
+
+/**
+ * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation.
+ */
+struct drm_asahi_gem_bind_op {
+	/** @flags: Combination of drm_asahi_bind_flags flags. */
+	__u32 flags;
+
+	/** @handle: GEM object to bind (except for UNBIND) */
+	__u32 handle;
+
+	/**
+	 * @offset: Offset into the object (except for UNBIND).
+	 *
+	 * For a regular bind, this is the beginning of the region of the GEM
+	 * object to bind.
+	 *
+	 * For a single-page bind, this is the offset to the single page that
+	 * will be repeatedly bound.
+	 *
+	 * Must be page-size aligned.
+	 */
+	__u64 offset;
+
+	/**
+	 * @range: Number of bytes to bind/unbind to @addr.
+	 *
+	 * Must be page-size aligned.
+	 */
+	__u64 range;
+
+	/**
+	 * @addr: Address to bind to.
+	 *
+	 * Must be page-size aligned.
+	 */
+	__u64 addr;
+};
+
+/**
+ * struct drm_asahi_vm_bind - Arguments passed to
+ * DRM_IOCTL_ASAHI_VM_BIND
+ */
+struct drm_asahi_vm_bind {
+	/** @vm_id: The ID of the VM to bind to */
+	__u32 vm_id;
+
+	/** @num_binds: number of binds in this IOCTL. */
+	__u32 num_binds;
+
+	/**
+	 * @stride: Stride in bytes between consecutive binds. This allows
+	 * extensibility of drm_asahi_gem_bind_op.
+	 */
+	__u32 stride;
+
+	/** @pad: MBZ */
+	__u32 pad;
+
+	/**
+	 * @userptr: User pointer to an array of @num_binds structures of type
+	 * @drm_asahi_gem_bind_op and size @stride bytes.
+	 */
+	__u64 userptr;
+};
+
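A userspace sketch of a two-operation bind using the stride-based array; not part of the patch. Addresses and sizes are assumed to be page-aligned by the caller.

static int example_vm_bind(int fd, __u32 vm_id, __u32 bo_handle,
			   __u64 gpu_va, __u64 size)
{
	struct drm_asahi_gem_bind_op ops[2] = {
		{	/* map the whole BO read/write at gpu_va */
			.flags  = DRM_ASAHI_BIND_READ | DRM_ASAHI_BIND_WRITE,
			.handle = bo_handle,
			.offset = 0,
			.range  = size,
			.addr   = gpu_va,
		},
		{	/* unbind the range immediately above it */
			.flags = DRM_ASAHI_BIND_UNBIND,
			.range = size,
			.addr  = gpu_va + size,
		},
	};
	struct drm_asahi_vm_bind bind = {
		.vm_id     = vm_id,
		.num_binds = 2,
		.stride    = sizeof(ops[0]),	/* lets the op struct grow later */
		.userptr   = (__u64)(uintptr_t)ops,
	};

	return drmIoctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &bind) ? -errno : 0;
}
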
+/**
+ * enum drm_asahi_bind_object_op - Special object bind operation
+ */
+enum drm_asahi_bind_object_op {
+	/** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */
+	DRM_ASAHI_BIND_OBJECT_OP_BIND = 0,
+
+	/** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */
+	DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1,
+};
+
+/**
+ * enum drm_asahi_bind_object_flags - Special object bind flags
+ */
+enum drm_asahi_bind_object_flags {
+	/**
+	 * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp
+	 * buffer.
+	 */
+	DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0),
+};
+
+/**
+ * struct drm_asahi_gem_bind_object - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT
+ */
+struct drm_asahi_gem_bind_object {
+	/** @op: Bind operation (enum drm_asahi_bind_object_op) */
+	__u32 op;
+
+	/** @flags: Combination of drm_asahi_bind_object_flags flags. */
+	__u32 flags;
+
+	/** @handle: GEM object to bind/unbind (BIND) */
+	__u32 handle;
+
+	/** @vm_id: The ID of the VM to operate on (MBZ currently) */
+	__u32 vm_id;
+
+	/** @offset: Offset into the object (BIND only) */
+	__u64 offset;
+
+	/** @range: Number of bytes to bind/unbind (BIND only) */
+	__u64 range;
+
+	/** @object_handle: Object handle (out for BIND, in for UNBIND) */
+	__u32 object_handle;
+
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
+/**
+ * enum drm_asahi_cmd_type - Command type
+ */
+enum drm_asahi_cmd_type {
+	/**
+	 * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render
+	 * subqueue. Combined vertex and fragment operation.
+	 *
+	 * Followed by a @drm_asahi_cmd_render payload.
+	 */
+	DRM_ASAHI_CMD_RENDER = 0,
+
+	/**
+	 * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue.
+	 *
+	 * Followed by a @drm_asahi_cmd_compute payload.
+	 */
+	DRM_ASAHI_CMD_COMPUTE = 1,
+
+	/**
+	 * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set
+	 * attachments for subsequent vertex shaders in the same submit.
+	 *
+	 * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+	 */
+	DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2,
+
+	/**
+	 * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set
+	 * attachments for subsequent fragment shaders in the same submit.
+	 *
+	 * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+	 */
+	DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3,
+
+	/**
+	 * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set
+	 * attachments for subsequent compute shaders in the same submit.
+	 *
+	 * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+	 */
+	DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4,
+};
+
+/**
+ * enum drm_asahi_priority - Scheduling queue priority.
+ *
+ * These priorities are forwarded to the firmware to influence firmware
+ * scheduling. The exact policy is ultimately decided by firmware, but
+ * these enums allow userspace to communicate its intent.
+ */
+enum drm_asahi_priority {
+	/** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. */
+	DRM_ASAHI_PRIORITY_LOW = 0,
+
+	/** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */
+	DRM_ASAHI_PRIORITY_MEDIUM = 1,
+
+	/**
+	 * @DRM_ASAHI_PRIORITY_HIGH: High priority queue.
+	 *
+	 * Reserved for future extension.
+	 */
+	DRM_ASAHI_PRIORITY_HIGH = 2,
+
+	/**
+	 * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue.
+	 *
+	 * Reserved for future extension.
+	 */
+	DRM_ASAHI_PRIORITY_REALTIME = 3,
+};
+
+/**
+ * struct drm_asahi_queue_create - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_CREATE
+ */
+struct drm_asahi_queue_create {
+	/** @flags: MBZ */
+	__u32 flags;
+
+	/** @vm_id: The ID of the VM this queue is bound to */
+	__u32 vm_id;
+
+	/** @priority: One of drm_asahi_priority */
+	__u32 priority;
+
+	/** @queue_id: The returned queue ID */
+	__u32 queue_id;
+
+	/**
+	 * @usc_exec_base: GPU base address for all USC binaries (shaders) on
+	 * this queue. USC addresses are 32-bit relative to this 64-bit base.
+	 *
+	 * This sets the following registers on all queue commands:
+	 *
+	 *	USC_EXEC_BASE_TA  (vertex)
+	 *	USC_EXEC_BASE_ISP (fragment)
+	 *	USC_EXEC_BASE_CP  (compute)
+	 *
+	 * While the hardware lets us configure these independently per command,
+	 * we do not have a use case for this. Instead, we expect userspace to
+	 * fix a 4GiB VA carveout for USC memory and pass its base address here.
+	 */
+	__u64 usc_exec_base;
+};
+
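A userspace sketch of queue creation; not part of the patch. The USC base is assumed to be the start of a 4 GiB shader VA carveout already reserved by the caller within its VM.

static int example_queue_create(int fd, __u32 vm_id, __u64 usc_exec_base,
				__u32 *queue_id)
{
	struct drm_asahi_queue_create qc = {
		.vm_id         = vm_id,
		.priority      = DRM_ASAHI_PRIORITY_MEDIUM,
		.usc_exec_base = usc_exec_base,	/* all shaders live in this window */
	};

	if (drmIoctl(fd, DRM_IOCTL_ASAHI_QUEUE_CREATE, &qc))
		return -errno;

	*queue_id = qc.queue_id;
	return 0;
}
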
+/**
+ * struct drm_asahi_queue_destroy - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_DESTROY
+ */
+struct drm_asahi_queue_destroy {
+	/** @queue_id: The queue ID to be destroyed */
+	__u32 queue_id;
+
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
+/**
+ * enum drm_asahi_sync_type - Sync item type
+ */
+enum drm_asahi_sync_type {
+	/** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */
+	DRM_ASAHI_SYNC_SYNCOBJ = 0,
+
+	/** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */
+	DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
+};
+
+/**
+ * struct drm_asahi_sync - Sync item
+ */
+struct drm_asahi_sync {
+	/** @sync_type: One of drm_asahi_sync_type */
+	__u32 sync_type;
+
+	/** @handle: The sync object handle */
+	__u32 handle;
+
+	/** @timeline_value: Timeline value for timeline sync objects */
+	__u64 timeline_value;
+};
+
+/**
+ * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier
+ *
+ * This special value may be passed in to drm_asahi_command::vdm_barrier or
+ * drm_asahi_command::cdm_barrier to indicate that the respective subqueue
+ * should not wait on any previous work.
+ */
+#define DRM_ASAHI_BARRIER_NONE (0xFFFFu)
+
+/**
+ * struct drm_asahi_cmd_header - Top level command structure
+ *
+ * This struct is core to the command buffer definition and therefore is not
+ * extensible.
+ */
+struct drm_asahi_cmd_header {
+	/** @cmd_type: One of drm_asahi_cmd_type */
+	__u16 cmd_type;
+
+	/**
+	 * @size: Size of this command, not including this header.
+	 *
+	 * For hardware commands, this enables extensibility of commands without
+	 * requiring extra command types. Passing a command that is shorter
+	 * than expected is explicitly allowed for backwards-compatibility.
+	 * Truncated fields will be zeroed.
+	 *
+	 * For the synthetic attachment setting commands, this implicitly
+	 * encodes the number of attachments. These commands take multiple
+	 * fixed-size @drm_asahi_attachment structures as their payload, so size
+	 * equals number of attachments * sizeof(struct drm_asahi_attachment).
+	 */
+	__u16 size;
+
+	/**
+	 * @vdm_barrier: VDM (render) command index to wait on.
+	 *
+	 * Barriers are indices relative to the beginning of a given submit. A
+	 * barrier of 0 waits on commands submitted to the respective subqueue
+	 * in previous submit ioctls. A barrier of N waits on N previous
+	 * commands on the subqueue within the current submit ioctl. As a
+	 * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any
+	 * commands in the subqueue.
+	 *
+	 * Examples:
+	 *
+	 *   0: This waits on all previous work.
+	 *
+	 *   NONE: This does not wait for anything on this subqueue.
+	 *
+	 *   1: This waits on the first render command in the submit.
+	 *   This is valid only if there are multiple render commands in the
+	 *   same submit.
+	 *
+	 * Barriers are valid only for hardware commands. Synthetic software
+	 * commands to set attachments must pass NONE here.
+	 */
+	__u16 vdm_barrier;
+
+	/**
+	 * @cdm_barrier: CDM (compute) command index to wait on.
+	 *
+	 * See @vdm_barrier, and replace VDM/render with CDM/compute.
+	 */
+	__u16 cdm_barrier;
+};
+
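A userspace sketch of a flat command buffer holding a single compute command; not part of the patch. The packed layout (header immediately followed by its payload) follows the description above; the empty payload fields are placeholders.

struct example_cmdbuf {
	struct drm_asahi_cmd_header hdr;
	struct drm_asahi_cmd_compute compute;
};

static void example_fill_cmdbuf(struct example_cmdbuf *buf)
{
	buf->hdr = (struct drm_asahi_cmd_header){
		.cmd_type    = DRM_ASAHI_CMD_COMPUTE,
		.size        = sizeof(buf->compute),	/* payload size, header excluded */
		.vdm_barrier = DRM_ASAHI_BARRIER_NONE,	/* ignore the render subqueue */
		.cdm_barrier = 0,	/* wait on compute work from earlier submits */
	};
	/* buf->compute (control stream, helper program, ...) filled in elsewhere */
}
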
+/**
+ * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT
+ */
+struct drm_asahi_submit {
+	/**
+	 * @syncs: An optional pointer to an array of drm_asahi_sync. The first
+	 * @in_sync_count elements are in-syncs, then the remaining
+	 * @out_sync_count elements are out-syncs. Using a single array with
+	 * explicit partitioning simplifies handling.
+	 */
+	__u64 syncs;
+
+	/**
+	 * @cmdbuf: Pointer to the command buffer to submit.
+	 *
+	 * This is a flat command buffer. By design, it contains no CPU
+	 * pointers, which makes it suitable for a virtgpu wire protocol without
+	 * requiring any serializing/deserializing step.
+	 *
+	 * It consists of a series of commands. Each command begins with a
+	 * fixed-size @drm_asahi_cmd_header header and is followed by a
+	 * variable-length payload according to the type and size in the header.
+	 *
+	 * The combined count of "real" hardware commands must be nonzero and at
+	 * most drm_asahi_params_global::max_commands_per_submission.
+	 */
+	__u64 cmdbuf;
+
+	/** @flags: Flags for command submission (MBZ) */
+	__u32 flags;
+
+	/** @queue_id: The queue ID to be submitted to */
+	__u32 queue_id;
+
+	/**
+	 * @in_sync_count: Number of sync objects to wait on before starting
+	 * this job.
+	 */
+	__u32 in_sync_count;
+
+	/**
+	 * @out_sync_count: Number of sync objects to signal upon completion of
+	 * this job.
+	 */
+	__u32 out_sync_count;
+
+	/** @cmdbuf_size: Command buffer size in bytes */
+	__u32 cmdbuf_size;
+
+	/** @pad: MBZ */
+	__u32 pad;
+};
+
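A userspace sketch of the single partitioned syncs array; not part of the patch. It assumes one binary in-syncobj and one binary out-syncobj already created by the caller.

static int example_submit(int fd, __u32 queue_id, const void *cmdbuf,
			  __u32 cmdbuf_size, __u32 in_handle, __u32 out_handle)
{
	struct drm_asahi_sync syncs[2] = {
		{ .sync_type = DRM_ASAHI_SYNC_SYNCOBJ, .handle = in_handle },
		{ .sync_type = DRM_ASAHI_SYNC_SYNCOBJ, .handle = out_handle },
	};
	struct drm_asahi_submit submit = {
		.syncs          = (__u64)(uintptr_t)syncs,
		.cmdbuf         = (__u64)(uintptr_t)cmdbuf,
		.cmdbuf_size    = cmdbuf_size,
		.queue_id       = queue_id,
		.in_sync_count  = 1,	/* first element of the syncs array */
		.out_sync_count = 1,	/* remaining elements */
	};

	return drmIoctl(fd, DRM_IOCTL_ASAHI_SUBMIT, &submit) ? -errno : 0;
}
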
+/**
+ * struct drm_asahi_attachment - Describe an "attachment".
+ *
+ * Attachments are any memory written by shaders, notably including render
+ * target attachments written by the end-of-tile program. This is purely a hint
+ * about the accessed memory regions. Specifying attachments is optional, which
+ * is fortunate as they cannot be described precisely with bindless access
+ * anyway. Where possible, userspace should still supply these hints, which are
+ * forwarded to the firmware.
+ *
+ * This struct is implicitly sized and therefore is not extensible.
+ */
+struct drm_asahi_attachment {
+	/** @pointer: Base address of the attachment */
+	__u64 pointer;
+
+	/** @size: Size of the attachment in bytes */
+	__u64 size;
+
+	/** @pad: MBZ */
+	__u32 pad;
+
+	/** @flags: MBZ */
+	__u32 flags;
+};
+
+enum drm_asahi_render_flags {
+	/**
+	 * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch
+	 * memory.
+	 */
+	DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0),
+
+	/**
+	 * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles.
+	 * This must be set when clearing render targets.
+	 */
+	DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1),
+
+	/**
+	 * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single
+	 * cluster (on multi-cluster GPUs)
+	 *
+	 * This harms performance but can workaround certain sync/coherency
+	 * bugs, and therefore is useful for debugging.
+	 */
+	DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2),
+
+	/**
+	 * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula.
+	 *
+	 * Graphics specifications contain two alternate formulas for depth
+	 * bias, a float formula used with floating-point depth buffers and an
+	 * integer formula used with unorm depth buffers. This flag specifies
+	 * that the integer formula should be used. If omitted, the float
+	 * formula is used instead.
+	 *
+	 * This corresponds to bit 18 of the relevant hardware control register,
+	 * so we match that here for efficiency.
+	 */
+	DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18),
+};
+
+/**
+ * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer.
+ *
+ * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit.
+ * There are three hardware registers for each field respectively for loads,
+ * stores, and partial renders. In practice, it makes sense to set all three to
+ * the same values, except in exceptional cases not yet implemented in
+ * userspace, so we do not duplicate them here for simplicity/efficiency.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_zls_buffer {
+	/** @base: Base address of the buffer */
+	__u64 base;
+
+	/**
+	 * @comp_base: If the load buffer is compressed, address of the
+	 * compression metadata section.
+	 */
+	__u64 comp_base;
+
+	/**
+	 * @stride: If layered rendering is enabled, the number of bytes
+	 * between each layer of the buffer.
+	 */
+	__u32 stride;
+
+	/**
+	 * @comp_stride: If layered rendering is enabled, the number of bytes
+	 * between each layer of the compression metadata.
+	 */
+	__u32 comp_stride;
+};
+
+/**
+ * struct drm_asahi_timestamp - Describe a timestamp write.
+ *
+ * The firmware can optionally write the GPU timestamp at render pass
+ * granularities, but it needs to be mapped specially via
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to
+ * write as a handle-offset pair, rather than a GPU address like normal.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamp {
+	/**
+	 * @handle: Handle of the timestamp buffer, or 0 to skip this
+	 * timestamp. If nonzero, this must equal the value returned in
+	 * drm_asahi_gem_bind_object::object_handle.
+	 */
+	__u32 handle;
+
+	/** @offset: Offset to write into the timestamp buffer */
+	__u32 offset;
+};
+
+/**
+ * struct drm_asahi_timestamps - Describe timestamp writes.
+ *
+ * Each operation that can be timestamped, can be timestamped at the start and
+ * end. Therefore, drm_asahi_timestamp structs always come in pairs, bundled
+ * together into drm_asahi_timestamps.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamps {
+	/** @start: Timestamp recorded at the start of the operation */
+	struct drm_asahi_timestamp start;
+
+	/** @end: Timestamp recorded at the end of the operation */
+	struct drm_asahi_timestamp end;
+};
+
+/**
+ * struct drm_asahi_helper_program - Describe helper program configuration.
+ *
+ * The helper program is a compute-like kernel required for various hardware
+ * functionality. Its most important role is dynamically allocating
+ * scratch/stack memory for individual subgroups, by partitioning a static
+ * allocation shared for the whole device. It is supplied by userspace via
+ * drm_asahi_helper_program and internally dispatched by the hardware as needed.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_helper_program {
+	/**
+	 * @binary: USC address to the helper program binary. This is a tagged
+	 * pointer with configuration in the bottom bits.
+	 */
+	__u32 binary;
+
+	/** @cfg: Additional configuration bits for the helper program. */
+	__u32 cfg;
+
+	/**
+	 * @data: Data passed to the helper program. This value is not
+	 * interpreted by the kernel, firmware, or hardware in any way. It is
+	 * simply a sideband for userspace, set with the submit ioctl and read
+	 * via special registers inside the helper program.
+	 *
+	 * In practice, userspace will pass a 64-bit GPU VA here pointing to the
+	 * actual arguments, which presumably don't fit in 64-bits.
+	 */
+	__u64 data;
+};
+
+/**
+ * struct drm_asahi_bg_eot - Describe a background or end-of-tile program.
+ *
+ * The background and end-of-tile programs are dispatched by the hardware at the
+ * beginning and end of rendering. As the hardware "tilebuffer" is simply local
+ * memory, these programs are necessary to implement API-level render targets.
+ * The fragment-like background program is responsible for loading either the
+ * clear colour or the existing render target contents, while the compute-like
+ * end-of-tile program stores the tilebuffer contents to memory.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_bg_eot {
+	/**
+	 * @usc: USC address of the hardware USC words binding resources
+	 * (including images and uniforms) and the program itself. Note this is
+	 * an additional layer of indirection compared to the helper program,
+	 * avoiding the need for a sideband for data. This is a tagged pointer
+	 * with additional configuration in the bottom bits.
+	 */
+	__u32 usc;
+
+	/**
+	 * @rsrc_spec: Resource specifier for the program. This is a packed
+	 * hardware data structure describing the required number of registers,
+	 * uniforms, bound textures, and bound samplers.
+	 */
+	__u32 rsrc_spec;
+};
+
+/**
+ * struct drm_asahi_cmd_render - Command to submit 3D
+ *
+ * This command submits a single render pass. The hardware control stream may
+ * include many draws and subpasses, but within the command, the framebuffer
+ * dimensions and attachments are fixed.
+ *
+ * The hardware requires the firmware to set a large number of Control Registers
+ * setting up state at render pass granularity before each command rendering 3D.
+ * The firmware bundles this state into data structures. Unfortunately, we
+ * cannot expose any of that directly to userspace, because the
+ * kernel-firmware ABI is not stable. Although we can guarantee the firmware
+ * updates in tandem with the kernel, we cannot break old userspace when
+ * upgrading the firmware and kernel. Therefore, we need to abstract well the
+ * data structures to avoid tying our hands with future firmwares.
+ *
+ * The bulk of drm_asahi_cmd_render therefore consists of values of hardware
+ * control registers, marshalled via the firmware interface.
+ *
+ * The framebuffer/tilebuffer dimensions are also specified here. In addition to
+ * being passed to the firmware/hardware, the kernel requires these dimensions
+ * to calculate various essential tiling-related data structures. It is
+ * unfortunate that our submits are heavier than on vendors with saner
+ * hardware-software interfaces. The upshot is that all of this information
+ * is readily available to userspace with all current APIs.
+ *
+ * This looks odd, but it is not overly burdensome and it ensures we can
+ * remain compatible with old userspace.
+ */
+struct drm_asahi_cmd_render {
+	/** @flags: Combination of drm_asahi_render_flags flags. */
+	__u32 flags;
+
+	/**
+	 * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the
+	 * depth/stencil width/height, which may differ from the framebuffer
+	 * width/height.
+	 */
+	__u32 isp_zls_pixels;
+
+	/**
+	 * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU
+	 * address to the beginning of the VDM control stream.
+	 */
+	__u64 vdm_ctrl_stream_base;
+
+	/** @vertex_helper: Helper program used for the vertex shader */
+	struct drm_asahi_helper_program vertex_helper;
+
+	/** @fragment_helper: Helper program used for the fragment shader */
+	struct drm_asahi_helper_program fragment_helper;
+
+	/**
+	 * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an
+	 * array of scissor descriptors indexed in the render pass.
+	 */
+	__u64 isp_scissor_base;
+
+	/**
+	 * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an
+	 * array of depth bias values indexed in the render pass.
+	 */
+	__u64 isp_dbias_base;
+
+	/**
+	 * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an
+	 * array of occlusion query results written by the render pass.
+	 */
+	__u64 isp_oclqry_base;
+
+	/** @depth: Depth buffer */
+	struct drm_asahi_zls_buffer depth;
+
+	/** @stencil: Stencil buffer */
+	struct drm_asahi_zls_buffer stencil;
+
+	/** @zls_ctrl: ZLS_CTRL register value */
+	__u64 zls_ctrl;
+
+	/** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */
+	__u64 ppp_multisamplectl;
+
+	/**
+	 * @sampler_heap: Base address of the sampler heap. This heap is used
+	 * for both vertex shaders and fragment shaders. The registers are
+	 * per-stage, but there is no known use case for separate heaps.
+	 */
+	__u64 sampler_heap;
+
+	/** @ppp_ctrl: PPP_CTRL register value */
+	__u32 ppp_ctrl;
+
+	/** @width_px: Framebuffer width in pixels */
+	__u16 width_px;
+
+	/** @height_px: Framebuffer height in pixels */
+	__u16 height_px;
+
+	/** @layers: Number of layers in the framebuffer */
+	__u16 layers;
+
+	/** @sampler_count: Number of samplers in the sampler heap. */
+	__u16 sampler_count;
+
+	/** @utile_width_px: Width of a logical tilebuffer tile in pixels */
+	__u8 utile_width_px;
+
+	/** @utile_height_px: Height of a logical tilebuffer tile in pixels */
+	__u8 utile_height_px;
+
+	/** @samples: # of samples in the framebuffer. Must be 1, 2, or 4. */
+	__u8 samples;
+
+	/** @sample_size_B: # of bytes in the tilebuffer required per sample. */
+	__u8 sample_size_B;
+
+	/**
+	 * @isp_merge_upper_x: 32-bit float used in the hardware triangle
+	 * merging. Calculate as: tan(60 deg) * width.
+	 *
+	 * Making these values UAPI avoids requiring floating-point calculations
+	 * in the kernel in the hot path.
+	 */
+	__u32 isp_merge_upper_x;
+
+	/**
+	 * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height.
+	 * See @isp_merge_upper_x.
+	 */
+	__u32 isp_merge_upper_y;
+
+	/** @bg: Background program run for each tile at the start */
+	struct drm_asahi_bg_eot bg;
+
+	/** @eot: End-of-tile program run for each tile at the end */
+	struct drm_asahi_bg_eot eot;
+
+	/**
+	 * @partial_bg: Background program run at the start of each tile when
+	 * resuming the render pass during a partial render.
+	 */
+	struct drm_asahi_bg_eot partial_bg;
+
+	/**
+	 * @partial_eot: End-of-tile program run at the end of each tile when
+	 * pausing the render pass during a partial render.
+	 */
+	struct drm_asahi_bg_eot partial_eot;
+
+	/**
+	 * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth
+	 * buffer clear value, encoded in the depth buffer's format: either a
+	 * 32-bit float or a 16-bit unorm (with upper bits zeroed).
+	 */
+	__u32 isp_bgobjdepth;
+
+	/**
+	 * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8 bits
+	 * contain the stencil buffer clear value.
+	 */
+	__u32 isp_bgobjvals;
+
+	/** @ts_vtx: Timestamps for the vertex portion of the render */
+	struct drm_asahi_timestamps ts_vtx;
+
+	/** @ts_frag: Timestamps for the fragment portion of the render */
+	struct drm_asahi_timestamps ts_frag;
+};
+
+/**
+ * struct drm_asahi_cmd_compute - Command to submit compute
+ *
+ * This command submits a control stream consisting of compute dispatches. There
+ * is essentially no limit on how many compute dispatches may be included in a
+ * single compute command, although timestamps are at command granularity.
+ */
+struct drm_asahi_cmd_compute {
+	/** @flags: MBZ */
+	__u32 flags;
+
+	/** @sampler_count: Number of samplers in the sampler heap. */
+	__u32 sampler_count;
+
+	/**
+	 * @cdm_ctrl_stream_base: CDM_CTRL_STREAM_BASE register value. GPU
+	 * address to the beginning of the CDM control stream.
+	 */
+	__u64 cdm_ctrl_stream_base;
+
+	/**
+	 * @cdm_ctrl_stream_end: GPU base address to the end of the hardware
+	 * control stream. Note this only considers the first contiguous segment
+	 * of the control stream, as the stream might jump elsewhere.
+	 */
+	__u64 cdm_ctrl_stream_end;
+
+	/** @sampler_heap: Base address of the sampler heap. */
+	__u64 sampler_heap;
+
+	/** @helper: Helper program used for this compute command */
+	struct drm_asahi_helper_program helper;
+
+	/** @ts: Timestamps for the compute command */
+	struct drm_asahi_timestamps ts;
+};
+
+/**
+ * struct drm_asahi_get_time - Arguments passed to DRM_IOCTL_ASAHI_GET_TIME
+ */
+struct drm_asahi_get_time {
+	/** @flags: MBZ. */
+	__u64 flags;
+
+	/** @gpu_timestamp: On return, the GPU timestamp in nanoseconds. */
+	__u64 gpu_timestamp;
+};
+
+/**
+ * DRM_IOCTL_ASAHI() - Build an Asahi IOCTL number
+ * @__access: Access type. Must be R, W or RW.
+ * @__id: One of the DRM_ASAHI_xxx id.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_ASAHI_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_ASAHI(__access, __id, __type) \
+	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_ASAHI_ ## __id, \
+			   struct drm_asahi_ ## __type)
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+	DRM_IOCTL_ASAHI_GET_PARAMS       = DRM_IOCTL_ASAHI(W, GET_PARAMS, get_params),
+	DRM_IOCTL_ASAHI_GET_TIME         = DRM_IOCTL_ASAHI(WR, GET_TIME, get_time),
+	DRM_IOCTL_ASAHI_VM_CREATE        = DRM_IOCTL_ASAHI(WR, VM_CREATE, vm_create),
+	DRM_IOCTL_ASAHI_VM_DESTROY       = DRM_IOCTL_ASAHI(W, VM_DESTROY, vm_destroy),
+	DRM_IOCTL_ASAHI_VM_BIND          = DRM_IOCTL_ASAHI(W, VM_BIND, vm_bind),
+	DRM_IOCTL_ASAHI_GEM_CREATE       = DRM_IOCTL_ASAHI(WR, GEM_CREATE, gem_create),
+	DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET  = DRM_IOCTL_ASAHI(WR, GEM_MMAP_OFFSET, gem_mmap_offset),
+	DRM_IOCTL_ASAHI_GEM_BIND_OBJECT  = DRM_IOCTL_ASAHI(WR, GEM_BIND_OBJECT, gem_bind_object),
+	DRM_IOCTL_ASAHI_QUEUE_CREATE     = DRM_IOCTL_ASAHI(WR, QUEUE_CREATE, queue_create),
+	DRM_IOCTL_ASAHI_QUEUE_DESTROY    = DRM_IOCTL_ASAHI(W, QUEUE_DESTROY, queue_destroy),
+	DRM_IOCTL_ASAHI_SUBMIT           = DRM_IOCTL_ASAHI(W, SUBMIT, submit),
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _ASAHI_DRM_H_ */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 5c6080680cb27b..ce97c28d347d5e 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -353,4 +353,9 @@ struct prctl_mm_map {
  */
 #define PR_LOCK_SHADOW_STACK_STATUS      76
 
+#define PR_GET_MEM_MODEL	0x6d4d444c
+#define PR_SET_MEM_MODEL	0x4d4d444c
+# define PR_SET_MEM_MODEL_DEFAULT	0
+# define PR_SET_MEM_MODEL_TSO		1
+
 #endif /* _LINUX_PRCTL_H */
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 20f9ef58d3d069..a60e2b9bd2dece 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -119,7 +119,7 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
-#define AVG_LOCKDEP_CHAIN_DEPTH		5
+#define AVG_LOCKDEP_CHAIN_DEPTH		10
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS * AVG_LOCKDEP_CHAIN_DEPTH)
 
 extern struct lock_chain lock_chains[];
diff --git a/kernel/sys.c b/kernel/sys.c
index cb366ff8703afd..23aba62674cd50 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -45,6 +45,7 @@
 #include <linux/version.h>
 #include <linux/ctype.h>
 #include <linux/syscall_user_dispatch.h>
+#include <linux/memory_ordering_model.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2466,6 +2467,16 @@ static int prctl_get_auxv(void __user *addr, unsigned long len)
 	return sizeof(mm->saved_auxv);
 }
 
+int __weak arch_prctl_mem_model_get(struct task_struct *t)
+{
+	return -EINVAL;
+}
+
+int __weak arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
+{
+	return -EINVAL;
+}
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
@@ -2811,6 +2822,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 			return -EINVAL;
 		error = arch_lock_shadow_stack_status(me, arg2);
 		break;
+	case PR_GET_MEM_MODEL:
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_mem_model_get(me);
+		break;
+	case PR_SET_MEM_MODEL:
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_mem_model_set(me, arg2);
+		break;
 	default:
 		trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
 		error = -EINVAL;
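
A userspace sketch of the new prctl options wired up in the hunk above; not part of the patch. Whether PR_SET_MEM_MODEL_TSO is accepted is entirely up to the architecture hook, so the fallback path is the expected outcome on most systems.

#include <sys/prctl.h>

static int example_request_tso(void)
{
	/* arg3..arg5 must be zero, as enforced by the prctl handler above */
	if (prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_TSO, 0, 0, 0) == 0)
		return 1;	/* stronger ordering is now in effect */

	/* EINVAL: no architecture support; keep the default memory model */
	return 0;
}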
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35796c290ca351..14704a40e64820 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3289,6 +3289,14 @@ config RUST_KERNEL_DOCTESTS
 
 	  If unsure, say N.
 
+config RUST_EXTRA_LOCKDEP
+	bool "Extra lockdep checking"
+	depends on RUST && PROVE_LOCKING
+	help
+	  Enable additional lockdep integration with certain Rust types.
+
+	  If unsure, say N.
+
 endmenu # "Rust"
 
 endmenu # Kernel hacking
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a8ac4c4fffcf27..ed33bbaccda668 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1781,27 +1781,50 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc,
 	char output[sizeof("0123 little-endian (0x01234567)")];
 	char *p = output;
 	unsigned int i;
+	bool pix_fmt = false;
 	u32 orig, val;
 
-	if (fmt[1] != 'c' || fmt[2] != 'c')
+	if (fmt[1] != 'c')
 		return error_string(buf, end, "(%p4?)", spec);
 
 	if (check_pointer(&buf, end, fourcc, spec))
 		return buf;
 
 	orig = get_unaligned(fourcc);
-	val = orig & ~BIT(31);
+	switch (fmt[2]) {
+	case 'h':
+		val = orig;
+		break;
+	case 'r':
+		val = orig = swab32(orig);
+		break;
+	case 'l':
+		val = orig = le32_to_cpu(orig);
+		break;
+	case 'b':
+		val = orig = be32_to_cpu(orig);
+		break;
+	case 'c':
+		/* Pixel formats are printed LSB-first */
+		val = swab32(orig & ~BIT(31));
+		pix_fmt = true;
+		break;
+	default:
+		return error_string(buf, end, "(%p4?)", spec);
+	}
 
 	for (i = 0; i < sizeof(u32); i++) {
-		unsigned char c = val >> (i * 8);
+		unsigned char c = val >> ((3 - i) * 8);
 
 		/* Print non-control ASCII characters as-is, dot otherwise */
 		*p++ = isascii(c) && isprint(c) ? c : '.';
 	}
 
-	*p++ = ' ';
-	strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
-	p += strlen(p);
+	if (pix_fmt) {
+		*p++ = ' ';
+		strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
+		p += strlen(p);
+	}
 
 	*p++ = ' ';
 	*p++ = '(';
@@ -2284,9 +2307,6 @@ int __init no_hash_pointers_enable(char *str)
 }
 early_param("no_hash_pointers", no_hash_pointers_enable);
 
-/* Used for Rust formatting ('%pA'). */
-char *rust_fmt_argument(char *buf, char *end, const void *ptr);
-
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
  * by an extra set of alphanumeric characters that are extended format
diff --git a/localversion.05-asahi b/localversion.05-asahi
new file mode 100644
index 00000000000000..6742ba757f12ac
--- /dev/null
+++ b/localversion.05-asahi
@@ -0,0 +1 @@
+-asahi
diff --git a/rust/Makefile b/rust/Makefile
index a84c6d4b6ca21d..18b4fc821d661c 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -386,7 +386,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
 		--emit=dep-info=$(depfile) --emit=obj=$@ \
 		--emit=metadata=$(dir $@)$(patsubst %.o,lib%.rmeta,$(notdir $@)) \
 		--crate-type rlib -L$(objtree)/$(obj) \
-		--crate-name $(patsubst %.o,%,$(notdir $@)) $< \
+		--crate-name $(patsubst %.o,%,$(notdir $@)) $(abspath $<) \
 		--sysroot=/dev/null \
 	$(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) \
 	$(cmd_objtool)
diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
index 0f96af8b9a7fee..3f429b8750ff00 100644
--- a/rust/bindgen_parameters
+++ b/rust/bindgen_parameters
@@ -12,9 +12,15 @@
 
 # Packed type cannot transitively contain a `#[repr(align)]` type.
 --opaque-type alt_instr
+--opaque-type snd_codec_options
+--opaque-type snd_codec
+--opaque-type snd_compr_params
 --opaque-type x86_msi_data
 --opaque-type x86_msi_addr_lo
 
+# Packed types cannot have larger alignment than the maximal natural alignment of their members
+--opaque-type snd_dec_flac
+
 # `try` is a reserved keyword since Rust 2018; solved in `bindgen` v0.59.2,
 # commit 2aed6b021680 ("context: Escape the try keyword properly").
 --opaque-type kunit_try_catch
@@ -34,3 +40,7 @@
 # We use const helpers to aid bindgen, to avoid conflicts when constants are
 # recognized, block generation of the non-helper constants.
 --blocklist-item ARCH_SLAB_MINALIGN
+# CONFIG_LIST_HARDENED triggers "Invalid or unknown abi 14" for these
+--blocklist-function __list_valid_slowpath
+--blocklist-function __list_add_valid_or_report
+--blocklist-function __list_del_entry_valid_or_report
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index f46cf3bb70695b..1eaec4ea66fbf1 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -6,22 +6,47 @@
  * Sorted alphabetically.
  */
 
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gpuvm.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/gpu_scheduler.h>
 #include <kunit/test.h>
 #include <linux/blk-mq.h>
 #include <linux/blk_types.h>
 #include <linux/blkdev.h>
 #include <linux/cred.h>
 #include <linux/device/faux.h>
+#include <linux/delay.h>
+#include <linux/devcoredump.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-resv.h>
 #include <linux/errname.h>
 #include <linux/ethtool.h>
 #include <linux/file.h>
 #include <linux/firmware.h>
 #include <linux/fs.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/ioport.h>
+#include <linux/iosys-map.h>
 #include <linux/jiffies.h>
 #include <linux/jump_label.h>
+#include <linux/ktime.h>
+#include <linux/lockdep.h>
 #include <linux/mdio.h>
 #include <linux/miscdevice.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/pci.h>
 #include <linux/phy.h>
 #include <linux/pid_namespace.h>
@@ -29,14 +54,27 @@
 #include <linux/poll.h>
 #include <linux/property.h>
 #include <linux/refcount.h>
+#include <linux/siphash.h>
 #include <linux/sched.h>
 #include <linux/security.h>
 #include <linux/slab.h>
+#include <linux/soc/apple/mailbox.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/timekeeping.h>
 #include <linux/tracepoint.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <linux/xarray.h>
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
 #include <trace/events/rust_sample.h>
 
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+// Used by `#[export]` in `drivers/gpu/drm/drm_panic_qr.rs`.
+#include <drm/drm_panic.h>
+#endif
+
 /* `bindgen` gets confused at certain things. */
 const size_t RUST_CONST_HELPER_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
 const size_t RUST_CONST_HELPER_PAGE_SIZE = PAGE_SIZE;
@@ -48,3 +86,29 @@ const gfp_t RUST_CONST_HELPER___GFP_ZERO = __GFP_ZERO;
 const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
 const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
 const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
+const fop_flags_t RUST_CONST_HELPER_FOP_UNSIGNED_OFFSET = FOP_UNSIGNED_OFFSET;
+
+const uint32_t BINDINGS_DRM_EXEC_INTERRUPTIBLE_WAIT = DRM_EXEC_INTERRUPTIBLE_WAIT;
+
+const gfp_t BINDINGS_XA_FLAGS_LOCK_IRQ = XA_FLAGS_LOCK_IRQ;
+const gfp_t BINDINGS_XA_FLAGS_LOCK_BH = XA_FLAGS_LOCK_BH;
+const gfp_t BINDINGS_XA_FLAGS_TRACK_FREE = XA_FLAGS_TRACK_FREE;
+const gfp_t BINDINGS_XA_FLAGS_ZERO_BUSY = XA_FLAGS_ZERO_BUSY;
+const gfp_t BINDINGS_XA_FLAGS_ALLOC_WRAPPED = XA_FLAGS_ALLOC_WRAPPED;
+const gfp_t BINDINGS_XA_FLAGS_ACCOUNT = XA_FLAGS_ACCOUNT;
+const gfp_t BINDINGS_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
+const gfp_t BINDINGS_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
+
+const xa_mark_t BINDINGS_XA_MARK_0 = XA_MARK_0;
+const xa_mark_t BINDINGS_XA_MARK_1 = XA_MARK_1;
+const xa_mark_t BINDINGS_XA_MARK_2 = XA_MARK_2;
+const xa_mark_t BINDINGS_XA_PRESENT = XA_PRESENT;
+const xa_mark_t BINDINGS_XA_MARK_MAX = XA_MARK_MAX;
+const xa_mark_t BINDINGS_XA_FREE_MARK = XA_FREE_MARK;
+
+const u64 BINDINGS_SNDRV_PCM_FMTBIT_FLOAT_LE = SNDRV_PCM_FMTBIT_FLOAT_LE;
+
+const u32 BINDINGS_IIO_CHAN_INFO_RAW = IIO_CHAN_INFO_RAW;
+const u32 BINDINGS_IIO_CHAN_INFO_PROCESSED = IIO_CHAN_INFO_PROCESSED;
+const u32 BINDINGS_IIO_ANGL = IIO_ANGL;
+const u32 BINDINGS_IIO_LIGHT = IIO_LIGHT;
diff --git a/rust/helpers/device.c b/rust/helpers/device.c
index b2135c6686b027..8d0b0fddedfd6e 100644
--- a/rust/helpers/device.c
+++ b/rust/helpers/device.c
@@ -8,3 +8,8 @@ int rust_helper_devm_add_action(struct device *dev,
 {
 	return devm_add_action(dev, action, data);
 }
+
+void *rust_helper_dev_get_drvdata(struct device *dev)
+{
+	return dev_get_drvdata(dev);
+}
diff --git a/rust/helpers/dma-fence.c b/rust/helpers/dma-fence.c
new file mode 100644
index 00000000000000..6491016262934b
--- /dev/null
+++ b/rust/helpers/dma-fence.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+
+void rust_helper_dma_fence_get(struct dma_fence *fence)
+{
+	dma_fence_get(fence);
+}
+
+void rust_helper_dma_fence_put(struct dma_fence *fence)
+{
+	dma_fence_put(fence);
+}
+
+struct dma_fence_chain *rust_helper_dma_fence_chain_alloc(void)
+{
+	return dma_fence_chain_alloc();
+}
+
+void rust_helper_dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+	dma_fence_chain_free(chain);
+}
+
+void rust_helper_dma_fence_set_error(struct dma_fence *fence, int error)
+{
+	dma_fence_set_error(fence, error);
+}
+
+#endif
diff --git a/rust/helpers/dma-mapping.c b/rust/helpers/dma-mapping.c
new file mode 100644
index 00000000000000..0d795b1b0738dc
--- /dev/null
+++ b/rust/helpers/dma-mapping.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-mapping.h>
+
+int rust_helper_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_mapping_error(dev, dma_addr);
+}
diff --git a/rust/helpers/dma-resv.c b/rust/helpers/dma-resv.c
new file mode 100644
index 00000000000000..05501cb814513b
--- /dev/null
+++ b/rust/helpers/dma-resv.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-resv.h>
+
+int rust_helper_dma_resv_lock(struct dma_resv *obj, struct ww_acquire_ctx *ctx)
+{
+	return dma_resv_lock(obj, ctx);
+}
+
+void rust_helper_dma_resv_unlock(struct dma_resv *obj)
+{
+	dma_resv_unlock(obj);
+}
diff --git a/rust/helpers/dma.c b/rust/helpers/dma.c
new file mode 100644
index 00000000000000..8eb482386f934a
--- /dev/null
+++ b/rust/helpers/dma.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-mapping.h>
+
+int rust_helper_dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	return dma_set_mask_and_coherent(dev, mask);
+}
diff --git a/rust/helpers/drm.c b/rust/helpers/drm.c
new file mode 100644
index 00000000000000..032400032a6eb1
--- /dev/null
+++ b/rust/helpers/drm.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_vma_manager.h>
+
+void rust_helper_drm_gem_object_get(struct drm_gem_object *obj)
+{
+	drm_gem_object_get(obj);
+}
+
+void rust_helper_drm_gem_object_put(struct drm_gem_object *obj)
+{
+	drm_gem_object_put(obj);
+}
+
+__u64 rust_helper_drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+	return drm_vma_node_offset_addr(node);
+}
+
+#ifdef CONFIG_DRM_GEM_SHMEM_HELPER
+void rust_helper_drm_gem_shmem_object_free(struct drm_gem_object *obj)
+{
+	drm_gem_shmem_object_free(obj);
+}
+
+void rust_helper_drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
+                                                  const struct drm_gem_object *obj)
+{
+	drm_gem_shmem_object_print_info(p, indent, obj);
+}
+
+int rust_helper_drm_gem_shmem_object_pin(struct drm_gem_object *obj)
+{
+	return drm_gem_shmem_object_pin(obj);
+}
+
+void rust_helper_drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
+{
+	drm_gem_shmem_object_unpin(obj);
+}
+
+struct sg_table *rust_helper_drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
+{
+	return drm_gem_shmem_object_get_sg_table(obj);
+}
+
+int rust_helper_drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
+                                           struct iosys_map *map)
+{
+	return drm_gem_shmem_object_vmap(obj, map);
+}
+
+void rust_helper_drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
+                                              struct iosys_map *map)
+{
+	drm_gem_shmem_object_vunmap(obj, map);
+}
+
+int rust_helper_drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	return drm_gem_shmem_object_mmap(obj, vma);
+}
+#endif
diff --git a/rust/helpers/drm_gpuvm.c b/rust/helpers/drm_gpuvm.c
new file mode 100644
index 00000000000000..f4f4ea2c4ec897
--- /dev/null
+++ b/rust/helpers/drm_gpuvm.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_gpuvm.h>
+
+#ifdef CONFIG_DRM
+#ifdef CONFIG_DRM_GPUVM
+
+struct drm_gpuvm *rust_helper_drm_gpuvm_get(struct drm_gpuvm *obj)
+{
+	return drm_gpuvm_get(obj);
+}
+
+void rust_helper_drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+	drm_gpuvm_exec_unlock(vm_exec);
+}
+
+void rust_helper_drm_gpuva_init_from_op(struct drm_gpuva *va, struct drm_gpuva_op_map *op)
+{
+	drm_gpuva_init_from_op(va, op);
+}
+
+struct drm_gpuvm_bo *rust_helper_drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+	return drm_gpuvm_bo_get(vm_bo);
+}
+
+bool rust_helper_drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
+{
+	return drm_gpuvm_is_extobj(gpuvm, obj);
+}
+
+#endif
+#endif
diff --git a/rust/helpers/drm_syncobj.c b/rust/helpers/drm_syncobj.c
new file mode 100644
index 00000000000000..9e14c989edfd72
--- /dev/null
+++ b/rust/helpers/drm_syncobj.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_syncobj.h>
+
+#ifdef CONFIG_DRM
+
+void rust_helper_drm_syncobj_get(struct drm_syncobj *obj)
+{
+	drm_syncobj_get(obj);
+}
+
+void rust_helper_drm_syncobj_put(struct drm_syncobj *obj)
+{
+	drm_syncobj_put(obj);
+}
+
+struct dma_fence *rust_helper_drm_syncobj_fence_get(struct drm_syncobj *syncobj)
+{
+	return drm_syncobj_fence_get(syncobj);
+}
+
+#endif
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 0640b7e115be15..90ec9765fffe63 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -13,12 +13,22 @@
 #include "build_bug.c"
 #include "cred.c"
 #include "device.c"
+#include "dma.c"
+#include "dma-fence.c"
+#include "dma-mapping.c"
+#include "dma-resv.c"
+#include "drm.c"
+#include "drm_gpuvm.c"
+#include "drm_syncobj.c"
 #include "err.c"
 #include "fs.c"
 #include "io.c"
+#include "jiffies.c"
 #include "jump_label.c"
 #include "kunit.c"
+#include "lockdep.c"
 #include "mutex.c"
+#include "of.c"
 #include "page.c"
 #include "platform.c"
 #include "pci.c"
@@ -26,12 +36,17 @@
 #include "rbtree.c"
 #include "rcu.c"
 #include "refcount.c"
+#include "scatterlist.c"
 #include "security.c"
 #include "signal.c"
+#include "siphash.c"
 #include "slab.c"
 #include "spinlock.c"
 #include "task.c"
+#include "time_namespace.c"
+#include "timekeeping.c"
 #include "uaccess.c"
 #include "vmalloc.c"
 #include "wait.c"
 #include "workqueue.c"
+#include "xarray.c"
diff --git a/rust/helpers/io.c b/rust/helpers/io.c
index 15ea187c546625..6b1b05ab977b0c 100644
--- a/rust/helpers/io.c
+++ b/rust/helpers/io.c
@@ -1,17 +1,33 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/io.h>
+#include <linux/ioport.h>
 
 void __iomem *rust_helper_ioremap(phys_addr_t offset, size_t size)
 {
 	return ioremap(offset, size);
 }
 
+void __iomem *rust_helper_ioremap_np(phys_addr_t offset, size_t size)
+{
+	return ioremap_np(offset, size);
+}
+
 void rust_helper_iounmap(void __iomem *addr)
 {
 	iounmap(addr);
 }
 
+void rust_helper_memcpy_fromio(void *to, const void __iomem *from, long count)
+{
+	memcpy_fromio(to, from, count);
+}
+
+void rust_helper_memcpy_toio(void __iomem *to, const void *from, size_t count)
+{
+	memcpy_toio(to, from, count);
+}
+
 u8 rust_helper_readb(const void __iomem *addr)
 {
 	return readb(addr);
@@ -99,3 +115,38 @@ void rust_helper_writeq_relaxed(u64 value, void __iomem *addr)
 	writeq_relaxed(value, addr);
 }
 #endif
+
+resource_size_t rust_helper_resource_size(struct resource *res)
+{
+	return resource_size(res);
+}
+
+struct resource *rust_helper_request_mem_region(resource_size_t start,
+						resource_size_t n,
+						const char *name)
+{
+	return request_mem_region(start, n, name);
+}
+
+void rust_helper_release_mem_region(resource_size_t start, resource_size_t n)
+{
+	release_mem_region(start, n);
+}
+
+struct resource *rust_helper_request_region(resource_size_t start,
+					    resource_size_t n, const char *name)
+{
+	return request_region(start, n, name);
+}
+
+struct resource *rust_helper_request_muxed_region(resource_size_t start,
+						  resource_size_t n,
+						  const char *name)
+{
+	return request_muxed_region(start, n, name);
+}
+
+void rust_helper_release_region(resource_size_t start, resource_size_t n)
+{
+	release_region(start, n);
+}
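A hedged sketch of how these io.c helpers can be reached from Rust once the usual
`rust_helper_` prefix stripping exposes them as `bindings::*`; a safe I/O or resource
abstraction elsewhere in the series would normally wrap this, so the names below are
only illustrative.

    use kernel::{bindings, c_str};

    /// # Safety
    ///
    /// `start` must be the base of an MMIO region of at least `SIZE` bytes owned by the caller.
    unsafe fn map_regs(start: bindings::resource_size_t) -> *mut core::ffi::c_void {
        const SIZE: bindings::resource_size_t = 0x1000;

        // Claim the physical range first; a NULL return means it is already busy.
        let res = unsafe { bindings::request_mem_region(start, SIZE, c_str!("demo").as_char_ptr()) };
        if res.is_null() {
            return core::ptr::null_mut();
        }
        // `ioremap_np()` gives a non-posted mapping, as required for MMIO on Apple SoCs.
        unsafe { bindings::ioremap_np(start as _, SIZE as usize) }
    }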
diff --git a/rust/helpers/jiffies.c b/rust/helpers/jiffies.c
new file mode 100644
index 00000000000000..c046d82951d882
--- /dev/null
+++ b/rust/helpers/jiffies.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/jiffies.h>
+
+unsigned long rust_helper_msecs_to_jiffies(const unsigned int m)
+{
+	return msecs_to_jiffies(m);
+}
diff --git a/rust/helpers/lockdep.c b/rust/helpers/lockdep.c
new file mode 100644
index 00000000000000..c3178001f3a5cc
--- /dev/null
+++ b/rust/helpers/lockdep.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/instruction_pointer.h>
+#include <linux/lockdep.h>
+
+void rust_helper_lock_acquire_ret(struct lockdep_map *lock, unsigned int subclass,
+				  int trylock, int read, int check,
+				  struct lockdep_map *nest_lock)
+{
+	lock_acquire(lock, subclass, trylock, read, check, nest_lock, _RET_IP_);
+}
+
+void rust_helper_lock_release_ret(struct lockdep_map *lock)
+{
+	lock_release(lock, _RET_IP_);
+}
diff --git a/rust/helpers/of.c b/rust/helpers/of.c
new file mode 100644
index 00000000000000..986484226d9938
--- /dev/null
+++ b/rust/helpers/of.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+const struct of_device_id *rust_helper_of_match_device(
+		const struct of_device_id *matches, const struct device *dev)
+{
+	return of_match_device(matches, dev);
+}
+
+#ifdef CONFIG_OF
+bool rust_helper_of_node_is_root(const struct device_node *np)
+{
+	return of_node_is_root(np);
+}
+
+bool rust_helper_of_property_present(const struct device_node *np, const char *propname)
+{
+	return of_property_present(np, propname);
+}
+#endif
+
+struct device_node *rust_helper_of_parse_phandle(const struct device_node *np,
+						 const char *phandle_name,
+						 int index)
+{
+	return of_parse_phandle(np, phandle_name, index);
+}
diff --git a/rust/helpers/page.c b/rust/helpers/page.c
index b3f2b8fbf87fc9..7a2f6c581d5268 100644
--- a/rust/helpers/page.c
+++ b/rust/helpers/page.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <asm/io.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 
@@ -17,3 +18,28 @@ void rust_helper_kunmap_local(const void *addr)
 {
 	kunmap_local(addr);
 }
+
+struct page *rust_helper_phys_to_page(phys_addr_t phys)
+{
+	return phys_to_page(phys);
+}
+
+phys_addr_t rust_helper_page_to_phys(struct page *page)
+{
+	return page_to_phys(page);
+}
+
+unsigned long rust_helper_phys_to_pfn(phys_addr_t phys)
+{
+	return __phys_to_pfn(phys);
+}
+
+struct page *rust_helper_pfn_to_page(unsigned long pfn)
+{
+	return pfn_to_page(pfn);
+}
+
+bool rust_helper_pfn_valid(unsigned long pfn)
+{
+	return pfn_valid(pfn);
+}
diff --git a/rust/helpers/scatterlist.c b/rust/helpers/scatterlist.c
new file mode 100644
index 00000000000000..cc5553b76c25f0
--- /dev/null
+++ b/rust/helpers/scatterlist.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/scatterlist.h>
+
+dma_addr_t rust_helper_sg_dma_address(const struct scatterlist *sg)
+{
+	return sg_dma_address(sg);
+}
+
+int rust_helper_sg_dma_len(const struct scatterlist *sg)
+{
+	return sg_dma_len(sg);
+}
diff --git a/rust/helpers/siphash.c b/rust/helpers/siphash.c
new file mode 100644
index 00000000000000..1eed3989953fe8
--- /dev/null
+++ b/rust/helpers/siphash.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/siphash.h>
+
+u64 rust_helper_siphash(const void *data, size_t len,
+			const siphash_key_t *key)
+{
+	return siphash(data, len, key);
+}
diff --git a/rust/helpers/time_namespace.c b/rust/helpers/time_namespace.c
new file mode 100644
index 00000000000000..9010e8efbcfe0d
--- /dev/null
+++ b/rust/helpers/time_namespace.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/time_namespace.h>
+
+void rust_helper_timens_add_monotonic(struct timespec64 *ts)
+{
+	timens_add_monotonic(ts);
+}
diff --git a/rust/helpers/timekeeping.c b/rust/helpers/timekeeping.c
new file mode 100644
index 00000000000000..6c130e845dcee0
--- /dev/null
+++ b/rust/helpers/timekeeping.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/timekeeping.h>
+
+ktime_t rust_helper_ktime_get_real(void)
+{
+	return ktime_get_real();
+}
+
+ktime_t rust_helper_ktime_get_boottime(void)
+{
+	return ktime_get_boottime();
+}
+
+ktime_t rust_helper_ktime_get_clocktai(void)
+{
+	return ktime_get_clocktai();
+}
diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c
new file mode 100644
index 00000000000000..13d50f4bdf49db
--- /dev/null
+++ b/rust/helpers/xarray.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/xarray.h>
+
+void rust_helper_xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+	xa_init_flags(xa, flags);
+}
+
+bool rust_helper_xa_empty(struct xarray *xa)
+{
+	return xa_empty(xa);
+}
+
+int rust_helper_xa_alloc(struct xarray *xa, u32 *id, void *entry, struct xa_limit limit, gfp_t gfp)
+{
+	return xa_alloc(xa, id, entry, limit, gfp);
+}
+
+void rust_helper_xa_lock(struct xarray *xa)
+{
+	xa_lock(xa);
+}
+
+void rust_helper_xa_unlock(struct xarray *xa)
+{
+	xa_unlock(xa);
+}
+
+int rust_helper_xa_err(void *entry)
+{
+	return xa_err(entry);
+}
diff --git a/rust/kernel/addr.rs b/rust/kernel/addr.rs
new file mode 100644
index 00000000000000..06aff10a033235
--- /dev/null
+++ b/rust/kernel/addr.rs
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel core address types.
+
+use bindings;
+use core::ffi;
+
+/// A physical memory address (which may be wider than the CPU pointer size).
+pub type PhysicalAddr = bindings::phys_addr_t;
+/// A DMA memory address (which may be narrower than `PhysicalAddr` on some systems).
+pub type DmaAddr = bindings::dma_addr_t;
+/// A physical resource size, typically the same width as `PhysicalAddr`.
+pub type ResourceSize = bindings::resource_size_t;
+/// A raw page frame number, not to be confused with the C `pfn_t` which also encodes flags.
+pub type Pfn = ffi::c_ulong;
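As a hedged illustration (assuming `PAGE_SHIFT` is exported through `bindings`, as
`PAGE_SIZE` already is), the aliases keep conversions such as physical address to
page frame number explicit rather than hiding them behind new types:

    use kernel::addr::{Pfn, PhysicalAddr};
    use kernel::bindings;

    /// Mirrors the C `__phys_to_pfn()` helper wrapped in rust/helpers/page.c above.
    fn phys_to_pfn(phys: PhysicalAddr) -> Pfn {
        (phys >> bindings::PAGE_SHIFT) as Pfn
    }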
diff --git a/rust/kernel/alloc.rs b/rust/kernel/alloc.rs
index fc9c9c41cd7926..1551d436220f6c 100644
--- a/rust/kernel/alloc.rs
+++ b/rust/kernel/alloc.rs
@@ -4,6 +4,7 @@
 
 #[cfg(not(any(test, testlib)))]
 pub mod allocator;
+pub mod drain;
 pub mod kbox;
 pub mod kvec;
 pub mod layout;
@@ -25,50 +26,20 @@ pub use self::kvec::KVec;
 pub use self::kvec::VVec;
 pub use self::kvec::Vec;
 
+use crate::types::declare_flags_type;
+
 /// Indicates an allocation error.
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
 pub struct AllocError;
 use core::{alloc::Layout, ptr::NonNull};
 
-/// Flags to be used when allocating memory.
-///
-/// They can be combined with the operators `|`, `&`, and `!`.
-///
-/// Values can be used from the [`flags`] module.
-#[derive(Clone, Copy, PartialEq)]
-pub struct Flags(u32);
-
-impl Flags {
-    /// Get the raw representation of this flag.
-    pub(crate) fn as_raw(self) -> u32 {
-        self.0
-    }
-
-    /// Check whether `flags` is contained in `self`.
-    pub fn contains(self, flags: Flags) -> bool {
-        (self & flags) == flags
-    }
-}
-
-impl core::ops::BitOr for Flags {
-    type Output = Self;
-    fn bitor(self, rhs: Self) -> Self::Output {
-        Self(self.0 | rhs.0)
-    }
-}
-
-impl core::ops::BitAnd for Flags {
-    type Output = Self;
-    fn bitand(self, rhs: Self) -> Self::Output {
-        Self(self.0 & rhs.0)
-    }
-}
-
-impl core::ops::Not for Flags {
-    type Output = Self;
-    fn not(self) -> Self::Output {
-        Self(!self.0)
-    }
+declare_flags_type! {
+    /// Flags to be used when allocating memory.
+    ///
+    /// They can be combined with the operators `|`, `&`, and `!`.
+    ///
+    /// Values can be used from the [`flags`] module.
+    pub struct Flags(u32);
 }
 
 /// Allocation flags.
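Assuming `declare_flags_type!` expands to the same `BitOr`/`BitAnd`/`Not` operators
and `contains()` helper that the hand-written impls above provided, callers are
unaffected by the conversion; a minimal sketch:

    use kernel::alloc::{flags, Flags};

    /// Combine allocation flags with `|`, exactly as before the macro conversion.
    fn zeroed_kernel_flags() -> Flags {
        flags::GFP_KERNEL | flags::__GFP_ZERO
    }

    /// `contains()` still answers whether a flag set includes a given flag.
    fn wants_zeroed(f: Flags) -> bool {
        f.contains(flags::__GFP_ZERO)
    }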
diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
index 439985e29fbc0e..0b2ca18a46ae4c 100644
--- a/rust/kernel/alloc/allocator.rs
+++ b/rust/kernel/alloc/allocator.rs
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+// FIXME: document the unsafe blocks in this file and remove this allow.
+#![allow(clippy::undocumented_unsafe_blocks)]
 
 //! Allocator support.
 //!
diff --git a/rust/kernel/alloc/drain.rs b/rust/kernel/alloc/drain.rs
new file mode 100644
index 00000000000000..9ac91ba8b11b9d
--- /dev/null
+++ b/rust/kernel/alloc/drain.rs
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Rust standard library vendored code.
+//!
+//! The contents of this file come from the Rust standard library, hosted in
+//! the <https://github.com/rust-lang/rust> repository, licensed under
+//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+//! see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.
+#![allow(clippy::undocumented_unsafe_blocks)]
+
+use core::fmt;
+use core::iter::FusedIterator;
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+use super::{kvec::Vec, Allocator};
+
+/// A draining iterator for `Vec<T>`.
+///
+/// This `struct` is created by [`Vec::drain`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::Drain<'_, _> = v.drain(..);
+/// ```
+// #[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<
+    'a,
+    T,
+    A: Allocator,
+    // #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+> {
+    /// Index of tail to preserve
+    pub(super) tail_start: usize,
+    /// Length of tail
+    pub(super) tail_len: usize,
+    /// Current remaining range to remove
+    pub(super) iter: slice::Iter<'a, T>,
+    pub(super) vec: NonNull<Vec<T, A>>,
+}
+
+// #[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+    }
+}
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+    /// Returns the remaining items of this iterator as a slice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut vec = vec!['a', 'b', 'c'];
+    /// let mut drain = vec.drain(..);
+    /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
+    /// let _ = drain.next().unwrap();
+    /// assert_eq!(drain.as_slice(), &['b', 'c']);
+    /// ```
+    #[must_use]
+    // #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+    pub fn as_slice(&self) -> &[T] {
+        self.iter.as_slice()
+    }
+
+    /// Keep unyielded elements in the source `Vec`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(drain_keep_rest)]
+    ///
+    /// let mut vec = vec!['a', 'b', 'c'];
+    /// let mut drain = vec.drain(..);
+    ///
+    /// assert_eq!(drain.next().unwrap(), 'a');
+    ///
+    /// // This call keeps 'b' and 'c' in the vec.
+    /// drain.keep_rest();
+    ///
+    /// // If we wouldn't call `keep_rest()`,
+    /// // `vec` would be empty.
+    /// assert_eq!(vec, ['b', 'c']);
+    /// ```
+    // #[unstable(feature = "drain_keep_rest", issue = "101122")]
+    pub fn keep_rest(self) {
+        // At this moment layout looks like this:
+        //
+        // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
+        //        ^-- start         \_________/-- unyielded_len        \____/-- self.tail_len
+        //                          ^-- unyielded_ptr                  ^-- tail
+        //
+        // Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
+        // Here we want to
+        // 1. Move [unyielded] to `start`
+        // 2. Move [tail] to a new start at `start + len(unyielded)`
+        // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
+        //    a. In case of ZST, this is the only thing we want to do
+        // 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+        let mut this = ManuallyDrop::new(self);
+
+        unsafe {
+            let source_vec = this.vec.as_mut();
+
+            let start = source_vec.len();
+            let tail = this.tail_start;
+
+            let unyielded_len = this.iter.len();
+            let unyielded_ptr = this.iter.as_slice().as_ptr();
+
+            // ZSTs have no identity, so we don't need to move them around.
+            if !T::IS_ZST {
+                let start_ptr = source_vec.as_mut_ptr().add(start);
+
+                // memmove back unyielded elements
+                if unyielded_ptr != start_ptr {
+                    let src = unyielded_ptr;
+                    let dst = start_ptr;
+
+                    ptr::copy(src, dst, unyielded_len);
+                }
+
+                // memmove back untouched tail
+                if tail != (start + unyielded_len) {
+                    let src = source_vec.as_ptr().add(tail);
+                    let dst = start_ptr.add(unyielded_len);
+                    ptr::copy(src, dst, this.tail_len);
+                }
+            }
+
+            source_vec.set_len(start + unyielded_len + this.tail_len);
+        }
+    }
+}
+
+// #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
+    fn as_ref(&self) -> &[T] {
+        self.as_slice()
+    }
+}
+
+// #[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
+// #[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
+
+// #[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        self.iter
+            .next()
+            .map(|elt| unsafe { ptr::read(elt as *const _) })
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+// #[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+    #[inline]
+    fn next_back(&mut self) -> Option<T> {
+        self.iter
+            .next_back()
+            .map(|elt| unsafe { ptr::read(elt as *const _) })
+    }
+}
+
+// #[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+    fn drop(&mut self) {
+        /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
+        struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+        impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+            fn drop(&mut self) {
+                if self.0.tail_len > 0 {
+                    unsafe {
+                        let source_vec = self.0.vec.as_mut();
+                        // memmove back untouched tail, update to new length
+                        let start = source_vec.len();
+                        let tail = self.0.tail_start;
+                        if tail != start {
+                            let src = source_vec.as_ptr().add(tail);
+                            let dst = source_vec.as_mut_ptr().add(start);
+                            ptr::copy(src, dst, self.0.tail_len);
+                        }
+                        source_vec.set_len(start + self.0.tail_len);
+                    }
+                }
+            }
+        }
+
+        let iter = mem::take(&mut self.iter);
+        let drop_len = iter.len();
+
+        let mut vec = self.vec;
+
+        if T::IS_ZST {
+            // ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
+            // this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
+            unsafe {
+                let vec = vec.as_mut();
+                let old_len = vec.len();
+                vec.set_len(old_len + drop_len + self.tail_len);
+                vec.truncate(old_len + self.tail_len);
+            }
+
+            return;
+        }
+
+        // ensure elements are moved back into their appropriate places, even when drop_in_place panics
+        let _guard = DropGuard(self);
+
+        if drop_len == 0 {
+            return;
+        }
+
+        // as_slice() must only be called when iter.len() is > 0 because
+        // it also gets touched by vec::Splice which may turn it into a dangling pointer
+        // which would make it and the vec pointer point to different allocations which would
+        // lead to invalid pointer arithmetic below.
+        let drop_ptr = iter.as_slice().as_ptr();
+
+        unsafe {
+            // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
+            // a pointer with mutable provenance is necessary. Therefore we must reconstruct
+            // it from the original vec but also avoid creating a &mut to the front since that could
+            // invalidate raw pointers to it which some unsafe code might rely on.
+            let vec_ptr = vec.as_mut().as_mut_ptr();
+            #[cfg(not(version("1.87")))]
+            let drop_offset = drop_ptr.sub_ptr(vec_ptr);
+            #[cfg(version("1.87"))]
+            let drop_offset = drop_ptr.offset_from_unsigned(vec_ptr);
+            let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
+            ptr::drop_in_place(to_drop);
+        }
+    }
+}
+
+// #[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
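The vendored doc examples above still use `std`'s `vec![]`; in-kernel callers go
through the `KVec::drain` method added in the kvec.rs hunk below. A hedged sketch:

    use kernel::prelude::*;

    fn drain_middle() -> Result {
        let mut v = KVec::new();
        for i in 0..5u32 {
            v.push(i, GFP_KERNEL)?;
        }

        // Remove the elements at indices 1..3; anything left unyielded is dropped
        // when the `Drain` goes out of scope, and the tail is then moved back down.
        let mut removed = 0;
        for _ in v.drain(1..3) {
            removed += 1;
        }
        assert_eq!(removed, 2);
        assert_eq!(v.len(), 3);
        Ok(())
    }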
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index cb4ebea3b07422..01a39ee955a6fb 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -8,8 +8,12 @@ use super::{AllocError, Allocator, Flags};
 use core::alloc::Layout;
 use core::fmt;
 use core::marker::PhantomData;
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+use core::marker::Unsize;
 use core::mem::ManuallyDrop;
 use core::mem::MaybeUninit;
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+use core::ops::CoerceUnsized;
 use core::ops::{Deref, DerefMut};
 use core::pin::Pin;
 use core::ptr::NonNull;
@@ -61,7 +65,11 @@ use crate::types::ForeignOwnable;
 /// `self.0` is always properly aligned and either points to memory allocated with `A` or, for
 /// zero-sized types, is a dangling, well aligned pointer.
 #[repr(transparent)]
-pub struct Box<T: ?Sized, A: Allocator>(NonNull<T>, PhantomData<A>);
+#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))]
+pub struct Box<#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, pointee)] T: ?Sized, A: Allocator>(
+    NonNull<T>,
+    PhantomData<A>,
+);
 
 /// Type alias for [`Box`] with a [`Kmalloc`] allocator.
 ///
@@ -485,3 +490,17 @@ where
         unsafe { A::free(self.0.cast(), layout) };
     }
 }
+
+// #[unstable(feature = "coerce_unsized", issue = "18598")]
+#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
+
+impl<T, A> AsRef<T> for Box<T, A>
+where
+    T: ?Sized,
+    A: Allocator,
+{
+    fn as_ref(&self) -> &T {
+        &**self
+    }
+}
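What the `CoercePointee`/`CoerceUnsized` plumbing above enables is unsizing a
concrete `KBox<T>` into a `KBox<dyn Trait>`; the new `AsRef` impl gives an explicit
borrow on top of `Deref`. A hedged sketch (trait and type names are illustrative):

    use kernel::prelude::*;

    trait Shape {
        fn sides(&self) -> u32;
    }

    struct Square;
    impl Shape for Square {
        fn sides(&self) -> u32 {
            4
        }
    }

    fn boxed_shape() -> Result<KBox<dyn Shape>> {
        // The `KBox<Square>` -> `KBox<dyn Shape>` coercion is what the
        // `#[pointee]`/`CoerceUnsized` impls make possible.
        let b: KBox<dyn Shape> = KBox::new(Square, GFP_KERNEL)?;
        Ok(b)
    }

    fn sides_via_as_ref(b: &KBox<dyn Shape>) -> u32 {
        // The new `AsRef<T> for Box<T, A>` allows borrowing without auto-deref.
        b.as_ref().sides()
    }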
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index 87a71fd40c3cad..79d83953d5fc36 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -7,6 +7,7 @@
 
 use super::{
     allocator::{KVmalloc, Kmalloc, Vmalloc},
+    drain::Drain,
     layout::ArrayLayout,
     AllocError, Allocator, Box, Flags,
 };
@@ -18,6 +19,7 @@ use core::{
     ops::DerefMut,
     ops::Index,
     ops::IndexMut,
+    ops::{Range, RangeBounds},
     ptr,
     ptr::NonNull,
     slice,
@@ -455,6 +457,200 @@
 
         Ok(())
     }
+
+    /// Resizes the Vec in-place so that `len` is equal to `new_len`.
+    ///
+    /// If `new_len` is greater than len, the Vec is extended by the difference,
+    /// with each additional slot filled with `value`.
+    /// If `new_len` is less than len, the Vec is simply truncated.
+    pub fn resize(&mut self, new_len: usize, value: T, flags: Flags) -> Result<(), AllocError>
+    where
+        T: Clone,
+    {
+        if new_len < self.len() {
+            self.truncate(new_len);
+            return Ok(());
+        }
+        if new_len == self.len() {
+            return Ok(());
+        }
+        let n = new_len - self.len();
+        self.reserve(n, flags)?;
+        // Only initialize the `n` new slots; filling all spare capacity would leak the clones.
+        for u in &mut self.spare_capacity_mut()[..n] {
+            u.write(value.clone());
+        }
+        // SAFETY: we just initialized them above
+        unsafe {
+            self.set_len(new_len);
+        }
+        Ok(())
+    }
+
+    /// Clears the vector, removing all values.
+    ///
+    /// Note that this method has no effect on the allocated capacity
+    /// of the vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut v = vec![1, 2, 3];
+    ///
+    /// v.clear();
+    ///
+    /// assert!(v.is_empty());
+    /// ```
+    #[inline]
+    pub fn clear(&mut self) {
+        let elems: *mut [T] = self.as_mut_slice();
+
+        // SAFETY:
+        // - `elems` comes directly from `as_mut_slice` and is therefore valid.
+        // - Setting `self.len` before calling `drop_in_place` means that,
+        //   if an element's `Drop` impl panics, the vector's `Drop` impl will
+        //   do nothing (leaking the rest of the elements) instead of dropping
+        //   some twice.
+        unsafe {
+            self.len = 0;
+            ptr::drop_in_place(elems);
+        }
+    }
+
+    /// Shortens the vector, keeping the first `len` elements and dropping
+    /// the rest.
+    ///
+    /// If `len` is greater or equal to the vector's current length, this has
+    /// no effect.
+    ///
+    /// The [`drain`] method can emulate `truncate`, but causes the excess
+    /// elements to be returned instead of dropped.
+    ///
+    /// Note that this method has no effect on the allocated capacity
+    /// of the vector.
+    ///
+    /// # Examples
+    ///
+    /// Truncating a five element vector to two elements:
+    ///
+    /// ```
+    /// let mut vec = vec![1, 2, 3, 4, 5];
+    /// vec.truncate(2);
+    /// assert_eq!(vec, [1, 2]);
+    /// ```
+    ///
+    /// No truncation occurs when `len` is greater than the vector's current
+    /// length:
+    ///
+    /// ```
+    /// let mut vec = vec![1, 2, 3];
+    /// vec.truncate(8);
+    /// assert_eq!(vec, [1, 2, 3]);
+    /// ```
+    ///
+    /// Truncating when `len == 0` is equivalent to calling the [`clear`]
+    /// method.
+    ///
+    /// ```
+    /// let mut vec = vec![1, 2, 3];
+    /// vec.truncate(0);
+    /// assert_eq!(vec, []);
+    /// ```
+    ///
+    /// [`clear`]: Vec::clear
+    /// [`drain`]: Vec::drain
+    pub fn truncate(&mut self, len: usize) {
+        // This is safe because:
+        //
+        // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+        //   case avoids creating an invalid slice, and
+        // * the `len` of the vector is shrunk before calling `drop_in_place`,
+        //   such that no value will be dropped twice in case `drop_in_place`
+        //   were to panic once (if it panics twice, the program aborts).
+        unsafe {
+            // Note: It's intentional that this is `>` and not `>=`.
+            //       Changing it to `>=` has negative performance
+            //       implications in some cases. See #78884 for more.
+            if len > self.len {
+                return;
+            }
+            let remaining_len = self.len - len;
+            let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+            self.len = len;
+            ptr::drop_in_place(s);
+        }
+    }
+
+    /// Removes the specified range from the vector in bulk, returning all
+    /// removed elements as an iterator. If the iterator is dropped before
+    /// being fully consumed, it drops the remaining removed elements.
+    ///
+    /// The returned iterator keeps a mutable borrow on the vector to optimize
+    /// its implementation.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the starting point is greater than the end point or if
+    /// the end point is greater than the length of the vector.
+    ///
+    /// # Leaking
+    ///
+    /// If the returned iterator goes out of scope without being dropped (due to
+    /// [`mem::forget`], for example), the vector may have lost and leaked
+    /// elements arbitrarily, including elements outside the range.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut v = vec![1, 2, 3];
+    /// let u: Vec<_> = v.drain(1..).collect();
+    /// assert_eq!(v, &[1]);
+    /// assert_eq!(u, &[2, 3]);
+    ///
+    /// // A full range clears the vector, like `clear()` does
+    /// v.drain(..);
+    /// assert_eq!(v, &[]);
+    /// ```
+    pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+    where
+        R: RangeBounds<usize>,
+    {
+        let len = self.len();
+        let Range { start, end } = slice::range(range, ..len);
+
+        unsafe {
+            // Set the length to `start`, to be safe in case the `Drain` is leaked.
+            self.set_len(start);
+            let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
+            Drain {
+                tail_start: end,
+                tail_len: len - end,
+                iter: range_slice.iter(),
+                vec: NonNull::from(self),
+            }
+        }
+    }
+
+    /// Removes an element from the vector and returns it.
+    ///
+    /// The removed element is replaced by the last element of the vector.
+    ///
+    /// This does not preserve ordering of the remaining elements, but is *O*(1).
+    /// If you need to preserve the element order, use [`remove`] instead.
+    pub fn swap_remove(&mut self, index: usize) -> T {
+        if index >= self.len() {
+            panic!("Index out of range");
+        }
+        // SAFETY: `index` is in range, and since at least one element exists,
+        // `self.len() - 1` is also in range.
+        unsafe {
+            let old = ptr::read(self.as_ptr().add(index));
+            let last = ptr::read(self.as_ptr().add(self.len() - 1));
+            ptr::write(self.as_mut_ptr().add(index), last);
+            self.set_len(self.len - 1);
+            old
+        }
+    }
 }
 
 impl<T: Clone, A: Allocator> Vec<T, A> {
@@ -914,3 +1107,51 @@ where
         }
     }
 }
+
+// #[stable(feature = "array_try_from_vec", since = "1.48.0")]
+impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] {
+    type Error = Vec<T, A>;
+
+    /// Gets the entire contents of the `Vec<T>` as an array,
+    /// if its size exactly matches that of the requested array.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+    /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+    /// ```
+    ///
+    /// If the length doesn't match, the input comes back in `Err`:
+    /// ```
+    /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+    /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+    /// ```
+    ///
+    /// If you're fine with just getting a prefix of the `Vec<T>`,
+    /// you can call [`.truncate(N)`](Vec::truncate) first.
+    /// ```
+    /// let mut v = String::from("hello world").into_bytes();
+    /// v.sort();
+    /// v.truncate(2);
+    /// let [a, b]: [_; 2] = v.try_into().unwrap();
+    /// assert_eq!(a, b' ');
+    /// assert_eq!(b, b'd');
+    /// ```
+    fn try_from(mut vec: Vec<T, A>) -> Result<[T; N], Vec<T, A>> {
+        if vec.len() != N {
+            return Err(vec);
+        }
+
+        // SAFETY: `.set_len(0)` is always sound.
+        unsafe { vec.set_len(0) };
+
+        // SAFETY: A `Vec`'s pointer is always aligned properly, and
+        // the alignment the array needs is the same as the items.
+        // We checked earlier that we have sufficient items.
+        // The items will not double-drop as the `set_len`
+        // tells the `Vec` not to also drop them.
+        let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+        Ok(array)
+    }
+}
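A hedged usage sketch for the new `resize` and `swap_remove` methods (using the
`KVec` alias from the prelude):

    use kernel::prelude::*;

    fn resize_and_swap() -> Result {
        let mut v = KVec::new();
        v.push(10u32, GFP_KERNEL)?;
        v.push(20, GFP_KERNEL)?;

        // Grow to four elements, filling the new slots with `0`.
        v.resize(4, 0, GFP_KERNEL)?;
        assert_eq!(&*v, &[10, 20, 0, 0]);

        // O(1) removal: the last element is moved into the vacated slot,
        // so element order is not preserved.
        let removed = v.swap_remove(0);
        assert_eq!(removed, 10);
        assert_eq!(&*v, &[0, 20, 0]);
        Ok(())
    }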
diff --git a/rust/kernel/delay.rs b/rust/kernel/delay.rs
new file mode 100644
index 00000000000000..1e987fa659419b
--- /dev/null
+++ b/rust/kernel/delay.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Delay functions for operations like sleeping.
+//!
+//! C header: [`include/linux/delay.h`](srctree/include/linux/delay.h)
+
+use crate::bindings;
+use core::{cmp::min, time::Duration};
+
+const MILLIS_PER_SEC: u64 = 1_000;
+
+fn coarse_sleep_conversion(duration: Duration) -> core::ffi::c_uint {
+    let milli_as_nanos = Duration::MILLISECOND.subsec_nanos();
+
+    // Rounds the nanosecond component of `duration` up to the nearest millisecond.
+    let nanos_as_millis = duration.subsec_nanos().wrapping_add(milli_as_nanos - 1) / milli_as_nanos;
+
+    // Saturates the second component of `duration` to `c_uint::MAX`.
+    let seconds_as_millis = min(
+        duration.as_secs().saturating_mul(MILLIS_PER_SEC),
+        u64::from(core::ffi::c_uint::MAX),
+    ) as core::ffi::c_uint;
+
+    seconds_as_millis.saturating_add(nanos_as_millis)
+}
+
+/// Sleeps safely even with waitqueue interruptions.
+///
+/// This function forwards the call to the C side `msleep` function. As a result,
+/// `duration` is rounded up to the nearest millisecond if it has sub-millisecond
+/// granularity. Any [`Duration`] that exceeds
+/// [`c_uint::MAX`][core::ffi::c_uint::MAX] in milliseconds is saturated.
+///
+/// # Examples
+///
+// Keep these in sync with `test_coarse_sleep_examples`.
+/// ```
+/// # use core::time::Duration;
+/// # use kernel::delay::coarse_sleep;
+/// coarse_sleep(Duration::ZERO);                   // Equivalent to `msleep(0)`.
+/// coarse_sleep(Duration::from_nanos(1));          // Equivalent to `msleep(1)`.
+///
+/// coarse_sleep(Duration::from_nanos(1_000_000));  // Equivalent to `msleep(1)`.
+/// coarse_sleep(Duration::from_nanos(1_000_001));  // Equivalent to `msleep(2)`.
+/// coarse_sleep(Duration::from_nanos(1_999_999));  // Equivalent to `msleep(2)`.
+///
+/// coarse_sleep(Duration::from_millis(1));         // Equivalent to `msleep(1)`.
+/// coarse_sleep(Duration::from_millis(2));         // Equivalent to `msleep(2)`.
+///
+/// coarse_sleep(Duration::from_secs(1));           // Equivalent to `msleep(1000)`.
+/// coarse_sleep(Duration::new(1, 1));              // Equivalent to `msleep(1001)`.
+/// coarse_sleep(Duration::new(1, 2));              // Equivalent to `msleep(1001)`.
+/// ```
+pub fn coarse_sleep(duration: Duration) {
+    // SAFETY: `msleep` is safe for all values of its argument.
+    unsafe { bindings::msleep(coarse_sleep_conversion(duration)) }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{coarse_sleep_conversion, MILLIS_PER_SEC};
+    use core::time::Duration;
+
+    #[test]
+    fn test_coarse_sleep_examples() {
+        // Keep these in sync with `coarse_sleep`'s `# Examples` section.
+
+        assert_eq!(coarse_sleep_conversion(Duration::ZERO), 0);
+        assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1)), 1);
+
+        assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_000_000)), 1);
+        assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_000_001)), 2);
+        assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_999_999)), 2);
+
+        assert_eq!(coarse_sleep_conversion(Duration::from_millis(1)), 1);
+        assert_eq!(coarse_sleep_conversion(Duration::from_millis(2)), 2);
+
+        assert_eq!(coarse_sleep_conversion(Duration::from_secs(1)), 1000);
+        assert_eq!(coarse_sleep_conversion(Duration::new(1, 1)), 1001);
+        assert_eq!(coarse_sleep_conversion(Duration::new(1, 2)), 1001);
+    }
+
+    #[test]
+    fn test_coarse_sleep_saturation() {
+        assert!(
+            coarse_sleep_conversion(Duration::new(
+                core::ffi::c_uint::MAX as u64 / MILLIS_PER_SEC,
+                0
+            )) < core::ffi::c_uint::MAX
+        );
+        assert_eq!(
+            coarse_sleep_conversion(Duration::new(
+                core::ffi::c_uint::MAX as u64 / MILLIS_PER_SEC,
+                999_999_999
+            )),
+            core::ffi::c_uint::MAX
+        );
+
+        assert_eq!(
+            coarse_sleep_conversion(Duration::MAX),
+            core::ffi::c_uint::MAX
+        );
+    }
+}
diff --git a/rust/kernel/devcoredump.rs b/rust/kernel/devcoredump.rs
new file mode 100644
index 00000000000000..540f5d847f1e97
--- /dev/null
+++ b/rust/kernel/devcoredump.rs
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Device coredump support.
+//!
+//! C header: [`include/linux/devcoredump.h`](srctree/include/linux/devcoredump.h)
+
+use crate::{
+    alloc, bindings, device, error::from_result, prelude::Result, time::Jiffies,
+    types::ForeignOwnable, ThisModule,
+};
+
+use core::ops::Deref;
+
+/// The default timeout for device coredumps.
+pub const DEFAULT_TIMEOUT: Jiffies = bindings::DEVCD_TIMEOUT as Jiffies;
+
+/// Trait to implement reading from a device coredump.
+///
+/// Users must implement this trait to provide device coredump support.
+pub trait DevCoreDump {
+    /// Reads up to `buf.len()` bytes of coredump data starting at `offset` into `buf`,
+    /// returning the number of bytes read, or an error if the data is unavailable.
+    fn read(&self, buf: &mut [u8], offset: usize) -> Result<usize>;
+}
+
+unsafe extern "C" fn read_callback<
+    'a,
+    T: ForeignOwnable<Borrowed<'a>: Deref<Target = D>>,
+    D: DevCoreDump,
+>(
+    buffer: *mut crate::ffi::c_char,
+    offset: bindings::loff_t,
+    count: usize,
+    data: *mut crate::ffi::c_void,
+    _datalen: usize,
+) -> isize {
+    // SAFETY: This pointer came from into_foreign() below.
+    let coredump = unsafe { T::borrow(data) };
+    // SAFETY: The caller guarantees `buffer` points to at least `count` bytes.
+    let buf = unsafe { core::slice::from_raw_parts_mut(buffer, count) };
+
+    from_result(|| Ok(coredump.read(buf, offset.try_into()?)?.try_into()?))
+}
+
+unsafe extern "C" fn free_callback<
+    'a,
+    T: ForeignOwnable<Borrowed<'a>: Deref<Target = D>>,
+    D: DevCoreDump,
+>(
+    data: *mut crate::ffi::c_void,
+) {
+    // SAFETY: This pointer came from into_foreign() below.
+    unsafe {
+        T::from_foreign(data);
+    }
+}
+
+/// Registers a coredump for the given device.
+pub fn dev_coredump<'a, T: ForeignOwnable<Borrowed<'a>: Deref<Target = D>>, D: DevCoreDump>(
+    dev: &device::Device,
+    module: &'static ThisModule,
+    coredump: T,
+    gfp: alloc::Flags,
+    timeout: Jiffies,
+) {
+    // SAFETY: Call upholds dev_coredumpm lifetime requirements.
+    unsafe {
+        bindings::dev_coredumpm_timeout(
+            dev.as_raw(),
+            module.0,
+            coredump.into_foreign() as *mut _,
+            0,
+            gfp.as_raw(),
+            Some(read_callback::<'a, T, D>),
+            Some(free_callback::<'a, T, D>),
+            timeout,
+        )
+    }
+}
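A hedged sketch of a driver-side implementation; the `Snapshot` type, its contents
and the use of `KBox` as the `ForeignOwnable` carrier are illustrative only:

    use kernel::prelude::*;
    use kernel::{devcoredump, device, ThisModule};

    struct Snapshot {
        data: KVec<u8>,
    }

    impl devcoredump::DevCoreDump for Snapshot {
        fn read(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
            // Copy out as much of the snapshot as fits, starting at `offset`.
            let src = self.data.get(offset..).unwrap_or(&[]);
            let n = core::cmp::min(buf.len(), src.len());
            buf[..n].copy_from_slice(&src[..n]);
            Ok(n)
        }
    }

    fn report_crash(dev: &device::Device, module: &'static ThisModule, snap: KBox<Snapshot>) {
        // Ownership of the snapshot moves to devcoredump; `free_callback` reclaims it
        // once user space has read the dump or the timeout expires.
        devcoredump::dev_coredump(dev, module, snap, GFP_KERNEL, devcoredump::DEFAULT_TIMEOUT);
    }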
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index db2d9658ba47d9..45de963f3e1a57 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -5,7 +5,7 @@
 //! C header: [`include/linux/device.h`](srctree/include/linux/device.h)
 
 use crate::{
-    bindings,
+    bindings, of,
     str::CStr,
     types::{ARef, Opaque},
 };
@@ -61,10 +61,28 @@ impl Device {
     }
 
     /// Obtain the raw `struct device *`.
-    pub(crate) fn as_raw(&self) -> *mut bindings::device {
+    pub fn as_raw(&self) -> *mut bindings::device {
         self.0.get()
     }
 
+    /// Returns the parent device, if any.
+    pub fn parent(&self) -> Option<ARef<Self>> {
+        // SAFETY: pointer is valid by type invariant
+        let pdev = unsafe { (*self.as_raw()).parent };
+        if pdev.is_null() {
+            return None;
+        }
+        // SAFETY: if the parent pointer is not null it points to a valid device
+        unsafe { Some(Self::get_device(pdev)) }
+    }
+
+    /// Returns the driver_data pointer.
+    pub fn get_drvdata<T>(&self) -> *mut T {
+        // SAFETY: `self.as_raw()` is valid by the type invariant, and
+        // `dev_get_drvdata` only reads the `driver_data` field of that device.
+        unsafe { bindings::dev_get_drvdata(self.as_raw()) as *mut T }
+    }
+
     /// Convert a raw C `struct device` pointer to a `&'a Device`.
     ///
     /// # Safety
@@ -78,6 +96,13 @@ impl Device {
         unsafe { &*ptr.cast() }
     }
 
+    /// Gets the OpenFirmware node attached to this device
+    pub fn of_node(&self) -> Option<of::Node> {
+        let ptr = self.0.get();
+        // SAFETY: This is safe as long as of_node is NULL or valid.
+        unsafe { of::Node::get_from_raw((*ptr).of_node) }
+    }
+
     /// Prints an emergency-level message (level 0) prefixed with device information.
     ///
     /// More details are available from [`dev_emerg`].
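A hedged sketch of the new accessors; the driver-data type is illustrative, and
since `get_drvdata` hands back a raw pointer, validating and dereferencing it
remains the caller's responsibility:

    use kernel::device::Device;

    struct DriverData {
        fast_clock: bool,
    }

    fn uses_fast_clock(dev: &Device) -> bool {
        // Walk up to the parent device (if any) and peek at its OF node.
        if let Some(parent) = dev.parent() {
            if let Some(node) = parent.of_node() {
                let _ = node; // e.g. inspect a devicetree property here
            }
        }

        let data = dev.get_drvdata::<DriverData>();
        // SAFETY (sketch only): assumes the driver previously stored a valid
        // `DriverData` pointer in `driver_data` and that it outlives this call.
        !data.is_null() && unsafe { (*data).fast_clock }
    }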
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
new file mode 100644
index 00000000000000..027ef75a461aa0
--- /dev/null
+++ b/rust/kernel/dma.rs
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Direct memory access (DMA).
+//!
+//! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)
+
+use crate::{
+    bindings, build_assert, device,
+    error::code::*,
+    error::Result,
+    prelude::*,
+    transmute::{AsBytes, FromBytes},
+    types::ARef,
+};
+
+/// Trait to be implemented by bus specific devices.
+///
+/// The [`Device`] trait should be implemented by bus specific device representations, where the
+/// underlying bus has potential support for DMA, such as [`crate::pci::Device`] or
+/// [`crate::platform::Device`].
+pub trait Device: AsRef<device::Device> {
+    /// Inform the kernel about the device's DMA addressing capabilities.
+    ///
+    /// Set both the DMA mask and the coherent DMA mask to the same value.
+    ///
+    /// Note that we don't check the return value from the C `dma_set_coherent_mask` as the DMA API
+    /// guarantees that the coherent DMA mask can be set to the same or smaller than the streaming
+    /// DMA mask.
+    fn dma_set_mask_and_coherent(&mut self, mask: u64) -> Result {
+        // SAFETY: By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
+        let ret = unsafe { bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask) };
+        if ret != 0 {
+            Err(Error::from_errno(ret))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Same as [`Self::dma_set_mask_and_coherent`], but set the mask only for streaming mappings.
+    fn dma_set_mask(&mut self, mask: u64) -> Result {
+        // SAFETY: By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
+        let ret = unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask) };
+        if ret != 0 {
+            Err(Error::from_errno(ret))
+        } else {
+            Ok(())
+        }
+    }
+}
+
+/// Possible attributes associated with a DMA mapping.
+///
+/// They can be combined with the operators `|`, `&`, and `!`.
+///
+/// Values can be used from the [`attrs`] module.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::dma::{attrs::*, Device, CoherentAllocation};
+///
+/// # fn test(dev: &dyn Device) -> Result {
+/// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
+/// let c: CoherentAllocation<u64> =
+///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
+/// # Ok::<(), Error>(()) }
+/// ```
+#[derive(Clone, Copy, PartialEq)]
+#[repr(transparent)]
+pub struct Attrs(u32);
+
+impl Attrs {
+    /// Get the raw representation of this attribute.
+    pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
+        self.0 as _
+    }
+
+    /// Check whether `flags` is contained in `self`.
+    pub fn contains(self, flags: Attrs) -> bool {
+        (self & flags) == flags
+    }
+}
+
+impl core::ops::BitOr for Attrs {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl core::ops::BitAnd for Attrs {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl core::ops::Not for Attrs {
+    type Output = Self;
+    fn not(self) -> Self::Output {
+        Self(!self.0)
+    }
+}
+
+/// DMA mapping attributes.
+pub mod attrs {
+    use super::Attrs;
+
+    /// Specifies that reads and writes to the mapping may be weakly ordered, that is that reads
+    /// and writes may pass each other.
+    pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);
+
+    /// Specifies that writes to the mapping may be buffered to improve performance.
+    pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);
+
+    /// Lets the platform avoid creating a kernel virtual mapping for the allocated buffer.
+    pub const DMA_ATTR_NO_KERNEL_MAPPING: Attrs = Attrs(bindings::DMA_ATTR_NO_KERNEL_MAPPING);
+
+    /// Allows platform code to skip synchronization of the CPU cache for the given buffer assuming
+    /// that it has been already transferred to 'device' domain.
+    pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);
+
+    /// Forces contiguous allocation of the buffer in physical memory.
+    pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);
+
+    /// This is a hint to the DMA-mapping subsystem that it's probably not worth the time to try
+    /// to allocate memory in a way that gives better TLB efficiency.
+    pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);
+
+    /// This tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
+    /// __GFP_NOWARN).
+    pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);
+
+    /// Used to indicate that the buffer is fully accessible at an elevated privilege level (and
+    /// ideally inaccessible or at least read-only at lesser-privileged levels).
+    pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
+}
+
+/// An abstraction of the `dma_alloc_coherent` API.
+///
+/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
+/// large consistent DMA regions.
+///
+/// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
+/// processor's virtual address space) and the device address which can be given to the device
+/// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
+/// is dropped.
+///
+/// # Invariants
+///
+/// For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
+/// to an allocated region of consistent memory and `dma_handle` is the DMA address base of
+/// the region.
+// TODO
+//
+// DMA allocations potentially carry device resources (e.g.IOMMU mappings), hence for soundness
+// reasons DMA allocation would need to be embedded in a `Devres` container, in order to ensure
+// that device resources can never survive device unbind.
+//
+// However, it is neither desirable nor necessary to protect the allocated memory of the DMA
+// allocation from surviving device unbind; it would require RCU read side critical sections to
+// access the memory, which may require subsequent unnecessary copies.
+//
+// Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
+// entire `CoherentAllocation` including the allocated memory itself.
+pub struct CoherentAllocation<T: AsBytes + FromBytes> {
+    dev: ARef<device::Device>,
+    dma_handle: bindings::dma_addr_t,
+    count: usize,
+    cpu_addr: *mut T,
+    dma_attrs: Attrs,
+}
+
+impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
+    /// Allocates a region of `size_of::<T> * count` of consistent memory.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use kernel::dma::{attrs::*, Device, CoherentAllocation};
+    ///
+    /// # fn test(dev: &dyn Device) -> Result {
+    /// let c: CoherentAllocation<u64> =
+    ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
+    /// # Ok::<(), Error>(()) }
+    /// ```
+    pub fn alloc_attrs(
+        dev: &dyn Device,
+        count: usize,
+        gfp_flags: kernel::alloc::Flags,
+        dma_attrs: Attrs,
+    ) -> Result<CoherentAllocation<T>> {
+        build_assert!(
+            core::mem::size_of::<T>() > 0,
+            "It doesn't make sense for the allocated type to be a ZST"
+        );
+
+        let dev = dev.as_ref();
+
+        let size = count
+            .checked_mul(core::mem::size_of::<T>())
+            .ok_or(EOVERFLOW)?;
+        let mut dma_handle = 0;
+        // SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
+        let ret = unsafe {
+            bindings::dma_alloc_attrs(
+                dev.as_raw(),
+                size,
+                &mut dma_handle,
+                gfp_flags.as_raw(),
+                dma_attrs.as_raw(),
+            )
+        };
+        if ret.is_null() {
+            return Err(ENOMEM);
+        }
+        // INVARIANT: We just successfully allocated a coherent region which is accessible for
+        // `count` elements, hence the cpu address is valid. We also hold a refcounted reference
+        // to the device.
+        Ok(Self {
+            dev: dev.into(),
+            dma_handle,
+            count,
+            cpu_addr: ret as *mut T,
+            dma_attrs,
+        })
+    }
+
+    /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except the
+    /// `dma_attrs` is 0 by default.
+    pub fn alloc_coherent(
+        dev: &dyn Device,
+        count: usize,
+        gfp_flags: kernel::alloc::Flags,
+    ) -> Result<CoherentAllocation<T>> {
+        CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
+    }
+
+    /// Returns the base address to the allocated region in the CPU's virtual address space.
+    pub fn start_ptr(&self) -> *const T {
+        self.cpu_addr
+    }
+
+    /// Returns the base address to the allocated region in the CPU's virtual address space as
+    /// a mutable pointer.
+    pub fn start_ptr_mut(&mut self) -> *mut T {
+        self.cpu_addr
+    }
+
+    /// Returns a DMA handle which may be given to the device as the DMA address base of
+    /// the region.
+    pub fn dma_handle(&self) -> bindings::dma_addr_t {
+        self.dma_handle
+    }
+
+    /// Returns the data from the region starting from `offset` as a slice.
+    /// `offset` and `count` are in units of `T`, not the number of bytes.
+    ///
+    /// Due to the safety requirements of slices, the caller should assume that the region could
+    /// be modified by the device at any time. For ring-buffer style read/write access, or for
+    /// use cases where a pointer to the live data is needed, `start_ptr()` or `start_ptr_mut()`
+    /// can be used instead.
+    ///
+    /// # Safety
+    ///
+    /// * Callers must ensure that no hardware operations that involve the buffer are currently
+    ///   taking place while the returned slice is live.
+    /// * Callers must ensure that this call does not race with a write to the same region
+    ///   while the returned slice is live.
+    pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
+        let end = offset.checked_add(count).ok_or(EOVERFLOW)?;
+        if end > self.count {
+            return Err(EINVAL);
+        }
+        // SAFETY:
+        // - The pointer is valid due to type invariant on `CoherentAllocation`,
+        // we've just checked that the range and index is within bounds. The immutability of the
+        // of data is also guaranteed by the safety requirements of the function.
+        // - `offset` can't overflow since it is smaller than `self.count` and we've checked
+        // that `self.count` won't overflow early in the constructor.
+        Ok(unsafe { core::slice::from_raw_parts(self.cpu_addr.add(offset), count) })
+    }
+
+    /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
+    /// slice is returned.
+    ///
+    /// # Safety
+    ///
+    /// * Callers must ensure that no hardware operations that involve the buffer are currently
+    ///   taking place while the returned slice is live.
+    /// * Callers must ensure that this call does not race with a read or write to the same region
+    ///   while the returned slice is live.
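+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that clears the first four elements, assuming the device is currently
+    /// idle and the allocation holds more than four elements:
+    ///
+    /// ```
+    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u8>) -> Result {
+    /// // SAFETY: No hw operation on the device and no other r/w access to the region at this point.
+    /// let buf = unsafe { alloc.as_slice_mut(0, 4)? };
+    /// buf.fill(0);
+    /// # Ok::<(), Error>(()) }
+    /// ```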
+    pub unsafe fn as_slice_mut(&self, offset: usize, count: usize) -> Result<&mut [T]> {
+        let end = offset.checked_add(count).ok_or(EOVERFLOW)?;
+        if end >= self.count {
+            return Err(EINVAL);
+        }
+        // SAFETY:
+        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
+        // just checked that the range and index are within bounds. Exclusive access to the
+        // data is also guaranteed by the safety requirements of the function.
+        // - `offset` can't overflow since it is smaller than `self.count` and we've checked
+        // that `self.count` won't overflow early in the constructor.
+        Ok(unsafe { core::slice::from_raw_parts_mut(self.cpu_addr.add(offset), count) })
+    }
+
+    /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
+    /// number of bytes.
+    ///
+    /// # Safety
+    ///
+    /// * Callers must ensure that no hardware operations that involve the buffer overlaps with
+    ///   this write.
+    /// * Callers must ensure that this call does not race with a read or write to the same region
+    ///   that overlaps with this write.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
+    /// let somedata: [u8; 4] = [0xf; 4];
+    /// let buf: &[u8] = &somedata;
+    /// // SAFETY: No hw operation on the device and no other r/w access to the region at this point.
+    /// unsafe { alloc.write(buf, 0)?; }
+    /// # Ok::<(), Error>(()) }
+    /// ```
+    pub unsafe fn write(&self, src: &[T], offset: usize) -> Result {
+        let end = offset.checked_add(src.len()).ok_or(EOVERFLOW)?;
+        if end >= self.count {
+            return Err(EINVAL);
+        }
+        // SAFETY:
+        // - The pointer is valid due to the type invariant on `CoherentAllocation`
+        // and we've just checked that the range and index are within bounds.
+        // - `offset` can't overflow since it is smaller than `self.count` and we've checked
+        // that `self.count` won't overflow early in the constructor.
+        unsafe {
+            core::ptr::copy_nonoverlapping(src.as_ptr(), self.cpu_addr.add(offset), src.len())
+        };
+        Ok(())
+    }
+
+    /// Returns a pointer to an element from the region with bounds checking. `offset` is in
+    /// units of `T`, not the number of bytes.
+    ///
+    /// Public but hidden since it should only be used from the [`dma_read`] and [`dma_write`]
+    /// macros.
+    #[doc(hidden)]
+    pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
+        if offset >= self.count {
+            return Err(EINVAL);
+        }
+        // SAFETY:
+        // - The pointer is valid due to the type invariant on `CoherentAllocation`
+        // and we've just checked that the index is within bounds.
+        // - `offset` can't overflow since it is smaller than `self.count` and we've checked
+        // that `self.count` won't overflow early in the constructor.
+        Ok(unsafe { self.cpu_addr.add(offset) })
+    }
+
+    /// Reads the value of `field` and ensures that its type is [`FromBytes`].
+    ///
+    /// # Safety
+    ///
+    /// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
+    /// validated beforehand.
+    ///
+    /// Public but hidden since it should only be used from [`dma_read`] macro.
+    #[doc(hidden)]
+    pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
+        // SAFETY:
+        // - By the safety requirements, `field` is valid.
+        // - Using read_volatile() here is not sound as per the usual rules; the usage here is
+        // a special exception with the following notes in place. When dealing with a potential
+        // race from hardware or code outside the kernel (e.g. a user-space program), we need
+        // reads of valid memory not to be UB. Currently read_volatile() is used for this, and
+        // the rationale behind it is that it should generate the same code as READ_ONCE(), which
+        // the kernel already relies on to avoid UB on data races. Note that the usage of
+        // read_volatile() is limited to this particular case; it cannot be used to prevent
+        // the UB caused by racing between two kernel functions, nor does it provide atomicity.
+        unsafe { field.read_volatile() }
+    }
+
+    /// Writes a value to `field` and ensures that its type is [`AsBytes`].
+    ///
+    /// # Safety
+    ///
+    /// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
+    /// validated beforehand.
+    ///
+    /// Public but hidden since it should only be used from [`dma_write`] macro.
+    #[doc(hidden)]
+    pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
+        // SAFETY:
+        // - By the safety requirements, `field` is valid.
+        // - Using write_volatile() here is not sound as per the usual rules; the usage here is
+        // a special exception with the following notes in place. When dealing with a potential
+        // race from hardware or code outside the kernel (e.g. a user-space program), we need
+        // writes to valid memory not to be UB. Currently write_volatile() is used for this, and
+        // the rationale behind it is that it should generate the same code as WRITE_ONCE(), which
+        // the kernel already relies on to avoid UB on data races. Note that the usage of
+        // write_volatile() is limited to this particular case; it cannot be used to prevent
+        // the UB caused by racing between two kernel functions, nor does it provide atomicity.
+        unsafe { field.write_volatile(val) }
+    }
+}
+
+/// Note that the device configured to do DMA must be halted before this object is dropped.
+impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
+    fn drop(&mut self) {
+        let size = self.count * core::mem::size_of::<T>();
+        // SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
+        // The cpu address, and the dma handle are valid due to the type invariants on
+        // `CoherentAllocation`.
+        unsafe {
+            bindings::dma_free_attrs(
+                self.dev.as_raw(),
+                size,
+                self.cpu_addr as _,
+                self.dma_handle,
+                self.dma_attrs.as_raw(),
+            )
+        }
+    }
+}
+
+/// Reads a field of an item from an allocated region of structs.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::device::Device;
+/// use kernel::dma::{attrs::*, CoherentAllocation};
+///
+/// struct MyStruct { field: u32, }
+///
+/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
+/// unsafe impl kernel::transmute::FromBytes for MyStruct{};
+/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
+/// unsafe impl kernel::transmute::AsBytes for MyStruct{};
+///
+/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
+/// let whole = kernel::dma_read!(alloc[2]);
+/// let field = kernel::dma_read!(alloc[1].field);
+/// # Ok::<(), Error>(()) }
+/// ```
+#[macro_export]
+macro_rules! dma_read {
+    ($dma:expr, $idx: expr, $($field:tt)*) => {{
+        let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+        // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
+        // dereferenced. The compiler also further validates the expression on whether `field`
+        // is a member of `item` when expanded by the macro.
+        unsafe {
+            let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
+            $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
+        }
+    }};
+    ($dma:ident [ $idx:expr ] $($field:tt)* ) => {
+        $crate::dma_read!($dma, $idx, $($field)*)
+    };
+    ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
+        $crate::dma_read!($($dma).*, $idx, $($field)*)
+    };
+}
+
+/// Writes to a field of an item from an allocated region of structs.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::device::Device;
+/// use kernel::dma::{attrs::*, CoherentAllocation};
+///
+/// struct MyStruct { member: u32, }
+///
+/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
+/// unsafe impl kernel::transmute::FromBytes for MyStruct{};
+/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
+/// unsafe impl kernel::transmute::AsBytes for MyStruct{};
+///
+/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
+/// kernel::dma_write!(alloc[2].member = 0xf);
+/// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf });
+/// # Ok::<(), Error>(()) }
+/// ```
+#[macro_export]
+macro_rules! dma_write {
+    ($dma:ident [ $idx:expr ] $($field:tt)*) => {{
+        $crate::dma_write!($dma, $idx, $($field)*);
+    }};
+    ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
+        $crate::dma_write!($($dma).*, $idx, $($field)*);
+    }};
+    ($dma:expr, $idx: expr, = $val:expr) => {
+        let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+        // SAFETY: `item_from_index` ensures that `item` is always a valid pointer.
+        unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
+    };
+    ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
+        let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
+        // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
+        // dereferenced. The compiler also further validates the expression on whether `field`
+        // is a member of `item` when expanded by the macro.
+        unsafe {
+            let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
+            $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
+        }
+    };
+}
+
+/// Returns a 64-bit mask with the low `n` bits set, for use as a DMA addressing mask;
+/// returns 0 if `n` is larger than 64.
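+///
+/// # Examples
+///
+/// A few spot checks of the mask values this helper produces:
+///
+/// ```
+/// use kernel::dma::dma_bit_mask;
+///
+/// assert_eq!(dma_bit_mask(0), 0);
+/// assert_eq!(dma_bit_mask(32), 0xffff_ffff);
+/// assert_eq!(dma_bit_mask(64), !0);
+/// assert_eq!(dma_bit_mask(65), 0);
+/// ```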
+pub const fn dma_bit_mask(n: usize) -> u64 {
+    if n > 64 {
+        return 0;
+    }
+    if n == 64 {
+        !0
+    } else {
+        (1 << (n)) - 1
+    }
+}
diff --git a/rust/kernel/dma_fence.rs b/rust/kernel/dma_fence.rs
new file mode 100644
index 00000000000000..ede7b69c7ba3f3
--- /dev/null
+++ b/rust/kernel/dma_fence.rs
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! DMA fence abstraction.
+//!
+//! C header: [`include/linux/dma-fence.h`](srctree/include/linux/dma-fence.h)
+
+use crate::{
+    bindings,
+    error::{to_result, Result},
+    prelude::*,
+    sync::LockClassKey,
+    types::Opaque,
+};
+use core::fmt::Write;
+use core::ops::{Deref, DerefMut};
+use core::ptr::addr_of_mut;
+use core::sync::atomic::{AtomicU64, Ordering};
+
+/// Any kind of DMA Fence Object
+///
+/// # Invariants
+/// raw() returns a valid pointer to a dma_fence and we own a reference to it.
+pub trait RawDmaFence: crate::private::Sealed {
+    /// Returns the raw `struct dma_fence` pointer.
+    fn raw(&self) -> *mut bindings::dma_fence;
+
+    /// Returns the raw `struct dma_fence` pointer and consumes the object.
+    ///
+    /// The caller is responsible for dropping the reference.
+    fn into_raw(self) -> *mut bindings::dma_fence
+    where
+        Self: Sized,
+    {
+        let ptr = self.raw();
+        core::mem::forget(self);
+        ptr
+    }
+
+    /// Advances this fence to the chain node which will signal this sequence number.
+    /// If `seqno` is zero, this returns `self` again.
+    /// If the seqno has already been signaled, returns `None`.
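+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming an owned [`Fence`] that may be a chain node:
+    ///
+    /// ```
+    /// # fn test(fence: kernel::dma_fence::Fence) -> Result {
+    /// use kernel::dma_fence::RawDmaFence;
+    ///
+    /// if let Some(node) = fence.chain_find_seqno(10)? {
+    ///     node.signal()?;
+    /// }
+    /// # Ok::<(), Error>(()) }
+    /// ```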
+    fn chain_find_seqno(self, seqno: u64) -> Result<Option<Fence>>
+    where
+        Self: Sized,
+    {
+        let mut ptr = self.into_raw();
+
+        // SAFETY: This will safely fail if this DmaFence is not a chain.
+        // `ptr` is valid per the type invariant.
+        let ret = unsafe { bindings::dma_fence_chain_find_seqno(&mut ptr, seqno) };
+
+        if ret != 0 {
+            // SAFETY: This is either an owned reference or NULL, dma_fence_put can handle both.
+            unsafe { bindings::dma_fence_put(ptr) };
+            Err(Error::from_errno(ret))
+        } else if ptr.is_null() {
+            Ok(None)
+        } else {
+            // SAFETY: ptr is valid and non-NULL as checked above.
+            Ok(Some(unsafe { Fence::from_raw(ptr) }))
+        }
+    }
+
+    /// Signal completion of this fence
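+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, assuming a borrowed [`Fence`]:
+    ///
+    /// ```
+    /// # fn test(fence: &kernel::dma_fence::Fence) -> Result {
+    /// use kernel::dma_fence::RawDmaFence;
+    ///
+    /// fence.signal()?;
+    /// # Ok::<(), Error>(()) }
+    /// ```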
+    fn signal(&self) -> Result {
+        // SAFETY: Safe to call on any valid dma_fence object
+        to_result(unsafe { bindings::dma_fence_signal(self.raw()) })
+    }
+
+    /// Set the error flag on this fence
+    fn set_error(&self, err: Error) {
+        // SAFETY: Safe to call on any valid dma_fence object
+        unsafe { bindings::dma_fence_set_error(self.raw(), err.to_errno()) };
+    }
+}
+
+/// A generic DMA Fence Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a dma_fence and we own a reference to it.
+pub struct Fence {
+    ptr: *mut bindings::dma_fence,
+}
+
+impl Fence {
+    /// Create a new Fence object from a raw pointer to a dma_fence.
+    ///
+    /// # Safety
+    /// The caller must own a reference to the dma_fence, which is transferred to the new object.
+    pub(crate) unsafe fn from_raw(ptr: *mut bindings::dma_fence) -> Fence {
+        Fence { ptr }
+    }
+
+    /// Create a new Fence object from a raw pointer to a dma_fence.
+    ///
+    /// # Safety
+    /// `ptr` must be a valid pointer to a `dma_fence`. This takes a borrowed reference to it
+    /// and increments the reference count.
+    pub(crate) unsafe fn get_raw(ptr: *mut bindings::dma_fence) -> Fence {
+        // SAFETY: Pointer is valid per the safety contract
+        unsafe { bindings::dma_fence_get(ptr) };
+        Fence { ptr }
+    }
+
+    /// Create a new Fence object from a RawDmaFence.
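+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch that takes an additional reference to an existing fence:
+    ///
+    /// ```
+    /// # fn test(fence: &kernel::dma_fence::Fence) {
+    /// let _extra_ref = kernel::dma_fence::Fence::from_fence(fence);
+    /// # }
+    /// ```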
+    pub fn from_fence(fence: &dyn RawDmaFence) -> Fence {
+        // SAFETY: Pointer is valid per the RawDmaFence contract
+        unsafe { Self::get_raw(fence.raw()) }
+    }
+}
+
+impl crate::private::Sealed for Fence {}
+
+impl RawDmaFence for Fence {
+    fn raw(&self) -> *mut bindings::dma_fence {
+        self.ptr
+    }
+}
+
+impl Drop for Fence {
+    fn drop(&mut self) {
+        // SAFETY: We own a reference to this fence.
+        unsafe { bindings::dma_fence_put(self.ptr) };
+    }
+}
+
+impl Clone for Fence {
+    fn clone(&self) -> Self {
+        // SAFETY: `ptr` is valid per the type invariant and we own a reference to it.
+        unsafe {
+            bindings::dma_fence_get(self.ptr);
+            Self::from_raw(self.ptr)
+        }
+    }
+}
+
+// SAFETY: The API for these objects is thread safe
+unsafe impl Sync for Fence {}
+// SAFETY: The API for these objects is thread safe
+unsafe impl Send for Fence {}
+
+/// Trait which must be implemented by driver-specific fence objects.
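+///
+/// # Examples
+///
+/// A minimal sketch of a driver-side implementation; `MyFence` and the name strings below are
+/// illustrative placeholders only:
+///
+/// ```
+/// use kernel::dma_fence::{FenceObject, FenceOps};
+/// use kernel::prelude::*;
+///
+/// struct MyFence;
+///
+/// #[vtable]
+/// impl FenceOps for MyFence {
+///     const USE_64BIT_SEQNO: bool = true;
+///
+///     fn get_driver_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr {
+///         kernel::c_str!("my_driver")
+///     }
+///
+///     fn get_timeline_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr {
+///         kernel::c_str!("my_timeline")
+///     }
+/// }
+/// ```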
+#[vtable]
+pub trait FenceOps: Sized + Send + Sync {
+    /// True if this dma_fence implementation uses 64bit seqno, false otherwise.
+    const USE_64BIT_SEQNO: bool;
+
+    /// Returns the driver name. This is a callback to allow drivers to compute the name at
+    /// runtime, without having to store it permanently for each fence or to build a cache of
+    /// some sort.
+    fn get_driver_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr;
+
+    /// Returns the name of the context this fence belongs to. This is a callback to allow
+    /// drivers to compute the name at runtime, without having to store it permanently for
+    /// each fence or to build a cache of some sort.
+    fn get_timeline_name<'a>(self: &'a FenceObject<Self>) -> &'a CStr;
+
+    /// Enable software signaling of fence.
+    fn enable_signaling(self: &FenceObject<Self>) -> bool {
+        false
+    }
+
+    /// Peek whether the fence is signaled, as a fastpath optimization for e.g. dma_fence_wait() or
+    /// dma_fence_add_callback().
+    fn signaled(self: &FenceObject<Self>) -> bool {
+        false
+    }
+
+    /// Callback to fill in free-form debug info specific to this fence, like the sequence number.
+    fn fence_value_str(self: &FenceObject<Self>, _output: &mut dyn Write) {}
+
+    /// Fills in the current value of the timeline as a string, like the sequence number. Note
+    /// that the specific fence passed to this function should not matter; drivers should only
+    /// use it to look up the corresponding timeline structures.
+    fn timeline_value_str(self: &FenceObject<Self>, _output: &mut dyn Write) {}
+}
+
+unsafe extern "C" fn get_driver_name_cb<T: FenceOps>(
+    fence: *mut bindings::dma_fence,
+) -> *const crate::ffi::c_char {
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+    T::get_driver_name(unsafe { &mut *p }).as_char_ptr()
+}
+
+unsafe extern "C" fn get_timeline_name_cb<T: FenceOps>(
+    fence: *mut bindings::dma_fence,
+) -> *const crate::ffi::c_char {
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+    T::get_timeline_name(unsafe { &mut *p }).as_char_ptr()
+}
+
+unsafe extern "C" fn enable_signaling_cb<T: FenceOps>(fence: *mut bindings::dma_fence) -> bool {
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+    T::enable_signaling(unsafe { &mut *p })
+}
+
+unsafe extern "C" fn signaled_cb<T: FenceOps>(fence: *mut bindings::dma_fence) -> bool {
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+    T::signaled(unsafe { &mut *p })
+}
+
+unsafe extern "C" fn release_cb<T: FenceOps>(fence: *mut bindings::dma_fence) {
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: p is never used after this
+    unsafe {
+        core::ptr::drop_in_place(&mut (*p).inner);
+    }
+
+    // SAFETY: All of our fences are allocated using kmalloc, so this is safe.
+    unsafe { bindings::dma_fence_free(fence) };
+}
+
+unsafe extern "C" fn fence_value_str_cb<T: FenceOps>(
+    fence: *mut bindings::dma_fence,
+    string: *mut crate::ffi::c_char,
+    size: crate::ffi::c_int,
+) {
+    let size: usize = size.try_into().unwrap_or(0);
+
+    if size == 0 {
+        return;
+    }
+
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: The caller is responsible for the validity of string/size
+    let mut f = unsafe { crate::str::Formatter::from_buffer(string as *mut _, size) };
+
+    // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+    T::fence_value_str(unsafe { &mut *p }, &mut f);
+    let _ = f.write_str("\0");
+
+    // SAFETY: `size` is at least 1 per the check above
+    unsafe { *string.add(size - 1) = 0 };
+}
+
+unsafe extern "C" fn timeline_value_str_cb<T: FenceOps>(
+    fence: *mut bindings::dma_fence,
+    string: *mut crate::ffi::c_char,
+    size: crate::ffi::c_int,
+) {
+    let size: usize = size.try_into().unwrap_or(0);
+
+    if size == 0 {
+        return;
+    }
+
+    // SAFETY: All of our fences are FenceObject<T>.
+    let p = unsafe { crate::container_of!(fence, FenceObject<T>, fence) as *mut FenceObject<T> };
+
+    // SAFETY: The caller is responsible for the validity of string/size
+    let mut f = unsafe { crate::str::Formatter::from_buffer(string as *mut _, size) };
+
+    // SAFETY: The caller is responsible for passing a valid dma_fence subtype
+    T::timeline_value_str(unsafe { &mut *p }, &mut f);
+    let _ = f.write_str("\0");
+
+    // SAFETY: `size` is at least 1 per the check above
+    unsafe { *string.add(size - 1) = 0 };
+}
+
+/// A driver-specific DMA Fence Object
+///
+/// # Invariants
+/// `fence` is a valid, initialized `dma_fence` for as long as this object is alive.
+#[repr(C)]
+pub struct FenceObject<T: FenceOps> {
+    fence: bindings::dma_fence,
+    lock: Opaque<bindings::spinlock>,
+    inner: T,
+}
+
+impl<T: FenceOps> FenceObject<T> {
+    const SIZE: usize = core::mem::size_of::<Self>();
+
+    const VTABLE: bindings::dma_fence_ops = bindings::dma_fence_ops {
+        use_64bit_seqno: T::USE_64BIT_SEQNO,
+        get_driver_name: Some(get_driver_name_cb::<T>),
+        get_timeline_name: Some(get_timeline_name_cb::<T>),
+        enable_signaling: if T::HAS_ENABLE_SIGNALING {
+            Some(enable_signaling_cb::<T>)
+        } else {
+            None
+        },
+        signaled: if T::HAS_SIGNALED {
+            Some(signaled_cb::<T>)
+        } else {
+            None
+        },
+        wait: None, // Deprecated
+        release: Some(release_cb::<T>),
+        fence_value_str: if T::HAS_FENCE_VALUE_STR {
+            Some(fence_value_str_cb::<T>)
+        } else {
+            None
+        },
+        timeline_value_str: if T::HAS_TIMELINE_VALUE_STR {
+            Some(timeline_value_str_cb::<T>)
+        } else {
+            None
+        },
+        set_deadline: None,
+    };
+}
+
+impl<T: FenceOps> Deref for FenceObject<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.inner
+    }
+}
+
+impl<T: FenceOps> DerefMut for FenceObject<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+}
+
+impl<T: FenceOps> crate::private::Sealed for FenceObject<T> {}
+impl<T: FenceOps> RawDmaFence for FenceObject<T> {
+    fn raw(&self) -> *mut bindings::dma_fence {
+        &self.fence as *const _ as *mut _
+    }
+}
+
+/// A unique reference to a driver-specific fence object
+pub struct UniqueFence<T: FenceOps>(*mut FenceObject<T>);
+
+impl<T: FenceOps> Deref for UniqueFence<T> {
+    type Target = FenceObject<T>;
+
+    fn deref(&self) -> &FenceObject<T> {
+        // SAFETY: The pointer is always valid for UniqueFence objects
+        unsafe { &*self.0 }
+    }
+}
+
+impl<T: FenceOps> DerefMut for UniqueFence<T> {
+    fn deref_mut(&mut self) -> &mut FenceObject<T> {
+        // SAFETY: The pointer is always valid for UniqueFence objects
+        unsafe { &mut *self.0 }
+    }
+}
+
+impl<T: FenceOps> crate::private::Sealed for UniqueFence<T> {}
+impl<T: FenceOps> RawDmaFence for UniqueFence<T> {
+    fn raw(&self) -> *mut bindings::dma_fence {
+        // SAFETY: The pointer is always valid for UniqueFence objects
+        unsafe { addr_of_mut!((*self.0).fence) }
+    }
+}
+
+impl<T: FenceOps> From<UniqueFence<T>> for UserFence<T> {
+    fn from(value: UniqueFence<T>) -> Self {
+        let ptr = value.0;
+        core::mem::forget(value);
+
+        UserFence(ptr)
+    }
+}
+
+impl<T: FenceOps> Drop for UniqueFence<T> {
+    fn drop(&mut self) {
+        // SAFETY: We own a reference to this fence.
+        unsafe { bindings::dma_fence_put(self.raw()) };
+    }
+}
+
+// SAFETY: The API for these objects is thread safe
+unsafe impl<T: FenceOps> Sync for UniqueFence<T> {}
+// SAFETY: The API for these objects is thread safe
+unsafe impl<T: FenceOps> Send for UniqueFence<T> {}
+
+/// A shared reference to a driver-specific fence object
+pub struct UserFence<T: FenceOps>(*mut FenceObject<T>);
+
+impl<T: FenceOps> Deref for UserFence<T> {
+    type Target = FenceObject<T>;
+
+    fn deref(&self) -> &FenceObject<T> {
+        // SAFETY: The pointer is always valid for UserFence objects
+        unsafe { &*self.0 }
+    }
+}
+
+impl<T: FenceOps> Clone for UserFence<T> {
+    fn clone(&self) -> Self {
+        // SAFETY: The pointer is valid per the type invariant and we own a reference to it.
+        unsafe {
+            bindings::dma_fence_get(self.raw());
+            Self(self.0)
+        }
+    }
+}
+
+impl<T: FenceOps> crate::private::Sealed for UserFence<T> {}
+impl<T: FenceOps> RawDmaFence for UserFence<T> {
+    fn raw(&self) -> *mut bindings::dma_fence {
+        // SAFETY: The pointer is always valid for UserFence objects
+        unsafe { addr_of_mut!((*self.0).fence) }
+    }
+}
+
+impl<T: FenceOps> Drop for UserFence<T> {
+    fn drop(&mut self) {
+        // SAFETY: We own a reference to this fence.
+        unsafe { bindings::dma_fence_put(self.raw()) };
+    }
+}
+
+// SAFETY: The API for these objects is thread safe
+unsafe impl<T: FenceOps> Sync for UserFence<T> {}
+// SAFETY: The API for these objects is thread safe
+unsafe impl<T: FenceOps> Send for UserFence<T> {}
+
+/// An array of fence contexts, out of which fences can be created.
+pub struct FenceContexts {
+    start: u64,
+    count: u32,
+    seqnos: KVec<AtomicU64>,
+    lock_name: &'static CStr,
+    lock_key: LockClassKey,
+}
+
+impl FenceContexts {
+    /// Create a new set of fence contexts.
+    pub fn new(count: u32, name: &'static CStr, key: LockClassKey) -> Result<FenceContexts> {
+        let mut seqnos: KVec<AtomicU64> = KVec::new();
+
+        seqnos.reserve(count as usize, GFP_KERNEL)?;
+
+        for _ in 0..count {
+            seqnos.push(Default::default(), GFP_KERNEL)?;
+        }
+
+        // SAFETY: This is always safe to call
+        let start = unsafe { bindings::dma_fence_context_alloc(count as crate::ffi::c_uint) };
+
+        Ok(FenceContexts {
+            start,
+            count,
+            seqnos,
+            lock_name: name,
+            lock_key: key,
+        })
+    }
+
+    /// Create a new fence in a given context index.
+    pub fn new_fence<T: FenceOps>(&self, context: u32, inner: T) -> Result<UniqueFence<T>> {
+        if context >= self.count {
+            return Err(EINVAL);
+        }
+
+        // SAFETY: krealloc is always safe to call like this
+        let p = unsafe {
+            bindings::krealloc(
+                core::ptr::null_mut(),
+                FenceObject::<T>::SIZE,
+                bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+            ) as *mut FenceObject<T>
+        };
+
+        if p.is_null() {
+            return Err(ENOMEM);
+        }
+
+        let seqno = self.seqnos[context as usize].fetch_add(1, Ordering::Relaxed);
+
+        // SAFETY: The pointer is valid, so pointers to members are too.
+        // After this, all fields are initialized.
+        unsafe {
+            addr_of_mut!((*p).inner).write(inner);
+            bindings::__spin_lock_init(
+                addr_of_mut!((*p).lock) as *mut _,
+                self.lock_name.as_char_ptr(),
+                self.lock_key.as_ptr(),
+            );
+            bindings::dma_fence_init(
+                addr_of_mut!((*p).fence),
+                &FenceObject::<T>::VTABLE,
+                addr_of_mut!((*p).lock) as *mut _,
+                self.start + context as u64,
+                seqno,
+            );
+        };
+
+        Ok(UniqueFence(p))
+    }
+}
+
+/// A DMA Fence Chain Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a dma_fence_chain which we own.
+pub struct FenceChain {
+    ptr: *mut bindings::dma_fence_chain,
+}
+
+impl FenceChain {
+    /// Create a new [`FenceChain`] object.
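+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of allocating a chain node:
+    ///
+    /// ```
+    /// # fn test() -> Result {
+    /// let _chain = kernel::dma_fence::FenceChain::new()?;
+    /// # Ok::<(), Error>(()) }
+    /// ```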
+    pub fn new() -> Result<Self> {
+        // SAFETY: This function is safe to call and takes no arguments.
+        let ptr = unsafe { bindings::dma_fence_chain_alloc() };
+
+        if ptr.is_null() {
+            Err(ENOMEM)
+        } else {
+            Ok(FenceChain { ptr })
+        }
+    }
+
+    /// Convert the [`FenceChain`] into the underlying raw pointer.
+    ///
+    /// This assumes the caller will take ownership of the object.
+    pub(crate) fn into_raw(self) -> *mut bindings::dma_fence_chain {
+        let ptr = self.ptr;
+        core::mem::forget(self);
+        ptr
+    }
+}
+
+impl Drop for FenceChain {
+    fn drop(&mut self) {
+        // SAFETY: We own this dma_fence_chain.
+        unsafe { bindings::dma_fence_chain_free(self.ptr) };
+    }
+}
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
new file mode 100644
index 00000000000000..92e1112cfb01ec
--- /dev/null
+++ b/rust/kernel/drm/device.rs
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM device.
+//!
+//! C header: [`include/drm/drm_device.h`](srctree/include/drm/drm_device.h)
+
+use crate::{
+    bindings, device, drm,
+    drm::drv::AllocImpl,
+    error::code::*,
+    error::from_err_ptr,
+    error::Result,
+    ffi,
+    types::{ARef, AlwaysRefCounted, ForeignOwnable, Opaque},
+};
+use core::{marker::PhantomData, ops::Deref, ptr::NonNull};
+
+#[cfg(CONFIG_DRM_LEGACY)]
+macro_rules! drm_legacy_fields {
+    ( $($field:ident: $val:expr),* $(,)? ) => {
+        bindings::drm_driver {
+            $( $field: $val ),*,
+            firstopen: None,
+            preclose: None,
+            dma_ioctl: None,
+            dma_quiescent: None,
+            context_dtor: None,
+            irq_handler: None,
+            irq_preinstall: None,
+            irq_postinstall: None,
+            irq_uninstall: None,
+            get_vblank_counter: None,
+            enable_vblank: None,
+            disable_vblank: None,
+            dev_priv_size: 0,
+        }
+    }
+}
+
+#[cfg(not(CONFIG_DRM_LEGACY))]
+macro_rules! drm_legacy_fields {
+    ( $($field:ident: $val:expr),* $(,)? ) => {
+        bindings::drm_driver {
+            $( $field: $val ),*
+        }
+    }
+}
+
+/// A typed DRM device with a specific `drm::drv::Driver` implementation. The device is always
+/// reference-counted.
+///
+/// # Invariants
+///
+/// `drm_dev_release()` can be called from any non-atomic context.
+#[repr(transparent)]
+pub struct Device<T: drm::drv::Driver>(Opaque<bindings::drm_device>, PhantomData<T>);
+
+impl<T: drm::drv::Driver> Device<T> {
+    const VTABLE: bindings::drm_driver = drm_legacy_fields! {
+        load: None,
+        open: Some(drm::file::open_callback::<T::File>),
+        postclose: Some(drm::file::postclose_callback::<T::File>),
+        unload: None,
+        release: Some(Self::release),
+        master_set: None,
+        master_drop: None,
+        debugfs_init: None,
+        gem_create_object: T::Object::ALLOC_OPS.gem_create_object,
+        prime_handle_to_fd: T::Object::ALLOC_OPS.prime_handle_to_fd,
+        prime_fd_to_handle: T::Object::ALLOC_OPS.prime_fd_to_handle,
+        gem_prime_import: T::Object::ALLOC_OPS.gem_prime_import,
+        gem_prime_import_sg_table: T::Object::ALLOC_OPS.gem_prime_import_sg_table,
+        dumb_create: T::Object::ALLOC_OPS.dumb_create,
+        dumb_map_offset: T::Object::ALLOC_OPS.dumb_map_offset,
+        show_fdinfo: None,
+        fbdev_probe: None,
+
+        major: T::INFO.major,
+        minor: T::INFO.minor,
+        patchlevel: T::INFO.patchlevel,
+        name: T::INFO.name.as_char_ptr() as *mut _,
+        desc: T::INFO.desc.as_char_ptr() as *mut _,
+
+        driver_features: T::FEATURES,
+        ioctls: T::IOCTLS.as_ptr(),
+        num_ioctls: T::IOCTLS.len() as i32,
+        fops: &Self::GEM_FOPS as _,
+    };
+
+    const GEM_FOPS: bindings::file_operations = drm::gem::create_fops();
+
+    /// Create a new `drm::device::Device` for a `drm::drv::Driver`.
+    pub fn new(dev: &device::Device) -> Result<ARef<Self>> {
+        // SAFETY: `dev` is valid by its type invariants; `VTABLE`, as a `const`, is pinned to
+        // the read-only section of the compilation.
+        let raw_drm = unsafe { bindings::drm_dev_alloc(&Self::VTABLE, dev.as_raw()) };
+        let raw_drm = NonNull::new(from_err_ptr(raw_drm)? as *mut _).ok_or(ENOMEM)?;
+
+        // SAFETY: The reference count is one, and now we take ownership of that reference as a
+        // drm::device::Device.
+        Ok(unsafe { ARef::<Self>::from_raw(raw_drm) })
+    }
+
+    pub(crate) fn as_raw(&self) -> *mut bindings::drm_device {
+        self.0.get()
+    }
+
+    /// # Safety
+    ///
+    /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+    /// i.e. it must be ensured that the reference count of the C `struct drm_device` `ptr` points
+    /// to can't drop to zero, for the duration of this function call and the entire duration when
+    /// the returned reference exists.
+    pub(crate) unsafe fn borrow<'a>(ptr: *const bindings::drm_device) -> &'a Self {
+        // SAFETY: Safe by the safety requirements of this function.
+        unsafe { &*ptr.cast() }
+    }
+
+    fn raw_data(&self) -> *mut ffi::c_void {
+        // SAFETY: `self` is guaranteed to hold a valid `bindings::drm_device` pointer.
+        unsafe { (*self.as_raw()).dev_private }
+    }
+
+    /// # Safety
+    ///
+    /// Must be called only once after device creation.
+    pub(crate) unsafe fn set_raw_data(&self, ptr: *const ffi::c_void) {
+        // SAFETY: Safe by the safety precondition.
+        unsafe { &mut *self.as_raw() }.dev_private = ptr as _;
+    }
+
+    #[allow(clippy::missing_safety_doc)]
+    unsafe extern "C" fn release(drm: *mut bindings::drm_device) {
+        // SAFETY: Guaranteed to be a valid pointer to a `struct drm_device`.
+        let drm = unsafe { Self::borrow(drm) };
+
+        if !drm.raw_data().is_null() {
+            // SAFETY: `drm.raw_data()` is either `NULL` or a valid pointer previously created
+            // via `ForeignOwnable::into_foreign`.
+            unsafe { <T::Data as ForeignOwnable>::from_foreign(drm.raw_data()) };
+        }
+    }
+}
+
+/// Same as [`Device`], but with an accessor for the device's driver-private data.
+#[repr(transparent)]
+pub struct RegisteredDevice<T: drm::drv::Driver>(Device<T>);
+
+impl<T: drm::drv::Driver> RegisteredDevice<T> {
+    /// Not intended to be called externally, except via declare_drm_ioctls!()
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count,
+    /// i.e. it must be ensured that the reference count of the C `struct drm_device` `ptr` points
+    /// to can't drop to zero, for the duration of this function call and the entire duration when
+    /// the returned reference exists.
+    ///
+    /// Additionally, callers must ensure that the corresponding `struct drm_device` is registered.
+    #[doc(hidden)]
+    pub unsafe fn borrow<'a>(ptr: *const bindings::drm_device) -> &'a Self {
+        // SAFETY: By the safety requirements of this function `ptr` is valid.
+        unsafe { &*ptr.cast() }
+    }
+
+    /// Returns a borrowed reference to the user data associated with this Device.
+    pub fn data(&self) -> <T::Data as ForeignOwnable>::Borrowed<'_> {
+        // SAFETY: `dev_private` is always set once the device is registered.
+        unsafe { T::Data::borrow(self.raw_data()) }
+    }
+}
+
+impl<T: drm::drv::Driver> Deref for RegisteredDevice<T> {
+    type Target = Device<T>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+// SAFETY: DRM device objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: drm::drv::Driver> AlwaysRefCounted for Device<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+        unsafe { bindings::drm_dev_get(self.as_raw()) };
+    }
+
+    unsafe fn dec_ref(obj: NonNull<Self>) {
+        // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+        unsafe { bindings::drm_dev_put(obj.cast().as_ptr()) };
+    }
+}
+
+impl<T: drm::drv::Driver> AsRef<device::Device> for Device<T> {
+    fn as_ref(&self) -> &device::Device {
+        // SAFETY: `bindings::drm_device::dev` is valid as long as the DRM device itself is valid,
+        // which is guaranteed by the type invariant.
+        unsafe { device::Device::as_ref((*self.as_raw()).dev) }
+    }
+}
+
+// SAFETY: By the type invariant, `Device` can be sent to any thread.
+unsafe impl<T: drm::drv::Driver> Send for Device<T> {}
+
+// SAFETY: `Device` can be shared among threads because all immutable methods are protected by the
+// synchronization in `struct drm_device`.
+unsafe impl<T: drm::drv::Driver> Sync for Device<T> {}
diff --git a/rust/kernel/drm/drv.rs b/rust/kernel/drm/drv.rs
new file mode 100644
index 00000000000000..99b6b9f67a3cc6
--- /dev/null
+++ b/rust/kernel/drm/drv.rs
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM driver core.
+//!
+//! C header: [`include/drm/drm_drv.h`](srctree/include/drm/drm_drv.h)
+
+use crate::{
+    bindings, drm,
+    error::{Error, Result},
+    private::Sealed,
+    str::CStr,
+    types::{ARef, ForeignOwnable},
+};
+use macros::vtable;
+
+/// Driver uses the GEM memory manager. This should be set for all modern drivers.
+pub const FEAT_GEM: u32 = bindings::drm_driver_feature_DRIVER_GEM;
+/// Driver supports mode setting interfaces (KMS).
+pub const FEAT_MODESET: u32 = bindings::drm_driver_feature_DRIVER_MODESET;
+/// Driver supports dedicated render nodes.
+pub const FEAT_RENDER: u32 = bindings::drm_driver_feature_DRIVER_RENDER;
+/// Driver supports the full atomic modesetting userspace API.
+///
+/// Drivers which only use atomic internally, but do not support the full userspace API (e.g. not
+/// all properties converted to atomic, or multi-plane updates are not guaranteed to be tear-free)
+/// should not set this flag.
+pub const FEAT_ATOMIC: u32 = bindings::drm_driver_feature_DRIVER_ATOMIC;
+/// Driver supports DRM sync objects for explicit synchronization of command submission.
+pub const FEAT_SYNCOBJ: u32 = bindings::drm_driver_feature_DRIVER_SYNCOBJ;
+/// Driver supports the timeline flavor of DRM sync objects for explicit synchronization of command
+/// submission.
+pub const FEAT_SYNCOBJ_TIMELINE: u32 = bindings::drm_driver_feature_DRIVER_SYNCOBJ_TIMELINE;
+/// Driver supports compute acceleration devices. This flag is mutually exclusive with `FEAT_RENDER`
+/// and `FEAT_MODESET`. Devices that support both graphics and compute acceleration should be
+/// handled by two drivers that are connected using auxiliary bus.
+pub const FEAT_COMPUTE_ACCEL: u32 = bindings::drm_driver_feature_DRIVER_COMPUTE_ACCEL;
+/// Driver supports user defined GPU VA bindings for GEM objects.
+pub const FEAT_GEM_GPUVA: u32 = bindings::drm_driver_feature_DRIVER_GEM_GPUVA;
+/// Driver supports and requires cursor hotspot information in the cursor plane (e.g. cursor plane
+/// has to actually track the mouse cursor and the clients are required to set hotspot in order for
+/// the cursor planes to work correctly).
+pub const FEAT_CURSOR_HOTSPOT: u32 = bindings::drm_driver_feature_DRIVER_CURSOR_HOTSPOT;
+
+/// Information data for a DRM Driver.
+pub struct DriverInfo {
+    /// Driver major version.
+    pub major: i32,
+    /// Driver minor version.
+    pub minor: i32,
+    /// Driver patchlevel version.
+    pub patchlevel: i32,
+    /// Driver name.
+    pub name: &'static CStr,
+    /// Driver description.
+    pub desc: &'static CStr,
+    /// Driver date.
+    pub date: &'static CStr,
+}
+
+/// Internal memory management operation set, normally created by memory managers (e.g. GEM).
+///
+/// See `kernel::drm::gem` and `kernel::drm::gem::shmem`.
+pub struct AllocOps {
+    pub(crate) gem_create_object: Option<
+        unsafe extern "C" fn(
+            dev: *mut bindings::drm_device,
+            size: usize,
+        ) -> *mut bindings::drm_gem_object,
+    >,
+    pub(crate) prime_handle_to_fd: Option<
+        unsafe extern "C" fn(
+            dev: *mut bindings::drm_device,
+            file_priv: *mut bindings::drm_file,
+            handle: u32,
+            flags: u32,
+            prime_fd: *mut core::ffi::c_int,
+        ) -> core::ffi::c_int,
+    >,
+    pub(crate) prime_fd_to_handle: Option<
+        unsafe extern "C" fn(
+            dev: *mut bindings::drm_device,
+            file_priv: *mut bindings::drm_file,
+            prime_fd: core::ffi::c_int,
+            handle: *mut u32,
+        ) -> core::ffi::c_int,
+    >,
+    pub(crate) gem_prime_import: Option<
+        unsafe extern "C" fn(
+            dev: *mut bindings::drm_device,
+            dma_buf: *mut bindings::dma_buf,
+        ) -> *mut bindings::drm_gem_object,
+    >,
+    pub(crate) gem_prime_import_sg_table: Option<
+        unsafe extern "C" fn(
+            dev: *mut bindings::drm_device,
+            attach: *mut bindings::dma_buf_attachment,
+            sgt: *mut bindings::sg_table,
+        ) -> *mut bindings::drm_gem_object,
+    >,
+    pub(crate) dumb_create: Option<
+        unsafe extern "C" fn(
+            file_priv: *mut bindings::drm_file,
+            dev: *mut bindings::drm_device,
+            args: *mut bindings::drm_mode_create_dumb,
+        ) -> core::ffi::c_int,
+    >,
+    pub(crate) dumb_map_offset: Option<
+        unsafe extern "C" fn(
+            file_priv: *mut bindings::drm_file,
+            dev: *mut bindings::drm_device,
+            handle: u32,
+            offset: *mut u64,
+        ) -> core::ffi::c_int,
+    >,
+}
+
+/// Trait for memory manager implementations. Implemented internally.
+pub trait AllocImpl: Sealed + drm::gem::IntoGEMObject {
+    /// The C callback operations for this memory manager.
+    const ALLOC_OPS: AllocOps;
+}
+
+/// The DRM `Driver` trait.
+///
+/// This trait must be implemented by drivers in order to create a `struct drm_device` and `struct
+/// drm_driver` to be registered in the DRM subsystem.
+#[vtable]
+pub trait Driver {
+    /// Context data associated with the DRM driver
+    ///
+    /// Determines the type of the context data passed to each of the methods of the trait.
+    type Data: ForeignOwnable + Sync + Send;
+
+    /// The type used to manage memory for this driver.
+    ///
+    /// Should be either `drm::gem::Object<T>` or `drm::gem::shmem::Object<T>`.
+    type Object: AllocImpl;
+
+    /// The type used to represent a DRM File (client)
+    type File: drm::file::DriverFile;
+
+    /// Driver metadata
+    const INFO: DriverInfo;
+
+    /// Feature flags
+    const FEATURES: u32;
+
+    /// IOCTL list. See `kernel::drm::ioctl::declare_drm_ioctls!{}`.
+    const IOCTLS: &'static [drm::ioctl::DrmIoctlDescriptor];
+}
+
+/// The registration type of a `drm::device::Device`.
+///
+/// Once the `Registration` structure is dropped, the device is unregistered.
+pub struct Registration<T: Driver>(ARef<drm::device::Device<T>>);
+
+impl<T: Driver> Registration<T> {
+    /// Creates a new [`Registration`] and registers it.
+    pub fn new(drm: ARef<drm::device::Device<T>>, data: T::Data, flags: usize) -> Result<Self> {
+        let data_ptr = <T::Data as ForeignOwnable>::into_foreign(data);
+
+        // SAFETY: We set `dev_private` exactly once, before we call `drm_dev_register`, hence any
+        // subsequent access after registration is valid.
+        unsafe { drm.set_raw_data(data_ptr) };
+
+        // SAFETY: Safe by the invariants of `drm::device::Device`.
+        let ret = unsafe { bindings::drm_dev_register(drm.as_raw(), flags) };
+        if ret < 0 {
+            return Err(Error::from_errno(ret));
+        }
+
+        Ok(Self(drm))
+    }
+
+    /// Returns a reference to the `Device` instance for this registration.
+    pub fn device(&self) -> &drm::device::Device<T> {
+        &self.0
+    }
+}
+
+// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between
+// threads, hence it's safe to share it.
+unsafe impl<T: Driver> Sync for Registration<T> {}
+
+// SAFETY: Registration with and unregistration from the DRM subsystem can happen from any thread.
+unsafe impl<T: Driver> Send for Registration<T> {}
+
+impl<T: Driver> Drop for Registration<T> {
+    /// Removes the registration from the kernel if it has completed successfully before.
+    fn drop(&mut self) {
+        // SAFETY: Safe by the invariant of `ARef<drm::device::Device<T>>`. The existence of
+        // this `Registration` also guarantees that this `drm::device::Device` is actually
+        // registered.
+        unsafe { bindings::drm_dev_unregister(self.0.as_raw()) };
+    }
+}
diff --git a/rust/kernel/drm/file.rs b/rust/kernel/drm/file.rs
new file mode 100644
index 00000000000000..a4641f4ef74a85
--- /dev/null
+++ b/rust/kernel/drm/file.rs
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM File objects.
+//!
+//! C header: [`include/drm/drm_file.h`](srctree/include/drm/drm_file.h)
+
+use crate::{bindings, drm, error::Result, prelude::*, types::ForeignOwnable};
+use core::marker::PhantomData;
+use core::pin::Pin;
+
+/// Declare all associated types of [`DriverFile`].
+///
+/// The argument to this macro must be the parent `Driver` implementation for this [`DriverFile`].
+#[macro_export]
+macro_rules! define_driver_file_types {
+    ($driver:ty) => {
+        type Driver = $driver;
+        // TODO: Make the two below default types of `DriverFile` once `associated_type_defaults`
+        // are stable.
+        type Data = <Self::Driver as drm::drv::Driver>::Data;
+        type BorrowedData<'a> = <Self::Data as ForeignOwnable>::Borrowed<'a>;
+    };
+}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM File (a client instance).
+pub trait DriverFile {
+    /// The parent `Driver` implementation for this `DriverFile`.
+    type Driver: drm::drv::Driver;
+
+    /// The driver's private data.
+    type Data: ForeignOwnable;
+
+    /// The driver's private data as `ForeignOwnable::Borrowed`.
+    type BorrowedData<'a>;
+
+    /// Open a new file (called when a client opens the DRM device).
+    fn open(
+        device: &drm::device::Device<Self::Driver>,
+        data: <<Self::Driver as drm::drv::Driver>::Data as ForeignOwnable>::Borrowed<'_>,
+    ) -> Result<Pin<KBox<Self>>>;
+}
+
+/// An open DRM File.
+///
+/// # Invariants
+/// `raw` is a valid pointer to an open `drm_file` struct.
+#[repr(transparent)]
+pub struct File<T: DriverFile> {
+    raw: *mut bindings::drm_file,
+    _p: PhantomData<T>,
+}
+
+#[allow(clippy::missing_safety_doc)]
+/// The open callback of a `struct drm_file`.
+pub(super) unsafe extern "C" fn open_callback<T: DriverFile>(
+    raw_dev: *mut bindings::drm_device,
+    raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+    // SAFETY: A callback from `struct drm_driver::open` guarantees that
+    // - `raw_dev` is a valid pointer to a `struct drm_device`,
+    // - the corresponding `struct drm_device` has been registered.
+    let drm = unsafe { drm::device::RegisteredDevice::borrow(raw_dev) };
+    // SAFETY: This reference won't escape this function
+    let file = unsafe { &mut *raw_file };
+
+    let inner = match T::open(drm, drm.data()) {
+        Err(e) => {
+            return e.to_errno();
+        }
+        Ok(i) => i,
+    };
+
+    // SAFETY: This pointer is treated as pinned, and the Drop guarantee is upheld below.
+    file.driver_priv = KBox::into_raw(unsafe { Pin::into_inner_unchecked(inner) }) as *mut _;
+
+    0
+}
+
+#[allow(clippy::missing_safety_doc)]
+/// The postclose callback of a `struct drm_file`.
+pub(super) unsafe extern "C" fn postclose_callback<T: DriverFile>(
+    _raw_dev: *mut bindings::drm_device,
+    raw_file: *mut bindings::drm_file,
+) {
+    // SAFETY: This reference won't escape this function
+    let file = unsafe { &*raw_file };
+
+    // SAFETY: `file.driver_priv` has been created in `open_callback` through `KBox::into_raw`.
+    unsafe {
+        let _ = KBox::from_raw(file.driver_priv as *mut T);
+    };
+}
+
+impl<T: DriverFile> File<T> {
+    #[doc(hidden)]
+    /// Not intended to be called externally, except via declare_drm_ioctls!()
+    ///
+    /// # Safety
+    ///
+    /// `raw_file` must be a valid pointer to an open `struct drm_file`, opened through `T::open`.
+    pub unsafe fn from_raw(raw_file: *mut bindings::drm_file) -> File<T> {
+        File {
+            raw: raw_file,
+            _p: PhantomData,
+        }
+    }
+
+    #[allow(dead_code)]
+    /// Return the raw pointer to the underlying `drm_file`.
+    pub(super) fn raw(&self) -> *const bindings::drm_file {
+        self.raw
+    }
+
+    /// Return an immutable reference to the raw `drm_file` structure.
+    pub(super) fn file(&self) -> &bindings::drm_file {
+        // SAFETY: `self.raw` is a valid pointer to a `struct drm_file`.
+        unsafe { &*self.raw }
+    }
+
+    /// Return a pinned reference to the driver file structure.
+    pub fn inner(&self) -> Pin<&T> {
+        // SAFETY: By the type invariant the pointer `self.raw` points to a valid and opened
+        // `struct drm_file`, hence `self.raw.driver_priv` has been properly initialized by
+        // `open_callback`.
+        unsafe { Pin::new_unchecked(&*(self.file().driver_priv as *const T)) }
+    }
+}
+
+impl<T: DriverFile> crate::private::Sealed for File<T> {}
+
+/// Generic trait to allow users that don't care about driver specifics to accept any `File<T>`.
+///
+/// # Safety
+///
+/// Must only be implemented for `File<T>` and return the pointer, following the normal invariants
+/// of that type.
+pub unsafe trait GenericFile: crate::private::Sealed {
+    /// Returns the raw const pointer to the `struct drm_file`
+    fn raw(&self) -> *const bindings::drm_file;
+    /// Returns the raw mut pointer to the `struct drm_file`
+    fn raw_mut(&mut self) -> *mut bindings::drm_file;
+}
+
+// SAFETY: Implementation for `File<T>`, holding up its type invariants.
+unsafe impl<T: DriverFile> GenericFile for File<T> {
+    fn raw(&self) -> *const bindings::drm_file {
+        self.raw
+    }
+    fn raw_mut(&mut self) -> *mut bindings::drm_file {
+        self.raw
+    }
+}
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
new file mode 100644
index 00000000000000..3a7e9f80b414bf
--- /dev/null
+++ b/rust/kernel/drm/gem/mod.rs
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM GEM API
+//!
+//! C header: [`include/drm/drm_gem.h`](srctree/include/drm/drm_gem.h)
+#[cfg(CONFIG_DRM_GEM_SHMEM_HELPER = "y")]
+pub mod shmem;
+
+use crate::{
+    alloc::flags::*,
+    bindings,
+    drm::{device, drv, file},
+    error::{to_result, Result},
+    prelude::*,
+};
+use core::{marker::PhantomPinned, mem, ops::Deref, ops::DerefMut};
+
+/// GEM object functions, which must be implemented by drivers.
+pub trait BaseDriverObject<T: BaseObject>: Sync + Send + Sized {
+    /// Create a new driver data object for a GEM object of a given size.
+    fn new(dev: &device::Device<T::Driver>, size: usize) -> impl PinInit<Self, Error>;
+
+    /// Open a new handle to an existing object, associated with a File.
+    fn open(
+        _obj: &<<T as IntoGEMObject>::Driver as drv::Driver>::Object,
+        _file: &file::File<<<T as IntoGEMObject>::Driver as drv::Driver>::File>,
+    ) -> Result {
+        Ok(())
+    }
+
+    /// Close a handle to an existing object, associated with a File.
+    fn close(
+        _obj: &<<T as IntoGEMObject>::Driver as drv::Driver>::Object,
+        _file: &file::File<<<T as IntoGEMObject>::Driver as drv::Driver>::File>,
+    ) {
+    }
+}
+
+/// Trait that represents a GEM object subtype
+pub trait IntoGEMObject: Sized + crate::private::Sealed {
+    /// Owning driver for this type
+    type Driver: drv::Driver;
+
+    /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
+    /// this owning object is valid.
+    fn gem_obj(&self) -> &bindings::drm_gem_object;
+
+    /// Returns a reference to the raw `drm_gem_object` structure, which must be valid as long as
+    /// this owning object is valid.
+    fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object;
+
+    /// Converts a pointer to a `drm_gem_object` into a pointer to this type.
+    fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Self;
+}
+
+/// Trait which must be implemented by drivers using base GEM objects.
+pub trait DriverObject: BaseDriverObject<Object<Self>> {
+    /// Parent `Driver` for this object.
+    type Driver: drv::Driver;
+}
+
+#[allow(clippy::missing_safety_doc)]
+unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
+    // SAFETY: All of our objects are Object<T>.
+    let this = unsafe { crate::container_of!(obj, Object<T>, obj) } as *mut Object<T>;
+
+    // SAFETY: The pointer we got has to be valid
+    unsafe { bindings::drm_gem_object_release(obj) };
+
+    // SAFETY: All of our objects are allocated via KBox<>, and we're in the
+    // free callback which guarantees this object has zero remaining references,
+    // so we can drop it
+    unsafe {
+        let _ = KBox::from_raw(this);
+    };
+}
+
+#[allow(clippy::missing_safety_doc)]
+unsafe extern "C" fn open_callback<T: BaseDriverObject<U>, U: BaseObject>(
+    raw_obj: *mut bindings::drm_gem_object,
+    raw_file: *mut bindings::drm_file,
+) -> core::ffi::c_int {
+    // SAFETY: The pointer we got has to be valid.
+    let file = unsafe {
+        file::File::<<<U as IntoGEMObject>::Driver as drv::Driver>::File>::from_raw(raw_file)
+    };
+    let obj =
+        <<<U as IntoGEMObject>::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(
+            raw_obj,
+        );
+
+    // SAFETY: from_gem_obj() returns a valid pointer as long as the type is
+    // correct and the raw_obj we got is valid.
+    match T::open(unsafe { &*obj }, &file) {
+        Err(e) => e.to_errno(),
+        Ok(()) => 0,
+    }
+}
+
+#[allow(clippy::missing_safety_doc)]
+unsafe extern "C" fn close_callback<T: BaseDriverObject<U>, U: BaseObject>(
+    raw_obj: *mut bindings::drm_gem_object,
+    raw_file: *mut bindings::drm_file,
+) {
+    // SAFETY: The pointer we got has to be valid.
+    let file = unsafe {
+        file::File::<<<U as IntoGEMObject>::Driver as drv::Driver>::File>::from_raw(raw_file)
+    };
+    let obj =
+        <<<U as IntoGEMObject>::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(
+            raw_obj,
+        );
+
+    // SAFETY: from_gem_obj() returns a valid pointer as long as the type is
+    // correct and the raw_obj we got is valid.
+    T::close(unsafe { &*obj }, &file);
+}
+
+impl<T: DriverObject> IntoGEMObject for Object<T> {
+    type Driver = T::Driver;
+
+    fn gem_obj(&self) -> &bindings::drm_gem_object {
+        &self.obj
+    }
+
+    fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object {
+        &mut self.obj
+    }
+
+    fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
+        // SAFETY: All of our objects are Object<T>.
+        unsafe { crate::container_of!(obj, Object<T>, obj) as *mut Object<T> }
+    }
+}
+
+/// Base operations shared by all GEM object classes
+pub trait BaseObject: IntoGEMObject {
+    /// Returns the size of the object in bytes.
+    fn size(&self) -> usize {
+        self.gem_obj().size
+    }
+
+    /// Sets the exportable flag, which controls whether the object can be exported via PRIME.
+    fn set_exportable(&mut self, exportable: bool) {
+        self.mut_gem_obj().exportable = exportable;
+    }
+
+    /// Creates a new reference to the object.
+    fn reference(&self) -> ObjectRef<Self> {
+        // SAFETY: Having a reference to an Object implies holding a GEM reference
+        unsafe {
+            bindings::drm_gem_object_get(self.gem_obj() as *const _ as *mut _);
+        }
+        ObjectRef {
+            ptr: self as *const _,
+        }
+    }
+
+    /// Creates a new handle for the object associated with a given `File`
+    /// (or returns an existing one).
+    fn create_handle(
+        &self,
+        file: &file::File<<<Self as IntoGEMObject>::Driver as drv::Driver>::File>,
+    ) -> Result<u32> {
+        let mut handle: u32 = 0;
+        // SAFETY: The arguments are all valid per the type invariants.
+        to_result(unsafe {
+            bindings::drm_gem_handle_create(
+                file.raw() as *mut _,
+                self.gem_obj() as *const _ as *mut _,
+                &mut handle,
+            )
+        })?;
+        Ok(handle)
+    }
+
+    /// Looks up an object by its handle for a given `File`.
+    fn lookup_handle(
+        file: &file::File<<<Self as IntoGEMObject>::Driver as drv::Driver>::File>,
+        handle: u32,
+    ) -> Result<ObjectRef<Self>> {
+        // SAFETY: The arguments are all valid per the type invariants.
+        let ptr = unsafe { bindings::drm_gem_object_lookup(file.raw() as *mut _, handle) };
+
+        if ptr.is_null() {
+            Err(ENOENT)
+        } else {
+            Ok(ObjectRef {
+                ptr: ptr as *const _,
+            })
+        }
+    }
+
+    /// Creates an mmap offset to map the object from userspace.
+    fn create_mmap_offset(&self) -> Result<u64> {
+        // SAFETY: The arguments are valid per the type invariant.
+        to_result(unsafe {
+            bindings::drm_gem_create_mmap_offset(self.gem_obj() as *const _ as *mut _)
+        })?;
+
+        // SAFETY: The arguments are valid per the type invariant.
+        Ok(unsafe {
+            bindings::drm_vma_node_offset_addr(&self.gem_obj().vma_node as *const _ as *mut _)
+        })
+    }
+}
+
+impl<T: IntoGEMObject> BaseObject for T {}
+
+/// A base GEM object.
+#[repr(C)]
+#[pin_data]
+pub struct Object<T: DriverObject> {
+    obj: bindings::drm_gem_object,
+    // The DRM core ensures the Device exists as long as its objects exist, so we don't need to
+    // manage the reference count here.
+    dev: *const bindings::drm_device,
+    #[pin]
+    inner: T,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+// SAFETY: This struct is safe to zero-initialize
+unsafe impl init::Zeroable for bindings::drm_gem_object {}
+
+impl<T: DriverObject> Object<T> {
+    /// The size of this object's structure.
+    pub const SIZE: usize = mem::size_of::<Self>();
+
+    const OBJECT_FUNCS: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+        free: Some(free_callback::<T>),
+        open: Some(open_callback::<T, Object<T>>),
+        close: Some(close_callback::<T, Object<T>>),
+        print_info: None,
+        export: None,
+        pin: None,
+        unpin: None,
+        get_sg_table: None,
+        vmap: None,
+        vunmap: None,
+        mmap: None,
+        status: None,
+        vm_ops: core::ptr::null_mut(),
+        evict: None,
+        rss: None,
+    };
+
+    /// Create a new GEM object.
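+    ///
+    /// A minimal sketch (assuming a `DriverObject` implementation `MyObject` and a device
+    /// reference obtained elsewhere; the size is illustrative):
+    ///
+    /// ```ignore
+    /// let obj = gem::Object::<MyObject>::new(dev, 4096)?;
+    /// let shared = gem::ObjectRef::from_pinned_unique(obj);
+    /// ```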
+    pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<Pin<UniqueObjectRef<Self>>> {
+        let obj: Pin<KBox<Self>> = KBox::pin_init(
+            try_pin_init!(Self {
+                // SAFETY: This struct is expected to be zero-initialized
+                obj: bindings::drm_gem_object {
+                    funcs: &Self::OBJECT_FUNCS,
+                    ..Default::default()
+                },
+                inner <- T::new(dev, size),
+                // SAFETY: The DRM subsystem guarantees that the drm_device outlives its GEM
+                // objects, so it is sound to store a raw pointer to it here.
+                dev: dev.as_raw(),
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )?;
+
+        // SAFETY: The arguments are all valid per the type invariants.
+        to_result(unsafe {
+            bindings::drm_gem_object_init(dev.as_raw(), &obj.obj as *const _ as *mut _, size)
+        })?;
+
+        // SAFETY: We never move out of self
+        let obj_ref = unsafe {
+            Pin::new_unchecked(UniqueObjectRef {
+                // SAFETY: We never move out of the Box
+                ptr: KBox::leak(Pin::into_inner_unchecked(obj)),
+                _p: PhantomPinned,
+            })
+        };
+
+        Ok(obj_ref)
+    }
+
+    /// Returns the `Device` that owns this GEM object.
+    pub fn dev(&self) -> &device::Device<T::Driver> {
+        // SAFETY: The drm subsystem guarantees that the drm_device will live as long as
+        // the GEM object lives, so we can just borrow from the raw pointer.
+        unsafe { device::Device::borrow(self.dev) }
+    }
+}
+
+impl<T: DriverObject> crate::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> Deref for Object<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<T: DriverObject> DerefMut for Object<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl<T: DriverObject> drv::AllocImpl for Object<T> {
+    const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
+        gem_create_object: None,
+        prime_handle_to_fd: None,
+        prime_fd_to_handle: None,
+        gem_prime_import: None,
+        gem_prime_import_sg_table: None,
+        dumb_create: None,
+        dumb_map_offset: None,
+    };
+}
+
+/// A reference-counted shared reference to a base GEM object.
+pub struct ObjectRef<T: IntoGEMObject> {
+    // Invariant: the pointer is valid and initialized, and this ObjectRef owns a reference to it.
+    ptr: *const T,
+}
+
+impl<T: IntoGEMObject> ObjectRef<T> {
+    /// Downgrade this reference to a shared reference.
+    pub fn from_pinned_unique(pin: Pin<UniqueObjectRef<T>>) -> Self {
+        // SAFETY: A (shared) `ObjectRef` doesn't need to be pinned, since it doesn't allow us to
+        // obtain a mutable reference.
+        let uq = unsafe { Pin::into_inner_unchecked(pin) };
+
+        uq.into_ref()
+    }
+}
+
+/// SAFETY: GEM object references are safe to send between threads.
+unsafe impl<T: IntoGEMObject> Send for ObjectRef<T> {}
+/// SAFETY: GEM object references are safe to share between threads.
+unsafe impl<T: IntoGEMObject> Sync for ObjectRef<T> {}
+
+impl<T: IntoGEMObject> Clone for ObjectRef<T> {
+    fn clone(&self) -> Self {
+        self.reference()
+    }
+}
+
+impl<T: IntoGEMObject> Drop for ObjectRef<T> {
+    fn drop(&mut self) {
+        // SAFETY: Having an ObjectRef implies holding a GEM reference.
+        // The free callback will take care of deallocation.
+        unsafe {
+            bindings::drm_gem_object_put((*self.ptr).gem_obj() as *const _ as *mut _);
+        }
+    }
+}
+
+impl<T: IntoGEMObject> Deref for ObjectRef<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: The pointer is valid per the invariant
+        unsafe { &*self.ptr }
+    }
+}
+
+/// A unique reference to a base GEM object.
+pub struct UniqueObjectRef<T: IntoGEMObject> {
+    // Invariant: the pointer is valid and initialized, and this ObjectRef owns the only reference
+    // to it.
+    ptr: *mut T,
+    _p: PhantomPinned,
+}
+
+impl<T: IntoGEMObject> UniqueObjectRef<T> {
+    /// Downgrade this reference to a shared reference.
+    pub fn into_ref(self) -> ObjectRef<T> {
+        let ptr = self.ptr as *const _;
+        core::mem::forget(self);
+
+        ObjectRef { ptr }
+    }
+}
+
+impl<T: IntoGEMObject> Drop for UniqueObjectRef<T> {
+    fn drop(&mut self) {
+        // SAFETY: Having a UniqueObjectRef implies holding a GEM
+        // reference. The free callback will take care of deallocation.
+        unsafe {
+            bindings::drm_gem_object_put((*self.ptr).gem_obj() as *const _ as *mut _);
+        }
+    }
+}
+
+impl<T: IntoGEMObject> Deref for UniqueObjectRef<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: The pointer is valid per the invariant
+        unsafe { &*self.ptr }
+    }
+}
+
+impl<T: IntoGEMObject> DerefMut for UniqueObjectRef<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        // SAFETY: The pointer is valid per the invariant
+        unsafe { &mut *self.ptr }
+    }
+}
+
+pub(super) const fn create_fops() -> bindings::file_operations {
+    // SAFETY: As by the type invariant, it is safe to initialize `bindings::file_operations`
+    // zeroed.
+    let mut fops: bindings::file_operations = unsafe { core::mem::zeroed() };
+
+    fops.owner = core::ptr::null_mut();
+    fops.open = Some(bindings::drm_open);
+    fops.release = Some(bindings::drm_release);
+    fops.unlocked_ioctl = Some(bindings::drm_ioctl);
+    #[cfg(CONFIG_COMPAT)]
+    {
+        fops.compat_ioctl = Some(bindings::drm_compat_ioctl);
+    }
+    fops.poll = Some(bindings::drm_poll);
+    fops.read = Some(bindings::drm_read);
+    fops.llseek = Some(bindings::noop_llseek);
+    fops.mmap = Some(bindings::drm_gem_mmap);
+    fops.fop_flags = bindings::FOP_UNSIGNED_OFFSET;
+
+    fops
+}
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs
new file mode 100644
index 00000000000000..39c5656ea04c84
--- /dev/null
+++ b/rust/kernel/drm/gem/shmem.rs
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! DRM GEM shmem helper objects
+//!
+//! C header: [`include/drm/drm_gem_shmem_helper.h`](srctree/include/drm/drm_gem_shmem_helper.h)
+
+use crate::drm::{device, drv, gem};
+use crate::{
+    error::{from_err_ptr, to_result},
+    prelude::*,
+};
+use core::{
+    marker::{PhantomData, PhantomPinned},
+    mem,
+    mem::MaybeUninit,
+    ops::{Deref, DerefMut},
+    slice,
+};
+
+use gem::{BaseObject, IntoGEMObject};
+
+/// Trait which must be implemented by drivers using shmem-backed GEM objects.
+pub trait DriverObject: gem::BaseDriverObject<Object<Self>> {
+    /// Parent `Driver` for this object.
+    type Driver: drv::Driver;
+}
+
+// FIXME: This duplication is ugly, but the layout of `vm_operations_struct` depends on
+// CONFIG_NUMA (the set_policy/get_policy fields only exist with NUMA enabled), and there is
+// no obvious way to avoid spelling out both variants.
+#[cfg(CONFIG_NUMA)]
+macro_rules! vm_numa_fields {
+    ( $($field:ident: $val:expr),* $(,)? ) => {
+        bindings::vm_operations_struct {
+            $( $field: $val ),*,
+            set_policy: None,
+            get_policy: None,
+        }
+    }
+}
+
+#[cfg(not(CONFIG_NUMA))]
+macro_rules! vm_numa_fields {
+    ( $($field:ident: $val:expr),* $(,)? ) => {
+        bindings::vm_operations_struct {
+            $( $field: $val ),*
+        }
+    }
+}
+
+const SHMEM_VM_OPS: bindings::vm_operations_struct = vm_numa_fields! {
+    open: Some(bindings::drm_gem_shmem_vm_open),
+    close: Some(bindings::drm_gem_shmem_vm_close),
+    may_split: None,
+    mremap: None,
+    mprotect: None,
+    fault: Some(bindings::drm_gem_shmem_fault),
+    huge_fault: None,
+    map_pages: None,
+    pagesize: None,
+    page_mkwrite: None,
+    pfn_mkwrite: None,
+    access: None,
+    name: None,
+    find_special_page: None,
+};
+
+/// A shmem-backed GEM object.
+#[repr(C)]
+#[pin_data]
+pub struct Object<T: DriverObject> {
+    #[pin]
+    obj: bindings::drm_gem_shmem_object,
+    // The DRM core ensures the Device exists as long as its objects exist, so we don't need to
+    // manage the reference count here.
+    dev: *const bindings::drm_device,
+    // Parent object that owns this object's DMA reservation object
+    parent_resv_obj: *const bindings::drm_gem_object,
+    #[pin]
+    inner: T,
+}
+
+// SAFETY: drm_gem_shmem_object is safe to zero-initialize
+unsafe impl init::Zeroable for bindings::drm_gem_shmem_object {}
+
+unsafe extern "C" fn gem_create_object<T: DriverObject>(
+    dev: *mut bindings::drm_device,
+    size: usize,
+) -> *mut bindings::drm_gem_object {
+    // SAFETY: Calling krealloc() with a NULL pointer is equivalent to kmalloc(), and the size
+    // and GFP flags passed here are valid, so this call is sound.
+    let p = unsafe {
+        bindings::krealloc(core::ptr::null(), Object::<T>::SIZE, bindings::GFP_KERNEL)
+            as *mut Object<T>
+    };
+
+    if p.is_null() {
+        return ENOMEM.to_ptr();
+    }
+
+    let init = try_pin_init!(Object {
+        obj <- init::zeroed(),
+        // SAFETY: GEM ensures the device lives as long as its objects live
+        inner <- T::new(unsafe { device::Device::borrow(dev)}, size),
+        dev,
+        parent_resv_obj: core::ptr::null(),
+    });
+
+    // SAFETY: p is a valid pointer to an uninitialized Object<T>.
+    if let Err(e) = unsafe { init.__pinned_init(p) } {
+        // SAFETY: p is a valid pointer from `krealloc` and __pinned_init guarantees we can dealloc it.
+        unsafe { bindings::kfree(p as *mut _) };
+
+        return e.to_ptr();
+    }
+
+    // SAFETY: __pinned_init() guarantees the object has been initialized
+    let new: &mut Object<T> = unsafe { &mut *(p as *mut _) };
+
+    new.obj.base.funcs = &Object::<T>::VTABLE;
+    &mut new.obj.base
+}
+
+unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
+    // SAFETY: All of our objects are Object<T>.
+    let shmem = unsafe {
+        crate::container_of!(obj, bindings::drm_gem_shmem_object, base)
+            as *mut bindings::drm_gem_shmem_object
+    };
+    // SAFETY: All of our objects are Object<T>.
+    let p = unsafe { crate::container_of!(shmem, Object<T>, obj) as *mut Object<T> };
+
+    // SAFETY: p is never used after this
+    unsafe {
+        core::ptr::drop_in_place(&mut (*p).inner);
+    }
+
+    // SAFETY: parent_resv_obj is either NULL or a valid reference to the
+    // GEM object owning the DMA reservation for this object, which we drop
+    // here.
+    unsafe {
+        if !(*p).parent_resv_obj.is_null() {
+            bindings::drm_gem_object_put((*p).parent_resv_obj as *const _ as *mut _);
+        }
+    }
+
+    // SAFETY: This pointer has to be valid, since p is valid
+    unsafe {
+        bindings::drm_gem_shmem_free(&mut (*p).obj);
+    }
+}
+
+impl<T: DriverObject> Object<T> {
+    /// The size of this object's structure.
+    const SIZE: usize = mem::size_of::<Self>();
+
+    /// `drm_gem_object_funcs` vtable suitable for GEM shmem objects.
+    const VTABLE: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
+        free: Some(free_callback::<T>),
+        open: Some(super::open_callback::<T, Object<T>>),
+        close: Some(super::close_callback::<T, Object<T>>),
+        print_info: Some(bindings::drm_gem_shmem_object_print_info),
+        export: None,
+        pin: Some(bindings::drm_gem_shmem_object_pin),
+        unpin: Some(bindings::drm_gem_shmem_object_unpin),
+        get_sg_table: Some(bindings::drm_gem_shmem_object_get_sg_table),
+        vmap: Some(bindings::drm_gem_shmem_object_vmap),
+        vunmap: Some(bindings::drm_gem_shmem_object_vunmap),
+        mmap: Some(bindings::drm_gem_shmem_object_mmap),
+        status: None,
+        rss: None,
+        vm_ops: &SHMEM_VM_OPS,
+        evict: None,
+    };
+
+    // SAFETY: Must only be used with DRM functions that are thread-safe
+    unsafe fn mut_shmem(&self) -> *mut bindings::drm_gem_shmem_object {
+        &self.obj as *const _ as *mut _
+    }
+
+    /// Create a new shmem-backed DRM object of the given size.
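+    ///
+    /// A minimal sketch (assuming a shmem `DriverObject` implementation `MyObject`; the size is
+    /// illustrative):
+    ///
+    /// ```ignore
+    /// let bo = shmem::Object::<MyObject>::new(dev, 4096)?;
+    /// ```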
+    pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<gem::UniqueObjectRef<Self>> {
+        // SAFETY: This function can be called as long as the ALLOC_OPS are set properly
+        // for this driver, and the gem_create_object is called.
+        let p = unsafe {
+            let p = from_err_ptr(bindings::drm_gem_shmem_create(dev.as_raw(), size))?;
+            crate::container_of!(p, Object<T>, obj) as *mut _
+        };
+
+        // SAFETY: The gem_create_object callback ensures this is a valid Object<T>,
+        // so we can take a unique reference to it.
+        let obj_ref = gem::UniqueObjectRef {
+            ptr: p,
+            _p: PhantomPinned,
+        };
+
+        Ok(obj_ref)
+    }
+
+    /// Returns the `Device` that owns this GEM object.
+    pub fn dev(&self) -> &device::Device<T::Driver> {
+        // SAFETY: GEM ensures that the device outlives its objects, so we can
+        // just borrow here.
+        unsafe { device::Device::borrow(self.dev) }
+    }
+
+    /// Creates (if necessary) and returns a scatter-gather table of DMA pages for this object.
+    ///
+    /// This will pin the object in memory.
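+    ///
+    /// A sketch of iterating the returned table (all names are illustrative):
+    ///
+    /// ```ignore
+    /// let sgt = bo.sg_table()?;
+    /// for entry in &sgt {
+    ///     pr_debug!("DMA span: {:#x} + {:#x}\n", entry.dma_address(), entry.dma_len());
+    /// }
+    /// ```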
+    pub fn sg_table(&self) -> Result<SGTable<T>> {
+        // SAFETY: drm_gem_shmem_get_pages_sgt is thread-safe.
+        let sgt = from_err_ptr(unsafe { bindings::drm_gem_shmem_get_pages_sgt(self.mut_shmem()) })?;
+
+        Ok(SGTable {
+            sgt,
+            _owner: self.reference(),
+        })
+    }
+
+    /// Creates and returns a virtual kernel memory mapping for this object.
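+    ///
+    /// A sketch of writing through the mapping (illustrative only):
+    ///
+    /// ```ignore
+    /// let mut map = bo.vmap()?;
+    /// map.as_mut_slice().fill(0);
+    /// ```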
+    pub fn vmap(&self) -> Result<VMap<T>> {
+        let mut map: MaybeUninit<bindings::iosys_map> = MaybeUninit::uninit();
+
+        // SAFETY: drm_gem_shmem_vmap can be called with the DMA reservation lock held
+        to_result(unsafe {
+            let resv = self.obj.base.resv as *const _ as *mut _;
+            bindings::dma_resv_lock(resv, core::ptr::null_mut());
+            let ret = bindings::drm_gem_shmem_vmap(self.mut_shmem(), map.as_mut_ptr());
+            bindings::dma_resv_unlock(resv);
+            ret
+        })?;
+
+        // SAFETY: if drm_gem_shmem_vmap did not fail, map is initialized now
+        let map = unsafe { map.assume_init() };
+
+        Ok(VMap {
+            map,
+            owner: self.reference(),
+        })
+    }
+
+    /// Set the write-combine flag for this object.
+    ///
+    /// Should be called before any mappings are made.
+    pub fn set_wc(&mut self, map_wc: bool) {
+        self.obj.set_map_wc(map_wc);
+    }
+
+    /// Share the dma_resv object from another GEM object.
+    ///
+    /// Should be called before the object is used/shared. Can only be called once.
+    pub fn share_dma_resv(&mut self, from_object: &impl IntoGEMObject) -> Result {
+        let from_obj = from_object.gem_obj();
+        if !self.parent_resv_obj.is_null() {
+            Err(EBUSY)
+        } else {
+            // SAFETY: from_obj is a valid object pointer per the trait Invariant.
+            unsafe {
+                bindings::drm_gem_object_get(from_obj as *const _ as *mut _);
+            }
+            self.parent_resv_obj = from_obj;
+            let gem = self.mut_gem_obj();
+            gem.resv = from_obj.resv;
+            Ok(())
+        }
+    }
+}
+
+impl<T: DriverObject> Deref for Object<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<T: DriverObject> DerefMut for Object<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl<T: DriverObject> crate::private::Sealed for Object<T> {}
+
+impl<T: DriverObject> gem::IntoGEMObject for Object<T> {
+    type Driver = T::Driver;
+
+    fn gem_obj(&self) -> &bindings::drm_gem_object {
+        &self.obj.base
+    }
+
+    fn mut_gem_obj(&mut self) -> &mut bindings::drm_gem_object {
+        &mut self.obj.base
+    }
+
+    fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
+        // SAFETY: The invariant guarantees this is correct.
+        unsafe {
+            let shmem = crate::container_of!(obj, bindings::drm_gem_shmem_object, base)
+                as *mut bindings::drm_gem_shmem_object;
+            crate::container_of!(shmem, Object<T>, obj) as *mut Object<T>
+        }
+    }
+}
+
+impl<T: DriverObject> drv::AllocImpl for Object<T> {
+    const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
+        gem_create_object: Some(gem_create_object::<T>),
+        prime_handle_to_fd: None,
+        prime_fd_to_handle: None,
+        gem_prime_import: None,
+        gem_prime_import_sg_table: Some(bindings::drm_gem_shmem_prime_import_sg_table),
+        dumb_create: Some(bindings::drm_gem_shmem_dumb_create),
+        dumb_map_offset: None,
+    };
+}
+
+/// A virtual mapping for a shmem-backed GEM object in kernel address space.
+pub struct VMap<T: DriverObject> {
+    map: bindings::iosys_map,
+    owner: gem::ObjectRef<Object<T>>,
+}
+
+impl<T: DriverObject> VMap<T> {
+    /// Returns a const raw pointer to the start of the mapping.
+    pub fn as_ptr(&self) -> *const core::ffi::c_void {
+        // SAFETY: The shmem helpers always return non-iomem maps
+        unsafe { self.map.__bindgen_anon_1.vaddr }
+    }
+
+    /// Returns a mutable raw pointer to the start of the mapping.
+    pub fn as_mut_ptr(&mut self) -> *mut core::ffi::c_void {
+        // SAFETY: The shmem helpers always return non-iomem maps
+        unsafe { self.map.__bindgen_anon_1.vaddr }
+    }
+
+    /// Returns a byte slice view of the mapping.
+    pub fn as_slice(&self) -> &[u8] {
+        // SAFETY: The vmap maps valid memory up to the owner size
+        unsafe { slice::from_raw_parts(self.as_ptr() as *const u8, self.owner.size()) }
+    }
+
+    /// Returns a mutable byte slice view of the mapping.
+    pub fn as_mut_slice(&mut self) -> &mut [u8] {
+        // SAFETY: The vmap maps valid memory up to the owner size
+        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr() as *mut u8, self.owner.size()) }
+    }
+
+    /// Borrows a reference to the object that owns this virtual mapping.
+    pub fn owner(&self) -> &gem::ObjectRef<Object<T>> {
+        &self.owner
+    }
+}
+
+impl<T: DriverObject> Drop for VMap<T> {
+    fn drop(&mut self) {
+        // SAFETY: This function is safe to call with the DMA reservation lock held
+        unsafe {
+            let resv = self.owner.obj.base.resv as *const _ as *mut _;
+            bindings::dma_resv_lock(resv, core::ptr::null_mut());
+            bindings::drm_gem_shmem_vunmap(self.owner.mut_shmem(), &mut self.map);
+            bindings::dma_resv_unlock(resv);
+        }
+    }
+}
+
+/// SAFETY: `iosys_map` objects are safe to send across threads.
+unsafe impl<T: DriverObject> Send for VMap<T> {}
+/// SAFETY: `iosys_map` objects are safe to share across threads.
+unsafe impl<T: DriverObject> Sync for VMap<T> {}
+
+/// A single scatter-gather entry, representing a span of pages in the device's DMA address space.
+///
+/// For devices not behind a standalone IOMMU, this corresponds to physical addresses.
+#[repr(transparent)]
+pub struct SGEntry(bindings::scatterlist);
+
+impl SGEntry {
+    /// Returns the starting DMA address of this span
+    pub fn dma_address(&self) -> usize {
+        // SAFETY: Always safe to call on scatterlist objects
+        (unsafe { bindings::sg_dma_address(&self.0) }) as usize
+    }
+
+    /// Returns the length of this span in bytes
+    pub fn dma_len(&self) -> usize {
+        // SAFETY: Always safe to call on scatterlist objects
+        (unsafe { bindings::sg_dma_len(&self.0) }) as usize
+    }
+}
+
+/// A scatter-gather table of DMA address spans for a GEM shmem object.
+///
+/// # Invariants
+/// `sgt` must be a valid pointer to the `sg_table`, which must correspond to the owned
+/// object in `_owner` (which ensures it remains valid).
+pub struct SGTable<T: DriverObject> {
+    sgt: *const bindings::sg_table,
+    _owner: gem::ObjectRef<Object<T>>,
+}
+
+impl<T: DriverObject> SGTable<T> {
+    /// Returns an iterator through the SGTable's entries
+    pub fn iter(&'_ self) -> SGTableIter<'_> {
+        SGTableIter {
+            // SAFETY: sgt is always a valid pointer
+            left: unsafe { (*self.sgt).nents } as usize,
+            // SAFETY: sgt is always a valid pointer
+            sg: unsafe { (*self.sgt).sgl },
+            _p: PhantomData,
+        }
+    }
+}
+
+impl<'a, T: DriverObject> IntoIterator for &'a SGTable<T> {
+    type Item = &'a SGEntry;
+    type IntoIter = SGTableIter<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+/// SAFETY: `sg_table` objects are safe to send across threads.
+unsafe impl<T: DriverObject> Send for SGTable<T> {}
+/// SAFETY: `sg_table` objects are safe to share across threads.
+unsafe impl<T: DriverObject> Sync for SGTable<T> {}
+
+/// An iterator through `SGTable` entries.
+///
+/// # Invariants
+/// `sg` must be a valid pointer to the scatterlist, which must outlive our lifetime.
+pub struct SGTableIter<'a> {
+    sg: *mut bindings::scatterlist,
+    left: usize,
+    _p: PhantomData<&'a ()>,
+}
+
+impl<'a> Iterator for SGTableIter<'a> {
+    type Item = &'a SGEntry;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.left == 0 {
+            None
+        } else {
+            let sg = self.sg;
+            // SAFETY: `self.sg` is always a valid pointer
+            self.sg = unsafe { bindings::sg_next(self.sg) };
+            self.left -= 1;
+            // SAFETY: `self.sg` is always a valid pointer
+            Some(unsafe { &(*(sg as *const SGEntry)) })
+        }
+    }
+}
diff --git a/rust/kernel/drm/gpuvm.rs b/rust/kernel/drm/gpuvm.rs
new file mode 100644
index 00000000000000..01a8131796f95e
--- /dev/null
+++ b/rust/kernel/drm/gpuvm.rs
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM GPU VA Manager (GPUVM) abstractions
+//!
+//! C header: [`include/drm/drm_gpuvm.h`](srctree/include/drm/drm_gpuvm.h)
+
+#![allow(missing_docs)]
+
+use crate::{
+    bindings,
+    drm::{device, drv},
+    error::{
+        code::{EINVAL, ENOMEM},
+        from_result, to_result, Result,
+    },
+    init,
+    prelude::*,
+    types::{ARef, AlwaysRefCounted, Opaque},
+};
+
+use crate::drm::gem::IntoGEMObject;
+use core::cell::UnsafeCell;
+use core::marker::{PhantomData, PhantomPinned};
+use core::mem::ManuallyDrop;
+use core::ops::{Deref, DerefMut, Range};
+use core::ptr::NonNull;
+
+/// GpuVaFlags to be used for a GpuVa.
+///
+/// They can be combined with the operators `|`, `&`, and `!`.
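+///
+/// For example (illustrative):
+///
+/// ```ignore
+/// let flags = GpuVaFlags::SPARSE | GpuVaFlags::INVALIDATED;
+/// assert!(flags.contains(GpuVaFlags::INVALIDATED));
+/// ```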
+#[derive(Clone, Copy, PartialEq, Default)]
+pub struct GpuVaFlags(u32);
+
+impl GpuVaFlags {
+    /// No GpuVaFlags (zero)
+    pub const NONE: GpuVaFlags = GpuVaFlags(0);
+
+    /// The backing GEM is invalidated.
+    pub const INVALIDATED: GpuVaFlags = GpuVaFlags(bindings::drm_gpuva_flags_DRM_GPUVA_INVALIDATED);
+
+    /// The GpuVa is a sparse mapping.
+    pub const SPARSE: GpuVaFlags = GpuVaFlags(bindings::drm_gpuva_flags_DRM_GPUVA_SPARSE);
+
+    /// The GpuVa is a single-page mapping.
+    pub const SINGLE_PAGE: GpuVaFlags =
+        GpuVaFlags(bindings::drm_gpuva_flags_DRM_GPUVA_SINGLE_PAGE);
+
+    /// Construct a driver-specific `GpuVaFlags` value.
+    ///
+    /// The argument must be a flag index in the range [0..28].
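+    ///
+    /// For example, a driver could define its own flag like this (the name is illustrative):
+    ///
+    /// ```ignore
+    /// const MY_DRIVER_FLAG: GpuVaFlags = GpuVaFlags::user_flag(0);
+    /// ```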
+    pub const fn user_flag(index: u32) -> GpuVaFlags {
+        let flags = bindings::drm_gpuva_flags_DRM_GPUVA_USERBITS << index;
+        assert!(flags != 0);
+        GpuVaFlags(flags)
+    }
+
+    /// Get the raw representation of this flag.
+    pub(crate) fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    /// Check whether `flags` is contained in `self`.
+    pub fn contains(self, flags: GpuVaFlags) -> bool {
+        (self & flags) == flags
+    }
+}
+
+impl core::ops::BitOr for GpuVaFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl core::ops::BitAnd for GpuVaFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl core::ops::Not for GpuVaFlags {
+    type Output = Self;
+    fn not(self) -> Self::Output {
+        Self(!self.0)
+    }
+}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVm (a GPU address space).
+pub trait DriverGpuVm: Sized {
+    /// The parent `Driver` implementation for this `DriverGpuVm`.
+    type Driver: drv::Driver;
+    type GpuVa: DriverGpuVa = ();
+    type GpuVmBo: DriverGpuVmBo = ();
+    type StepContext = ();
+
+    fn step_map(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+    fn step_unmap(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpUnMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+    fn step_remap(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpReMap<Self>,
+        vm_bo: &GpuVmBo<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+}
+
+struct StepContext<'a, T: DriverGpuVm> {
+    gpuvm: &'a GpuVm<T>,
+    ctx: &'a mut T::StepContext,
+}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVa (a mapping in GPU address space).
+pub trait DriverGpuVa: Sized {}
+
+impl DriverGpuVa for () {}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVmBo (a connection between a BO and a VM).
+pub trait DriverGpuVmBo: Sized {
+    fn new() -> impl PinInit<Self>;
+}
+
+/// Provide a default implementation for trivial types
+impl<T: Default> DriverGpuVmBo for T {
+    fn new() -> impl PinInit<Self> {
+        init::default()
+    }
+}
+
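+/// A single map step of a GPU VA split/merge operation, wrapping `drm_gpuva_op_map`.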
+#[repr(transparent)]
+pub struct OpMap<T: DriverGpuVm>(bindings::drm_gpuva_op_map, PhantomData<T>);
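+/// A single unmap step of a GPU VA split/merge operation, wrapping `drm_gpuva_op_unmap`.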
+#[repr(transparent)]
+pub struct OpUnMap<T: DriverGpuVm>(bindings::drm_gpuva_op_unmap, PhantomData<T>);
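+/// A remap step of a GPU VA split/merge operation, wrapping `drm_gpuva_op_remap`.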
+#[repr(transparent)]
+pub struct OpReMap<T: DriverGpuVm>(bindings::drm_gpuva_op_remap, PhantomData<T>);
+
+impl<T: DriverGpuVm> OpMap<T> {
+    pub fn addr(&self) -> u64 {
+        self.0.va.addr
+    }
+    pub fn range(&self) -> u64 {
+        self.0.va.range
+    }
+    pub fn offset(&self) -> u64 {
+        self.0.gem.offset
+    }
+    pub fn flags(&self) -> GpuVaFlags {
+        GpuVaFlags(self.0.flags)
+    }
+    pub fn object(&self) -> &<T::Driver as drv::Driver>::Object {
+        let p = <<T::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(self.0.gem.obj);
+        // SAFETY: The GEM object has an active reference for the lifetime of this op
+        unsafe { &*p }
+    }
+    pub fn map_and_link_va(
+        &mut self,
+        gpuvm: &mut UpdatingGpuVm<'_, T>,
+        gpuva: Pin<KBox<GpuVa<T>>>,
+        gpuvmbo: &GpuVmBo<T>,
+    ) -> Result<(), Pin<KBox<GpuVa<T>>>> {
+        // SAFETY: We are handing off the GpuVa ownership and it will not be moved.
+        let p = KBox::leak(unsafe { Pin::into_inner_unchecked(gpuva) });
+        // SAFETY: These C functions are called with the correct invariants
+        unsafe {
+            bindings::drm_gpuva_init_from_op(&mut p.gpuva, &mut self.0);
+            if bindings::drm_gpuva_insert(gpuvm.0.gpuvm() as *mut _, &mut p.gpuva) != 0 {
+                // EEXIST, return the GpuVa to the caller as an error
+                return Err(Pin::new_unchecked(KBox::from_raw(p)));
+            };
+            // SAFETY: This takes a new reference to the gpuvmbo.
+            bindings::drm_gpuva_link(&mut p.gpuva, &gpuvmbo.bo as *const _ as *mut _);
+        }
+        Ok(())
+    }
+}
+
+impl<T: DriverGpuVm> OpUnMap<T> {
+    pub fn va(&self) -> Option<&GpuVa<T>> {
+        if self.0.va.is_null() {
+            return None;
+        }
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        Some(unsafe { &*p })
+    }
+    pub fn unmap_and_unlink_va(&mut self) -> Option<Pin<KBox<GpuVa<T>>>> {
+        if self.0.va.is_null() {
+            return None;
+        }
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        unsafe {
+            bindings::drm_gpuva_unmap(&mut self.0);
+            bindings::drm_gpuva_unlink(self.0.va);
+        }
+
+        // Unlinking/unmapping relinquishes ownership of the GpuVa object,
+        // so clear the pointer
+        self.0.va = core::ptr::null_mut();
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        Some(unsafe { Pin::new_unchecked(KBox::from_raw(p)) })
+    }
+}
+
+impl<T: DriverGpuVm> OpReMap<T> {
+    pub fn prev_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The prev pointer must be valid if not-NULL per the op_remap contract
+        unsafe { (self.0.prev as *mut OpMap<T>).as_mut() }
+    }
+    pub fn next_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The next pointer must be valid if not-NULL per the op_remap contract
+        unsafe { (self.0.next as *mut OpMap<T>).as_mut() }
+    }
+    pub fn unmap(&mut self) -> &mut OpUnMap<T> {
+        // SAFETY: The unmap pointer is always valid per the op_remap contract
+        unsafe { (self.0.unmap as *mut OpUnMap<T>).as_mut().unwrap() }
+    }
+}
+
+/// A base GPU VA.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVa<T: DriverGpuVm> {
+    #[pin]
+    gpuva: bindings::drm_gpuva,
+    #[pin]
+    inner: T::GpuVa,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+// SAFETY: This type is safe to zero-init (as far as C is concerned).
+unsafe impl init::Zeroable for bindings::drm_gpuva {}
+
+impl<T: DriverGpuVm> GpuVa<T> {
+    pub fn new<E>(inner: impl PinInit<T::GpuVa, E>) -> Result<Pin<KBox<GpuVa<T>>>>
+    where
+        Error: From<E>,
+    {
+        KBox::try_pin_init(
+            try_pin_init!(Self {
+                gpuva <- init::zeroed(),
+                inner <- inner,
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )
+    }
+
+    pub fn addr(&self) -> u64 {
+        self.gpuva.va.addr
+    }
+    pub fn range(&self) -> u64 {
+        self.gpuva.va.range
+    }
+    pub fn offset(&self) -> u64 {
+        self.gpuva.gem.offset
+    }
+    pub fn flags(&self) -> GpuVaFlags {
+        GpuVaFlags(self.gpuva.flags)
+    }
+}
+
+/// A base GpuVm BO.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVmBo<T: DriverGpuVm> {
+    #[pin]
+    bo: bindings::drm_gpuvm_bo,
+    #[pin]
+    inner: T::GpuVmBo,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+impl<T: DriverGpuVm> GpuVmBo<T> {
+    /// Return a reference to the inner driver data for this GpuVmBo
+    pub fn inner(&self) -> &T::GpuVmBo {
+        &self.inner
+    }
+}
+
+// SAFETY: DRM GpuVmBo objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVmBo<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_bo_get function satisfies the requirements for inc_ref().
+        unsafe { bindings::drm_gpuvm_bo_get(&self.bo as *const _ as *mut _) };
+    }
+
+    unsafe fn dec_ref(mut obj: NonNull<Self>) {
+        // SAFETY: drm_gpuvm_bo_put() requires holding the gpuva lock, which is the dma_resv lock by default.
+        // The drm_gpuvm_bo_put function satisfies the requirements for dec_ref().
+        // (We do not support custom locks yet.)
+        unsafe {
+            let resv = (*obj.as_mut().bo.obj).resv;
+            bindings::dma_resv_lock(resv, core::ptr::null_mut());
+            bindings::drm_gpuvm_bo_put(&mut obj.as_mut().bo);
+            bindings::dma_resv_unlock(resv);
+        }
+    }
+}
+
+/// A base GPU VM.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVm<T: DriverGpuVm> {
+    #[pin]
+    gpuvm: Opaque<bindings::drm_gpuvm>,
+    #[pin]
+    inner: UnsafeCell<T>,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+pub(super) unsafe extern "C" fn vm_free_callback<T: DriverGpuVm>(
+    raw_gpuvm: *mut bindings::drm_gpuvm,
+) {
+    // SAFETY: Container invariant is guaranteed for objects using our callback.
+    let p = unsafe {
+        crate::container_of!(
+            raw_gpuvm as *mut Opaque<bindings::drm_gpuvm>,
+            GpuVm<T>,
+            gpuvm
+        ) as *mut GpuVm<T>
+    };
+
+    // SAFETY: p is guaranteed to be valid for drm_gpuvm objects using this callback.
+    unsafe { drop(KBox::from_raw(p)) };
+}
+
+pub(super) unsafe extern "C" fn vm_bo_alloc_callback<T: DriverGpuVm>() -> *mut bindings::drm_gpuvm_bo
+{
+    let obj: Result<Pin<KBox<GpuVmBo<T>>>> = KBox::try_pin_init(
+        try_pin_init!(GpuVmBo::<T> {
+            bo <- init::default(),
+            inner <- T::GpuVmBo::new(),
+            _p: PhantomPinned
+        }),
+        GFP_KERNEL,
+    );
+
+    match obj {
+        Ok(obj) =>
+        // SAFETY: The DRM core will keep this object pinned
+        unsafe {
+            let p = KBox::leak(Pin::into_inner_unchecked(obj));
+            &mut p.bo
+        },
+        Err(_) => core::ptr::null_mut(),
+    }
+}
+
+pub(super) unsafe extern "C" fn vm_bo_free_callback<T: DriverGpuVm>(
+    raw_vm_bo: *mut bindings::drm_gpuvm_bo,
+) {
+    // SAFETY: Container invariant is guaranteed for objects using this callback.
+    let p = unsafe { crate::container_of!(raw_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+
+    // SAFETY: p is guaranteed to be valid for drm_gpuvm_bo objects using this callback.
+    unsafe { drop(KBox::from_raw(p)) };
+}
+
+pub(super) unsafe extern "C" fn step_map_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a map op, and OpMap is a transparent wrapper.
+    let map = unsafe { &mut *((&mut (*op).__bindgen_anon_1.map) as *mut _ as *mut OpMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map(), which is
+    // guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_map(map, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+pub(super) unsafe extern "C" fn step_remap_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a remap op, and OpReMap is a transparent wrapper.
+    let remap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.remap) as *mut _ as *mut OpReMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline by the caller (sm_map(),
+    // sm_unmap() or bo_unmap()), which is guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    let p_vm_bo = remap.unmap().va().unwrap().gpuva.vm_bo;
+
+    let res = {
+        // SAFETY: vm_bo pointer must be valid and non-null by the step_remap invariants.
+        // Since we grab a ref, this reference's lifetime is until the decref.
+        let vm_bo_ref = unsafe {
+            bindings::drm_gpuvm_bo_get(p_vm_bo);
+            &*(crate::container_of!(p_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T>)
+        };
+
+        from_result(|| {
+            UpdatingGpuVm(ctx.gpuvm).step_remap(remap, vm_bo_ref, ctx.ctx)?;
+            Ok(0)
+        })
+    };
+
+    // SAFETY: We incremented the refcount above, and the Rust reference we took is
+    // no longer in scope.
+    unsafe { bindings::drm_gpuvm_bo_put(p_vm_bo) };
+
+    res
+}
+
+pub(super) unsafe extern "C" fn step_unmap_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is an unmap op, and OpUnMap is a transparent wrapper.
+    let unmap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.unmap) as *mut _ as *mut OpUnMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline by the caller (sm_map(),
+    // sm_unmap() or bo_unmap()), which is guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_unmap(unmap, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+pub(super) unsafe extern "C" fn exec_lock_gem_object(
+    vm_exec: *mut bindings::drm_gpuvm_exec,
+) -> core::ffi::c_int {
+    // SAFETY: The gpuvm_exec object is valid and priv_ is a GEM object pointer
+    // when this callback is used
+    unsafe { bindings::drm_exec_lock_obj(&mut (*vm_exec).exec, (*vm_exec).extra.priv_ as *mut _) }
+}
+
+impl<T: DriverGpuVm> GpuVm<T> {
+    const OPS: bindings::drm_gpuvm_ops = bindings::drm_gpuvm_ops {
+        vm_free: Some(vm_free_callback::<T>),
+        op_alloc: None,
+        op_free: None,
+        vm_bo_alloc: Some(vm_bo_alloc_callback::<T>),
+        vm_bo_free: Some(vm_bo_free_callback::<T>),
+        vm_bo_validate: None,
+        sm_step_map: Some(step_map_callback::<T>),
+        sm_step_remap: Some(step_remap_callback::<T>),
+        sm_step_unmap: Some(step_unmap_callback::<T>),
+    };
+
+    fn gpuvm(&self) -> *const bindings::drm_gpuvm {
+        self.gpuvm.get()
+    }
+
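+    /// Create a new `GpuVm` covering the GPU VA `range`, with `reserve_range` marked as
+    /// reserved (kernel-internal) and `r_obj` providing the common DMA reservation object.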
+    pub fn new<E>(
+        name: &'static CStr,
+        dev: &device::Device<T::Driver>,
+        r_obj: &<T::Driver as drv::Driver>::Object,
+        range: Range<u64>,
+        reserve_range: Range<u64>,
+        inner: impl PinInit<T, E>,
+    ) -> Result<ARef<GpuVm<T>>>
+    where
+        Error: From<E>,
+    {
+        let obj: Pin<KBox<Self>> = KBox::try_pin_init(
+            try_pin_init!(Self {
+                // SAFETY: drm_gpuvm_init cannot fail and always initializes the member
+                gpuvm <- unsafe {
+                    init::pin_init_from_closure(move |slot: *mut Opaque<bindings::drm_gpuvm> | {
+                        // Zero-init required by drm_gpuvm_init
+                        *slot = Opaque::zeroed();
+                        bindings::drm_gpuvm_init(
+                            Opaque::raw_get(slot),
+                            name.as_char_ptr(),
+                            0,
+                            dev.as_raw(),
+                            r_obj.gem_obj() as *const _ as *mut _,
+                            range.start,
+                            range.end - range.start,
+                            reserve_range.start,
+                            reserve_range.end - reserve_range.start,
+                            &Self::OPS
+                        );
+                        Ok(())
+                    })
+                },
+                // SAFETY: Just passing through to the initializer argument
+                inner <- unsafe {
+                    init::pin_init_from_closure(move |slot: *mut UnsafeCell<T> | {
+                        inner.__pinned_init(slot as *mut _)
+                    })
+                },
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )?;
+
+        // SAFETY: We never move out of the object
+        let vm_ref = unsafe {
+            ARef::from_raw(NonNull::new_unchecked(KBox::leak(
+                Pin::into_inner_unchecked(obj),
+            )))
+        };
+
+        Ok(vm_ref)
+    }
+
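+    /// Lock all DMA reservations required to update this VM (including, if given, the external
+    /// GEM object `obj`), returning a [`LockedGpuVm`] guard that unlocks them when dropped.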
+    pub fn exec_lock<'a, 'b>(
+        &'a self,
+        obj: Option<&'b <T::Driver as drv::Driver>::Object>,
+        interruptible: bool,
+    ) -> Result<LockedGpuVm<'a, 'b, T>> {
+        // Do not lock the object separately if it is internal, since it shares the GPUVM's
+        // DMA reservation, which gets locked anyway.
+        let is_ext = obj.map(|a| self.is_extobj(a)).unwrap_or(false);
+
+        let mut guard = ManuallyDrop::new(LockedGpuVm {
+            gpuvm: self,
+            // vm_exec needs to be pinned, so stick it in a Box.
+            vm_exec: KBox::init(
+                init!(bindings::drm_gpuvm_exec {
+                    vm: self.gpuvm() as *mut _,
+                    flags: if interruptible {
+                        bindings::BINDINGS_DRM_EXEC_INTERRUPTIBLE_WAIT
+                    } else {
+                        0
+                    },
+                    exec: Default::default(),
+                    extra: match (is_ext, obj) {
+                        (true, Some(obj)) => bindings::drm_gpuvm_exec__bindgen_ty_1 {
+                            fn_: Some(exec_lock_gem_object),
+                            priv_: obj.gem_obj() as *const _ as *mut _,
+                        },
+                        _ => Default::default(),
+                    },
+                    num_fences: 0,
+                }),
+                GFP_KERNEL,
+            )?,
+            obj,
+        });
+
+        // SAFETY: The object is valid and was initialized above
+        to_result(unsafe { bindings::drm_gpuvm_exec_lock(&mut *guard.vm_exec) })?;
+
+        Ok(ManuallyDrop::into_inner(guard))
+    }
+
+    /// Returns true if the given object is external to the GPUVM
+    /// (that is, if it does not share the DMA reservation object of the GPUVM).
+    pub fn is_extobj(&self, obj: &impl IntoGEMObject) -> bool {
+        let gem = obj.gem_obj() as *const _ as *mut _;
+        // SAFETY: This is safe to call as long as the arguments are valid pointers.
+        unsafe { bindings::drm_gpuvm_is_extobj(self.gpuvm() as *mut _, gem) }
+    }
+}
+
+// SAFETY: DRM GpuVm objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVm<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_get function satisfies the requirements for inc_ref().
+        unsafe { bindings::drm_gpuvm_get(&self.gpuvm as *const _ as *mut _) };
+    }
+
+    unsafe fn dec_ref(obj: NonNull<Self>) {
+        // SAFETY: The drm_gpuvm_put function satisfies the requirements for dec_ref().
+        unsafe { bindings::drm_gpuvm_put(Opaque::raw_get(&(*obj.as_ptr()).gpuvm)) };
+    }
+}
+
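+/// A [`GpuVm`] that has been locked for an update via [`GpuVm::exec_lock`]; dropping this guard
+/// releases the locks again.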
+pub struct LockedGpuVm<'a, 'b, T: DriverGpuVm> {
+    gpuvm: &'a GpuVm<T>,
+    vm_exec: KBox<bindings::drm_gpuvm_exec>,
+    obj: Option<&'b <T::Driver as drv::Driver>::Object>,
+}
+
+impl<T: DriverGpuVm> LockedGpuVm<'_, '_, T> {
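+    /// Returns the [`GpuVmBo`] connecting this VM to the locked GEM object, if one exists.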
+    pub fn find_bo(&mut self) -> Option<ARef<GpuVmBo<T>>> {
+        let obj = self.obj?;
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        let p = unsafe {
+            bindings::drm_gpuvm_bo_find(
+                self.gpuvm.gpuvm() as *mut _,
+                obj.gem_obj() as *const _ as *mut _,
+            )
+        };
+        if p.is_null() {
+            None
+        } else {
+            // SAFETY: All the drm_gpuvm_bo objects in this GpuVm are always allocated by us as GpuVmBo<T>.
+            let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+            // SAFETY: We checked for NULL above, and the types ensure that
+            // this object was created by vm_bo_alloc_callback<T>.
+            Some(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+        }
+    }
+
+    pub fn obtain_bo(&mut self) -> Result<ARef<GpuVmBo<T>>> {
+        let obj = self.obj.ok_or(EINVAL)?;
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        let p = unsafe {
+            bindings::drm_gpuvm_bo_obtain(
+                self.gpuvm.gpuvm() as *mut _,
+                obj.gem_obj() as *const _ as *mut _,
+            )
+        };
+        if p.is_null() {
+            Err(ENOMEM)
+        } else {
+            // SAFETY: Container invariant is guaranteed for GpuVmBo objects for this GpuVm.
+            let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+            // SAFETY: We checked for NULL above, and the types ensure that
+            // this object was created by vm_bo_alloc_callback<T>.
+            Ok(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+        }
+    }
+
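+    /// Apply a split/merge map of `req_range` bytes at GPU VA `req_addr`, backed by the locked
+    /// GEM object at offset `req_offset`, invoking the driver's step callbacks with `ctx`.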
+    pub fn sm_map(
+        &mut self,
+        ctx: &mut T::StepContext,
+        req_addr: u64,
+        req_range: u64,
+        req_offset: u64,
+        flags: GpuVaFlags,
+    ) -> Result {
+        let obj = self.obj.ok_or(EINVAL)?;
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_sm_map(
+                self.gpuvm.gpuvm() as *mut _,
+                &mut ctx as *mut _ as *mut _,
+                req_addr,
+                req_range,
+                obj.gem_obj() as *const _ as *mut _,
+                req_offset,
+                flags.as_raw(),
+            )
+        })
+    }
+
+    pub fn sm_unmap(&mut self, ctx: &mut T::StepContext, req_addr: u64, req_range: u64) -> Result {
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_sm_unmap(
+                self.gpuvm.gpuvm() as *mut _,
+                &mut ctx as *mut _ as *mut _,
+                req_addr,
+                req_range,
+            )
+        })
+    }
+
+    pub fn bo_unmap(&mut self, ctx: &mut T::StepContext, bo: &GpuVmBo<T>) -> Result {
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_bo_unmap(&bo.bo as *const _ as *mut _, &mut ctx as *mut _ as *mut _)
+        })
+    }
+}
+
+impl<T: DriverGpuVm> Deref for LockedGpuVm<'_, '_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &*self.gpuvm.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> DerefMut for LockedGpuVm<'_, '_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &mut *self.gpuvm.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> Drop for LockedGpuVm<'_, '_, T> {
+    fn drop(&mut self) {
+        // SAFETY: We hold the lock, so it's safe to unlock
+        unsafe {
+            bindings::drm_gpuvm_exec_unlock(&mut *self.vm_exec);
+        }
+    }
+}
+
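+/// A [`GpuVm`] that is currently being updated, as passed to the driver's step callbacks.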
+pub struct UpdatingGpuVm<'a, T: DriverGpuVm>(&'a GpuVm<T>);
+
+impl<T: DriverGpuVm> UpdatingGpuVm<'_, T> {}
+
+impl<T: DriverGpuVm> Deref for UpdatingGpuVm<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &*self.0.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> DerefMut for UpdatingGpuVm<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &mut *self.0.inner.get() }
+    }
+}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVm<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVm<T> {}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVmBo<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVmBo<T> {}
diff --git a/rust/kernel/drm/ioctl.rs b/rust/kernel/drm/ioctl.rs
new file mode 100644
index 00000000000000..ab31ad57af3e5f
--- /dev/null
+++ b/rust/kernel/drm/ioctl.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+#![allow(non_snake_case)]
+
+//! DRM IOCTL definitions.
+//!
+//! C header: [`include/drm/drm_ioctl.h`](srctree/include/drm/drm_ioctl.h)
+
+use crate::ioctl;
+
+const BASE: u32 = uapi::DRM_IOCTL_BASE as u32;
+
+/// Construct a DRM ioctl number with no argument.
+#[inline(always)]
+pub const fn IO(nr: u32) -> u32 {
+    ioctl::_IO(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-only argument.
+#[inline(always)]
+pub const fn IOR<T>(nr: u32) -> u32 {
+    ioctl::_IOR::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a write-only argument.
+#[inline(always)]
+pub const fn IOW<T>(nr: u32) -> u32 {
+    ioctl::_IOW::<T>(BASE, nr)
+}
+
+/// Construct a DRM ioctl number with a read-write argument.
+#[inline(always)]
+pub const fn IOWR<T>(nr: u32) -> u32 {
+    ioctl::_IOWR::<T>(BASE, nr)
+}
+
+/// Descriptor type for DRM ioctls. Use the `declare_drm_ioctls!{}` macro to construct them.
+pub type DrmIoctlDescriptor = bindings::drm_ioctl_desc;
+
+/// This is for ioctls which are used for rendering and require that the file descriptor is
+/// either for a render node or, if it is a legacy/primary node, that it is authenticated.
+pub const AUTH: u32 = bindings::drm_ioctl_flags_DRM_AUTH;
+
+/// This must be set for any ioctl which can change the modeset or display state. Userspace must
+/// call the ioctl through a primary node, while it is the active master.
+///
+/// Note that read-only modeset ioctls can also be called by unauthenticated clients, or when a
+/// master is not the currently active one.
+pub const MASTER: u32 = bindings::drm_ioctl_flags_DRM_MASTER;
+
+/// Anything that could potentially wreck a master file descriptor needs to have this flag set.
+///
+/// Currently that's only the SETMASTER and DROPMASTER ioctls, which e.g. logind can call to
+/// force a non-behaving master (display compositor) into compliance.
+///
+/// This is equivalent to requiring the caller to have the CAP_SYS_ADMIN capability.
+pub const ROOT_ONLY: u32 = bindings::drm_ioctl_flags_DRM_ROOT_ONLY;
+
+/// This is used for all ioctls needed for rendering only, for drivers which support render
+/// nodes. This should be all new render drivers, and hence it should always be set for any
+/// ioctl with `AUTH` set. Note though that read-only query ioctls might have this set but not
+/// `AUTH`, because they do not require authentication.
+pub const RENDER_ALLOW: u32 = bindings::drm_ioctl_flags_DRM_RENDER_ALLOW;
+
+/// Internal structures used by the `declare_drm_ioctls!{}` macro. Do not use directly.
+#[doc(hidden)]
+pub mod internal {
+    pub use bindings::drm_device;
+    pub use bindings::drm_file;
+    pub use bindings::drm_ioctl_desc;
+}
+
+/// Declare the DRM ioctls for a driver.
+///
+/// Each entry in the list should have the form:
+///
+/// `(ioctl_number, argument_type, flags, user_callback),`
+///
+/// `argument_type` is the type name within the `uapi` crate.
+/// `user_callback` should have the following prototype:
+///
+/// ```ignore
+/// fn foo(device: &kernel::drm::device::Device<Self>,
+///        data: &mut uapi::argument_type,
+///        file: &kernel::drm::file::File<Self::File>,
+/// )
+/// ```
+/// where `Self` is the drm::drv::Driver implementation these ioctls are being declared within.
+///
+/// # Examples
+///
+/// ```ignore
+/// kernel::declare_drm_ioctls! {
+///     (FOO_GET_PARAM, drm_foo_get_param, ioctl::RENDER_ALLOW, my_get_param_handler),
+/// }
+/// ```
+///
+#[macro_export]
+macro_rules! declare_drm_ioctls {
+    ( $(($cmd:ident, $struct:ident, $flags:expr, $func:expr)),* $(,)? ) => {
+        const IOCTLS: &'static [$crate::drm::ioctl::DrmIoctlDescriptor] = {
+            use $crate::uapi::*;
+            const _:() = {
+                let i: u32 = $crate::uapi::DRM_COMMAND_BASE;
+                // Assert that all the IOCTLs are in the right order and there are no gaps,
+                // and that the sizeof of the specified type is correct.
+                $(
+                    let cmd: u32 = $crate::macros::concat_idents!(DRM_IOCTL_, $cmd);
+                    ::core::assert!(i == $crate::ioctl::_IOC_NR(cmd));
+                    ::core::assert!(core::mem::size_of::<$crate::uapi::$struct>() ==
+                                    $crate::ioctl::_IOC_SIZE(cmd));
+                    let i: u32 = i + 1;
+                )*
+            };
+
+            let ioctls = &[$(
+                $crate::drm::ioctl::internal::drm_ioctl_desc {
+                    cmd: $crate::macros::concat_idents!(DRM_IOCTL_, $cmd) as u32,
+                    func: {
+                        #[allow(non_snake_case)]
+                        unsafe extern "C" fn $cmd(
+                                raw_dev: *mut $crate::drm::ioctl::internal::drm_device,
+                                raw_data: *mut ::core::ffi::c_void,
+                                raw_file_priv: *mut $crate::drm::ioctl::internal::drm_file,
+                        ) -> core::ffi::c_int {
+                            // SAFETY:
+                            // - The DRM core ensures the device lives while callbacks are being
+                            //   called.
+                            // - The DRM device must have been registered when we're called through
+                            //   an IOCTL.
+                            //
+                            // FIXME: Currently there is nothing enforcing that the types of the
+                            // dev/file match the current driver these ioctls are being declared
+                            // for, and it's not clear how to enforce this within the type system.
+                            let dev = $crate::drm::device::RegisteredDevice::borrow(raw_dev);
+                            // SAFETY: This is just the ioctl argument, which hopefully has the
+                            // right type (we've done our best checking the size).
+                            let data = unsafe { &mut *(raw_data as *mut $crate::uapi::$struct) };
+                            // SAFETY: This is just the DRM file structure
+                            let file = unsafe { $crate::drm::file::File::from_raw(raw_file_priv) };
+
+                            match $func(dev, dev.data(), data, &file) {
+                                Err(e) => e.to_errno(),
+                                Ok(i) => i.try_into()
+                                            .unwrap_or($crate::error::code::ERANGE.to_errno()),
+                            }
+                        }
+                        Some($cmd)
+                    },
+                    flags: $flags,
+                    name: $crate::c_str!(::core::stringify!($cmd)).as_char_ptr(),
+                }
+            ),*];
+            ioctls
+        };
+    };
+}
diff --git a/rust/kernel/drm/mm.rs b/rust/kernel/drm/mm.rs
new file mode 100644
index 00000000000000..accfe0f3910fdb
--- /dev/null
+++ b/rust/kernel/drm/mm.rs
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM MM range allocator
+//!
+//! C header: [`include/drm/drm_mm.h`](srctree/include/drm/drm_mm.h)
+
+use crate::{
+    alloc::flags::*,
+    bindings,
+    error::{to_result, Result},
+    sync::{Arc, Mutex, UniqueArc},
+    types::Opaque,
+};
+
+use crate::init::InPlaceInit;
+use crate::prelude::KBox;
+
+use core::{
+    marker::{PhantomData, PhantomPinned},
+    ops::Deref,
+    pin::Pin,
+};
+
+/// Type alias representing a DRM MM node.
+pub type Node<A, T> = Pin<KBox<NodeData<A, T>>>;
+
+/// Trait which must be implemented by the inner allocator state type provided by the user.
+pub trait AllocInner<T> {
+    /// Notification that a node was dropped from the allocator.
+    fn drop_object(&mut self, _start: u64, _size: u64, _color: usize, _object: &mut T) {}
+}
+
+impl<T> AllocInner<T> for () {}
+
+/// Wrapper type for a `struct drm_mm` plus user AllocInner object.
+///
+/// # Invariants
+/// The `drm_mm` struct is valid and initialized.
+struct MmInner<A: AllocInner<T>, T>(Opaque<bindings::drm_mm>, A, PhantomData<T>);
+
+/// Represents a single allocated node in the MM allocator
+pub struct NodeData<A: AllocInner<T>, T> {
+    node: bindings::drm_mm_node,
+    mm: Arc<Mutex<MmInner<A, T>>>,
+    valid: bool,
+    /// A drm_mm_node needs to be pinned because nodes reference each other in a linked list.
+    _pin: PhantomPinned,
+    inner: T,
+}
+
+// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
+unsafe impl<A: Send + AllocInner<T>, T: Send> Send for NodeData<A, T> {}
+// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
+unsafe impl<A: Send + AllocInner<T>, T: Sync> Sync for NodeData<A, T> {}
+
+/// Available MM node insertion modes
+#[repr(u32)]
+pub enum InsertMode {
+    /// Search for the smallest hole (within the search range) that fits the desired node.
+    ///
+    /// Allocates the node from the bottom of the found hole.
+    Best = bindings::drm_mm_insert_mode_DRM_MM_INSERT_BEST,
+
+    /// Search for the lowest hole (address closest to 0, within the search range) that fits the
+    /// desired node.
+    ///
+    /// Allocates the node from the bottom of the found hole.
+    Low = bindings::drm_mm_insert_mode_DRM_MM_INSERT_LOW,
+
+    /// Search for the highest hole (address closest to `u64::MAX`, within the search range) that
+    /// fits the desired node.
+    ///
+    /// Allocates the node from the top of the found hole. The specified alignment for the node is
+    /// applied to the base of the node (`Node.start()`).
+    High = bindings::drm_mm_insert_mode_DRM_MM_INSERT_HIGH,
+
+    /// Search for the most recently evicted hole (within the search range) that fits the desired
+    /// node. This is appropriate for use immediately after performing an eviction scan and removing
+    /// the selected nodes to form a hole.
+    ///
+    /// Allocates the node from the bottom of the found hole.
+    Evict = bindings::drm_mm_insert_mode_DRM_MM_INSERT_EVICT,
+}
+
+/// A clonable, interlocked reference to the allocator state.
+///
+/// This is useful to perform actions on the user-supplied `AllocInner<T>` type given just a Node,
+/// without immediately taking the lock.
+#[derive(Clone)]
+pub struct InnerRef<A: AllocInner<T>, T>(Arc<Mutex<MmInner<A, T>>>);
+
+impl<A: AllocInner<T>, T> InnerRef<A, T> {
+    /// Operate on the user `AllocInner<T>` implementation, taking the lock.
+    pub fn with<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+        let mut l = self.0.lock();
+        cb(&mut l.1)
+    }
+}
+
+impl<A: AllocInner<T>, T> NodeData<A, T> {
+    /// Returns the color of the node (an opaque value)
+    pub fn color(&self) -> usize {
+        self.node.color as usize
+    }
+
+    /// Returns the start address of the node
+    pub fn start(&self) -> u64 {
+        self.node.start
+    }
+
+    /// Returns the size of the node in bytes
+    pub fn size(&self) -> u64 {
+        self.node.size
+    }
+
+    /// Operate on the user `AllocInner<T>` implementation associated with this node's allocator.
+    pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+        let mut l = self.mm.lock();
+        cb(&mut l.1)
+    }
+
+    /// Return a clonable, detached reference to the allocator inner data.
+    pub fn alloc_ref(&self) -> InnerRef<A, T> {
+        InnerRef(self.mm.clone())
+    }
+
+    /// Return a mutable reference to the inner data.
+    pub fn inner_mut(self: Pin<&mut Self>) -> &mut T {
+        // SAFETY: This is okay because inner is not structural
+        unsafe { &mut self.get_unchecked_mut().inner }
+    }
+}
+
+impl<A: AllocInner<T>, T> Deref for NodeData<A, T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<A: AllocInner<T>, T> Drop for NodeData<A, T> {
+    fn drop(&mut self) {
+        if self.valid {
+            let mut guard = self.mm.lock();
+
+            // Inform the user allocator that a node is being dropped.
+            guard
+                .1
+                .drop_object(self.start(), self.size(), self.color(), &mut self.inner);
+            // SAFETY: The MM lock is still taken, so we can safely remove the node.
+            unsafe { bindings::drm_mm_remove_node(&mut self.node) };
+        }
+    }
+}
+
+/// An instance of a DRM MM range allocator.
+pub struct Allocator<A: AllocInner<T>, T> {
+    mm: Arc<Mutex<MmInner<A, T>>>,
+    _p: PhantomData<T>,
+}
+
+impl<A: AllocInner<T>, T> Allocator<A, T> {
+    /// Create a new range allocator for the given start and size range of addresses.
+    ///
+    /// The user may optionally provide an inner object representing allocator state, which will
+    /// be protected by the same lock. If not required, `()` can be used.
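+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (not compile-tested; `MyInner` is a hypothetical
+    /// [`AllocInner`] implementation and the range is arbitrary):
+    ///
+    /// ```ignore
+    /// struct MyInner;
+    /// impl AllocInner<u32> for MyInner {}
+    ///
+    /// // Manage the range [0, 0x1000) and tag every node with a `u32`.
+    /// let mut alloc = Allocator::new(0, 0x1000, MyInner)?;
+    /// let node = alloc.insert_node(42u32, 0x100)?;
+    /// assert_eq!(node.size(), 0x100);
+    /// ```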
+    #[track_caller]
+    pub fn new(start: u64, size: u64, inner: A) -> Result<Allocator<A, T>> {
+        // The `drm_mm` inside `MmInner` is initialized by the `drm_mm_init()` call below.
+        let mm = UniqueArc::pin_init(
+            Mutex::new(MmInner(Opaque::uninit(), inner, PhantomData)),
+            GFP_KERNEL,
+        )?;
+
+        // SAFETY: The `Opaque` instance provides a valid pointer, and it is initialized by
+        // this call before any other use.
+        unsafe {
+            bindings::drm_mm_init(mm.lock().0.get(), start, size);
+        }
+
+        Ok(Allocator {
+            mm: mm.into(),
+            _p: PhantomData,
+        })
+    }
+
+    /// Insert a new node into the allocator of a given size.
+    ///
+    /// `node` is the user `T` type data to store into the node.
+    pub fn insert_node(&mut self, node: T, size: u64) -> Result<Node<A, T>> {
+        self.insert_node_generic(node, size, 0, 0, InsertMode::Best)
+    }
+
+    /// Insert a new node into the allocator of a given size, with configurable alignment,
+    /// color, and insertion mode.
+    ///
+    /// `node` is the user `T` type data to store into the node.
+    pub fn insert_node_generic(
+        &mut self,
+        node: T,
+        size: u64,
+        alignment: u64,
+        color: usize,
+        mode: InsertMode,
+    ) -> Result<Node<A, T>> {
+        self.insert_node_in_range(node, size, alignment, color, 0, u64::MAX, mode)
+    }
+
+    /// Insert a new node into the allocator of a given size, with configurable alignment,
+    /// color, insertion mode, and sub-range to allocate from.
+    ///
+    /// `node` is the user `T` type data to store into the node.
+    #[allow(clippy::too_many_arguments)]
+    pub fn insert_node_in_range(
+        &mut self,
+        node: T,
+        size: u64,
+        alignment: u64,
+        color: usize,
+        start: u64,
+        end: u64,
+        mode: InsertMode,
+    ) -> Result<Node<A, T>> {
+        let mut mm_node = KBox::new(
+            NodeData {
+                // SAFETY: This C struct should be zero-initialized.
+                node: unsafe { core::mem::zeroed() },
+                valid: false,
+                inner: node,
+                mm: self.mm.clone(),
+                _pin: PhantomPinned,
+            },
+            GFP_KERNEL,
+        )?;
+
+        let guard = self.mm.lock();
+        // SAFETY: We hold the lock and all pointers are valid.
+        to_result(unsafe {
+            bindings::drm_mm_insert_node_in_range(
+                guard.0.get(),
+                &mut mm_node.node,
+                size,
+                alignment,
+                color,
+                start,
+                end,
+                mode as u32,
+            )
+        })?;
+
+        mm_node.valid = true;
+
+        Ok(Pin::from(mm_node))
+    }
+
+    /// Insert a node into the allocator at a fixed start address.
+    ///
+    /// `node` is the user `T` type data to store into the node.
+    pub fn reserve_node(
+        &mut self,
+        node: T,
+        start: u64,
+        size: u64,
+        color: usize,
+    ) -> Result<Node<A, T>> {
+        let mut mm_node = KBox::new(
+            NodeData {
+                // SAFETY: This C struct should be zero-initialized.
+                node: unsafe { core::mem::zeroed() },
+                valid: false,
+                inner: node,
+                mm: self.mm.clone(),
+                _pin: PhantomPinned,
+            },
+            GFP_KERNEL,
+        )?;
+
+        mm_node.node.start = start;
+        mm_node.node.size = size;
+        mm_node.node.color = color as crate::ffi::c_ulong;
+
+        let guard = self.mm.lock();
+        // SAFETY: We hold the lock and all pointers are valid.
+        to_result(unsafe { bindings::drm_mm_reserve_node(guard.0.get(), &mut mm_node.node) })?;
+
+        mm_node.valid = true;
+
+        Ok(Pin::from(mm_node))
+    }
+
+    /// Operate on the inner user type `A`, taking the allocator lock
+    pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
+        let mut guard = self.mm.lock();
+        cb(&mut guard.1)
+    }
+}
+
+impl<A: AllocInner<T>, T> Drop for MmInner<A, T> {
+    fn drop(&mut self) {
+        // SAFETY: If the MmInner is dropped then all nodes are gone (since they hold references),
+        // so it is safe to tear down the allocator.
+        unsafe {
+            bindings::drm_mm_takedown(self.0.get());
+        }
+    }
+}
+
+// SAFETY: MmInner is safely Send if the AllocInner user type is Send.
+unsafe impl<A: Send + AllocInner<T>, T> Send for MmInner<A, T> {}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
new file mode 100644
index 00000000000000..50d1bb9139dcd3
--- /dev/null
+++ b/rust/kernel/drm/mod.rs
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM subsystem abstractions.
+
+pub mod device;
+pub mod drv;
+pub mod file;
+pub mod gem;
+#[cfg(CONFIG_DRM_GPUVM = "y")]
+pub mod gpuvm;
+pub mod ioctl;
+pub mod mm;
+pub mod sched;
+pub mod syncobj;
diff --git a/rust/kernel/drm/sched.rs b/rust/kernel/drm/sched.rs
new file mode 100644
index 00000000000000..7e98916a5bd2a9
--- /dev/null
+++ b/rust/kernel/drm/sched.rs
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM Scheduler
+//!
+//! C header: [`include/drm/gpu_scheduler.h`](srctree/include/drm/gpu_scheduler.h)
+
+use crate::{
+    bindings, device,
+    dma_fence::*,
+    error::{to_result, Result},
+    prelude::*,
+    sync::{Arc, UniqueArc},
+};
+use core::marker::PhantomData;
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use core::ptr::addr_of_mut;
+
+/// Scheduler status after timeout recovery
+#[repr(u32)]
+pub enum Status {
+    /// Device recovered from the timeout and can execute jobs again
+    Nominal = bindings::drm_gpu_sched_stat_DRM_GPU_SCHED_STAT_NOMINAL,
+    /// Device is no longer available
+    NoDevice = bindings::drm_gpu_sched_stat_DRM_GPU_SCHED_STAT_ENODEV,
+}
+
+/// Scheduler priorities
+#[repr(u32)]
+pub enum Priority {
+    /// Low userspace priority
+    Low = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_LOW,
+    /// Normal userspace priority
+    Normal = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_NORMAL,
+    /// High userspace priority
+    High = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_HIGH,
+    /// Kernel priority (highest)
+    Kernel = bindings::drm_sched_priority_DRM_SCHED_PRIORITY_KERNEL,
+}
+
+/// Trait to be implemented by driver job objects.
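+///
+/// # Examples
+///
+/// A minimal sketch of an implementation (not compile-tested; `MyJob` and its fence
+/// handling are hypothetical):
+///
+/// ```ignore
+/// struct MyJob {
+///     hw_fence: Option<Fence>,
+/// }
+///
+/// impl JobImpl for MyJob {
+///     fn run(job: &mut Job<Self>) -> Result<Option<Fence>> {
+///         // Submit the work to the hardware and hand its completion fence to the
+///         // scheduler.
+///         Ok(job.hw_fence.take())
+///     }
+///
+///     fn timed_out(_job: &mut Job<Self>) -> Status {
+///         // A real driver would trigger device recovery here.
+///         Status::NoDevice
+///     }
+/// }
+/// ```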
+pub trait JobImpl: Sized {
+    /// Called when the scheduler is considering scheduling this job next, to get another Fence
+    /// for this job to block on. Once it returns None, run() may be called.
+    fn prepare(_job: &mut Job<Self>) -> Option<Fence> {
+        None // Equivalent to NULL function pointer
+    }
+
+    /// Called to execute the job once all of the dependencies have been resolved. This may be
+    /// called multiple times, if timed_out() has happened and drm_sched_job_recovery() decides
+    /// to try it again.
+    fn run(job: &mut Job<Self>) -> Result<Option<Fence>>;
+
+    /// Called when a job has taken too long to execute, to trigger GPU recovery.
+    ///
+    /// This method is called in a workqueue context.
+    fn timed_out(job: &mut Job<Self>) -> Status;
+}
+
+unsafe extern "C" fn prepare_job_cb<T: JobImpl>(
+    sched_job: *mut bindings::drm_sched_job,
+    _s_entity: *mut bindings::drm_sched_entity,
+) -> *mut bindings::dma_fence {
+    // SAFETY: All of our jobs are Job<T>.
+    let p = unsafe { crate::container_of!(sched_job, Job<T>, job) as *mut Job<T> };
+
+    // SAFETY: All of our jobs are Job<T>.
+    match T::prepare(unsafe { &mut *p }) {
+        None => core::ptr::null_mut(),
+        Some(fence) => fence.into_raw(),
+    }
+}
+
+unsafe extern "C" fn run_job_cb<T: JobImpl>(
+    sched_job: *mut bindings::drm_sched_job,
+) -> *mut bindings::dma_fence {
+    // SAFETY: All of our jobs are Job<T>.
+    let p = unsafe { crate::container_of!(sched_job, Job<T>, job) as *mut Job<T> };
+
+    // SAFETY: All of our jobs are Job<T>.
+    match T::run(unsafe { &mut *p }) {
+        Err(e) => e.to_ptr(),
+        Ok(None) => core::ptr::null_mut(),
+        Ok(Some(fence)) => fence.into_raw(),
+    }
+}
+
+unsafe extern "C" fn timedout_job_cb<T: JobImpl>(
+    sched_job: *mut bindings::drm_sched_job,
+) -> bindings::drm_gpu_sched_stat {
+    // SAFETY: All of our jobs are Job<T>.
+    let p = unsafe { crate::container_of!(sched_job, Job<T>, job) as *mut Job<T> };
+
+    // SAFETY: All of our jobs are Job<T>.
+    T::timed_out(unsafe { &mut *p }) as bindings::drm_gpu_sched_stat
+}
+
+unsafe extern "C" fn free_job_cb<T: JobImpl>(sched_job: *mut bindings::drm_sched_job) {
+    // SAFETY: All of our jobs are Job<T>.
+    let p = unsafe { crate::container_of!(sched_job, Job<T>, job) as *mut Job<T> };
+
+    // Convert the job back to a Box and drop it
+    // SAFETY: All of our Job<T>s are created inside a box.
+    unsafe { drop(KBox::from_raw(p)) };
+}
+
+/// A DRM scheduler job.
+pub struct Job<T: JobImpl> {
+    job: bindings::drm_sched_job,
+    inner: T,
+}
+
+impl<T: JobImpl> Deref for Job<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<T: JobImpl> DerefMut for Job<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.inner
+    }
+}
+
+impl<T: JobImpl> Drop for Job<T> {
+    fn drop(&mut self) {
+        // SAFETY: At this point the job has either been submitted and this is being called from
+        // `free_job_cb` above, or it hasn't and it is safe to call `drm_sched_job_cleanup`.
+        unsafe { bindings::drm_sched_job_cleanup(&mut self.job) };
+    }
+}
+
+/// A pending DRM scheduler job (not yet armed)
+pub struct PendingJob<'a, T: JobImpl>(KBox<Job<T>>, PhantomData<&'a T>);
+
+impl<'a, T: JobImpl> PendingJob<'a, T> {
+    /// Add a fence as a dependency to the job
+    pub fn add_dependency(&mut self, fence: Fence) -> Result {
+        // SAFETY: C call with correct arguments
+        to_result(unsafe {
+            bindings::drm_sched_job_add_dependency(&mut self.0.job, fence.into_raw())
+        })
+    }
+
+    /// Arm the job to make it ready for execution
+    pub fn arm(mut self) -> ArmedJob<'a, T> {
+        // SAFETY: C call with correct arguments
+        unsafe { bindings::drm_sched_job_arm(&mut self.0.job) };
+        ArmedJob(self.0, PhantomData)
+    }
+}
+
+impl<'a, T: JobImpl> Deref for PendingJob<'a, T> {
+    type Target = Job<T>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<'a, T: JobImpl> DerefMut for PendingJob<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+/// An armed DRM scheduler job (not yet submitted)
+pub struct ArmedJob<'a, T: JobImpl>(KBox<Job<T>>, PhantomData<&'a T>);
+
+impl<'a, T: JobImpl> ArmedJob<'a, T> {
+    /// Returns the job fences
+    pub fn fences(&mut self) -> JobFences<'_> {
+        // SAFETY: s_fence is always a valid drm_sched_fence pointer
+        JobFences(unsafe { &mut *self.0.job.s_fence })
+    }
+
+    /// Push the job for execution into the scheduler
+    pub fn push(self) {
+        // After this point, the job is submitted and owned by the scheduler
+        let ptr = KBox::into_raw(self.0);
+
+        // SAFETY: We are passing in ownership of a valid Box raw pointer.
+        unsafe { bindings::drm_sched_entity_push_job(addr_of_mut!((*ptr).job)) };
+    }
+}
+impl<'a, T: JobImpl> Deref for ArmedJob<'a, T> {
+    type Target = Job<T>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<'a, T: JobImpl> DerefMut for ArmedJob<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+/// Reference to the bundle of fences attached to a DRM scheduler job
+pub struct JobFences<'a>(&'a mut bindings::drm_sched_fence);
+
+impl<'a> JobFences<'a> {
+    /// Returns a new reference to the job scheduled fence.
+    pub fn scheduled(&mut self) -> Fence {
+        // SAFETY: self.0.scheduled is always a valid fence
+        unsafe { Fence::get_raw(&mut self.0.scheduled) }
+    }
+
+    /// Returns a new reference to the job finished fence.
+    pub fn finished(&mut self) -> Fence {
+        // SAFETY: self.0.finished is always a valid fence
+        unsafe { Fence::get_raw(&mut self.0.finished) }
+    }
+}
+
+struct EntityInner<T: JobImpl> {
+    entity: bindings::drm_sched_entity,
+    // TODO: Allow users to share guilty flag between entities
+    sched: Arc<SchedulerInner<T>>,
+    guilty: bindings::atomic_t,
+    _p: PhantomData<T>,
+}
+
+impl<T: JobImpl> Drop for EntityInner<T> {
+    fn drop(&mut self) {
+        // SAFETY: The EntityInner is initialized. This will cancel/free all jobs.
+        unsafe { bindings::drm_sched_entity_destroy(&mut self.entity) };
+    }
+}
+
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Sync for EntityInner<T> {}
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Send for EntityInner<T> {}
+
+/// A DRM scheduler entity.
+pub struct Entity<T: JobImpl>(Pin<KBox<EntityInner<T>>>);
+
+impl<T: JobImpl> Entity<T> {
+    /// Create a new scheduler entity.
+    pub fn new(sched: &Scheduler<T>, priority: Priority) -> Result<Self> {
+        let mut entity: KBox<MaybeUninit<EntityInner<T>>> =
+            KBox::new_uninit(GFP_KERNEL | __GFP_ZERO)?;
+
+        let mut sched_ptr = &sched.0.sched as *const _ as *mut _;
+
+        // SAFETY: The Box is allocated above and valid.
+        unsafe {
+            bindings::drm_sched_entity_init(
+                addr_of_mut!((*entity.as_mut_ptr()).entity),
+                priority as _,
+                &mut sched_ptr,
+                1,
+                addr_of_mut!((*entity.as_mut_ptr()).guilty),
+            )
+        };
+
+        // SAFETY: The Box is allocated above and valid.
+        unsafe { addr_of_mut!((*entity.as_mut_ptr()).sched).write(sched.0.clone()) };
+
+        // SAFETY: entity is now initialized.
+        Ok(Self(Pin::from(unsafe { entity.assume_init() })))
+    }
+
+    /// Create a new job on this entity.
+    ///
+    /// The entity must outlive the pending job until it transitions into the submitted state,
+    /// after which the scheduler owns it. Since jobs must be submitted in creation order,
+    /// this requires a mutable reference to the entity, ensuring that only one new job can be
+    /// in flight at once.
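+    ///
+    /// # Examples
+    ///
+    /// A sketch of the full submission flow (not compile-tested; `entity`, `my_job` and
+    /// `some_fence` are placeholders):
+    ///
+    /// ```ignore
+    /// let mut pending = entity.new_job(1, my_job)?;
+    /// pending.add_dependency(some_fence)?;
+    /// let mut armed = pending.arm();
+    /// let finished = armed.fences().finished();
+    /// armed.push();
+    /// // `finished` signals once the job has completed.
+    /// ```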
+    pub fn new_job(&mut self, credits: u32, inner: T) -> Result<PendingJob<'_, T>> {
+        let mut job: KBox<MaybeUninit<Job<T>>> = KBox::new_uninit(GFP_KERNEL | __GFP_ZERO)?;
+
+        // SAFETY: We hold a reference to the entity (which is a valid pointer),
+        // and the job object was just allocated above.
+        to_result(unsafe {
+            bindings::drm_sched_job_init(
+                addr_of_mut!((*job.as_mut_ptr()).job),
+                &self.0.as_ref().get_ref().entity as *const _ as *mut _,
+                credits,
+                core::ptr::null_mut(),
+            )
+        })?;
+
+        // SAFETY: The Box pointer is valid, and this initializes the inner member.
+        unsafe { addr_of_mut!((*job.as_mut_ptr()).inner).write(inner) };
+
+        // SAFETY: All fields of the Job<T> are now initialized.
+        Ok(PendingJob(unsafe { job.assume_init() }, PhantomData))
+    }
+}
+
+/// DRM scheduler inner data
+pub struct SchedulerInner<T: JobImpl> {
+    sched: bindings::drm_gpu_scheduler,
+    _p: PhantomData<T>,
+}
+
+impl<T: JobImpl> Drop for SchedulerInner<T> {
+    fn drop(&mut self) {
+        // SAFETY: The scheduler is valid. This assumes drm_sched_fini() will take care of
+        // freeing all in-progress jobs.
+        unsafe { bindings::drm_sched_fini(&mut self.sched) };
+    }
+}
+
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Sync for SchedulerInner<T> {}
+// SAFETY: TODO
+unsafe impl<T: JobImpl> Send for SchedulerInner<T> {}
+
+/// A DRM Scheduler
+pub struct Scheduler<T: JobImpl>(Arc<SchedulerInner<T>>);
+
+impl<T: JobImpl> Scheduler<T> {
+    const OPS: bindings::drm_sched_backend_ops = bindings::drm_sched_backend_ops {
+        prepare_job: Some(prepare_job_cb::<T>),
+        run_job: Some(run_job_cb::<T>),
+        timedout_job: Some(timedout_job_cb::<T>),
+        free_job: Some(free_job_cb::<T>),
+        update_job_credits: None,
+    };
+    /// Creates a new DRM Scheduler object
+    // TODO: Shared timeout workqueues & scores
+    pub fn new(
+        device: &device::Device,
+        num_rqs: u32,
+        credit_limit: u32,
+        hang_limit: u32,
+        timeout_ms: usize,
+        name: &'static CStr,
+    ) -> Result<Scheduler<T>> {
+        let mut sched: UniqueArc<MaybeUninit<SchedulerInner<T>>> =
+            UniqueArc::new_uninit(GFP_KERNEL)?;
+
+        // SAFETY: zero sched->sched_rq as drm_sched_init() uses it to exit early without
+        // initialisation
+        // TODO: allocate sched zeroed instead
+        unsafe {
+            (*sched.as_mut_ptr()).sched.sched_rq = core::ptr::null_mut();
+        };
+
+        // SAFETY: The drm_sched pointer is valid and pinned as it was just allocated above.
+        //         `device` is valid by its type invariants
+        to_result(unsafe {
+            bindings::drm_sched_init(
+                addr_of_mut!((*sched.as_mut_ptr()).sched),
+                &Self::OPS,
+                core::ptr::null_mut(),
+                num_rqs,
+                credit_limit,
+                hang_limit,
+                bindings::msecs_to_jiffies(timeout_ms.try_into()?).try_into()?,
+                core::ptr::null_mut(),
+                core::ptr::null_mut(),
+                name.as_char_ptr(),
+                device.as_raw(),
+            )
+        })?;
+
+        // SAFETY: All fields of SchedulerInner are now initialized.
+        Ok(Scheduler(unsafe { sched.assume_init() }.into()))
+    }
+}
diff --git a/rust/kernel/drm/syncobj.rs b/rust/kernel/drm/syncobj.rs
new file mode 100644
index 00000000000000..e2d82c0ceb1e0b
--- /dev/null
+++ b/rust/kernel/drm/syncobj.rs
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! DRM Sync Objects
+//!
+//! C header: [`include/drm/drm_syncobj.h`](srctree/include/drm/drm_syncobj.h)
+
+use crate::{bindings, dma_fence::*, drm, error::Result, prelude::*};
+
+/// A DRM Sync Object
+///
+/// # Invariants
+/// ptr is a valid pointer to a drm_syncobj and we own a reference to it.
+pub struct SyncObj {
+    ptr: *mut bindings::drm_syncobj,
+}
+
+impl SyncObj {
+    /// Looks up a sync object by its handle for a given `File`.
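+    ///
+    /// # Examples
+    ///
+    /// A usage sketch (not compile-tested; `file` and `handle` would come from an ioctl
+    /// handler and are placeholders here):
+    ///
+    /// ```ignore
+    /// let syncobj = SyncObj::lookup_handle(file, handle)?;
+    /// if let Some(fence) = syncobj.fence_get() {
+    ///     // Wait on or otherwise consume the fence here.
+    /// }
+    /// ```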
+    pub fn lookup_handle(file: &impl drm::file::GenericFile, handle: u32) -> Result<SyncObj> {
+        // SAFETY: The arguments are all valid per the type invariants.
+        let ptr = unsafe { bindings::drm_syncobj_find(file.raw() as *mut _, handle) };
+
+        if ptr.is_null() {
+            Err(ENOENT)
+        } else {
+            Ok(SyncObj { ptr })
+        }
+    }
+
+    /// Returns the DMA fence associated with this sync object, if any.
+    pub fn fence_get(&self) -> Option<Fence> {
+        // SAFETY: self.ptr is always valid
+        let fence = unsafe { bindings::drm_syncobj_fence_get(self.ptr) };
+        if fence.is_null() {
+            None
+        } else {
+            // SAFETY: The pointer is non-NULL and drm_syncobj_fence_get acquired an
+            // additional reference.
+            Some(unsafe { Fence::from_raw(fence) })
+        }
+    }
+
+    /// Replaces the DMA fence with a new one, or removes it if fence is None.
+    pub fn replace_fence(&self, fence: Option<&Fence>) {
+        // SAFETY: All arguments should be valid per the respective type invariants.
+        unsafe {
+            bindings::drm_syncobj_replace_fence(
+                self.ptr,
+                fence.map_or(core::ptr::null_mut(), |a| a.raw()),
+            )
+        };
+    }
+
+    /// Adds a new timeline point to the syncobj.
+    pub fn add_point(&self, chain: FenceChain, fence: &Fence, point: u64) {
+        // SAFETY: All arguments should be valid per the respective type invariants.
+        // This takes over the FenceChain ownership.
+        unsafe { bindings::drm_syncobj_add_point(self.ptr, chain.into_raw(), fence.raw(), point) };
+    }
+}
+
+impl Drop for SyncObj {
+    fn drop(&mut self) {
+        // SAFETY: We own a reference to this syncobj.
+        unsafe { bindings::drm_syncobj_put(self.ptr) };
+    }
+}
+
+impl Clone for SyncObj {
+    fn clone(&self) -> Self {
+        // SAFETY: `ptr` is valid per the type invariant and we own a reference to it.
+        unsafe { bindings::drm_syncobj_get(self.ptr) };
+        SyncObj { ptr: self.ptr }
+    }
+}
+
+// SAFETY: drm_syncobj operations are internally locked.
+unsafe impl Sync for SyncObj {}
+// SAFETY: drm_syncobj operations are internally locked.
+unsafe impl Send for SyncObj {}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index a194d83e6835c0..e659b9f36dc26b 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -64,6 +64,11 @@ pub mod code {
     declare_err!(EPIPE, "Broken pipe.");
     declare_err!(EDOM, "Math argument out of domain of func.");
     declare_err!(ERANGE, "Math result not representable.");
+    declare_err!(ENOSYS, "Invalid system call number.");
+    declare_err!(ENODATA, "No data available.");
+    declare_err!(EOVERFLOW, "Value too large for defined data type.");
+    declare_err!(ETIMEDOUT, "Connection timed out.");
+    declare_err!(ECANCELED, "Operation Canceled.");
     declare_err!(ERESTARTSYS, "Restart the system call.");
     declare_err!(ERESTARTNOINTR, "System call was interrupted by a signal and will be restarted.");
     declare_err!(ERESTARTNOHAND, "Restart if no handler.");
diff --git a/rust/kernel/iio/common/aop_sensors.rs b/rust/kernel/iio/common/aop_sensors.rs
new file mode 100644
index 00000000000000..49b30bf246d9d9
--- /dev/null
+++ b/rust/kernel/iio/common/aop_sensors.rs
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Apple AOP sensors common code
+//!
+//! Copyright (C) The Asahi Linux Contributors
+
+use core::marker::{PhantomData, PhantomPinned};
+use core::ptr;
+use core::sync::atomic::{AtomicU32, Ordering};
+
+use kernel::{
+    bindings, platform, prelude::*, soc::apple::aop::FakehidListener, sync::Arc,
+    types::ForeignOwnable, ThisModule,
+};
+
+/// Converts raw AOP "fakehid" report payloads into a single sensor reading.
+pub trait MessageProcessor {
+    /// Parses `message` and returns the value to report through the IIO channel.
+    fn process(&self, message: &[u8]) -> u32;
+}
+
+/// Shared state of an AOP-backed IIO sensor: the owning device, the IIO channel type,
+/// the most recent reading and the message processor that produces it.
+pub struct AopSensorData<T: MessageProcessor> {
+    dev: platform::Device,
+    ty: u32,
+    value: AtomicU32,
+    msg_proc: T,
+}
+
+impl<T: MessageProcessor> AopSensorData<T> {
+    /// Allocates a new `AopSensorData` for the given device and IIO channel type.
+    pub fn new(dev: platform::Device, ty: u32, msg_proc: T) -> Result<Arc<AopSensorData<T>>> {
+        Ok(Arc::new(
+            AopSensorData {
+                dev,
+                ty,
+                value: AtomicU32::new(0),
+                msg_proc,
+            },
+            GFP_KERNEL,
+        )?)
+    }
+}
+
+impl<T: MessageProcessor> FakehidListener for AopSensorData<T> {
+    fn process_fakehid_report(&self, data: &[u8]) -> Result<()> {
+        self.value
+            .store(self.msg_proc.process(data), Ordering::Relaxed);
+        Ok(())
+    }
+}
+
+unsafe extern "C" fn aop_read_raw<T: MessageProcessor + 'static>(
+    dev: *mut bindings::iio_dev,
+    chan: *const bindings::iio_chan_spec,
+    val: *mut i32,
+    _: *mut i32,
+    mask: isize,
+) -> i32 {
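+    // SAFETY: The IIO core passes a valid `iio_dev` whose `priv_` pointer was set from a
+    // `ForeignOwnable` `Arc<AopSensorData<T>>` in `IIORegistration::new`, and a valid
+    // channel spec.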
+    let data = unsafe { Arc::<AopSensorData<T>>::borrow((*dev).priv_) };
+    let ty = unsafe { (*chan).type_ };
+    if mask != bindings::BINDINGS_IIO_CHAN_INFO_PROCESSED as isize
+        && mask != bindings::BINDINGS_IIO_CHAN_INFO_RAW as isize
+    {
+        return EINVAL.to_errno();
+    }
+    if data.ty != ty {
+        return EINVAL.to_errno();
+    }
+    let value = data.value.load(Ordering::Relaxed);
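+    // SAFETY: The IIO core guarantees that `val` points to valid storage for the result.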
+    unsafe {
+        *val = value as i32;
+    }
+    bindings::IIO_VAL_INT as i32
+}
+
+struct IIOSpec {
+    spec: [bindings::iio_chan_spec; 1],
+    vtable: bindings::iio_info,
+    _p: PhantomPinned,
+}
+
+/// An owned registration of an IIO device backed by [`AopSensorData`]; the device is
+/// unregistered and freed on drop.
+pub struct IIORegistration<T: MessageProcessor + 'static> {
+    dev: *mut bindings::iio_dev,
+    spec: Pin<KBox<IIOSpec>>,
+    registered: bool,
+    _p: PhantomData<AopSensorData<T>>,
+}
+
+impl<T: MessageProcessor + 'static> IIORegistration<T> {
+    /// Allocates and registers an IIO device for `data` with a single channel of type `ty`.
+    pub fn new(
+        data: Arc<AopSensorData<T>>,
+        name: &'static CStr,
+        ty: u32,
+        info_mask: isize,
+        module: &ThisModule,
+    ) -> Result<Self> {
+        let spec = KBox::pin(
+            IIOSpec {
+                spec: [bindings::iio_chan_spec {
+                    type_: ty,
+                    __bindgen_anon_1: bindings::iio_chan_spec__bindgen_ty_1 {
+                        scan_type: bindings::iio_scan_type {
+                            sign: b'u' as _,
+                            realbits: 32,
+                            storagebits: 32,
+                            ..Default::default()
+                        },
+                    },
+                    info_mask_separate: info_mask,
+                    ..Default::default()
+                }],
+                vtable: bindings::iio_info {
+                    read_raw: Some(aop_read_raw::<T>),
+                    ..Default::default()
+                },
+                _p: PhantomPinned,
+            },
+            GFP_KERNEL,
+        )?;
+        let mut this = IIORegistration {
+            dev: ptr::null_mut(),
+            spec,
+            registered: false,
+            _p: PhantomData,
+        };
+        // SAFETY: `data.dev` is a valid platform device by its type invariant.
+        this.dev = unsafe { bindings::iio_device_alloc(data.dev.as_ref().as_raw(), 0) };
+        if this.dev.is_null() {
+            return Err(ENOMEM);
+        }
+        unsafe {
+            (*this.dev).priv_ = data.clone().into_foreign() as _;
+            (*this.dev).name = name.as_ptr() as _;
+            // spec is now pinned
+            (*this.dev).channels = this.spec.spec.as_ptr();
+            (*this.dev).num_channels = this.spec.spec.len() as i32;
+            (*this.dev).info = &this.spec.vtable;
+        }
+        let ret = unsafe { bindings::__iio_device_register(this.dev, module.as_ptr()) };
+        if ret < 0 {
+            dev_err!(data.dev.as_ref(), "Unable to register IIO sensor\n");
+            return Err(Error::from_errno(ret));
+        }
+        this.registered = true;
+        Ok(this)
+    }
+}
+
+impl<T: MessageProcessor + 'static> Drop for IIORegistration<T> {
+    fn drop(&mut self) {
+        if !self.dev.is_null() {
+            unsafe {
+                if self.registered {
+                    bindings::iio_device_unregister(self.dev);
+                }
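+                // Reclaim and drop the `Arc` reference that was handed to the device via
+                // `into_foreign()` in `IIORegistration::new`.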
+                Arc::<AopSensorData<T>>::from_foreign((*self.dev).priv_);
+                bindings::iio_device_free(self.dev);
+            }
+        }
+    }
+}
+
+unsafe impl<T: MessageProcessor> Send for IIORegistration<T> {}
+unsafe impl<T: MessageProcessor> Sync for IIORegistration<T> {}
diff --git a/rust/kernel/iio/common/mod.rs b/rust/kernel/iio/common/mod.rs
new file mode 100644
index 00000000000000..570644ce0938a7
--- /dev/null
+++ b/rust/kernel/iio/common/mod.rs
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! IIO common modules
+
+#[cfg(any(
+    CONFIG_IIO_AOP_SENSOR_LAS = "y",
+    CONFIG_IIO_AOP_SENSOR_LAS = "m",
+    CONFIG_IIO_AOP_SENSOR_ALS = "y",
+    CONFIG_IIO_AOP_SENSOR_ALS = "m",
+))]
+pub mod aop_sensors;
diff --git a/rust/kernel/iio/mod.rs b/rust/kernel/iio/mod.rs
new file mode 100644
index 00000000000000..b0cb308f0b454c
--- /dev/null
+++ b/rust/kernel/iio/mod.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! Industrial IO drivers
+
+pub mod common;
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index e25d047f3c8276..10c7532d25a14d 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: Apache-2.0 OR MIT
+// FIXME
+#![allow(clippy::undocumented_unsafe_blocks)]
 
 //! API to safely and fallibly initialize pinned `struct`s using in-place constructors.
 //!
@@ -46,7 +48,7 @@
 //! }
 //!
 //! let foo = pin_init!(Foo {
-//!     a <- new_mutex!(42, "Foo::a"),
+//!     a <- Mutex::new_named(42, "Foo::a"),
 //!     b: 24,
 //! });
 //! ```
@@ -65,7 +67,7 @@
 //! #     b: u32,
 //! # }
 //! # let foo = pin_init!(Foo {
-//! #     a <- new_mutex!(42, "Foo::a"),
+//! #     a <- Mutex::new_named(42, "Foo::a"),
 //! #     b: 24,
 //! # });
 //! let foo: Result<Pin<KBox<Foo>>> = KBox::pin_init(foo, GFP_KERNEL);
@@ -98,7 +100,7 @@
 //! impl DriverData {
 //!     fn new() -> impl PinInit<Self, Error> {
 //!         try_pin_init!(Self {
-//!             status <- new_mutex!(0, "DriverData::status"),
+//!             status <- Mutex::new_named(0, "DriverData::status"),
 //!             buffer: KBox::init(kernel::init::zeroed(), GFP_KERNEL)?,
 //!         })
 //!     }
@@ -253,7 +255,7 @@ pub mod macros;
 /// }
 ///
 /// stack_pin_init!(let foo = pin_init!(Foo {
-///     a <- new_mutex!(42),
+///     a <- Mutex::new(42),
 ///     b: Bar {
 ///         x: 64,
 ///     },
@@ -313,7 +315,7 @@ macro_rules! stack_pin_init {
 /// }
 ///
 /// stack_try_pin_init!(let foo: Result<Pin<&mut Foo>, AllocError> = pin_init!(Foo {
-///     a <- new_mutex!(42),
+///     a <- Mutex::new(42),
 ///     b: KBox::new(Bar {
 ///         x: 64,
 ///     }, GFP_KERNEL)?,
@@ -347,7 +349,7 @@ macro_rules! stack_pin_init {
 /// }
 ///
 /// stack_try_pin_init!(let foo: Pin<&mut Foo> =? pin_init!(Foo {
-///     a <- new_mutex!(42),
+///     a <- Mutex::new(42),
 ///     b: KBox::new(Bar {
 ///         x: 64,
 ///     }, GFP_KERNEL)?,
@@ -569,12 +571,12 @@ macro_rules! stack_try_pin_init {
 // module `__internal` inside of `init/__internal.rs`.
 #[macro_export]
 macro_rules! pin_init {
-    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+    ($(&$this:ident in)? $t:ident $(::$p:ident)* $(::<$($generics:ty),* $(,)?>)? {
         $($fields:tt)*
     }) => {
         $crate::__init_internal!(
             @this($($this)?),
-            @typ($t $(::<$($generics),*>)?),
+            @typ($t $(::$p)* $(::<$($generics),*>)?),
             @fields($($fields)*),
             @error(::core::convert::Infallible),
             @data(PinData, use_data),
@@ -625,12 +627,12 @@ macro_rules! pin_init {
 // module `__internal` inside of `init/__internal.rs`.
 #[macro_export]
 macro_rules! try_pin_init {
-    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+    ($(&$this:ident in)? $t:ident $(::$p:ident)* $(::<$($generics:ty),* $(,)?>)? {
         $($fields:tt)*
     }) => {
         $crate::__init_internal!(
             @this($($this)?),
-            @typ($t $(::<$($generics),*>)? ),
+            @typ($t $(::$p)* $(::<$($generics),*>)? ),
             @fields($($fields)*),
             @error($crate::error::Error),
             @data(PinData, use_data),
@@ -639,12 +641,12 @@ macro_rules! try_pin_init {
             @munch_fields($($fields)*),
         )
     };
-    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+    ($(&$this:ident in)? $t:ident $(::$p:ident)* $(::<$($generics:ty),* $(,)?>)? {
         $($fields:tt)*
     }? $err:ty) => {
         $crate::__init_internal!(
             @this($($this)?),
-            @typ($t $(::<$($generics),*>)? ),
+            @typ($t $(::$p)* $(::<$($generics),*>)? ),
             @fields($($fields)*),
             @error($err),
             @data(PinData, use_data),
@@ -674,12 +676,12 @@ macro_rules! try_pin_init {
 // module `__internal` inside of `init/__internal.rs`.
 #[macro_export]
 macro_rules! init {
-    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+    ($(&$this:ident in)? $t:ident $(::$p:ident)* $(::<$($generics:ty),* $(,)?>)? {
         $($fields:tt)*
     }) => {
         $crate::__init_internal!(
             @this($($this)?),
-            @typ($t $(::<$($generics),*>)?),
+            @typ($t $(::$p)* $(::<$($generics),*>)?),
             @fields($($fields)*),
             @error(::core::convert::Infallible),
             @data(InitData, /*no use_data*/),
@@ -725,12 +727,12 @@ macro_rules! init {
 // module `__internal` inside of `init/__internal.rs`.
 #[macro_export]
 macro_rules! try_init {
-    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+    ($(&$this:ident in)? $t:ident $(::$p:ident)* $(::<$($generics:ty),* $(,)?>)? {
         $($fields:tt)*
     }) => {
         $crate::__init_internal!(
             @this($($this)?),
-            @typ($t $(::<$($generics),*>)?),
+            @typ($t $(::$p)* $(::<$($generics),*>)?),
             @fields($($fields)*),
             @error($crate::error::Error),
             @data(InitData, /*no use_data*/),
@@ -739,12 +741,12 @@ macro_rules! try_init {
             @munch_fields($($fields)*),
         )
     };
-    ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
+    ($(&$this:ident in)? $t:ident $(::$p:ident)* $(::<$($generics:ty),* $(,)?>)? {
         $($fields:tt)*
     }? $err:ty) => {
         $crate::__init_internal!(
             @this($($this)?),
-            @typ($t $(::<$($generics),*>)?),
+            @typ($t $(::$p)* $(::<$($generics),*>)?),
             @fields($($fields)*),
             @error($err),
             @data(InitData, /*no use_data*/),
@@ -1358,6 +1360,21 @@ pub unsafe trait PinnedDrop: __internal::HasPinData {
     fn drop(self: Pin<&mut Self>, only_call_from_drop: __internal::OnlyCallFromDrop);
 }
 
+/// Create a new default T.
+///
+/// The returned initializer will use Default::default to initialize the `slot`.
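+///
+/// # Examples
+///
+/// A usage sketch (assuming a hypothetical `Default` type `Foo` being initialized in
+/// place):
+///
+/// ```ignore
+/// let foo = KBox::init(kernel::init::default::<Foo>(), GFP_KERNEL)?;
+/// ```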
+#[inline]
+pub fn default<T: Default>() -> impl Init<T> {
+    // SAFETY: Because `T: Default`, T cannot require pinning and
+    // we can just move the data into the slot.
+    unsafe {
+        init_from_closure(|slot: *mut T| {
+            // Use `write()` to avoid dropping the uninitialized memory behind `slot`.
+            slot.write(Default::default());
+            Ok(())
+        })
+    }
+}
+
 /// Marker trait for types that can be initialized by writing just zeroes.
 ///
 /// # Safety
@@ -1368,7 +1385,14 @@ pub unsafe trait PinnedDrop: __internal::HasPinData {
 /// ```rust,ignore
 /// let val: Self = unsafe { core::mem::zeroed() };
 /// ```
-pub unsafe trait Zeroable {}
+pub unsafe trait Zeroable: core::marker::Sized {
+    /// Create a new zeroed T.
+    ///
+    /// Directly returns a zeroed T, analogous to Default::default().
+    fn zeroed() -> Self {
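+        // SAFETY: `Self` implements `Zeroable`, so the all-zero bit pattern is a valid
+        // value of `Self`.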
+        unsafe { core::mem::zeroed() }
+    }
+}
 
 /// Create a new zeroed T.
 ///
diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
index b7213962a6a5ac..daba04d88ffbad 100644
--- a/rust/kernel/init/macros.rs
+++ b/rust/kernel/init/macros.rs
@@ -513,7 +513,9 @@ macro_rules! __pinned_drop {
             }
         ),
     ) => {
+        #[allow(clippy::undocumented_unsafe_blocks)]
         // SAFETY: TODO.
+        // FIXME
         unsafe $($impl_sig)* {
             // Inherit all attributes and the type/ident tokens for the signature.
             $(#[$($attr)*])*
@@ -868,12 +870,16 @@ macro_rules! __pin_data {
             {
                 type PinData = __ThePinData<$($ty_generics)*>;
 
+                #[allow(clippy::undocumented_unsafe_blocks)]
+                // FIXME
                 unsafe fn __pin_data() -> Self::PinData {
                     __ThePinData { __phantom: ::core::marker::PhantomData }
                 }
             }
 
+            #[allow(clippy::undocumented_unsafe_blocks)]
             // SAFETY: TODO.
+            // FIXME
             unsafe impl<$($impl_generics)*>
                 $crate::init::__internal::PinData for __ThePinData<$($ty_generics)*>
             where $($whr)*
@@ -1000,7 +1006,9 @@ macro_rules! __pin_data {
                     slot: *mut $p_type,
                     init: impl $crate::init::PinInit<$p_type, E>,
                 ) -> ::core::result::Result<(), E> {
+                    #[allow(clippy::undocumented_unsafe_blocks)]
                     // SAFETY: TODO.
+                    // FIXME
                     unsafe { $crate::init::PinInit::__pinned_init(init, slot) }
                 }
             )*
@@ -1011,7 +1019,9 @@ macro_rules! __pin_data {
                     slot: *mut $type,
                     init: impl $crate::init::Init<$type, E>,
                 ) -> ::core::result::Result<(), E> {
+                    #[allow(clippy::undocumented_unsafe_blocks)]
                     // SAFETY: TODO.
+                    // FIXME
                     unsafe { $crate::init::Init::__init(init, slot) }
                 }
             )*
@@ -1126,6 +1136,8 @@ macro_rules! __init_internal {
         // no possibility of returning without `unsafe`.
         struct __InitOk;
         // Get the data about fields from the supplied type.
+        #[allow(clippy::undocumented_unsafe_blocks)]
+        // FIXME
         //
         // SAFETY: TODO.
         let data = unsafe {
@@ -1183,6 +1195,7 @@ macro_rules! __init_internal {
         let init = move |slot| -> ::core::result::Result<(), $err> {
             init(slot).map(|__InitOk| ())
         };
+        #[allow(clippy::undocumented_unsafe_blocks)]
         // SAFETY: TODO.
         let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };
         init
@@ -1332,6 +1345,7 @@ macro_rules! __init_internal {
         // Endpoint, nothing more to munch, create the initializer.
         // Since we are in the closure that is never called, this will never get executed.
         // We abuse `slot` to get the correct type inference here:
+        #[allow(clippy::undocumented_unsafe_blocks)]
         //
         // SAFETY: TODO.
         unsafe {
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index d4a73e52e3ee68..c966706e260913 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -7,6 +7,9 @@
 use crate::error::{code::EINVAL, Result};
 use crate::{bindings, build_assert};
 
+pub mod mem;
+pub mod resource;
+
 /// Raw representation of an MMIO region.
 ///
 /// By itself, the existence of an instance of this structure does not provide any guarantees that
@@ -200,6 +203,15 @@ impl<const SIZE: usize> Io<SIZE> {
         }
     }
 
+    #[inline]
+    const fn length_valid(offset: usize, length: usize, size: usize) -> bool {
+        if let Some(end) = offset.checked_add(length) {
+            end <= size
+        } else {
+            false
+        }
+    }
+
     #[inline]
     fn io_addr<U>(&self, offset: usize) -> Result<usize> {
         if !Self::offset_valid::<U>(offset, self.maxsize()) {
@@ -218,6 +230,67 @@ impl<const SIZE: usize> Io<SIZE> {
         self.addr() + offset
     }
 
+    /// Copy a memory block from I/O memory into the specified buffer.
+    ///
+    /// # Examples
+    /// ```
+    /// use kernel::device::Device;
+    /// use kernel::io::mem::IoMem;
+    /// use kernel::io::resource::Resource;
+    ///
+    /// fn test(device: &Device, res: Resource) -> Result {
+    ///     // Create an i/o memory block of at least 100 bytes.
+    ///     let devres_mem = IoMem::<100>::new(res, device)?;
+    ///     // Acquire access to the memory block.
+    ///     let mem = devres_mem.try_access()?;
+    ///
+    ///     let mut buffer: [u8; 32] = [0; 32];
+    ///
+    ///     // Memcpy 16 bytes from offset 10 of the i/o memory block into the buffer.
+    ///     mem.try_memcpy_fromio(&mut buffer[..16], 10)?;
+    ///
+    ///     Ok(())
+    /// }
+    /// ```
+    pub fn try_memcpy_fromio(&self, buffer: &mut [u8], offset: usize) -> Result {
+        if buffer.is_empty() || !Self::length_valid(offset, buffer.len(), self.maxsize()) {
+            return Err(EINVAL);
+        }
+        let addr = self.io_addr::<crate::ffi::c_char>(offset)?;
+
+        // SAFETY:
+        //   - The type invariants guarantee that `addr` is a valid pointer.
+        //   - The bounds of `buffer` are checked with a call to `length_valid`.
+        unsafe {
+            bindings::memcpy_fromio(
+                buffer.as_mut_ptr() as *mut _,
+                addr as *const _,
+                buffer.len() as _,
+            )
+        };
+        Ok(())
+    }
+
+    /// Copy a memory block from the specified buffer into I/O memory.
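+    ///
+    /// # Examples
+    ///
+    /// A sketch mirroring the read example above (assuming `mem` is an already accessed
+    /// `IoMem` region large enough for the write):
+    ///
+    /// ```ignore
+    /// let data: [u8; 16] = [0xff; 16];
+    ///
+    /// // Memcpy 16 bytes from the buffer into the i/o memory block at offset 10.
+    /// mem.try_memcpy_toio(10, &data)?;
+    /// ```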
+    pub fn try_memcpy_toio(&self, offset: usize, buffer: &[u8]) -> Result {
+        if buffer.is_empty() || !Self::length_valid(offset, buffer.len(), self.maxsize()) {
+            return Err(EINVAL);
+        }
+        // `length_valid` above already checked that `offset + buffer.len()` is within bounds.
+        let addr = self.io_addr::<crate::ffi::c_char>(offset)?;
+
+        // SAFETY:
+        //   - The type invariants guarantee that `addr` is a valid pointer.
+        //   - The bounds of `buffer` are checked with a call to `length_valid`.
+        unsafe {
+            bindings::memcpy_toio(
+                addr as *mut _,
+                buffer.as_ptr() as *const _,
+                buffer.len() as _,
+            )
+        };
+        Ok(())
+    }
+
     define_read!(readb, try_readb, u8);
     define_read!(readw, try_readw, u16);
     define_read!(readl, try_readl, u32);
diff --git a/rust/kernel/io/mem.rs b/rust/kernel/io/mem.rs
new file mode 100644
index 00000000000000..3748df21f56f43
--- /dev/null
+++ b/rust/kernel/io/mem.rs
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic memory-mapped IO.
+
+use core::ops::Deref;
+use core::ptr::NonNull;
+
+use crate::device::Device;
+use crate::devres::Devres;
+use crate::io::resource::flags::IORESOURCE_MEM_NONPOSTED;
+use crate::io::resource::Region;
+use crate::io::resource::Resource;
+use crate::io::Io;
+use crate::io::IoRaw;
+use crate::prelude::*;
+use crate::types::declare_flags_type;
+
+/// An exclusive memory-mapped IO region.
+///
+/// # Invariants
+///
+/// - ExclusiveIoMem has exclusive access to the underlying `iomem`.
+pub struct ExclusiveIoMem<const SIZE: usize> {
+    /// The region abstraction. This represents exclusive access to the
+    /// range represented by the underlying `iomem`.
+    ///
+    /// It's placed first to ensure that the region is released before it is
+    /// unmapped as a result of the drop order.
+    #[allow(dead_code)]
+    region: Region,
+    /// The underlying `IoMem` instance.
+    iomem: IoMem<SIZE>,
+}
+
+impl<const SIZE: usize> ExclusiveIoMem<SIZE> {
+    /// Creates a new `ExclusiveIoMem` instance.
+    pub(crate) fn ioremap(resource: &Resource) -> Result<Self> {
+        let iomem = IoMem::ioremap(resource)?;
+
+        let start = resource.start();
+        let size = resource.size();
+        let name = resource.name();
+
+        let region = resource
+            .request_mem_region(start, size, name)
+            .ok_or(EBUSY)?;
+
+        let iomem = ExclusiveIoMem { iomem, region };
+
+        Ok(iomem)
+    }
+
+    pub(crate) fn new(resource: &Resource, device: &Device) -> Result<Devres<Self>> {
+        let iomem = Self::ioremap(resource)?;
+        let devres = Devres::new(device, iomem, GFP_KERNEL)?;
+
+        Ok(devres)
+    }
+}
+
+impl<const SIZE: usize> Deref for ExclusiveIoMem<SIZE> {
+    type Target = Io<SIZE>;
+
+    fn deref(&self) -> &Self::Target {
+        &*self.iomem
+    }
+}
+
+/// A generic memory-mapped IO region.
+///
+/// Accesses to the underlying region are checked either at compile time, if the
+/// region's size is known at that point, or at runtime otherwise.
+///
+/// # Invariants
+///
+/// `IoMem` always holds an `IoRaw` instance that holds a valid pointer to the
+/// start of the I/O memory mapped region.
+pub struct IoMem<const SIZE: usize = 0> {
+    io: IoRaw<SIZE>,
+}
+
+impl<const SIZE: usize> IoMem<SIZE> {
+    fn ioremap(resource: &Resource) -> Result<Self> {
+        let size = resource.size();
+        if size == 0 {
+            return Err(EINVAL);
+        }
+
+        let res_start = resource.start();
+
+        // SAFETY:
+        // - `res_start` and `size` are read from `resource`, which is valid per the type
+        //   invariant of `Resource`.
+        // - `size` is known not to be zero at this point.
+        let addr = if resource.flags().contains(IORESOURCE_MEM_NONPOSTED) {
+            unsafe { bindings::ioremap_np(res_start, size as usize) }
+        } else {
+            unsafe { bindings::ioremap(res_start, size as usize) }
+        };
+        if addr.is_null() {
+            return Err(ENOMEM);
+        }
+
+        let io = IoRaw::new(addr as usize, size as usize)?;
+        let io = IoMem { io };
+
+        Ok(io)
+    }
+
+    /// Creates a new `IoMem` instance.
+    pub(crate) fn new(resource: &Resource, device: &Device) -> Result<Devres<Self>> {
+        let io = Self::ioremap(resource)?;
+        let devres = Devres::new(device, io, GFP_KERNEL)?;
+
+        Ok(devres)
+    }
+}
+
+impl<const SIZE: usize> Drop for IoMem<SIZE> {
+    fn drop(&mut self) {
+        // SAFETY: Safe as by the invariant of `Io`.
+        unsafe { bindings::iounmap(self.io.addr() as *mut core::ffi::c_void) }
+    }
+}
+
+impl<const SIZE: usize> Deref for IoMem<SIZE> {
+    type Target = Io<SIZE>;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: Safe as by the invariant of `IoMem`.
+        unsafe { Io::from_raw(&self.io) }
+    }
+}
+
+declare_flags_type! {
+    /// Flags to be used when remapping memory.
+    ///
+    /// They can be combined with the operators `|`, `&`, and `!`.
+    pub struct MemFlags(crate::ffi::c_ulong) = 0;
+}
+
+impl MemFlags {
+    /// Matches the default mapping for System RAM on the architecture.
+    ///
+    /// This is usually a read-allocate write-back cache. Moreover, if this flag is specified and
+    /// the requested remap region is RAM, memremap() will bypass establishing a new mapping and
+    /// instead return a pointer into the direct map.
+    pub const WB: MemFlags = MemFlags(bindings::MEMREMAP_WB as _);
+
+    /// Establish a mapping whereby writes either bypass the cache or are written through to memory
+    /// and never exist in a cache-dirty state with respect to program visibility.
+    ///
+    /// Attempts to map System RAM with this mapping type will fail.
+    pub const WT: MemFlags = MemFlags(bindings::MEMREMAP_WT as _);
+    /// Establish a writecombine mapping, whereby writes may be coalesced together (e.g. in the
+    /// CPU's write buffers), but the mapping is otherwise uncached.
+    ///
+    /// Attempts to map System RAM with this mapping type will fail.
+    pub const WC: MemFlags = MemFlags(bindings::MEMREMAP_WC as _);
+
+    // Note: Skipping MEMREMAP_ENC/DEC since they are under-documented and have zero
+    // users outside of arch/x86.
+}
+
+/// Represents a non-MMIO memory block. This is like [`IoMem`], but for cases where it is known
+/// that the resource being mapped does not have I/O side effects.
+// Invariants:
+// `ptr` is a non-null and valid address of at least `size` bytes and was returned by a
+// `memremap` call.
+pub struct Mem {
+    ptr: NonNull<crate::ffi::c_void>,
+    size: usize,
+}
+
+impl Mem {
+    /// Tries to create a new instance of a memory block from a Resource.
+    ///
+    /// The resource described by `res` is mapped into the CPU's address space so that it can be
+    /// accessed directly. It is also consumed by this function so that it can't be mapped again
+    /// to a different address.
+    ///
+    /// If multiple caching flags are specified, the different mapping types will be attempted in
+    /// the order [`MemFlags::WB`], [`MemFlags::WT`], [`MemFlags::WC`].
+    ///
+    /// # Flags
+    ///
+    /// * [`MemFlags::WB`]: Matches the default mapping for System RAM on the architecture.
+    ///   This is usually a read-allocate write-back cache. Moreover, if this flag is specified and
+    ///   the requested remap region is RAM, memremap() will bypass establishing a new mapping and
+    ///   instead return a pointer into the direct map.
+    ///
+    /// * [`MemFlags::WT`]: Establish a mapping whereby writes either bypass the cache or are written
+    ///   through to memory and never exist in a cache-dirty state with respect to program visibility.
+    ///   Attempts to map System RAM with this mapping type will fail.
+    /// * [`MemFlags::WC`]: Establish a writecombine mapping, whereby writes may be coalesced together
+    ///   (e.g. in the CPU's write buffers), but the mapping is otherwise uncached. Attempts to map
+    ///   System RAM with this mapping type will fail.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that either (a) the resulting interface cannot be used to initiate DMA
+    /// operations, or (b) that DMA operations initiated via the returned interface use DMA handles
+    /// allocated through the `dma` module.
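+    ///
+    /// # Examples
+    ///
+    /// A sketch, assuming `res` describes plain RAM reserved for the driver and the
+    /// mapping is never used to initiate DMA:
+    ///
+    /// ```ignore
+    /// // SAFETY: The mapping is never used to initiate DMA operations.
+    /// let mem = unsafe { Mem::try_new(res, MemFlags::WB) }?;
+    /// let base: *mut u8 = mem.ptr();
+    /// ```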
+    pub unsafe fn try_new(res: Resource, flags: MemFlags) -> Result<Self> {
+        let size: usize = res.size().try_into()?;
+
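+        // SAFETY: `res` is a valid resource describing `size` bytes starting at `res.start()`,
+        // and `flags` only contains `MEMREMAP_*` values constructed in this module.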
+        let addr = unsafe { bindings::memremap(res.start(), size, flags.as_raw()) };
+        let ptr = NonNull::new(addr).ok_or(ENOMEM)?;
+        // INVARIANT: `ptr` is non-null and was returned by `memremap`, so it is valid.
+        Ok(Self { ptr, size })
+    }
+
+    /// Returns the base address of the memory mapping as a raw pointer.
+    ///
+    /// It is up to the caller to use this pointer safely, depending on the requirements of the
+    /// hardware backing this memory block.
+    pub fn ptr(&self) -> *mut u8 {
+        self.ptr.cast().as_ptr()
+    }
+
+    /// Returns the size of this mapped memory block.
+    pub fn size(&self) -> usize {
+        self.size
+    }
+}
+
+impl Drop for Mem {
+    fn drop(&mut self) {
+        // SAFETY: By the type invariant, `self.ptr` is a value returned by a previous successful
+        // call to `memremap`.
+        unsafe { bindings::memunmap(self.ptr.as_ptr()) };
+    }
+}
diff --git a/rust/kernel/io/resource.rs b/rust/kernel/io/resource.rs
new file mode 100644
index 00000000000000..63b032d7c9b747
--- /dev/null
+++ b/rust/kernel/io/resource.rs
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Abstraction for system resources.
+//!
+//! C header: [`include/linux/ioport.h`](srctree/include/linux/ioport.h)
+
+use core::ops::Deref;
+use core::ptr::NonNull;
+
+use crate::str::CStr;
+use crate::types::Opaque;
+
+type RequestFn = unsafe extern "C" fn(
+    ResourceSize,
+    ResourceSize,
+    *const kernel::ffi::c_char,
+) -> *mut bindings::resource;
+
+#[cfg(CONFIG_HAS_IOPORT)]
+/// Returns a reference to the global `ioport_resource` variable.
+pub fn ioport_resource() -> &'static Resource {
+    // SAFETY: `bindings::ioport_resource` has global lifetime and is of type Resource.
+    unsafe { Resource::from_ptr(core::ptr::addr_of_mut!(bindings::ioport_resource)) }
+}
+
+/// Returns a reference to the global `iomem_resource` variable.
+pub fn iomem_resource() -> &'static Resource {
+    // SAFETY: `bindings::iomem_resource` is a global static of type `struct resource`, so it is
+    // valid for the entire lifetime of the kernel.
+    unsafe { Resource::from_ptr(core::ptr::addr_of_mut!(bindings::iomem_resource)) }
+}
+
+/// Resource size type.
+///
+/// This is a type alias to `u64` when the config option `CONFIG_PHYS_ADDR_T_64BIT` is enabled.
+#[cfg(CONFIG_PHYS_ADDR_T_64BIT)]
+pub type ResourceSize = u64;
+
+/// Resource size type.
+///
+/// This is a type alias to `u32` when the config option `CONFIG_PHYS_ADDR_T_64BIT` is disabled.
+#[cfg(not(CONFIG_PHYS_ADDR_T_64BIT))]
+pub type ResourceSize = u32;
+
+/// A region allocated from a parent resource.
+///
+/// # Invariants
+/// - `self.0` points to a valid `bindings::resource` that was obtained through
+///   `__request_region`.
+pub struct Region(NonNull<bindings::resource>);
+
+impl Deref for Region {
+    type Target = Resource;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: Safe as per the invariant of `Region`
+        unsafe { Resource::from_ptr(self.0.as_ptr()) }
+    }
+}
+
+impl Drop for Region {
+    fn drop(&mut self) {
+        // SAFETY: Safe as per the invariant of `Region`
+        let res = unsafe { Resource::from_ptr(self.0.as_ptr()) };
+        let flags = res.flags();
+
+        let release_fn = if flags.contains(flags::IORESOURCE_MEM) {
+            bindings::release_mem_region
+        } else {
+            bindings::release_region
+        };
+
+        // SAFETY: Safe as per the invariant of `Region`
+        unsafe { release_fn(res.start(), res.size()) };
+    }
+}
+
+// SAFETY: `Region` only holds a pointer to a C `struct resource`, which is safe to be used from
+// any thread.
+unsafe impl Send for Region {}
+
+// SAFETY: `Region` only holds a pointer to a C `struct resource`, references to which are
+// safe to be used from any thread.
+unsafe impl Sync for Region {}
+
+/// A resource abstraction.
+///
+/// # Invariants
+///
+/// `Resource` is a transparent wrapper around a valid `bindings::resource`.
+#[repr(transparent)]
+pub struct Resource(Opaque<bindings::resource>);
+
+impl Resource {
+    /// Creates a reference to a [`Resource`] from a valid pointer.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that, for the duration of `'a`, the pointer points to a valid
+    /// `bindings::resource`.
+    ///
+    /// The caller must also ensure that the `Resource` is only accessed via the
+    /// returned reference for the duration of 'a.
+    pub(crate) const unsafe fn from_ptr<'a>(ptr: *mut bindings::resource) -> &'a Self {
+        // SAFETY: Self is a transparent wrapper around `Opaque<bindings::resource>`.
+        unsafe { &*ptr.cast() }
+    }
+
+    /// Creates a [`Resource`] by copying the pointed-to `bindings::resource`.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that `ptr` points to a valid `bindings::resource`.
+    pub(crate) unsafe fn new_from_ptr(ptr: *const bindings::resource) -> Self {
+        // SAFETY: By the function safety requirements, `ptr` is valid for reads, so its contents
+        // can be copied into a new `Opaque<bindings::resource>`.
+        Resource(unsafe { Opaque::new(*ptr) })
+    }
+
+    /// A helper to abstract the common pattern of requesting a region.
+    fn request_region_checked(
+        &self,
+        start: ResourceSize,
+        size: ResourceSize,
+        name: &CStr,
+        request_fn: RequestFn,
+    ) -> Option<Region> {
+        // SAFETY: `name` is a valid, NUL-terminated C string, and the start/size arguments are
+        // plain integers that the C request functions validate, returning NULL on failure.
+        let region = unsafe { request_fn(start, size, name.as_char_ptr()) };
+
+        Some(Region(NonNull::new(region)?))
+    }
+
+    /// Requests a resource region.
+    ///
+    /// Exclusive access will be given and the region will be marked as busy.
+    /// Further calls to `request_region` will return `None` if the region, or a
+    /// part of it, is already in use.
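+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the port range and name are made up for illustration.
+    ///
+    /// ```ignore
+    /// use kernel::c_str;
+    /// use kernel::io::resource::ioport_resource;
+    ///
+    /// let region = ioport_resource()
+    ///     .request_region(0x60, 0x10, c_str!("my_driver"))
+    ///     .ok_or(EBUSY)?;
+    /// // The region is released again when `region` is dropped.
+    /// ```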
+    pub fn request_region(
+        &self,
+        start: ResourceSize,
+        size: ResourceSize,
+        name: &CStr,
+    ) -> Option<Region> {
+        self.request_region_checked(start, size, name, bindings::request_region)
+    }
+
+    /// Requests a resource region with the IORESOURCE_MUXED flag.
+    ///
+    /// The region will be marked as busy. Unlike [`Self::request_region`], requests that
+    /// conflict with another muxed user wait for the region to become free instead of failing;
+    /// conflicts with non-muxed users still return `None`.
+    pub fn request_muxed_region(
+        &self,
+        start: ResourceSize,
+        size: ResourceSize,
+        name: &CStr,
+    ) -> Option<Region> {
+        self.request_region_checked(start, size, name, bindings::request_muxed_region)
+    }
+
+    /// Requests a memory resource region, i.e. a resource of type
+    /// [`flags::IORESOURCE_MEM`].
+    ///
+    /// Exclusive access will be given and the region will be marked as busy.
+    /// Further calls to `request_region` will return `None` if the region, or a
+    /// part of it, is already in use.
+    pub fn request_mem_region(
+        &self,
+        start: ResourceSize,
+        size: ResourceSize,
+        name: &CStr,
+    ) -> Option<Region> {
+        self.request_region_checked(start, size, name, bindings::request_mem_region)
+    }
+
+    /// Returns the size of the resource.
+    pub fn size(&self) -> ResourceSize {
+        let inner = self.0.get();
+        // SAFETY: safe as per the invariants of `Resource`
+        unsafe { bindings::resource_size(inner) }
+    }
+
+    /// Returns the start address of the resource.
+    pub fn start(&self) -> ResourceSize {
+        let inner = self.0.get();
+        // SAFETY: safe as per the invariants of `Resource`
+        unsafe { (*inner).start }
+    }
+
+    /// Returns the name of the resource.
+    pub fn name(&self) -> &CStr {
+        let inner = self.0.get();
+        // SAFETY: safe as per the invariants of `Resource`
+        unsafe { CStr::from_char_ptr((*inner).name) }
+    }
+
+    /// Returns the flags associated with the resource.
+    pub fn flags(&self) -> Flags {
+        let inner = self.0.get();
+        // SAFETY: safe as per the invariants of `Resource`
+        let flags = unsafe { (*inner).flags };
+
+        Flags(flags)
+    }
+}
+
+// SAFETY: `Resource` only holds a pointer to a C `struct resource`, which is safe to be used from
+// any thread.
+unsafe impl Send for Resource {}
+
+// SAFETY: `Resource` only holds a pointer to a C `struct resource`, references to which are
+// safe to be used from any thread.
+unsafe impl Sync for Resource {}
+
+/// Resource flags as stored in the C `struct resource::flags` field.
+///
+/// They can be combined with the operators `|`, `&`, and `!`.
+///
+/// Values can be used from the [`flags`] module.
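+///
+/// # Examples
+///
+/// A small sketch of combining and testing flags:
+///
+/// ```ignore
+/// use kernel::io::resource::flags;
+///
+/// let f = flags::IORESOURCE_MEM | flags::IORESOURCE_MUXED;
+/// assert!(f.contains(flags::IORESOURCE_MEM));
+/// assert!(!f.contains(flags::IORESOURCE_IO));
+/// ```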
+#[derive(Clone, Copy, PartialEq)]
+pub struct Flags(usize);
+
+impl Flags {
+    /// Check whether `flags` is contained in `self`.
+    pub fn contains(self, flags: Flags) -> bool {
+        (self & flags) == flags
+    }
+}
+
+impl core::ops::BitOr for Flags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl core::ops::BitAnd for Flags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl core::ops::Not for Flags {
+    type Output = Self;
+    fn not(self) -> Self::Output {
+        Self(!self.0)
+    }
+}
+
+/// Resource flags as stored in the `struct resource::flags` field.
+pub mod flags {
+    use super::Flags;
+
+    /// PCI/ISA I/O ports
+    pub const IORESOURCE_IO: Flags = Flags(bindings::IORESOURCE_IO as usize);
+
+    /// Resource is software muxed.
+    pub const IORESOURCE_MUXED: Flags = Flags(bindings::IORESOURCE_MUXED as usize);
+
+    /// Resource represents a memory region.
+    pub const IORESOURCE_MEM: Flags = Flags(bindings::IORESOURCE_MEM as usize);
+
+    /// Resource represents a memory region that must be mapped with non-posted writes.
+    pub const IORESOURCE_MEM_NONPOSTED: Flags = Flags(bindings::IORESOURCE_MEM_NONPOSTED as usize);
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 7697c60b2d1a67..2f9a385f092207 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -13,17 +13,25 @@
 
 #![no_std]
 #![feature(arbitrary_self_types)]
+#![feature(associated_type_defaults)]
+#![feature(cfg_version)]
 #![cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, feature(derive_coerce_pointee))]
+#![cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, feature(pin_coerce_unsized_trait))]
 #![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(coerce_unsized))]
 #![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(dispatch_from_dyn))]
 #![cfg_attr(not(CONFIG_RUSTC_HAS_COERCE_POINTEE), feature(unsize))]
+#![feature(duration_constants)]
 #![feature(inline_const)]
 #![feature(lint_reasons)]
+#![feature(ptr_sub_ptr)]
+#![feature(sized_type_properties)]
+#![feature(slice_range)]
 // Stable in Rust 1.83
 #![feature(const_maybe_uninit_as_mut_ptr)]
 #![feature(const_mut_refs)]
 #![feature(const_ptr_write)]
 #![feature(const_refs_to_cell)]
+#![warn(clippy::undocumented_unsafe_blocks)]
 
 // Ensure conditional compilation based on the kernel configuration works;
 // otherwise we may silently break things like initcall handling.
@@ -35,21 +43,31 @@ extern crate self as kernel;
 
 pub use ffi;
 
+pub mod addr;
 pub mod alloc;
 #[cfg(CONFIG_BLOCK)]
 pub mod block;
 #[doc(hidden)]
 pub mod build_assert;
 pub mod cred;
+pub mod delay;
+pub mod devcoredump;
 pub mod device;
 pub mod device_id;
 pub mod devres;
+pub mod dma;
+#[cfg(CONFIG_DMA_SHARED_BUFFER)]
+pub mod dma_fence;
 pub mod driver;
+#[cfg(CONFIG_DRM = "y")]
+pub mod drm;
 pub mod error;
 pub mod faux;
 #[cfg(CONFIG_RUST_FW_LOADER_ABSTRACTIONS)]
 pub mod firmware;
 pub mod fs;
+#[cfg(CONFIG_IIO)]
+pub mod iio;
 pub mod init;
 pub mod io;
 pub mod ioctl;
@@ -58,6 +76,7 @@ pub mod jump_label;
 pub mod kunit;
 pub mod list;
 pub mod miscdevice;
+pub mod module_param;
 #[cfg(CONFIG_NET)]
 pub mod net;
 pub mod of;
@@ -72,7 +91,9 @@ pub mod rbtree;
 pub mod revocable;
 pub mod security;
 pub mod seq_file;
+pub mod siphash;
 pub mod sizes;
+pub mod soc;
 mod static_assert;
 #[doc(hidden)]
 pub mod std_vendor;
@@ -85,12 +106,18 @@ pub mod transmute;
 pub mod types;
 pub mod uaccess;
 pub mod workqueue;
+pub mod xarray;
 
 #[doc(hidden)]
 pub use bindings;
 pub use macros;
 pub use uapi;
 
+pub(crate) mod private {
+    #[allow(unreachable_pub)]
+    pub trait Sealed {}
+}
+
 /// Prefix to appear before log messages printed from within the `kernel` crate.
 const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
 
diff --git a/rust/kernel/module_param.rs b/rust/kernel/module_param.rs
new file mode 100644
index 00000000000000..0f153aa64dae07
--- /dev/null
+++ b/rust/kernel/module_param.rs
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for module parameters.
+//!
+//! C header: [`include/linux/moduleparam.h`](srctree/include/linux/moduleparam.h)
+
+use crate::prelude::*;
+use crate::str::BStr;
+
+/// Newtype to make `bindings::kernel_param` [`Sync`].
+#[repr(transparent)]
+#[doc(hidden)]
+pub struct RacyKernelParam(pub ::kernel::bindings::kernel_param);
+
+// SAFETY: C kernel handles serializing access to this type. We never access it
+// from Rust module.
+unsafe impl Sync for RacyKernelParam {}
+
+/// Types that can be used for module parameters.
+pub trait ModuleParam: Sized {
+    /// The [`ModuleParam`] will be used by the kernel module through this type.
+    ///
+    /// This may differ from `Self` if, for example, `Self` needs to track
+    /// ownership without exposing it or allocate extra space for other possible
+    /// parameter values.
+    // This is required to support string parameters in the future.
+    type Value: ?Sized;
+
+    /// Parse a parameter argument into the parameter value.
+    ///
+    /// `Err(_)` should be returned when parsing of the argument fails.
+    ///
+    /// Parameters passed at boot time will be set before [`kmalloc`] is
+    /// available (even if the module is loaded at a later time). However, in
+    /// this case, the argument buffer will be valid for the entire lifetime of
+    /// the kernel. So implementations of this method which need to allocate
+    /// should first check that the allocator is available (with
+    /// [`crate::bindings::slab_is_available`]) and when it is not available
+    /// provide an alternative implementation which doesn't allocate. In cases
+    /// where the allocator is not available it is safe to save references to
+    /// `arg` in `Self`, but in other cases a copy should be made.
+    ///
+    /// [`kmalloc`]: srctree/include/linux/slab.h
+    fn try_from_param_arg(arg: &'static BStr) -> Result<Self>;
+}
+
+/// Set the module parameter from a string.
+///
+/// Used to set the parameter value at kernel initialization, when loading
+/// the module or when set through `sysfs`.
+///
+/// See `struct kernel_param_ops.set`.
+///
+/// # Safety
+///
+/// - If `val` is non-null then it must point to a valid null-terminated string.
+///   The `arg` field of `param` must be an instance of `T`.
+/// - `param.arg` must be a valid pointer to an initialized `T`, as set up by
+///   the [`module!`] macro.
+///
+/// # Invariants
+///
+/// Currently, we only support read-only parameters that are not readable
+/// from `sysfs`. Thus, this function is only called at kernel
+/// initialization time, or at module load time, and we have exclusive
+/// access to the parameter for the duration of the function.
+///
+/// [`module!`]: macros::module
+unsafe extern "C" fn set_param<T>(
+    val: *const kernel::ffi::c_char,
+    param: *const crate::bindings::kernel_param,
+) -> core::ffi::c_int
+where
+    T: ModuleParam,
+{
+    // NOTE: If we start supporting arguments without values, val _is_ allowed
+    // to be null here.
+    if val.is_null() {
+        // TODO: Use `pr_warn_once!` when available.
+        crate::pr_warn!("Null pointer passed to `module_param::set_param`");
+        return EINVAL.to_errno();
+    }
+
+    // SAFETY: By function safety requirement, val is non-null and
+    // null-terminated. By C API contract, `val` is live and valid for reads
+    // for the duration of this function.
+    let arg = unsafe { CStr::from_char_ptr(val) };
+
+    crate::error::from_result(|| {
+        let new_value = T::try_from_param_arg(arg)?;
+
+        // SAFETY: `param` is guaranteed to be valid by C API contract
+        // and `arg` is guaranteed to point to an instance of `T`.
+        let old_value = unsafe { (*param).__bindgen_anon_1.arg as *mut T };
+
+        // SAFETY: `old_value` is valid for writes, as we have exclusive
+        // access. `old_value` is pointing to an initialized static, and
+        // so it is properly initialized.
+        unsafe { core::ptr::replace(old_value, new_value) };
+        Ok(0)
+    })
+}
+
+/// Drop the parameter.
+///
+/// Called when unloading a module.
+///
+/// # Safety
+///
+/// The `arg` field of `param` must be an initialized instance of `T`.
+unsafe extern "C" fn free<T>(arg: *mut core::ffi::c_void)
+where
+    T: ModuleParam,
+{
+    // SAFETY: By function safety requirement, `arg` is an initialized
+    // instance of `T`. By C API contract, `arg` will not be used after
+    // this function returns.
+    unsafe { core::ptr::drop_in_place(arg as *mut T) };
+}
+
+macro_rules! impl_int_module_param {
+    ($ty:ident) => {
+        impl ModuleParam for $ty {
+            type Value = $ty;
+
+            fn try_from_param_arg(arg: &'static BStr) -> Result<Self> {
+                <$ty as crate::str::parse_int::ParseInt>::from_str(arg)
+            }
+        }
+    };
+}
+
+impl_int_module_param!(i8);
+impl_int_module_param!(u8);
+impl_int_module_param!(i16);
+impl_int_module_param!(u16);
+impl_int_module_param!(i32);
+impl_int_module_param!(u32);
+impl_int_module_param!(i64);
+impl_int_module_param!(u64);
+impl_int_module_param!(isize);
+impl_int_module_param!(usize);
+
+/// A wrapper for kernel parameters.
+///
+/// This type is instantiated by the [`module!`] macro when module parameters are
+/// defined. You should never need to instantiate this type directly.
+///
+/// Note: This type is `pub` because it is used by module crates to access
+/// parameter values.
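+///
+/// # Examples
+///
+/// A sketch of how a module might declare and read a parameter. The exact `module!` parameter
+/// syntax and the generated `module_parameters` module name are assumptions for illustration.
+///
+/// ```ignore
+/// module! {
+///     type: MyModule,
+///     name: "my_module",
+///     authors: ["Someone"],
+///     description: "Example module",
+///     license: "GPL",
+///     params: {
+///         threshold: u32 {
+///             default: 16,
+///             description: "An example threshold",
+///         },
+///     },
+/// }
+///
+/// fn threshold() -> u32 {
+///     // The `module!` macro generates a `ModuleParamAccess<u32>` for the parameter.
+///     *module_parameters::threshold.get()
+/// }
+/// ```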
+#[repr(transparent)]
+pub struct ModuleParamAccess<T> {
+    data: core::cell::UnsafeCell<T>,
+}
+
+// SAFETY: We only create shared references to the contents of this container,
+// so if `T` is `Sync`, so is `ModuleParamAccess`.
+unsafe impl<T: Sync> Sync for ModuleParamAccess<T> {}
+
+impl<T> ModuleParamAccess<T> {
+    #[doc(hidden)]
+    pub const fn new(value: T) -> Self {
+        Self {
+            data: core::cell::UnsafeCell::new(value),
+        }
+    }
+
+    /// Get a shared reference to the parameter value.
+    // Note: When sysfs access to parameters is enabled, a held lock guard will
+    // have to be passed in here.
+    pub fn get(&self) -> &T {
+        // SAFETY: As we only support read only parameters with no sysfs
+        // exposure, the kernel will not touch the parameter data after module
+        // initialization.
+        unsafe { &*self.data.get() }
+    }
+
+    /// Get a mutable pointer to the parameter value.
+    pub const fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+#[doc(hidden)]
+#[macro_export]
+/// Generate a static [`kernel_param_ops`](srctree/include/linux/moduleparam.h) struct.
+///
+/// # Examples
+///
+/// ```ignore
+/// make_param_ops!(
+///     /// Documentation for new param ops.
+///     PARAM_OPS_MYTYPE, // Name for the static.
+///     MyType // A type which implements [`ModuleParam`].
+/// );
+/// ```
+macro_rules! make_param_ops {
+    ($ops:ident, $ty:ty) => {
+        ///
+        /// Static [`kernel_param_ops`](srctree/include/linux/moduleparam.h)
+        /// struct generated by `make_param_ops`
+        #[doc = concat!("for [`", stringify!($ty), "`].")]
+        pub static $ops: $crate::bindings::kernel_param_ops = $crate::bindings::kernel_param_ops {
+            flags: 0,
+            set: Some(set_param::<$ty>),
+            get: None,
+            free: Some(free::<$ty>),
+        };
+    };
+}
+
+make_param_ops!(PARAM_OPS_I8, i8);
+make_param_ops!(PARAM_OPS_U8, u8);
+make_param_ops!(PARAM_OPS_I16, i16);
+make_param_ops!(PARAM_OPS_U16, u16);
+make_param_ops!(PARAM_OPS_I32, i32);
+make_param_ops!(PARAM_OPS_U32, u32);
+make_param_ops!(PARAM_OPS_I64, i64);
+make_param_ops!(PARAM_OPS_U64, u64);
+make_param_ops!(PARAM_OPS_ISIZE, isize);
+make_param_ops!(PARAM_OPS_USIZE, usize);
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index bb654a28dab36f..a59469c785e339 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -790,7 +790,7 @@ impl DeviceMask {
 ///         DeviceId::new_with_driver::<PhySample>()
 ///     ],
 ///     name: "rust_sample_phy",
-///     author: "Rust for Linux Contributors",
+///     authors: ["Rust for Linux Contributors"],
 ///     description: "Rust sample PHYs driver",
 ///     license: "GPL",
 /// }
@@ -819,7 +819,7 @@ impl DeviceMask {
 /// module! {
 ///     type: Module,
 ///     name: "rust_sample_phy",
-///     author: "Rust for Linux Contributors",
+///     authors: ["Rust for Linux Contributors"],
 ///     description: "Rust sample PHYs driver",
 ///     license: "GPL",
 /// }
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
index 04f2d8ef29cb95..f3febb52d610ee 100644
--- a/rust/kernel/of.rs
+++ b/rust/kernel/of.rs
@@ -2,7 +2,16 @@
 
 //! Device Tree / Open Firmware abstractions.
 
-use crate::{bindings, device_id::RawDeviceId, prelude::*};
+use crate::{
+    bindings, device_id::RawDeviceId, error::to_result, io::resource::Resource, prelude::*,
+};
+// Note: Most OF functions turn into inline dummies with CONFIG_OF(_*) disabled.
+// We have to either add config conditionals to helpers.c or here; let's do it
+// here for now. In the future, once bindgen can auto-generate static inline
+// helpers, this can go away if desired.
+
+use core::marker::PhantomData;
+use core::num::NonZeroU32;
 
 /// IdTable type for OF drivers.
 pub type IdTable<T> = &'static dyn kernel::device_id::IdTable<DeviceId, T>;
@@ -45,6 +54,608 @@ impl DeviceId {
     }
 }
 
+/// Type alias for an OF phandle.
+pub type PHandle = bindings::phandle;
+
+/// An OF device tree node.
+///
+/// # Invariants
+///
+/// `raw_node` points to a valid OF node, and we hold a reference to it.
+pub struct Node {
+    raw_node: *mut bindings::device_node,
+}
+
+#[allow(dead_code)]
+impl Node {
+    /// Creates a `Node` from a raw C pointer. If the pointer is NULL, returns `None`.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must be owned (the caller gives up its reference to the node).
+    pub(crate) unsafe fn from_raw(raw_node: *mut bindings::device_node) -> Option<Node> {
+        if raw_node.is_null() {
+            None
+        } else {
+            // INVARIANT: `raw_node` is valid per the above contract, and non-null per the
+            // above check.
+            Some(Node { raw_node })
+        }
+    }
+
+    /// Creates a `Node` from a raw C pointer, taking an additional reference to the node. If the
+    /// pointer is NULL, returns `None`.
+    ///
+    /// # Safety
+    ///
+    /// The pointer must be borrowed (the caller retains its reference), and it must be valid for
+    /// the duration of the call.
+    pub(crate) unsafe fn get_from_raw(raw_node: *mut bindings::device_node) -> Option<Node> {
+        // SAFETY: `raw_node` is valid or NULL per the above contract. `of_node_get` can handle
+        // NULL.
+        unsafe {
+            #[cfg(CONFIG_OF_DYNAMIC)]
+            bindings::of_node_get(raw_node);
+            Node::from_raw(raw_node)
+        }
+    }
+
+    /// Returns a reference to the underlying C `device_node` structure.
+    pub(crate) fn node(&self) -> &bindings::device_node {
+        // SAFETY: `raw_node` is valid per the type invariant.
+        unsafe { &*self.raw_node }
+    }
+
+    /// Returns the raw pointer to the underlying C `device_node` structure.
+    ///
+    /// # Safety
+    ///
+    /// The returned pointer must only be used while `self` (or another reference to the same
+    /// node) keeps the node alive.
+    pub unsafe fn as_raw(&self) -> *mut bindings::device_node {
+        self.raw_node
+    }
+
+    /// Returns the name of the node.
+    pub fn name(&self) -> &CStr {
+        // SAFETY: The lifetime of the `CStr` is the same as the lifetime of this `Node`.
+        unsafe { CStr::from_char_ptr(self.node().name) }
+    }
+
+    /// Returns the phandle for this node.
+    pub fn phandle(&self) -> PHandle {
+        self.node().phandle
+    }
+
+    /// Returns the full name (with address) for this node.
+    pub fn full_name(&self) -> &CStr {
+        // SAFETY: The lifetime of the `CStr` is the same as the lifetime of this `Node`.
+        unsafe { CStr::from_char_ptr(self.node().full_name) }
+    }
+
+    /// Returns `true` if the node is the root node.
+    pub fn is_root(&self) -> bool {
+        #[cfg(not(CONFIG_OF))]
+        {
+            false
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant
+        unsafe {
+            bindings::of_node_is_root(self.raw_node)
+        }
+    }
+
+    /// Returns the parent node, if any.
+    pub fn parent(&self) -> Option<Node> {
+        #[cfg(not(CONFIG_OF))]
+        {
+            None
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant, and `of_get_parent()` takes a
+        // new reference to the parent (or returns NULL).
+        unsafe {
+            Node::from_raw(bindings::of_get_parent(self.raw_node))
+        }
+    }
+
+    /// Returns an iterator over the node's children.
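+    ///
+    /// # Examples
+    ///
+    /// A sketch of walking the children of a node (`node` is assumed to be a `&Node`):
+    ///
+    /// ```ignore
+    /// for child in node.children() {
+    ///     pr_info!("child: {}\n", child.full_name());
+    /// }
+    /// ```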
+    // TODO: use type alias for return type once type_alias_impl_trait is stable
+    pub fn children(
+        &self,
+    ) -> NodeIterator<'_, impl Fn(*mut bindings::device_node) -> *mut bindings::device_node + '_>
+    {
+        #[cfg(not(CONFIG_OF))]
+        {
+            NodeIterator::new(|_prev| core::ptr::null_mut())
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant, and the lifetime of the `NodeIterator`
+        // does not exceed the lifetime of the `Node` so it can borrow its reference.
+        NodeIterator::new(|prev| unsafe { bindings::of_get_next_child(self.raw_node, prev) })
+    }
+
+    /// Find a child by its name and return it, or None if not found.
+    #[allow(unused_variables)]
+    pub fn get_child_by_name(&self, name: &CStr) -> Option<Node> {
+        #[cfg(not(CONFIG_OF))]
+        {
+            None
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant.
+        unsafe {
+            Node::from_raw(bindings::of_get_child_by_name(
+                self.raw_node,
+                name.as_char_ptr(),
+            ))
+        }
+    }
+
+    /// Checks whether the node is compatible with the given compatible string.
+    ///
+    /// Returns `None` if there is no match, or `Some(score)` if there is, with higher scores
+    /// denoting more specific compatible matches.
+    #[allow(unused_variables)]
+    pub fn is_compatible(&self, compatible: &CStr) -> Option<NonZeroU32> {
+        #[cfg(not(CONFIG_OF))]
+        let ret = 0;
+        #[cfg(CONFIG_OF)]
+        let ret =
+            // SAFETY: `raw_node` is valid per the type invariant.
+            unsafe { bindings::of_device_is_compatible(self.raw_node, compatible.as_char_ptr()) };
+
+        NonZeroU32::new(ret.try_into().ok()?)
+    }
+
+    /// Parse a phandle property and return the Node referenced at a given index, if any.
+    ///
+    /// Used only for phandle properties with no arguments.
+    #[allow(unused_variables)]
+    pub fn parse_phandle(&self, name: &CStr, index: usize) -> Option<Node> {
+        #[cfg(not(CONFIG_OF))]
+        {
+            None
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant. `of_parse_phandle` returns an
+        // owned reference.
+        unsafe {
+            Node::from_raw(bindings::of_parse_phandle(
+                self.raw_node,
+                name.as_char_ptr(),
+                index.try_into().ok()?,
+            ))
+        }
+    }
+
+    /// Parse a phandle property and return the Node referenced by a given name, if any.
+    ///
+    /// The index of `name` within the `propnames` string list property selects which phandle in
+    /// `prop` is resolved. Used only for phandle properties with no arguments.
+    #[allow(unused_variables)]
+    pub fn parse_phandle_by_name(
+        &self,
+        prop: &CStr,
+        propnames: &CStr,
+        name: &CStr,
+    ) -> Option<Node> {
+        #[cfg(not(CONFIG_OF))]
+        {
+            None
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant. `of_parse_phandle` returns an
+        // owned reference.
+        unsafe {
+            let index = bindings::of_property_match_string(
+                self.raw_node,
+                propnames.as_char_ptr(),
+                name.as_char_ptr(),
+            );
+            if index < 0 {
+                return None;
+            };
+
+            Node::from_raw(bindings::of_parse_phandle(
+                self.raw_node,
+                prop.as_char_ptr(),
+                index.try_into().ok()?,
+            ))
+        }
+    }
+
+    /// Translates the device tree address at `index` for this node and returns it as a
+    /// [`Resource`].
+    pub fn address_as_resource(&self, index: usize) -> Result<Resource> {
+        #[cfg(not(CONFIG_OF))]
+        {
+            Err(EINVAL)
+        }
+        #[cfg(CONFIG_OF)]
+        {
+            let mut res = core::mem::MaybeUninit::<bindings::resource>::uninit();
+            // SAFETY: `raw_node` is valid per the type invariant, `index` is a plain integer, and
+            // `res` points to a live buffer that `of_address_to_resource()` may write into.
+            let ret = unsafe {
+                bindings::of_address_to_resource(self.raw_node, index.try_into()?, res.as_mut_ptr())
+            };
+            to_result(ret)?;
+            // SAFETY: We have checked the return value above, so the resource must be initialized now
+            let res = unsafe { res.assume_init() };
+
+            // SAFETY: `res` is a valid, initialized `bindings::resource` local, so it can be
+            // copied into a `Resource`.
+            Ok(unsafe { Resource::new_from_ptr(&res) })
+        }
+    }
+
+    #[allow(unused_variables)]
+    /// Check whether node property exists.
+    pub fn property_present(&self, propname: &CStr) -> bool {
+        #[cfg(not(CONFIG_OF))]
+        {
+            false
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant.
+        unsafe {
+            bool::from(bindings::of_property_present(
+                self.raw_node,
+                propname.as_char_ptr(),
+            ))
+        }
+    }
+
+    #[allow(unused_variables)]
+    /// Look up a node property by name, returning a `Property` object if found.
+    pub fn find_property(&self, propname: &CStr) -> Option<Property<'_>> {
+        #[cfg(not(CONFIG_OF))]
+        {
+            None
+        }
+        #[cfg(CONFIG_OF)]
+        // SAFETY: `raw_node` is valid per the type invariant. The property structure
+        // returned borrows the reference to the owning node, and so has the same
+        // lifetime.
+        unsafe {
+            Property::from_raw(bindings::of_find_property(
+                self.raw_node,
+                propname.as_char_ptr(),
+                core::ptr::null_mut(),
+            ))
+        }
+    }
+
+    /// Look up a mandatory node property by name, and decode it into a value type.
+    ///
+    /// Returns `Err(ENOENT)` if the property is not found.
+    ///
+    /// The type `T` must implement `TryFrom<Property<'_>>`.
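+    ///
+    /// # Examples
+    ///
+    /// A sketch of decoding properties from a node `node`; the property names are illustrative.
+    ///
+    /// ```ignore
+    /// // A single big-endian u32 cell.
+    /// let cells: u32 = node.get_property(c_str!("#size-cells"))?;
+    /// // A list of u32 cells, decoded into a vector.
+    /// let reg: KVec<u32> = node.get_property(c_str!("reg"))?;
+    /// ```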
+    pub fn get_property<'a, T: TryFrom<Property<'a>>>(&'a self, propname: &CStr) -> Result<T>
+    where
+        crate::error::Error: From<<T as TryFrom<Property<'a>>>::Error>,
+    {
+        Ok(self.find_property(propname).ok_or(ENOENT)?.try_into()?)
+    }
+
+    /// Look up an optional node property by name, and decode it into a value type.
+    ///
+    /// Returns `Ok(None)` if the property is not found.
+    ///
+    /// The type `T` must implement `TryFrom<Property<'_>>`.
+    pub fn get_opt_property<'a, T: TryFrom<Property<'a>>>(
+        &'a self,
+        propname: &CStr,
+    ) -> Result<Option<T>>
+    where
+        crate::error::Error: From<<T as TryFrom<Property<'a>>>::Error>,
+    {
+        self.find_property(propname)
+            .map_or(Ok(None), |p| Ok(Some(p.try_into()?)))
+    }
+}
+
+/// A property attached to a device tree `Node`.
+///
+/// # Invariants
+///
+/// `raw` must be valid and point to a property that outlives the lifetime of this object.
+#[derive(Copy, Clone)]
+pub struct Property<'a> {
+    raw: *mut bindings::property,
+    _p: PhantomData<&'a Node>,
+}
+
+impl<'a> Property<'a> {
+    #[cfg(CONFIG_OF)]
+    /// Create a `Property` object from a raw C pointer. Returns `None` if NULL.
+    ///
+    /// The passed pointer must be valid and outlive the lifetime argument, or NULL.
+    unsafe fn from_raw(raw: *mut bindings::property) -> Option<Property<'a>> {
+        if raw.is_null() {
+            None
+        } else {
+            Some(Property {
+                raw,
+                _p: PhantomData,
+            })
+        }
+    }
+
+    /// Returns the name of the property as a `CStr`.
+    pub fn name(&self) -> &CStr {
+        // SAFETY: `raw` is valid per the type invariant, and the lifetime of the `CStr` does not
+        // outlive it.
+        unsafe { CStr::from_char_ptr((*self.raw).name) }
+    }
+
+    /// Returns the value of the property as a `&[u8]`.
+    pub fn value(&self) -> &[u8] {
+        // SAFETY: `raw` is valid per the type invariant, and the lifetime of the slice does not
+        // outlive it.
+        unsafe { core::slice::from_raw_parts((*self.raw).value as *const u8, self.len()) }
+    }
+
+    /// Returns the length of the property in bytes.
+    pub fn len(&self) -> usize {
+        // SAFETY: `raw` is valid per the type invariant.
+        unsafe { (*self.raw).length.try_into().unwrap() }
+    }
+
+    /// Returns true if the property is empty (zero-length), which typically represents boolean true.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Copies a device-tree property into a slice.
+    ///
+    /// Enforces that the length of the property exactly matches the length of the slice.
+    pub fn copy_to_slice<T: PropertyUnit>(&self, target: &mut [T]) -> Result<()> {
+        if self.len() % T::UNIT_SIZE != 0 {
+            return Err(EINVAL);
+        }
+
+        if self.len() / T::UNIT_SIZE != target.len() {
+            return Err(EINVAL);
+        }
+
+        let val = self.value();
+        for (i, off) in (0..self.len()).step_by(T::UNIT_SIZE).enumerate() {
+            target[i] = T::from_bytes(&val[off..off + T::UNIT_SIZE])?
+        }
+        Ok(())
+    }
+}
+
+/// A trait that represents a value decodable from a property with a fixed unit size.
+///
+/// This allows us to auto-derive property decode implementations for `Vec<T: PropertyUnit>`.
+pub trait PropertyUnit: Sized {
+    /// The size in bytes of a single data unit.
+    const UNIT_SIZE: usize;
+
+    /// Decode this data unit from a byte slice. The passed slice will have a length of `UNIT_SIZE`.
+    fn from_bytes(data: &[u8]) -> Result<Self>;
+}
+
+// Note: A blanket `impl<T: PropertyUnit> TryFrom<Property<'a>> for T` does not work, because it
+// would conflict with the standard library's blanket `TryFrom` implementation. The integer impls
+// are therefore generated individually by the `prop_int_type!` macro below.
+// impl<'a, T: PropertyUnit> TryFrom<Property<'a>> for T {
+//     type Error = Error;
+//
+//     fn try_from(p: Property<'_>) -> core::result::Result<T, Self::Error> {
+//         if p.value().len() != T::UNIT_SIZE {
+//             Err(EINVAL)
+//         } else {
+//             Ok(T::from_bytes(p.value())?)
+//         }
+//     }
+// }
+
+impl<'a, T: PropertyUnit> TryFrom<Property<'a>> for KVec<T> {
+    type Error = Error;
+
+    fn try_from(p: Property<'_>) -> core::result::Result<KVec<T>, Self::Error> {
+        if p.len() % T::UNIT_SIZE != 0 {
+            return Err(EINVAL);
+        }
+
+        let mut v = Vec::new();
+        let val = p.value();
+        for off in (0..p.len()).step_by(T::UNIT_SIZE) {
+            v.push(T::from_bytes(&val[off..off + T::UNIT_SIZE])?, GFP_KERNEL)?;
+        }
+        Ok(v)
+    }
+}
+
+impl<'a, T: PropertyUnit> TryFrom<Property<'a>> for KVVec<T> {
+    type Error = Error;
+
+    fn try_from(p: Property<'_>) -> core::result::Result<KVVec<T>, Self::Error> {
+        if p.len() % T::UNIT_SIZE != 0 {
+            return Err(EINVAL);
+        }
+
+        let mut v = Vec::new();
+        let val = p.value();
+        for off in (0..p.len()).step_by(T::UNIT_SIZE) {
+            v.push(T::from_bytes(&val[off..off + T::UNIT_SIZE])?, GFP_KERNEL)?;
+        }
+        Ok(v)
+    }
+}
+
+macro_rules! prop_int_type (
+    ($type:ty) => {
+        impl<'a> TryFrom<Property<'a>> for $type {
+            type Error = Error;
+
+            fn try_from(p: Property<'_>) -> core::result::Result<$type, Self::Error> {
+                Ok(<$type>::from_be_bytes(p.value().try_into().or(Err(EINVAL))?))
+            }
+        }
+
+        impl PropertyUnit for $type {
+            const UNIT_SIZE: usize = <$type>::BITS as usize / 8;
+
+            fn from_bytes(data: &[u8]) -> Result<Self> {
+                Ok(<$type>::from_be_bytes(data.try_into().or(Err(EINVAL))?))
+            }
+        }
+    }
+);
+
+prop_int_type!(u8);
+prop_int_type!(u16);
+prop_int_type!(u32);
+prop_int_type!(u64);
+prop_int_type!(i8);
+prop_int_type!(i16);
+prop_int_type!(i32);
+prop_int_type!(i64);
+
+/// An iterator across a collection of Node objects.
+///
+/// # Invariants
+///
+/// `cur` must be NULL or a valid, owned node reference. If NULL, it represents either the first
+/// or last position of the iterator.
+///
+/// If `done` is true, `cur` must be NULL.
+///
+/// `fn_next` must be a callback that iterates from one node to the next, and it must not capture
+/// values that exceed the lifetime of the iterator. It must take and return owned references.
+pub struct NodeIterator<'a, T>
+where
+    T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+    cur: *mut bindings::device_node,
+    done: bool,
+    fn_next: T,
+    _p: PhantomData<&'a T>,
+}
+
+impl<'a, T> NodeIterator<'a, T>
+where
+    T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+    fn new(next: T) -> NodeIterator<'a, T> {
+        // INVARIANT: `cur` is initialized to NULL to represent the initial state.
+        NodeIterator {
+            cur: core::ptr::null_mut(),
+            done: false,
+            fn_next: next,
+            _p: PhantomData,
+        }
+    }
+}
+
+impl<'a, T> Iterator for NodeIterator<'a, T>
+where
+    T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+    type Item = Node;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.done {
+            None
+        } else {
+            // INVARIANT: if the new `cur` is NULL, then the iterator has reached its end and we
+            // set `done` to `true`.
+            self.cur = (self.fn_next)(self.cur);
+            self.done = self.cur.is_null();
+            // SAFETY: `fn_next` must return an owned reference per the iterator contract.
+            // The iterator itself is considered to own this reference, so we take another one.
+            unsafe { Node::get_from_raw(self.cur) }
+        }
+    }
+}
+
+// Drop impl to ensure we drop the current node being iterated on, if any.
+impl<'a, T> Drop for NodeIterator<'a, T>
+where
+    T: Fn(*mut bindings::device_node) -> *mut bindings::device_node,
+{
+    fn drop(&mut self) {
+        // SAFETY: `cur` is valid or NULL, and `of_node_put()` can handle NULL.
+        #[cfg(CONFIG_OF_DYNAMIC)]
+        unsafe {
+            bindings::of_node_put(self.cur)
+        };
+    }
+}
+
+/// Returns the root node of the OF device tree (if any).
+pub fn root() -> Option<Node> {
+    #[cfg(not(CONFIG_OF))]
+    {
+        None
+    }
+    #[cfg(CONFIG_OF)]
+    // SAFETY: bindings::of_root is always valid or NULL
+    unsafe {
+        Node::get_from_raw(bindings::of_root)
+    }
+}
+
+/// Returns the /chosen node of the OF device tree (if any).
+pub fn chosen() -> Option<Node> {
+    #[cfg(not(CONFIG_OF))]
+    {
+        None
+    }
+    #[cfg(CONFIG_OF)]
+    // SAFETY: bindings::of_chosen is always valid or NULL
+    unsafe {
+        Node::get_from_raw(bindings::of_chosen)
+    }
+}
+
+/// Returns the /aliases node of the OF device tree (if any).
+pub fn aliases() -> Option<Node> {
+    #[cfg(not(CONFIG_OF))]
+    {
+        None
+    }
+    #[cfg(CONFIG_OF)]
+    // SAFETY: bindings::of_aliases is always valid or NULL
+    unsafe {
+        Node::get_from_raw(bindings::of_aliases)
+    }
+}
+
+/// Returns the system stdout node of the OF device tree (if any).
+pub fn stdout() -> Option<Node> {
+    #[cfg(not(CONFIG_OF))]
+    {
+        None
+    }
+    #[cfg(CONFIG_OF)]
+    // SAFETY: bindings::of_stdout is always valid or NULL
+    unsafe {
+        Node::get_from_raw(bindings::of_stdout)
+    }
+}
+
+#[allow(unused_variables)]
+/// Looks up a node in the device tree by phandle.
+pub fn find_node_by_phandle(handle: PHandle) -> Option<Node> {
+    #[cfg(not(CONFIG_OF))]
+    {
+        None
+    }
+    #[cfg(CONFIG_OF)]
+    // SAFETY: bindings::of_find_node_by_phandle always returns a valid pointer or NULL
+    unsafe {
+        Node::from_raw(bindings::of_find_node_by_phandle(handle))
+    }
+}
+
+impl Clone for Node {
+    fn clone(&self) -> Node {
+        // SAFETY: `raw_node` is valid and non-NULL per the type invariant,
+        // so this can never return None.
+        unsafe { Node::get_from_raw(self.raw_node).unwrap() }
+    }
+}
+
+impl Drop for Node {
+    fn drop(&mut self) {
+        #[cfg(CONFIG_OF_DYNAMIC)]
+        // SAFETY: `raw_node` is valid per the type invariant.
+        unsafe {
+            bindings::of_node_put(self.raw_node)
+        };
+    }
+}
+
 /// Create an OF `IdTable` with an "alias" for modpost.
 #[macro_export]
 macro_rules! of_device_table {
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index f6126aca33a681..ca5c43f3709689 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -3,12 +3,15 @@
 //! Kernel page allocation and management.
 
 use crate::{
+    addr::*,
     alloc::{AllocError, Flags},
     bindings,
     error::code::*,
     error::Result,
+    types::{Opaque, Ownable, Owned},
     uaccess::UserSliceReader,
 };
+use core::mem::ManuallyDrop;
 use core::ptr::{self, NonNull};
 
 /// A bitwise shift for the page size.
@@ -30,13 +33,10 @@ pub const fn page_align(addr: usize) -> usize {
     (addr + (PAGE_SIZE - 1)) & PAGE_MASK
 }
 
-/// A pointer to a page that owns the page allocation.
-///
-/// # Invariants
-///
-/// The pointer is valid, and has ownership over the page.
+/// A struct page.
+#[repr(transparent)]
 pub struct Page {
-    page: NonNull<bindings::page>,
+    page: Opaque<bindings::page>,
 }
 
 // SAFETY: Pages have no logic that relies on them staying on a given thread, so moving them across
@@ -69,19 +69,20 @@ impl Page {
     /// let page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
     /// # Ok::<(), kernel::alloc::AllocError>(())
     /// ```
-    pub fn alloc_page(flags: Flags) -> Result<Self, AllocError> {
+    pub fn alloc_page(flags: Flags) -> Result<Owned<Self>, AllocError> {
         // SAFETY: Depending on the value of `gfp_flags`, this call may sleep. Other than that, it
         // is always safe to call this method.
         let page = unsafe { bindings::alloc_pages(flags.as_raw(), 0) };
         let page = NonNull::new(page).ok_or(AllocError)?;
-        // INVARIANT: We just successfully allocated a page, so we now have ownership of the newly
-        // allocated page. We transfer that ownership to the new `Page` object.
-        Ok(Self { page })
+        // SAFETY: We just successfully allocated a page, so we now have ownership of the newly
+        // allocated page. We transfer that ownership to the new `Owned<Page>` object.
+        // Since `Page` is transparent, we can cast the pointer directly.
+        Ok(unsafe { Owned::from_raw(page.cast()) })
     }
 
     /// Returns a raw pointer to the page.
     pub fn as_ptr(&self) -> *mut bindings::page {
-        self.page.as_ptr()
+        Opaque::raw_get(&self.page)
     }
 
     /// Runs a piece of code with this page mapped to an address.
@@ -100,7 +101,7 @@ impl Page {
     /// different addresses. However, even if the addresses are different, the underlying memory is
     /// still the same for these purposes (e.g., it's still a data race if they both write to the
     /// same underlying byte at the same time).
-    fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
+    pub fn with_page_mapped<T>(&self, f: impl FnOnce(*mut u8) -> T) -> T {
         // SAFETY: `page` is valid due to the type invariants on `Page`.
         let mapped_addr = unsafe { bindings::kmap_local_page(self.as_ptr()) };
 
@@ -141,7 +142,7 @@ impl Page {
     /// different addresses. However, even if the addresses are different, the underlying memory is
     /// still the same for these purposes (e.g., it's still a data race if they both write to the
     /// same underlying byte at the same time).
-    fn with_pointer_into_page<T>(
+    pub fn with_pointer_into_page<T>(
         &self,
         off: usize,
         len: usize,
@@ -248,11 +249,76 @@ impl Page {
             reader.read_raw(unsafe { core::slice::from_raw_parts_mut(dst.cast(), len) })
         })
     }
+
+    /// Returns the physical address of this page.
+    pub fn phys(&self) -> PhysicalAddr {
+        // SAFETY: `page` is valid due to the type invariants on `Page`.
+        unsafe { bindings::page_to_phys(self.as_ptr()) }
+    }
+
+    /// Converts a Rust-owned Page into its physical address.
+    /// The caller is responsible for calling `from_phys()` to avoid
+    /// leaking memory.
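+    ///
+    /// # Examples
+    ///
+    /// A sketch of a round trip through a physical address:
+    ///
+    /// ```ignore
+    /// let page = Page::alloc_page(GFP_KERNEL)?;
+    /// let phys = Page::into_phys(page);
+    /// // ... hand `phys` to hardware or firmware ...
+    /// // SAFETY: `phys` came from `into_phys()` and is not used again afterwards.
+    /// let page = unsafe { Page::from_phys(phys) };
+    /// ```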
+    pub fn into_phys(this: Owned<Self>) -> PhysicalAddr {
+        ManuallyDrop::new(this).phys()
+    }
+
+    /// Converts a physical address to a Rust-owned Page.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the physical address was previously returned
+    /// by a call to `Page::into_phys()`, and that the physical address is no
+    /// longer used after this call, nor is `from_phys()` called again on it.
+    pub unsafe fn from_phys(phys: PhysicalAddr) -> Owned<Self> {
+        // SAFETY: By the safety requirements, the physical address must be valid and
+        // have come from `into_phys()`, so phys_to_page() cannot fail and
+        // must return the original struct page pointer.
+        unsafe { Owned::from_raw(NonNull::new_unchecked(bindings::phys_to_page(phys)).cast()) }
+    }
+
+    /// Borrows a Page from a physical address, without taking over ownership.
+    ///
+    /// If the physical address does not have a `struct page` entry or is not
+    /// part of the System RAM region, returns None.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the physical address, if it is backed by a
+    /// `struct page`, remains available for the duration of the borrowed
+    /// lifetime.
+    pub unsafe fn borrow_phys(phys: &PhysicalAddr) -> Option<&Self> {
+        // SAFETY: This is always safe, as it is just arithmetic
+        let pfn = unsafe { bindings::phys_to_pfn(*phys) };
+        // SAFETY: This function is safe to call with any pfn
+        if !unsafe { bindings::pfn_valid(pfn) && bindings::page_is_ram(pfn) != 0 } {
+            None
+        } else {
+            // SAFETY: We have just checked that the pfn is valid above, so it must
+            // have a corresponding struct page. By the safety requirements, we can
+            // return a borrowed reference to it.
+            Some(unsafe { &*(bindings::pfn_to_page(pfn) as *mut Self as *const Self) })
+        }
+    }
+
+    /// Borrows a Page from a physical address, without taking over ownership
+    /// nor checking for validity.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the physical address is backed by a
+    /// `struct page` and corresponds to System RAM.
+    pub unsafe fn borrow_phys_unchecked(phys: &PhysicalAddr) -> &Self {
+        // SAFETY: This is always safe, as it is just arithmetic
+        let pfn = unsafe { bindings::phys_to_pfn(*phys) };
+        // SAFETY: The caller guarantees that the pfn is valid. By the safety
+        // requirements, we can return a borrowed reference to it.
+        unsafe { &*(bindings::pfn_to_page(pfn) as *mut Self as *const Self) }
+    }
 }
 
-impl Drop for Page {
-    fn drop(&mut self) {
+// SAFETY: `Page` is a transparent wrapper around a `struct page`, and an owned `Page` represents
+// ownership of a page allocation, which is released exactly once via `release()` below.
+unsafe impl Ownable for Page {
+    unsafe fn release(this: NonNull<Self>) {
         // SAFETY: By the type invariants, we have ownership of the page and can free it.
-        unsafe { bindings::__free_pages(self.page.as_ptr(), 0) };
+        // Since Page is transparent, we can cast the raw pointer directly.
+        unsafe { bindings::__free_pages(this.cast().as_ptr(), 0) };
     }
 }
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index 4c98b5b9aa1e92..5839aa5d409865 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -103,7 +103,7 @@ impl<T: Driver + 'static> Adapter<T> {
 /// kernel::module_pci_driver! {
 ///     type: MyDriver,
 ///     name: "Module name",
-///     author: "Author name",
+///     authors: ["Author name"],
 ///     description: "Description",
 ///     license: "GPL v2",
 /// }
@@ -432,3 +432,5 @@ impl AsRef<device::Device> for Device {
         &self.0
     }
 }
+
+impl crate::dma::Device for Device {}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index 50e6b042181322..6c013013cec58b 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -5,8 +5,14 @@
 //! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h)
 
 use crate::{
-    bindings, container_of, device, driver,
+    bindings, container_of, device,
+    devres::Devres,
+    driver,
     error::{to_result, Result},
+    io::{
+        mem::{ExclusiveIoMem, IoMem},
+        resource::Resource,
+    },
     of,
     prelude::*,
     str::CStr,
@@ -101,7 +107,7 @@ impl<T: Driver + 'static> driver::Adapter for Adapter<T> {
 /// kernel::module_platform_driver! {
 ///     type: MyDriver,
 ///     name: "Module name",
-///     author: "Author name",
+///     authors: ["Author name"],
 ///     description: "Description",
 ///     license: "GPL v2",
 /// }
@@ -191,6 +197,121 @@ impl Device {
         // embedded in `struct platform_device`.
         unsafe { container_of!(self.0.as_raw(), bindings::platform_device, dev) }.cast_mut()
     }
+
+    /// Maps a platform resource through ioremap() where the size is known at
+    /// compile time.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// use kernel::{bindings, c_str, platform};
+    ///
+    /// fn probe(pdev: &mut platform::Device, /* ... */) -> Result<()> {
+    ///     let offset = 0; // Some offset.
+    ///
+    ///     // If the size is known at compile time, use `ioremap_resource_sized`.
+    ///     // No runtime checks will apply when reading and writing.
+    ///     let resource = pdev.resource(0).ok_or(ENODEV)?;
+    ///     let iomem = pdev.ioremap_resource_sized::<42>(&resource)?;
+    ///
+    ///     // Read and write a 32-bit value at `offset`. Calling `try_access()` on
+    ///     // the `Devres` makes sure that the resource is still valid.
+    ///     let data = iomem.try_access().ok_or(ENODEV)?.readl(offset);
+    ///
+    ///     iomem.try_access().ok_or(ENODEV)?.writel(data, offset);
+    ///
+    ///     # Ok::<(), Error>(())
+    /// }
+    /// ```
+    pub fn ioremap_resource_sized<const SIZE: usize>(
+        &self,
+        resource: &Resource,
+    ) -> Result<Devres<IoMem<SIZE>>> {
+        IoMem::new(resource, self.as_ref())
+    }
+
+    /// Same as [`Self::ioremap_resource_sized`] but with exclusive access to the
+    /// underlying region.
+    pub fn ioremap_resource_exclusive_sized<const SIZE: usize>(
+        &self,
+        resource: &Resource,
+    ) -> Result<Devres<ExclusiveIoMem<SIZE>>> {
+        ExclusiveIoMem::new(resource, self.as_ref())
+    }
+
+    /// Maps a platform resource through ioremap().
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// # use kernel::{bindings, c_str, platform};
+    ///
+    /// fn probe(pdev: &mut platform::Device, /* ... */) -> Result<()> {
+    ///     let offset = 0; // Some offset.
+    ///
+    ///     // Unlike `ioremap_resource_sized`, here the size of the memory region
+    ///     // is not known at compile time, so only the `try_read*` and `try_write*`
+    ///     // family of functions are exposed, leading to runtime checks on every
+    ///     // access.
+    ///     let resource = pdev.resource(0).ok_or(ENODEV)?;
+    ///     let iomem = pdev.ioremap_resource(&resource)?;
+    ///
+    ///     let data = iomem.try_access().ok_or(ENODEV)?.try_readl(offset)?;
+    ///
+    ///     iomem.try_access().ok_or(ENODEV)?.try_writel(data, offset)?;
+    ///
+    ///     # Ok::<(), Error>(())
+    /// }
+    /// ```
+    pub fn ioremap_resource(&self, resource: &Resource) -> Result<Devres<IoMem<0>>> {
+        self.ioremap_resource_sized::<0>(resource)
+    }
+
+    /// Same as [`Self::ioremap_resource`] but with exclusive access to the underlying
+    /// region.
+    pub fn ioremap_resource_exclusive(
+        &self,
+        resource: &Resource,
+    ) -> Result<Devres<ExclusiveIoMem<0>>> {
+        self.ioremap_resource_exclusive_sized::<0>(resource)
+    }
+
+    /// Returns the resource at `index`, if any.
+    pub fn resource(&self, index: u32) -> Option<&Resource> {
+        // SAFETY: `self.as_raw()` returns a valid pointer to a `struct platform_device`.
+        let resource = unsafe {
+            bindings::platform_get_resource(self.as_raw(), bindings::IORESOURCE_MEM, index)
+        };
+
+        if resource.is_null() {
+            return None;
+        }
+
+        // SAFETY: `resource` is a valid pointer to a `struct resource` as
+        // returned by `platform_get_resource`.
+        Some(unsafe { Resource::from_ptr(resource) })
+    }
+
+    /// Returns the resource with a given `name`, if any.
+    pub fn resource_by_name(&self, name: &CStr) -> Option<&Resource> {
+        // SAFETY: `self.as_raw()` returns a valid pointer to a `struct
+        // platform_device` and `name` points to a valid C string.
+        let resource = unsafe {
+            bindings::platform_get_resource_byname(
+                self.as_raw(),
+                bindings::IORESOURCE_MEM,
+                name.as_char_ptr(),
+            )
+        };
+
+        if resource.is_null() {
+            return None;
+        }
+
+        // SAFETY: `resource` is a valid pointer to a `struct resource` as
+        // returned by `platform_get_resource`.
+        Some(unsafe { Resource::from_ptr(resource) })
+    }
 }
 
 impl AsRef<device::Device> for Device {
@@ -198,3 +319,5 @@ impl AsRef<device::Device> for Device {
         &self.0
     }
 }
+
+impl crate::dma::Device for Device {}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index dde2e0649790ca..889102f5a81e9a 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -17,7 +17,7 @@ pub use core::pin::Pin;
 pub use crate::alloc::{flags::*, Box, KBox, KVBox, KVVec, KVec, VBox, VVec, Vec};
 
 #[doc(no_inline)]
-pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable};
+pub use macros::{export, module, pin_data, pinned_drop, vtable, Zeroable};
 
 pub use super::{build_assert, build_error};
 
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 61ee36c5e5f5db..1e97ea8d31f030 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+// FIXME
+#![allow(clippy::undocumented_unsafe_blocks)]
 
 //! Printing facilities.
 //!
@@ -8,13 +10,14 @@
 
 use crate::{
     ffi::{c_char, c_void},
+    prelude::*,
     str::RawFormatter,
 };
 use core::fmt;
 
 // Called from `vsprintf` with format specifier `%pA`.
 #[expect(clippy::missing_safety_doc)]
-#[no_mangle]
+#[export]
 unsafe extern "C" fn rust_fmt_argument(
     buf: *mut c_char,
     end: *mut c_char,
diff --git a/rust/kernel/siphash.rs b/rust/kernel/siphash.rs
new file mode 100644
index 00000000000000..2f14b57b589ca7
--- /dev/null
+++ b/rust/kernel/siphash.rs
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A core::hash::Hasher wrapper for the kernel siphash implementation.
+//!
+//! This module allows Rust code to use the kernel's siphash implementation
+//! to hash Rust objects.
+
+use core::hash::Hasher;
+
+/// A Hasher implementation that uses the kernel siphash implementation.
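+///
+/// # Examples
+///
+/// A minimal sketch:
+///
+/// ```ignore
+/// use core::hash::Hasher;
+/// use kernel::siphash::SipHasher;
+///
+/// let mut hasher = SipHasher::new();
+/// hasher.write(b"hello");
+/// let digest = hasher.finish();
+/// ```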
+#[derive(Default)]
+pub struct SipHasher {
+    // SipHash state is 4xu64, but the Linux implementation
+    // doesn't expose incremental hashing so let's just chain
+    // individual SipHash calls for now, which return a u64
+    // hash.
+    state: u64,
+}
+
+impl SipHasher {
+    /// Create a new SipHasher with zeroed state.
+    pub fn new() -> Self {
+        SipHasher { state: 0 }
+    }
+}
+
+impl Hasher for SipHasher {
+    fn finish(&self) -> u64 {
+        self.state
+    }
+
+    fn write(&mut self, bytes: &[u8]) {
+        let key = bindings::siphash_key_t {
+            key: [self.state, 0],
+        };
+
+        // SAFETY: `bytes.as_ptr()` is valid for reads of `bytes.len()` bytes, and `key` is a
+        // valid siphash key on the stack.
+        self.state = unsafe { bindings::siphash(bytes.as_ptr() as *const _, bytes.len(), &key) };
+    }
+}
diff --git a/rust/kernel/soc/apple/aop.rs b/rust/kernel/soc/apple/aop.rs
new file mode 100644
index 00000000000000..37aae200b81eb4
--- /dev/null
+++ b/rust/kernel/soc/apple/aop.rs
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Common code for AOP endpoint drivers
+
+use kernel::{prelude::*, sync::Arc};
+
+/// Representation of an "EPIC" service.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(C)]
+pub struct EPICService {
+    /// Channel id
+    pub channel: u32,
+    /// RTKit endpoint
+    pub endpoint: u8,
+}
+
+/// Listener for the "HID" events sent by the AOP.
+pub trait FakehidListener {
+    /// Process the event.
+    fn process_fakehid_report(&self, data: &[u8]) -> Result<()>;
+}
+
+/// AOP communications manager.
+pub trait AOP: Send + Sync {
+    /// Calls a method on a specified service
+    fn epic_call(&self, svc: &EPICService, subtype: u16, msg_bytes: &[u8]) -> Result<u32>;
+    /// Adds the listener for the specified service
+    fn add_fakehid_listener(
+        &self,
+        svc: EPICService,
+        listener: Arc<dyn FakehidListener>,
+    ) -> Result<()>;
+    /// Remove the listener for the specified service
+    fn remove_fakehid_listener(&self, svc: &EPICService) -> bool;
+    /// Internal method to detach the device.
+    fn remove(&self);
+}
+
+/// Converts a text representation of a FourCC code to its `u32` value.
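+///
+/// # Examples
+///
+/// A small sketch (the constant name is illustrative):
+///
+/// ```ignore
+/// // 'a' = 0x61, 'o' = 0x6f, 'p' = 0x70, ' ' = 0x20, so this is 0x616f7020.
+/// const AOP_EP: u32 = from_fourcc(b"aop ");
+/// ```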
+pub const fn from_fourcc(b: &[u8]) -> u32 {
+    b[3] as u32 | (b[2] as u32) << 8 | (b[1] as u32) << 16 | (b[0] as u32) << 24
+}
diff --git a/rust/kernel/soc/apple/mailbox.rs b/rust/kernel/soc/apple/mailbox.rs
new file mode 100644
index 00000000000000..a4010b15eb3210
--- /dev/null
+++ b/rust/kernel/soc/apple/mailbox.rs
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Support for Apple ASC Mailbox.
+//!
+//! C header: [`include/linux/soc/apple/mailbox.h`](srctree/include/linux/soc/apple/mailbox.h)
+
+use crate::{
+    bindings, device,
+    error::{from_err_ptr, to_result, Result},
+    str::CStr,
+    types::{ForeignOwnable, ScopeGuard},
+};
+
+use core::marker::PhantomData;
+
+/// 96-bit message. Its interpretation is up to the upper layer.
+pub type Message = bindings::apple_mbox_msg;
+
+/// Mailbox receive callback
+pub trait MailCallback {
+    /// Callback context
+    type Data: ForeignOwnable + Send + Sync;
+
+    /// The actual callback. Called in an interrupt context.
+    fn recv_message(data: <Self::Data as ForeignOwnable>::Borrowed<'_>, msg: Message);
+}
+
+/// Wrapper over `struct apple_mbox *`
+#[repr(transparent)]
+pub struct Mailbox<T: MailCallback> {
+    mbox: *mut bindings::apple_mbox,
+    _p: PhantomData<T>,
+}
+
+extern "C" fn mailbox_rx_callback<T: MailCallback>(
+    _mbox: *mut bindings::apple_mbox,
+    msg: Message,
+    cookie: *mut core::ffi::c_void,
+) {
+    // SAFETY: cookie came from a call to `into_foreign`
+    T::recv_message(unsafe { T::Data::borrow(cookie) }, msg);
+}
+
+impl<T: MailCallback> Mailbox<T> {
+    /// Creates a mailbox for the specified name.
+    pub fn new_byname(
+        dev: &device::Device,
+        mbox_name: &'static CStr,
+        data: T::Data,
+    ) -> Result<Mailbox<T>> {
+        let ptr = data.into_foreign() as *mut _;
+        let guard = ScopeGuard::new(|| {
+            // SAFETY: `ptr` came from a previous call to `into_foreign`.
+            unsafe { T::Data::from_foreign(ptr) };
+        });
+        // SAFETY: This just calls the C function; all arguments are valid.
+        let mbox = unsafe {
+            from_err_ptr(bindings::apple_mbox_get_byname(
+                dev.as_raw(),
+                mbox_name.as_char_ptr(),
+            ))?
+        };
+        // SAFETY: mbox is a valid pointer
+        unsafe {
+            (*mbox).cookie = ptr;
+            (*mbox).rx = Some(mailbox_rx_callback::<T>);
+            to_result(bindings::apple_mbox_start(mbox))?;
+        }
+        guard.dismiss();
+        Ok(Mailbox {
+            mbox,
+            _p: PhantomData,
+        })
+    }
+    /// Sends the specified message
+    pub fn send(&self, msg: Message, atomic: bool) -> Result<()> {
+        // SAFETY: This just calls the C function; `mbox` is a valid pointer.
+        to_result(unsafe { bindings::apple_mbox_send(self.mbox, msg, atomic) })
+    }
+}
+
+impl<T: MailCallback> Drop for Mailbox<T> {
+    fn drop(&mut self) {
+        // SAFETY: mbox is a valid pointer
+        unsafe { bindings::apple_mbox_stop(self.mbox) };
+        // SAFETY: `cookie` came from `into_foreign`
+        unsafe { T::Data::from_foreign((*self.mbox).cookie) };
+    }
+}
+
+unsafe impl<T> Sync for Mailbox<T>
+where
+    T: MailCallback,
+    T::Data: Sync,
+{
+}
+
+unsafe impl<T> Send for Mailbox<T>
+where
+    T: MailCallback,
+    T::Data: Send,
+{
+}
diff --git a/rust/kernel/soc/apple/mod.rs b/rust/kernel/soc/apple/mod.rs
new file mode 100644
index 00000000000000..51149360872094
--- /dev/null
+++ b/rust/kernel/soc/apple/mod.rs
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Apple SoC drivers
+
+#[cfg(CONFIG_APPLE_RTKIT = "y")]
+pub mod rtkit;
+
+#[cfg(any(CONFIG_APPLE_AOP = "y", CONFIG_APPLE_AOP = "m"))]
+pub mod aop;
+
+#[cfg(CONFIG_APPLE_MAILBOX = "y")]
+pub mod mailbox;
diff --git a/rust/kernel/soc/apple/rtkit.rs b/rust/kernel/soc/apple/rtkit.rs
new file mode 100644
index 00000000000000..c536c1fbd76756
--- /dev/null
+++ b/rust/kernel/soc/apple/rtkit.rs
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+//! Support for Apple RTKit coprocessors.
+//!
+//! C header: [`include/linux/soc/apple/rtkit.h`](srctree/include/linux/soc/apple/rtkit.h)
+
+use crate::{
+    alloc::flags::*,
+    bindings, device,
+    error::{code::*, from_err_ptr, from_result, to_result, Result},
+    prelude::KBox,
+    str::CStr,
+    types::{ForeignOwnable, ScopeGuard},
+};
+
+use core::marker::PhantomData;
+use core::ptr;
+use macros::vtable;
+
+/// Trait to represent allocatable buffers for the RTKit core.
+///
+/// Users must implement this trait for their own representation of those allocations.
+pub trait Buffer {
+    /// Returns the IOVA (virtual address) of the buffer from RTKit's point of view, or an error if
+    /// unavailable.
+    fn iova(&self) -> Result<usize>;
+
+    /// Returns a mutable byte slice of the buffer contents, or an
+    /// error if unavailable.
+    fn buf(&mut self) -> Result<&mut [u8]>;
+}
+
+/// Callback operations for an RTKit client.
+#[vtable]
+pub trait Operations {
+    /// Arbitrary user context type.
+    type Data: ForeignOwnable + Send + Sync;
+
+    /// Type representing an allocated buffer for RTKit.
+    type Buffer: Buffer;
+
+    /// Called when RTKit crashes.
+    fn crashed(_data: <Self::Data as ForeignOwnable>::Borrowed<'_>, _crashlog: Option<&[u8]>) {}
+
+    /// Called when a message was received on a non-system endpoint. Called in non-IRQ context.
+    fn recv_message(
+        _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+        _endpoint: u8,
+        _message: u64,
+    ) {
+    }
+
+    /// Called in IRQ context when a message was received on a non-system endpoint.
+    ///
+    /// Must return `true` if the message is handled, or `false` to process it in
+    /// the handling thread.
+    fn recv_message_early(
+        _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+        _endpoint: u8,
+        _message: u64,
+    ) -> bool {
+        false
+    }
+
+    /// Allocate a buffer for use by RTKit.
+    fn shmem_alloc(
+        _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+        _size: usize,
+    ) -> Result<Self::Buffer> {
+        Err(EINVAL)
+    }
+
+    /// Map an existing buffer used by RTKit at a device-specified virtual address.
+    fn shmem_map(
+        _data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
+        _iova: usize,
+        _size: usize,
+    ) -> Result<Self::Buffer> {
+        Err(EINVAL)
+    }
+}
+
+/// Represents `struct apple_rtkit *`.
+///
+/// # Invariants
+///
+/// The rtk pointer is valid.
+/// The data pointer is a valid pointer from T::Data::into_foreign().
+pub struct RtKit<T: Operations> {
+    rtk: *mut bindings::apple_rtkit,
+    data: *mut core::ffi::c_void,
+    _p: PhantomData<T>,
+}
+
+unsafe extern "C" fn crashed_callback<T: Operations>(
+    cookie: *mut core::ffi::c_void,
+    crashlog: *const core::ffi::c_void,
+    crashlog_size: usize,
+) {
+    let crashlog = if !crashlog.is_null() && crashlog_size > 0 {
+        // SAFETY: The crashlog is either missing or a byte buffer of the specified size
+        Some(unsafe { core::slice::from_raw_parts(crashlog as *const u8, crashlog_size) })
+    } else {
+        None
+    };
+    // SAFETY: cookie is always a T::Data in this API
+    T::crashed(unsafe { T::Data::borrow(cookie) }, crashlog);
+}
+
+unsafe extern "C" fn recv_message_callback<T: Operations>(
+    cookie: *mut core::ffi::c_void,
+    endpoint: u8,
+    message: u64,
+) {
+    // SAFETY: cookie is always a T::Data in this API
+    T::recv_message(unsafe { T::Data::borrow(cookie) }, endpoint, message);
+}
+
+unsafe extern "C" fn recv_message_early_callback<T: Operations>(
+    cookie: *mut core::ffi::c_void,
+    endpoint: u8,
+    message: u64,
+) -> bool {
+    // SAFETY: cookie is always a T::Data in this API
+    T::recv_message_early(unsafe { T::Data::borrow(cookie) }, endpoint, message)
+}
+
+unsafe extern "C" fn shmem_setup_callback<T: Operations>(
+    cookie: *mut core::ffi::c_void,
+    bfr: *mut bindings::apple_rtkit_shmem,
+) -> core::ffi::c_int {
+    // SAFETY: `bfr` is a valid buffer
+    let bfr_mut = unsafe { &mut *bfr };
+
+    from_result(|| {
+        let mut buf = if bfr_mut.iova != 0 {
+            bfr_mut.is_mapped = true;
+            T::shmem_map(
+                // SAFETY: `cookie` came from a previous call to `into_foreign`.
+                unsafe { T::Data::borrow(cookie) },
+                bfr_mut.iova as usize,
+                bfr_mut.size,
+            )?
+        } else {
+            bfr_mut.is_mapped = false;
+            // SAFETY: `cookie` came from a previous call to `into_foreign`.
+            T::shmem_alloc(unsafe { T::Data::borrow(cookie) }, bfr_mut.size)?
+        };
+
+        let iova = buf.iova()?;
+        let slice = buf.buf()?;
+
+        if slice.len() < bfr_mut.size {
+            return Err(ENOMEM);
+        }
+
+        bfr_mut.iova = iova as u64;
+        bfr_mut.buffer = slice.as_mut_ptr() as *mut _;
+
+        // Now box the returned buffer type and stash it in the private pointer of the
+        // `apple_rtkit_shmem` struct for safekeeping.
+        let boxed = KBox::new(buf, GFP_KERNEL)?;
+        bfr_mut.private = KBox::into_raw(boxed) as *mut _;
+        Ok(0)
+    })
+}
+
+unsafe extern "C" fn shmem_destroy_callback<T: Operations>(
+    _cookie: *mut core::ffi::c_void,
+    bfr: *mut bindings::apple_rtkit_shmem,
+) {
+    // SAFETY: `bfr` is a valid buffer
+    let bfr_mut = unsafe { &mut *bfr };
+    if !bfr_mut.private.is_null() {
+        // SAFETY: Per shmem_setup_callback, this has to be a pointer to a Buffer if it is set.
+        unsafe {
+            core::mem::drop(KBox::from_raw(bfr_mut.private as *mut T::Buffer));
+        }
+        bfr_mut.private = core::ptr::null_mut();
+    }
+}
+
+impl<T: Operations> RtKit<T> {
+    const VTABLE: bindings::apple_rtkit_ops = bindings::apple_rtkit_ops {
+        crashed: Some(crashed_callback::<T>),
+        recv_message: Some(recv_message_callback::<T>),
+        recv_message_early: Some(recv_message_early_callback::<T>),
+        shmem_setup: if T::HAS_SHMEM_ALLOC || T::HAS_SHMEM_MAP {
+            Some(shmem_setup_callback::<T>)
+        } else {
+            None
+        },
+        shmem_destroy: if T::HAS_SHMEM_ALLOC || T::HAS_SHMEM_MAP {
+            Some(shmem_destroy_callback::<T>)
+        } else {
+            None
+        },
+    };
+
+    /// Creates a new RTKit client for a given device and optional mailbox name or index.
+    pub fn new(
+        dev: &device::Device,
+        mbox_name: Option<&'static CStr>,
+        mbox_idx: usize,
+        data: T::Data,
+    ) -> Result<Self> {
+        let ptr = data.into_foreign() as *mut _;
+        let guard = ScopeGuard::new(|| {
+            // SAFETY: `ptr` came from a previous call to `into_foreign`.
+            unsafe { T::Data::from_foreign(ptr) };
+        });
+        // SAFETY: `dev` is valid by its type invariants and otherwise this just
+        //          calls the C init function.
+        let rtk = unsafe {
+            from_err_ptr(bindings::apple_rtkit_init(
+                dev.as_raw(),
+                ptr,
+                match mbox_name {
+                    Some(s) => s.as_char_ptr(),
+                    None => ptr::null(),
+                },
+                mbox_idx.try_into()?,
+                &Self::VTABLE,
+            ))
+        }?;
+
+        guard.dismiss();
+        // INVARIANT: `rtk` and `data` are valid here.
+        Ok(Self {
+            rtk,
+            data: ptr,
+            _p: PhantomData,
+        })
+    }
+
+    /// Boots (wakes up) the RTKit coprocessor.
+    pub fn wake(&mut self) -> Result {
+        // SAFETY: `rtk` is valid per the type invariant.
+        to_result(unsafe { bindings::apple_rtkit_wake(self.rtk) })
+    }
+
+    /// Waits for the RTKit coprocessor to finish booting.
+    pub fn boot(&mut self) -> Result {
+        // SAFETY: `rtk` is valid per the type invariant.
+        to_result(unsafe { bindings::apple_rtkit_boot(self.rtk) })
+    }
+
+    /// Starts a non-system endpoint.
+    pub fn start_endpoint(&mut self, endpoint: u8) -> Result {
+        // SAFETY: `rtk` is valid per the type invariant.
+        to_result(unsafe { bindings::apple_rtkit_start_ep(self.rtk, endpoint) })
+    }
+
+    /// Sends a message to a given endpoint.
+    pub fn send_message(&mut self, endpoint: u8, message: u64) -> Result {
+        // SAFETY: `rtk` is valid per the type invariant.
+        to_result(unsafe {
+            bindings::apple_rtkit_send_message(self.rtk, endpoint, message, ptr::null_mut(), false)
+        })
+    }
+
+    /// Checks if an endpoint is present.
+    pub fn has_endpoint(&self, endpoint: u8) -> bool {
+        // SAFETY: `rtk` is valid per the type invariant.
+        unsafe { bindings::apple_rtkit_has_endpoint(self.rtk, endpoint) }
+    }
+}
+
+// SAFETY: `RtKit` operations require a mutable reference
+unsafe impl<T: Operations> Sync for RtKit<T> {}
+
+// SAFETY: `RtKit` operations require a mutable reference
+unsafe impl<T: Operations> Send for RtKit<T> {}
+
+impl<T: Operations> Drop for RtKit<T> {
+    fn drop(&mut self) {
+        // SAFETY: The pointer is valid by the type invariant.
+        unsafe { bindings::apple_rtkit_free(self.rtk) };
+
+        // Free context data.
+        //
+        // SAFETY: This matches the call to `into_foreign` from `new` in the success case.
+        unsafe { T::Data::from_foreign(self.data) };
+    }
+}
diff --git a/rust/kernel/soc/mod.rs b/rust/kernel/soc/mod.rs
new file mode 100644
index 00000000000000..e3024042e74f0d
--- /dev/null
+++ b/rust/kernel/soc/mod.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! SoC drivers
+
+pub mod apple;
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index 474ddddd43e4d5..c49a95153b2084 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -31,6 +31,29 @@ impl BStr {
         // SAFETY: `BStr` is transparent to `[u8]`.
         unsafe { &*(bytes as *const [u8] as *const BStr) }
     }
+
+    /// Returns a reference to the inner byte slice.
+    #[inline]
+    pub const fn deref_const(&self) -> &[u8] {
+        &self.0
+    }
+
+    /// Strip a prefix from `self`. Delegates to [`slice::strip_prefix`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use kernel::b_str;
+    /// assert_eq!(Some(b_str!("bar")), b_str!("foobar").strip_prefix(b_str!("foo")));
+    /// assert_eq!(None, b_str!("foobar").strip_prefix(b_str!("bar")));
+    /// assert_eq!(Some(b_str!("foobar")), b_str!("foobar").strip_prefix(b_str!("")));
+    /// assert_eq!(Some(b_str!("")), b_str!("foobar").strip_prefix(b_str!("foobar")));
+    /// ```
+    pub fn strip_prefix(&self, pattern: impl AsRef<Self>) -> Option<&BStr> {
+        self.deref()
+            .strip_prefix(pattern.as_ref().deref())
+            .map(Self::from_bytes)
+    }
 }
 
 impl fmt::Display for BStr {
@@ -104,7 +127,36 @@ impl Deref for BStr {
 
     #[inline]
     fn deref(&self) -> &Self::Target {
-        &self.0
+        self.deref_const()
+    }
+}
+
+impl PartialEq for BStr {
+    fn eq(&self, other: &Self) -> bool {
+        self.deref().eq(other.deref())
+    }
+}
+
+impl<Idx> Index<Idx> for BStr
+where
+    [u8]: Index<Idx, Output = [u8]>,
+{
+    type Output = Self;
+
+    fn index(&self, index: Idx) -> &Self::Output {
+        BStr::from_bytes(&self.0[index])
+    }
+}
+
+impl AsRef<BStr> for [u8] {
+    fn as_ref(&self) -> &BStr {
+        BStr::from_bytes(self)
+    }
+}
+
+impl AsRef<BStr> for BStr {
+    fn as_ref(&self) -> &BStr {
+        self
     }
 }
 
@@ -900,3 +952,173 @@ impl fmt::Debug for CString {
 macro_rules! fmt {
     ($($f:tt)*) => ( core::format_args!($($f)*) )
 }
+
+pub mod parse_int {
+    //! Integer parsing functions for parsing signed and unsigned integers
+    //! potentially prefixed with `0x`, `0o`, or `0b`.
+
+    use crate::prelude::*;
+    use crate::str::BStr;
+    use core::ops::Deref;
+
+    // Make `FromStrRadix` a public type with a private name. This seals
+    // `ParseInt`, that is, prevents downstream users from implementing the
+    // trait.
+    mod private {
+        use crate::str::BStr;
+
+        /// Trait that allows parsing a [`&BStr`] to an integer with a radix.
+        ///
+        /// # Safety
+        ///
+        /// The member functions of this trait must be implemented according to
+        /// their documentation.
+        ///
+        /// [`&BStr`]: kernel::str::BStr
+        // This is required because the `from_str_radix` function on the primitive
+        // integer types is not part of any trait.
+        pub unsafe trait FromStrRadix: Sized {
+            /// The minimum value this integer type can assume.
+            const MIN: Self;
+
+            /// Parse `src` to `Self` using radix `radix`.
+            fn from_str_radix(src: &BStr, radix: u32) -> Result<Self, crate::error::Error>;
+
+            /// Return the absolute value of Self::MIN.
+            fn abs_min() -> u64;
+
+            /// Perform bitwise 2's complement on `self`.
+            ///
+            /// Note: This function does not make sense for unsigned integers.
+            fn complement(self) -> Self;
+        }
+    }
+
+    /// Extract the radix from an integer literal optionally prefixed with
+    /// one of `0x`, `0X`, `0o`, `0O`, `0b`, `0B`, `0`.
+    fn strip_radix(src: &BStr) -> (u32, &BStr) {
+        match src.deref() {
+            [b'0', b'x' | b'X', rest @ ..] => (16, rest.as_ref()),
+            [b'0', b'o' | b'O', rest @ ..] => (8, rest.as_ref()),
+            [b'0', b'b' | b'B', rest @ ..] => (2, rest.as_ref()),
+            // NOTE: We are including the leading zero to be able to parse
+            // literal 0 here. If we removed it as a radix prefix, we would not
+            // be able to parse `0`.
+            [b'0', ..] => (8, src),
+            _ => (10, src),
+        }
+    }
+
+    /// Trait for parsing string representations of integers.
+    ///
+    /// Strings beginning with `0x`, `0o`, or `0b` are parsed as hex, octal, or
+    /// binary respectively. Strings beginning with `0` otherwise are parsed as
+    /// octal. Anything else is parsed as decimal. A leading `+` or `-` is also
+    /// permitted. Any string parsed by [`kstrtol()`] or [`kstrtoul()`] will be
+    /// successfully parsed.
+    ///
+    /// [`kstrtol()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtol
+    /// [`kstrtoul()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtoul
+    ///
+    /// # Example
+    /// ```
+    /// use kernel::str::parse_int::ParseInt;
+    /// use kernel::b_str;
+    ///
+    /// assert_eq!(Ok(0), u8::from_str(b_str!("0")));
+    ///
+    /// assert_eq!(Ok(0xa2u8), u8::from_str(b_str!("0xa2")));
+    /// assert_eq!(Ok(-0xa2i32), i32::from_str(b_str!("-0xa2")));
+    ///
+    /// assert_eq!(Ok(-0o57i8), i8::from_str(b_str!("-0o57")));
+    /// assert_eq!(Ok(0o57i8), i8::from_str(b_str!("057")));
+    ///
+    /// assert_eq!(Ok(0b1001i16), i16::from_str(b_str!("0b1001")));
+    /// assert_eq!(Ok(-0b1001i16), i16::from_str(b_str!("-0b1001")));
+    ///
+    /// assert_eq!(Ok(127), i8::from_str(b_str!("127")));
+    /// assert!(i8::from_str(b_str!("128")).is_err());
+    /// assert_eq!(Ok(-128), i8::from_str(b_str!("-128")));
+    /// assert!(i8::from_str(b_str!("-129")).is_err());
+    /// assert_eq!(Ok(255), u8::from_str(b_str!("255")));
+    /// assert!(u8::from_str(b_str!("256")).is_err());
+    /// ```
+    pub trait ParseInt: private::FromStrRadix + TryFrom<u64> {
+        /// Parse a string according to the description in [`Self`].
+        fn from_str(src: &BStr) -> Result<Self> {
+            match src.deref() {
+                [b'-', rest @ ..] => {
+                    let (radix, digits) = strip_radix(rest.as_ref());
+                    // 2's complement values range from -2^(b-1) to 2^(b-1)-1.
+                    // So if we want to parse negative numbers as positive and
+                    // later multiply by -1, we have to parse into a larger
+                    // integer. We choose u64 as sufficiently large. NOTE: 128
+                    // bit integers are not available on all platforms, hence
+                    // the choice of 64 bit.
+                    let val = u64::from_str_radix(
+                        core::str::from_utf8(digits).map_err(|_| EINVAL)?,
+                        radix,
+                    )
+                    .map_err(|_| EINVAL)?;
+
+                    if val > Self::abs_min() {
+                        return Err(EINVAL);
+                    }
+
+                    if val == Self::abs_min() {
+                        return Ok(Self::MIN);
+                    }
+
+                    // SAFETY: We checked that `val` will fit in `Self` above.
+                    let val: Self = unsafe { val.try_into().unwrap_unchecked() };
+
+                    Ok(val.complement())
+                }
+                _ => {
+                    let (radix, digits) = strip_radix(src);
+                    Self::from_str_radix(digits, radix).map_err(|_| EINVAL)
+                }
+            }
+        }
+    }
+
+    macro_rules! impl_parse_int {
+        ($ty:ty) => {
+            // SAFETY: We implement the trait according to the documentation.
+            unsafe impl private::FromStrRadix for $ty {
+                const MIN: Self = <$ty>::MIN;
+
+                fn from_str_radix(src: &BStr, radix: u32) -> Result<Self, crate::error::Error> {
+                    <$ty>::from_str_radix(core::str::from_utf8(src).map_err(|_| EINVAL)?, radix)
+                        .map_err(|_| EINVAL)
+                }
+
+                fn abs_min() -> u64 {
+                    #[allow(unused_comparisons)]
+                    if Self::MIN < 0 {
+                        1u64 << (Self::BITS - 1)
+                    } else {
+                        0
+                    }
+                }
+
+                fn complement(self) -> Self {
+                    (!self).wrapping_add((1 as $ty))
+                }
+            }
+
+            impl ParseInt for $ty {}
+        };
+    }
+
+    impl_parse_int!(i8);
+    impl_parse_int!(u8);
+    impl_parse_int!(i16);
+    impl_parse_int!(u16);
+    impl_parse_int!(i32);
+    impl_parse_int!(u32);
+    impl_parse_int!(i64);
+    impl_parse_int!(u64);
+    impl_parse_int!(isize);
+    impl_parse_int!(usize);
+}
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 16eab9138b2baa..28e05699f74207 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -5,8 +5,6 @@
 //! This module contains the kernel APIs related to synchronisation that have been ported or
 //! wrapped for usage by Rust code in the kernel.
 
-use crate::types::Opaque;
-
 mod arc;
 mod condvar;
 pub mod lock;
@@ -14,24 +12,24 @@ mod locked_by;
 pub mod poll;
 pub mod rcu;
 
+#[cfg(CONFIG_LOCKDEP)]
+mod lockdep;
+#[cfg(not(CONFIG_LOCKDEP))]
+mod no_lockdep;
+#[cfg(not(CONFIG_LOCKDEP))]
+use no_lockdep as lockdep;
+
 pub use arc::{Arc, ArcBorrow, UniqueArc};
 pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
 pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
-pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
-pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
+pub use lock::mutex::{new_mutex, Mutex};
+pub use lock::spinlock::{new_spinlock, SpinLock};
+pub use lockdep::{LockClassKey, StaticLockClassKey};
 pub use locked_by::LockedBy;
 
-/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
-#[repr(transparent)]
-pub struct LockClassKey(Opaque<bindings::lock_class_key>);
-
-// SAFETY: `bindings::lock_class_key` is designed to be used concurrently from multiple threads and
-// provides its own synchronization.
-unsafe impl Sync for LockClassKey {}
-
-impl LockClassKey {
-    pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
-        self.0.get()
+impl Default for StaticLockClassKey {
+    fn default() -> Self {
+        Self::new()
     }
 }
 
@@ -40,11 +38,8 @@ impl LockClassKey {
 #[macro_export]
 macro_rules! static_lock_class {
     () => {{
-        static CLASS: $crate::sync::LockClassKey =
-            // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated
-            // lock_class_key
-            unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
-        &CLASS
+        static CLASS: $crate::sync::StaticLockClassKey = $crate::sync::StaticLockClassKey::new();
+        CLASS.key()
     }};
 }
 
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 3cefda7a437255..68cebc7182a42f 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -34,6 +34,9 @@ use core::{
 };
 use macros::pin_data;
 
+#[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+use crate::sync::lockdep::LockdepMap;
+
 mod std_vendor;
 
 /// A reference-counted pointer to an instance of `T`.
@@ -140,6 +143,17 @@ pub struct Arc<T: ?Sized> {
     _p: PhantomData<ArcInner<T>>,
 }
 
+#[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+#[pin_data]
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+    refcount: Opaque<bindings::refcount_t>,
+    lockdep_map: LockdepMap,
+    data: T,
+}
+
+// FIXME: pin_data does not work well with cfg attributes within the struct definition.
+#[cfg(not(CONFIG_RUST_EXTRA_LOCKDEP))]
 #[pin_data]
 #[repr(C)]
 struct ArcInner<T: ?Sized> {
@@ -185,6 +199,11 @@ impl<T: ?Sized> ArcInner<T> {
 #[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
 impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {}
 
+// This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+#[cfg(CONFIG_RUSTC_HAS_COERCE_POINTEE)]
+unsafe impl<T: ?Sized> core::pin::PinCoerceUnsized for Arc<T> {}
+
 // This is to allow `Arc<U>` to be dispatched on when `Arc<T>` can be coerced into `Arc<U>`.
 #[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
 impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {}
@@ -204,11 +223,14 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
 
 impl<T> Arc<T> {
     /// Constructs a new reference counted instance of `T`.
+    #[track_caller]
     pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
         // INVARIANT: The refcount is initialised to a non-zero value.
         let value = ArcInner {
             // SAFETY: There are no safety requirements for this FFI call.
             refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+            #[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+            lockdep_map: LockdepMap::new(),
             data: contents,
         };
 
@@ -418,15 +440,50 @@ impl<T: ?Sized> Drop for Arc<T> {
         // freed/invalid memory as long as it is never dereferenced.
         let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
 
+        #[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+        // SAFETY: By the type invariant, there is necessarily a reference to the object.
+        // We cannot hold the map lock across the reference decrement, as we might race
+        // another thread. Therefore, we lock and immediately drop the guard here. This
+        // only serves to inform lockdep of the dependency up the call stack.
+        unsafe { self.ptr.as_ref() }.lockdep_map.lock();
+
         // INVARIANT: If the refcount reaches zero, there are no other instances of `Arc`, and
         // this instance is being dropped, so the broken invariant is not observable.
         // SAFETY: Also by the type invariant, we are allowed to decrement the refcount.
         let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+
         if is_zero {
             // The count reached zero, we must free the memory.
-            //
-            // SAFETY: The pointer was initialised from the result of `KBox::leak`.
-            unsafe { drop(KBox::from_raw(self.ptr.as_ptr())) };
+
+            #[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+            // SAFETY: If we get this far, we had the last reference to the object.
+            // That means we are responsible for freeing it, so we can safely lock
+            // the fake lock again. This wraps dropping the inner object, which
+            // informs lockdep of the dependencies down the call stack.
+            let guard = unsafe { self.ptr.as_ref() }.lockdep_map.lock();
+
+            // SAFETY: The pointer was initialised from the result of `Box::leak`,
+            // and the value is valid.
+            unsafe { core::ptr::drop_in_place(&mut self.ptr.as_mut().data) };
+
+            // We need to drop the lock guard before freeing the LockdepMap itself
+            #[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+            core::mem::drop(guard);
+
+            #[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+            // SAFETY: The pointer was initialised from the result of `Box::leak`,
+            // and the lockdep map is valid.
+            unsafe {
+                core::ptr::drop_in_place(&mut self.ptr.as_mut().lockdep_map)
+            };
+
+            // SAFETY: The pointer was initialised from the result of `Box::leak`, and
+            // a ManuallyDrop<T> is compatible. We already dropped the contents above.
+            unsafe {
+                drop(KBox::from_raw(
+                    self.ptr.as_ptr() as *mut ManuallyDrop<ArcInner<T>>
+                ))
+            };
         }
     }
 }
@@ -661,6 +718,7 @@ pub struct UniqueArc<T: ?Sized> {
 
 impl<T> UniqueArc<T> {
     /// Tries to allocate a new [`UniqueArc`] instance.
+    #[track_caller]
     pub fn new(value: T, flags: Flags) -> Result<Self, AllocError> {
         Ok(Self {
             // INVARIANT: The newly-created object has a refcount of 1.
@@ -669,8 +727,24 @@ impl<T> UniqueArc<T> {
     }
 
     /// Tries to allocate a new [`UniqueArc`] instance whose contents are not initialised yet.
+    #[track_caller]
     pub fn new_uninit(flags: Flags) -> Result<UniqueArc<MaybeUninit<T>>, AllocError> {
         // INVARIANT: The refcount is initialised to a non-zero value.
+        #[cfg(CONFIG_RUST_EXTRA_LOCKDEP)]
+        let inner = {
+            let map = LockdepMap::new();
+            KBox::try_init::<AllocError>(
+                try_init!(ArcInner {
+                // SAFETY: There are no safety requirements for this FFI call.
+                refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+                lockdep_map: map,
+                data <- init::uninit::<T, AllocError>(),
+            }? AllocError),
+                flags,
+            )?
+        };
+        // FIXME: try_init!() does not work with cfg attributes.
+        #[cfg(not(CONFIG_RUST_EXTRA_LOCKDEP))]
         let inner = KBox::try_init::<AllocError>(
             try_init!(ArcInner {
                 // SAFETY: There are no safety requirements for this FFI call.
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
index 7df565038d7d0d..b368de9930b4d1 100644
--- a/rust/kernel/sync/condvar.rs
+++ b/rust/kernel/sync/condvar.rs
@@ -101,7 +101,7 @@ unsafe impl Sync for CondVar {}
 
 impl CondVar {
     /// Constructs a new condvar initialiser.
-    pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+    pub fn new(name: &'static CStr, key: LockClassKey) -> impl PinInit<Self> {
         pin_init!(Self {
             _pin: PhantomPinned,
             // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index eb80048e0110d2..1a73cdc71335f0 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -5,11 +5,12 @@
 //! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
 //! spinlocks, raw spinlocks) to be provided with minimal effort.
 
-use super::LockClassKey;
+use super::{lockdep::caller_lock_class, LockClassKey};
 use crate::{
     init::PinInit,
     pin_init,
     str::CStr,
+    try_pin_init,
     types::{NotThreadSafe, Opaque, ScopeGuard},
 };
 use core::{cell::UnsafeCell, marker::PhantomPinned};
@@ -117,6 +118,7 @@ pub struct Lock<T: ?Sized, B: Backend> {
     _pin: PhantomPinned,
 
     /// The data protected by the lock.
+    #[pin]
     pub(crate) data: UnsafeCell<T>,
 }
 
@@ -129,7 +131,41 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
 
 impl<T, B: Backend> Lock<T, B> {
     /// Constructs a new lock initialiser.
-    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+    #[track_caller]
+    pub fn new(t: T) -> impl PinInit<Self> {
+        let (key, name) = caller_lock_class();
+        Self::new_with_key(t, name, key)
+    }
+
+    /// Constructs a new lock initialiser taking an initialiser for the inner data.
+    #[track_caller]
+    pub fn pin_init<E>(t: impl PinInit<T, E>) -> impl PinInit<Self, E>
+    where
+        E: core::convert::From<core::convert::Infallible>,
+    {
+        let (key, name) = caller_lock_class();
+        Self::pin_init_with_key(t, name, key)
+    }
+
+    /// Constructs a new lock initialiser using the given name and a lock class
+    /// derived from the caller's location.
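+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch, assuming the lock is heap-pinned with
+    /// `KBox::pin_init` inside a function that returns `Result`:
+    ///
+    /// ```ignore
+    /// use kernel::{c_str, prelude::*, sync::Mutex};
+    ///
+    /// let counter = KBox::pin_init(Mutex::new_named(0u32, c_str!("my_counter")), GFP_KERNEL)?;
+    /// *counter.lock() += 1;
+    /// ```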
+    #[track_caller]
+    pub fn new_named(t: T, name: &'static CStr) -> impl PinInit<Self> {
+        let (key, _) = caller_lock_class();
+        Self::new_with_key(t, name, key)
+    }
+
+    /// Constructs a new lock initialiser taking an initialiser for the inner data,
+    /// using the given name.
+    #[track_caller]
+    pub fn pin_init_named<E>(t: impl PinInit<T, E>, name: &'static CStr) -> impl PinInit<Self, E>
+    where
+        E: core::convert::From<core::convert::Infallible>,
+    {
+        let (key, _) = caller_lock_class();
+        Self::pin_init_with_key(t, name, key)
+    }
+
+    /// Constructs a new lock initialiser given a particular name and lock class key.
+    pub fn new_with_key(t: T, name: &'static CStr, key: LockClassKey) -> impl PinInit<Self> {
         pin_init!(Self {
             data: UnsafeCell::new(t),
             _pin: PhantomPinned,
@@ -140,6 +176,32 @@ impl<T, B: Backend> Lock<T, B> {
             }),
         })
     }
+
+    /// Constructs a new lock initialiser taking an initialiser given a particular
+    /// name and lock class key.
+    pub fn pin_init_with_key<E>(
+        t: impl PinInit<T, E>,
+        name: &'static CStr,
+        key: LockClassKey,
+    ) -> impl PinInit<Self, E>
+    where
+        E: core::convert::From<core::convert::Infallible>,
+    {
+        try_pin_init!(Self {
+            // SAFETY: We are just forwarding the initialization across a
+            // cast away from UnsafeCell, so the pin_init_from_closure and
+            // __pinned_init() requirements are in sync.
+            data <- unsafe { crate::init::pin_init_from_closure(move |slot: *mut UnsafeCell<T>| {
+                t.__pinned_init(slot as *mut T)
+            })},
+            _pin: PhantomPinned,
+            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
+            // static lifetimes so they live indefinitely.
+            state <- Opaque::ffi_init(|slot| unsafe {
+                B::init(slot, name.as_char_ptr(), key.as_ptr())
+            }),
+        }? E)
+    }
 }
 
 impl<B: Backend> Lock<(), B> {
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 70cadbc2e8e23f..7944dfc859397d 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -11,12 +11,25 @@
 #[macro_export]
 macro_rules! new_mutex {
     ($inner:expr $(, $name:literal)? $(,)?) => {
-        $crate::sync::Mutex::new(
+        $crate::sync::Mutex::new_with_key(
             $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
     };
 }
 pub use new_mutex;
 
+/// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class,
+/// given an initialiser for the inner type.
+///
+/// It uses the name if one is given, otherwise it generates one based on the file name and line
+/// number.
+#[macro_export]
+macro_rules! new_mutex_pinned {
+    ($inner:expr $(, $name:literal)? $(,)?) => {
+        $crate::sync::Mutex::pin_init_with_key(
+            $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
+    };
+}
+
 /// A mutual exclusion primitive.
 ///
 /// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex,
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index ab2f8d07531166..87327788591f55 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -11,7 +11,7 @@
 #[macro_export]
 macro_rules! new_spinlock {
     ($inner:expr $(, $name:literal)? $(,)?) => {
-        $crate::sync::SpinLock::new(
+        $crate::sync::SpinLock::new_with_key(
             $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!())
     };
 }
diff --git a/rust/kernel/sync/lockdep.rs b/rust/kernel/sync/lockdep.rs
new file mode 100644
index 00000000000000..6a95e2d8e823c4
--- /dev/null
+++ b/rust/kernel/sync/lockdep.rs
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Lockdep utilities.
+//!
+//! This module abstracts the parts of the kernel lockdep API relevant to Rust
+//! modules, including lock classes.
+
+use crate::{
+    alloc::flags::*,
+    c_str, fmt,
+    init::InPlaceInit,
+    new_mutex,
+    prelude::{KBox, KVec, Result},
+    str::{CStr, CString},
+    sync::Mutex,
+    types::Opaque,
+};
+
+use core::hash::{Hash, Hasher};
+use core::pin::Pin;
+use core::sync::atomic::{AtomicPtr, Ordering};
+
+/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
+#[repr(transparent)]
+pub struct StaticLockClassKey(Opaque<bindings::lock_class_key>);
+
+impl StaticLockClassKey {
+    /// Creates a new lock class key.
+    pub const fn new() -> Self {
+        Self(Opaque::uninit())
+    }
+
+    /// Returns the lock class key reference for this static lock class.
+    pub const fn key(&self) -> LockClassKey {
+        LockClassKey(self.0.get())
+    }
+}
+
+// SAFETY: `bindings::lock_class_key` just represents an opaque memory location, and is never
+// actually dereferenced.
+unsafe impl Sync for StaticLockClassKey {}
+
+/// A reference to a lock class key. This is a raw pointer to a lock_class_key,
+/// which is required to have a static lifetime.
+#[derive(Copy, Clone)]
+pub struct LockClassKey(*mut bindings::lock_class_key);
+
+impl LockClassKey {
+    pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
+        self.0
+    }
+}
+
+// SAFETY: `bindings::lock_class_key` just represents an opaque memory location, and is never
+// actually dereferenced.
+unsafe impl Send for LockClassKey {}
+unsafe impl Sync for LockClassKey {}
+
+// Location is 'static but not really, since module unloads will
+// invalidate existing static Locations within that module.
+// To avoid breakage, we maintain our own location struct which is
+// dynamically allocated on first reference. We store a hash of the
+// whole location (including the filename string), as well as the
+// line and column separately. The assumption is that this whole
+// struct is highly unlikely to ever collide with a reasonable
+// hash (this saves us from having to check the filename string
+// itself).
+#[derive(PartialEq, Debug)]
+struct LocationKey {
+    hash: u64,
+    line: u32,
+    column: u32,
+}
+
+struct DynLockClassKey {
+    key: Opaque<bindings::lock_class_key>,
+    loc: LocationKey,
+    name: CString,
+}
+
+impl LocationKey {
+    fn new(loc: &'static core::panic::Location<'static>) -> Self {
+        let mut hasher = crate::siphash::SipHasher::new();
+        loc.hash(&mut hasher);
+
+        LocationKey {
+            hash: hasher.finish(),
+            line: loc.line(),
+            column: loc.column(),
+        }
+    }
+}
+
+impl DynLockClassKey {
+    fn key(&'static self) -> LockClassKey {
+        LockClassKey(self.key.get())
+    }
+
+    fn name(&'static self) -> &CStr {
+        &self.name
+    }
+}
+
+const LOCK_CLASS_BUCKETS: usize = 1024;
+
+#[track_caller]
+fn caller_lock_class_inner() -> Result<&'static DynLockClassKey> {
+    // This is just a hack to make the below static array initialization work.
+    #[allow(clippy::declare_interior_mutable_const)]
+    const ATOMIC_PTR: AtomicPtr<Mutex<KVec<&'static DynLockClassKey>>> =
+        AtomicPtr::new(core::ptr::null_mut());
+
+    #[allow(clippy::complexity)]
+    static LOCK_CLASSES: [AtomicPtr<Mutex<KVec<&'static DynLockClassKey>>>; LOCK_CLASS_BUCKETS] =
+        [ATOMIC_PTR; LOCK_CLASS_BUCKETS];
+
+    let loc = core::panic::Location::caller();
+    let loc_key = LocationKey::new(loc);
+
+    let index = (loc_key.hash % (LOCK_CLASS_BUCKETS as u64)) as usize;
+    let slot = &LOCK_CLASSES[index];
+
+    let mut ptr = slot.load(Ordering::Relaxed);
+    if ptr.is_null() {
+        let new_element = KBox::pin_init(new_mutex!(KVec::new()), GFP_KERNEL)?;
+
+        // SAFETY: We never move out of this Box
+        let raw = KBox::into_raw(unsafe { Pin::into_inner_unchecked(new_element) });
+
+        if slot
+            .compare_exchange(
+                core::ptr::null_mut(),
+                raw,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_err()
+        {
+            // SAFETY: We just got this pointer from `into_raw()`
+            unsafe { drop(KBox::from_raw(raw)) };
+        }
+
+        ptr = slot.load(Ordering::Relaxed);
+        assert!(!ptr.is_null());
+    }
+
+    // SAFETY: This mutex was either just created above or previously allocated,
+    // and we never free these objects so the pointer is guaranteed to be valid.
+    let mut guard = unsafe { (*ptr).lock() };
+
+    for i in guard.iter() {
+        if i.loc == loc_key {
+            return Ok(i);
+        }
+    }
+
+    // We immediately leak the class, so it becomes 'static
+    let new_class = KBox::leak(KBox::new(
+        DynLockClassKey {
+            key: Opaque::zeroed(),
+            loc: loc_key,
+            name: CString::try_from_fmt(fmt!("{}:{}:{}", loc.file(), loc.line(), loc.column()))?,
+        },
+        GFP_KERNEL,
+    )?);
+
+    // SAFETY: This is safe to call with a pointer to a dynamically allocated lockdep key,
+    // and we never free the objects so it is safe to never unregister the key.
+    unsafe { bindings::lockdep_register_key(new_class.key.get()) };
+
+    guard.push(new_class, GFP_KERNEL)?;
+
+    Ok(new_class)
+}
+
+#[track_caller]
+pub(crate) fn caller_lock_class() -> (LockClassKey, &'static CStr) {
+    match caller_lock_class_inner() {
+        Ok(a) => (a.key(), a.name()),
+        Err(_) => {
+            crate::pr_err!(
+                "Failed to dynamically allocate lock class, lockdep may be unreliable.\n"
+            );
+
+            let loc = core::panic::Location::caller();
+            // SAFETY: LockClassKey is opaque and the lockdep implementation only needs
+            // unique addresses for statically allocated keys, so it is safe to just cast
+            // the Location reference directly into a LockClassKey. However, this will
+            // result in multiple keys for the same callsite due to monomorphization,
+            // as well as spuriously destroyed keys when the static key is allocated in
+            // the wrong module, which is what makes this unreliable.
+            (
+                LockClassKey(loc as *const _ as *mut _),
+                c_str!("fallback_lock_class"),
+            )
+        }
+    }
+}
+
+pub(crate) struct LockdepMap(Opaque<bindings::lockdep_map>);
+pub(crate) struct LockdepGuard<'a>(&'a LockdepMap);
+
+#[allow(dead_code)]
+impl LockdepMap {
+    #[track_caller]
+    pub(crate) fn new() -> Self {
+        let map = Opaque::uninit();
+        let (key, name) = caller_lock_class();
+
+        // SAFETY: Just calling the C API
+        unsafe {
+            bindings::lockdep_init_map_type(
+                map.get(),
+                name.as_char_ptr(),
+                key.as_ptr(),
+                0,
+                bindings::lockdep_wait_type_LD_WAIT_INV as _,
+                bindings::lockdep_wait_type_LD_WAIT_INV as _,
+                bindings::lockdep_lock_type_LD_LOCK_NORMAL as _,
+            )
+        };
+
+        LockdepMap(map)
+    }
+
+    #[inline(always)]
+    pub(crate) fn lock(&self) -> LockdepGuard<'_> {
+        // SAFETY: Just calling the C API
+        unsafe { bindings::lock_acquire_ret(self.0.get(), 0, 0, 1, 1, core::ptr::null_mut()) };
+
+        LockdepGuard(self)
+    }
+}
+
+impl<'a> Drop for LockdepGuard<'a> {
+    #[inline(always)]
+    fn drop(&mut self) {
+        // SAFETY: Just calling the C API
+        unsafe { bindings::lock_release_ret(self.0 .0.get()) };
+    }
+}
diff --git a/rust/kernel/sync/no_lockdep.rs b/rust/kernel/sync/no_lockdep.rs
new file mode 100644
index 00000000000000..de53c4de7fbe53
--- /dev/null
+++ b/rust/kernel/sync/no_lockdep.rs
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Dummy lockdep utilities.
+//!
+//! Takes the place of the `lockdep` module when lockdep is disabled.
+
+use crate::{c_str, str::CStr};
+
+/// A dummy, zero-sized lock class.
+pub struct StaticLockClassKey();
+
+impl StaticLockClassKey {
+    /// Creates a new dummy lock class key.
+    pub const fn new() -> Self {
+        Self()
+    }
+
+    /// Returns the lock class key reference for this static lock class.
+    pub const fn key(&self) -> LockClassKey {
+        LockClassKey()
+    }
+}
+
+/// A dummy reference to a lock class key.
+#[derive(Copy, Clone)]
+pub struct LockClassKey();
+
+impl LockClassKey {
+    pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
+        core::ptr::null_mut()
+    }
+}
+
+pub(crate) fn caller_lock_class() -> (LockClassKey, &'static CStr) {
+    static DUMMY_LOCK_CLASS: StaticLockClassKey = StaticLockClassKey::new();
+
+    (DUMMY_LOCK_CLASS.key(), c_str!("dummy"))
+}
diff --git a/rust/kernel/sync/poll.rs b/rust/kernel/sync/poll.rs
index d5f17153b42446..5aae7ac1bfbd0a 100644
--- a/rust/kernel/sync/poll.rs
+++ b/rust/kernel/sync/poll.rs
@@ -89,7 +89,7 @@ pub struct PollCondVar {
 
 impl PollCondVar {
     /// Constructs a new condvar initialiser.
-    pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+    pub fn new(name: &'static CStr, key: LockClassKey) -> impl PinInit<Self> {
         pin_init!(Self {
             inner <- CondVar::new(name, key),
         })
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index 379c0f5772e575..8216a392a49230 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -1,16 +1,20 @@
 // SPDX-License-Identifier: GPL-2.0
 
-//! Time related primitives.
+//! Time related primitives and functions.
 //!
 //! This module contains the kernel APIs related to time and timers that
 //! have been ported or wrapped for usage by Rust code in the kernel.
 //!
 //! C header: [`include/linux/jiffies.h`](srctree/include/linux/jiffies.h).
 //! C header: [`include/linux/ktime.h`](srctree/include/linux/ktime.h).
+//! C header: [`include/linux/timekeeping.h`](srctree/include/linux/timekeeping.h)
 
 /// The number of nanoseconds per millisecond.
 pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64;
 
+/// The number of nanoseconds per second.
+pub const NSEC_PER_SEC: i64 = bindings::NSEC_PER_SEC as i64;
+
 /// The time unit of Linux kernel. One jiffy equals (1/HZ) second.
 pub type Jiffies = crate::ffi::c_ulong;
 
@@ -81,3 +85,151 @@ impl core::ops::Sub for Ktime {
         }
     }
 }
+
+use crate::{bindings, pr_err};
+use core::marker::PhantomData;
+use core::time::Duration;
+
+/// Represents a clock, that is, a unique time source.
+pub trait Clock: Sized {}
+
+/// A time source that can be queried for the current time.
+pub trait Now: Clock {
+    /// Returns the current time for this clock.
+    fn now() -> Instant<Self>;
+}
+
+/// Marker trait for clock sources that are guaranteed to be monotonic.
+pub trait Monotonic {}
+
+/// Marker trait for clock sources that represent a calendar (wall clock)
+/// relative to the UNIX epoch.
+pub trait WallTime {}
+
+/// An instant in time associated with a given clock source.
+#[derive(Debug)]
+pub struct Instant<T: Clock> {
+    nanoseconds: i64,
+    _type: PhantomData<T>,
+}
+
+impl<T: Clock> Clone for Instant<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T: Clock> Copy for Instant<T> {}
+
+impl<T: Clock> Instant<T> {
+    fn new(nanoseconds: i64) -> Self {
+        Instant {
+            nanoseconds,
+            _type: PhantomData,
+        }
+    }
+
+    /// Returns the time elapsed since an earlier `Instant<T>`, or
+    /// `None` if the argument refers to a later point in time.
+    pub fn since(&self, earlier: Instant<T>) -> Option<Duration> {
+        if earlier.nanoseconds > self.nanoseconds {
+            None
+        } else {
+            // Casting to u64 and subtracting is guaranteed to give the right
+            // result for all inputs, as long as the condition we checked above
+            // holds.
+            Some(Duration::from_nanos(
+                self.nanoseconds as u64 - earlier.nanoseconds as u64,
+            ))
+        }
+    }
+}
+
+impl<T: Clock + Now + Monotonic> Instant<T> {
+    /// Returns the time elapsed since this `Instant<T>`.
+    ///
+    /// This is guaranteed to return a positive result, since
+    /// it is only implemented for monotonic clocks.
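+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch of timing a section of code against the
+    /// monotonic kernel clock (the measured work is a placeholder):
+    ///
+    /// ```ignore
+    /// use kernel::time::{clock::KernelTime, Now};
+    ///
+    /// let start = KernelTime::now();
+    /// // ... do some work ...
+    /// let elapsed: core::time::Duration = start.elapsed();
+    /// ```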
+    pub fn elapsed(&self) -> Duration {
+        T::now().since(*self).unwrap_or_else(|| {
+            pr_err!(
+                "Monotonic clock {} went backwards!",
+                core::any::type_name::<T>()
+            );
+            Duration::ZERO
+        })
+    }
+}
+
+/// Contains the various clock source types available to the kernel.
+pub mod clock {
+    use super::*;
+
+    /// A clock representing the default kernel time source.
+    ///
+    /// This is `CLOCK_MONOTONIC` (though it is not the only
+    /// monotonic clock) and also the default clock used by
+    /// `ktime_get()` in the C API.
+    ///
+    /// This is like `BootTime`, but does not include time
+    /// spent sleeping.
+    pub struct KernelTime;
+
+    impl Clock for KernelTime {}
+    impl Monotonic for KernelTime {}
+    impl Now for KernelTime {
+        fn now() -> Instant<Self> {
+            // SAFETY: Always safe to call
+            Instant::<Self>::new(unsafe { bindings::ktime_get() })
+        }
+    }
+
+    /// A clock representing the time elapsed since boot.
+    ///
+    /// This is `CLOCK_BOOTTIME`, a monotonic clock that corresponds to
+    /// `ktime_get_boottime()` in the C API.
+    ///
+    /// This is like `KernelTime`, but does include time
+    /// spent sleeping.
+    pub struct BootTime;
+
+    impl Clock for BootTime {}
+    impl Monotonic for BootTime {}
+    impl Now for BootTime {
+        fn now() -> Instant<Self> {
+            // SAFETY: Always safe to call
+            Instant::<Self>::new(unsafe { bindings::ktime_get_boottime() })
+        }
+    }
+
+    /// A clock representing TAI time.
+    ///
+    /// This clock is not monotonic and can be changed from userspace.
+    /// However, it is not affected by leap seconds.
+    pub struct TaiTime;
+
+    impl Clock for TaiTime {}
+    impl WallTime for TaiTime {}
+    impl Now for TaiTime {
+        fn now() -> Instant<Self> {
+            // SAFETY: Always safe to call
+            Instant::<Self>::new(unsafe { bindings::ktime_get_clocktai() })
+        }
+    }
+
+    /// A clock representing wall clock time.
+    ///
+    /// This clock is not monotonic and can be changed from userspace.
+    pub struct RealTime;
+
+    impl Clock for RealTime {}
+    impl WallTime for RealTime {}
+    impl Now for RealTime {
+        fn now() -> Instant<Self> {
+            // SAFETY: Always safe to call
+            Instant::<Self>::new(unsafe { bindings::ktime_get_real() })
+        }
+    }
+}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 2bbaab83b9d65d..15ffd1cf84890b 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -326,6 +326,14 @@ impl<T> Opaque<T> {
         }
     }
 
+    /// Creates a zeroed value.
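+    ///
+    /// # Examples
+    ///
+    /// A trivial illustrative construction:
+    ///
+    /// ```
+    /// use kernel::types::Opaque;
+    ///
+    /// // All bytes of the wrapped `u32` start out as zero.
+    /// let _val = Opaque::<u32>::zeroed();
+    /// ```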
+    pub fn zeroed() -> Self {
+        Self {
+            value: UnsafeCell::new(MaybeUninit::zeroed()),
+            _pin: PhantomPinned,
+        }
+    }
+
     /// Create an opaque pin-initializer from the given pin-initializer.
     pub fn pin_init(slot: impl PinInit<T>) -> impl PinInit<Self> {
         Self::ffi_init(|ptr: *mut T| {
@@ -535,6 +543,114 @@ impl<T: AlwaysRefCounted> Drop for ARef<T> {
     }
 }
 
+/// Types that may be owned by Rust code or borrowed, but have a lifetime managed by C code.
+///
+/// It allows such types to define their own custom destructor function to be called when
+/// a Rust-owned reference is dropped.
+///
+/// This is usually implemented by wrappers to existing structures on the C side of the code.
+///
+/// # Safety
+///
+/// Implementers must ensure that any objects borrowed directly stay alive for the duration
+/// of the borrow lifetime, and that any objects deemed owned by Rust stay alive while
+/// that owned reference exists, until the [`Ownable::release()`] function is called.
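+///
+/// # Examples
+///
+/// A minimal sketch of wrapping a hypothetical C object `struct foo` that is released
+/// with `foo_put()`; both names are placeholders, not real kernel APIs:
+///
+/// ```ignore
+/// use core::ptr::NonNull;
+/// use kernel::types::{Opaque, Ownable};
+///
+/// #[repr(transparent)]
+/// struct Foo(Opaque<bindings::foo>);
+///
+/// // SAFETY: A `Foo` is only handed out while the underlying object is alive, and
+/// // `release()` drops the reference that Rust owned.
+/// unsafe impl Ownable for Foo {
+///     unsafe fn release(this: NonNull<Self>) {
+///         // SAFETY: The caller guarantees the object is no longer referenced.
+///         unsafe { bindings::foo_put(this.as_ptr().cast()) };
+///     }
+/// }
+/// ```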
+pub unsafe trait Ownable {
+    /// Releases the object (frees it or returns it to foreign ownership).
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the object is no longer referenced after this call.
+    unsafe fn release(this: NonNull<Self>);
+}
+
+/// A subtrait of [`Ownable`] that asserts that an `Owned<T>` Rust reference is not only
+/// unique within Rust, but also follows the same rules in kernel C code. That is, the
+/// kernel will never mutate the contents of the object while Rust owns it.
+///
+/// When this trait is implemented for an [`Ownable`] type, it allows an `Owned<T>` to be
+/// dereferenced into a `&mut T`.
+///
+/// # Safety
+///
+/// Implementers must ensure that the kernel never mutates the underlying type while
+/// Rust owns it.
+pub unsafe trait OwnableMut: Ownable {}
+
+/// An owned reference to an ownable kernel object.
+///
+/// The object is automatically freed or released when an instance of [`Owned`] is
+/// dropped.
+///
+/// # Invariants
+///
+/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`Owned`] instance.
+pub struct Owned<T: Ownable> {
+    ptr: NonNull<T>,
+    _p: PhantomData<T>,
+}
+
+// SAFETY: It is safe to send `Owned<T>` to another thread when the underlying `T` is `Send` because
+// it effectively means sharing `&mut T` (which is safe because `T` is `Send`).
+unsafe impl<T: Ownable + Send> Send for Owned<T> {}
+
+// SAFETY: It is safe to send `&Owned<T>` to another thread when the underlying `T` is `Sync`
+// because it effectively means sharing `&T` (which is safe because `T` is `Sync`).
+unsafe impl<T: Ownable + Sync> Sync for Owned<T> {}
+
+impl<T: Ownable> Owned<T> {
+    /// Creates a new instance of [`Owned`].
+    ///
+    /// It takes over ownership of the underlying object.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the underlying object is acquired and can be considered owned by
+    /// Rust.
+    pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+        // INVARIANT: The safety requirements guarantee that the new instance now owns the
+        // reference.
+        Self {
+            ptr,
+            _p: PhantomData,
+        }
+    }
+
+    /// Consumes the `Owned`, returning a raw pointer.
+    ///
+    /// This function does not release the object; after calling it, the caller is
+    /// responsible for the ownership previously managed by the [`Owned`].
+    pub fn into_raw(me: Self) -> NonNull<T> {
+        ManuallyDrop::new(me).ptr
+    }
+}
+
+impl<T: Ownable> Deref for Owned<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: The type invariants guarantee that the object is valid.
+        unsafe { self.ptr.as_ref() }
+    }
+}
+
+impl<T: Ownable + OwnableMut> DerefMut for Owned<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        // SAFETY: The type invariants guarantee that the object is valid,
+        // and that we can safely return a mutable reference to it.
+        unsafe { self.ptr.as_mut() }
+    }
+}
+
+impl<T: Ownable> Drop for Owned<T> {
+    fn drop(&mut self) {
+        // SAFETY: The type invariants guarantee that the `Owned` owns the object we're about to
+        // release.
+        unsafe { T::release(self.ptr) };
+    }
+}
+
 /// A sum type that always holds either a value of type `L` or `R`.
 ///
 /// # Examples
@@ -573,3 +689,86 @@ pub type NotThreadSafe = PhantomData<*mut ()>;
 /// [`NotThreadSafe`]: type@NotThreadSafe
 #[allow(non_upper_case_globals)]
 pub const NotThreadSafe: NotThreadSafe = PhantomData;
+
+/// Helper macro to declare a bitfield style type. The type will automatically
+/// gain bitwise operator implementations (`BitOr`, `BitAnd`, `Not`), as well as the
+/// `as_raw()` and `contains()` methods and `Debug`, `Copy`, `Clone`, and `PartialEq`
+/// implementations.
+///
+/// Optionally, a default value can be specified with `= value` syntax, which
+/// will add a Default trait implementation.
+///
+/// # Examples
+///
+/// ```
+/// declare_flags_type! {
+///     /// Flags to be used for foo.
+///     pub struct FooFlags(u32);
+/// }
+///
+/// declare_flags_type! {
+///     /// Flags to be used for bar.
+///     pub struct BarFlags(u32) = 0;
+/// }
+/// ```
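+///
+/// The generated helpers and operators can then be used roughly as follows (a sketch; the
+/// tuple field is private, so constructing values like this only works in the defining module):
+///
+/// ```ignore
+/// let combined = FooFlags(0b01) | FooFlags(0b10);
+/// assert!(combined.contains(FooFlags(0b01)));
+/// assert_eq!(combined.as_raw(), 0b11);
+/// assert_eq!(BarFlags::default().as_raw(), 0);
+/// ```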
+macro_rules! declare_flags_type (
+    (
+        $(#[$outer:meta])*
+        $v:vis struct $t:ident ( $base:ty );
+        $($rest:tt)*
+    ) => {
+        $(#[$outer])*
+        #[derive(Debug, Clone, Copy, PartialEq)]
+        $v struct $t($base);
+
+        impl $t {
+            /// Get the raw representation of this flag.
+            pub(crate) fn as_raw(self) -> $base {
+                self.0
+            }
+
+            /// Check whether `flags` is contained in `self`.
+            pub fn contains(self, flags: Self) -> bool {
+                (self & flags) == flags
+            }
+        }
+
+        impl core::ops::BitOr for $t {
+            type Output = Self;
+            fn bitor(self, rhs: Self) -> Self::Output {
+                Self(self.0 | rhs.0)
+            }
+        }
+
+        impl core::ops::BitAnd for $t {
+            type Output = Self;
+            fn bitand(self, rhs: Self) -> Self::Output {
+                Self(self.0 & rhs.0)
+            }
+        }
+
+        impl core::ops::Not for $t {
+            type Output = Self;
+            fn not(self) -> Self::Output {
+                Self(!self.0)
+            }
+        }
+    };
+    (
+        $(#[$outer:meta])*
+        $v:vis struct $t:ident ( $base:ty ) = $default:expr;
+        $($rest:tt)*
+    ) => {
+        declare_flags_type! {
+            $(#[$outer])*
+            $v struct $t ($base);
+            $($rest)*
+        }
+        impl Default for $t {
+            fn default() -> Self {
+                Self($default)
+            }
+        }
+    };
+);
+
+pub(crate) use declare_flags_type;
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index b7be224cdf4bbb..f4b62645fb1872 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -1,4 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
+// FIXME
+#![allow(clippy::undocumented_unsafe_blocks)]
 
 //! Work queues.
 //!
@@ -369,7 +371,7 @@ unsafe impl<T: ?Sized, const ID: u64> Sync for Work<T, ID> {}
 impl<T: ?Sized, const ID: u64> Work<T, ID> {
     /// Creates a new instance of [`Work`].
     #[inline]
-    pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self>
+    pub fn new(name: &'static CStr, key: LockClassKey) -> impl PinInit<Self>
     where
         T: WorkItem<ID>,
     {
diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs
new file mode 100644
index 00000000000000..703b5237672dfb
--- /dev/null
+++ b/rust/kernel/xarray.rs
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! XArray abstraction.
+//!
+//! C header: [`include/linux/xarray.h`](srctree/include/linux/xarray.h)
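+//!
+//! # Examples
+//!
+//! A minimal usage sketch (assuming a pinned `XArray<KBox<u32>>` reachable as
+//! `xa: Pin<&XArray<KBox<u32>>>`, in a context where allocation may sleep):
+//!
+//! ```ignore
+//! // Create an allocating array; it must then be pinned by the caller to obtain `xa`.
+//! let array = XArray::<KBox<u32>>::new(flags::ALLOC);
+//! // ...
+//!
+//! // Store a value at a newly allocated index.
+//! let index = xa.alloc(KBox::new(42, GFP_KERNEL)?)?;
+//!
+//! // Look it up again; the `Guard` holds the `XArray` lock, so drop it promptly.
+//! if let Some(guard) = xa.get(index) {
+//!     pr_info!("stored: {}\n", *guard.borrow());
+//! }
+//!
+//! // Remove the entry, regaining ownership of the boxed value.
+//! assert_eq!(xa.remove(index).as_deref(), Some(&42));
+//! ```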
+
+use crate::{
+    bindings,
+    error::{Error, Result},
+    types::{ForeignOwnable, Opaque, ScopeGuard},
+};
+use core::{
+    marker::{PhantomData, PhantomPinned},
+    pin::Pin,
+    ptr::NonNull,
+};
+
+/// Flags passed to `XArray::new` to configure the `XArray`.
+type Flags = bindings::gfp_t;
+
+/// Flag values passed to `XArray::new` to configure the `XArray`.
+pub mod flags {
+    /// Use IRQ-safe locking.
+    pub const LOCK_IRQ: super::Flags = bindings::BINDINGS_XA_FLAGS_LOCK_IRQ;
+    /// Use softirq-safe locking.
+    pub const LOCK_BH: super::Flags = bindings::BINDINGS_XA_FLAGS_LOCK_BH;
+    /// Track which entries are free (distinct from None).
+    pub const TRACK_FREE: super::Flags = bindings::BINDINGS_XA_FLAGS_TRACK_FREE;
+    /// Initialize array index 0 as busy.
+    pub const ZERO_BUSY: super::Flags = bindings::BINDINGS_XA_FLAGS_ZERO_BUSY;
+    /// Use GFP_ACCOUNT for internal memory allocations.
+    pub const ACCOUNT: super::Flags = bindings::BINDINGS_XA_FLAGS_ACCOUNT;
+    /// Create an allocating `XArray` starting at index 0.
+    pub const ALLOC: super::Flags = bindings::BINDINGS_XA_FLAGS_ALLOC;
+    /// Create an allocating `XArray` starting at index 1.
+    pub const ALLOC1: super::Flags = bindings::BINDINGS_XA_FLAGS_ALLOC1;
+}
+
+/// Wrapper for a value owned by the `XArray` which holds the `XArray` lock until dropped.
+pub struct Guard<'a, T: ForeignOwnable>(NonNull<T>, Pin<&'a XArray<T>>);
+
+impl<'a, T: ForeignOwnable> Guard<'a, T> {
+    /// Borrow the underlying value wrapped by the `Guard`.
+    ///
+    /// Returns a `T::Borrowed` type for the owned `ForeignOwnable` type.
+    pub fn borrow(&self) -> T::Borrowed<'_> {
+        // SAFETY: The value is owned by the `XArray`, and the borrow cannot outlive this
+        // `Guard`. The `Guard` holds the `XArray` lock, which ensures that the value remains
+        // in the `XArray` for the duration of the borrow.
+        unsafe { T::borrow(self.0.as_ptr() as _) }
+    }
+}
+
+impl<'a, T: ForeignOwnable> Drop for Guard<'a, T> {
+    fn drop(&mut self) {
+        // SAFETY: The XArray we have a reference to owns the C xarray object.
+        unsafe { bindings::xa_unlock(self.1.xa.get()) };
+    }
+}
+
+/// Represents a reserved slot in an `XArray`, which does not yet have a value but has an assigned
+/// index and may not be allocated by any other user. If the `Reservation` is dropped without
+/// being filled, the entry is marked as available again.
+///
+/// Users must ensure that reserved slots are not filled by other mechanisms; otherwise their
+/// contents may be dropped and replaced (which will print a warning).
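+///
+/// A typical flow looks roughly like this (a sketch; `xa` is a pinned allocating `XArray`
+/// and `value` is the object to store):
+///
+/// ```ignore
+/// let resv = xa.reserve()?;
+/// let index = resv.index();
+/// // ... set up whatever needs to know `index` before the value becomes visible ...
+/// resv.store(value)?;
+/// ```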
+pub struct Reservation<'a, T: ForeignOwnable>(Pin<&'a XArray<T>>, usize, PhantomData<T>);
+
+impl<'a, T: ForeignOwnable> Reservation<'a, T> {
+    /// Stores a value into the reserved slot.
+    pub fn store(self, value: T) -> Result<usize> {
+        if self.0.replace(self.1, value)?.is_some() {
+            crate::pr_err!("XArray: Reservation stored but the entry already had data!\n");
+            // Consider it a success anyway, not much we can do
+        }
+        let index = self.1;
+        // The reservation is now fulfilled, so do not run our destructor.
+        core::mem::forget(self);
+        Ok(index)
+    }
+
+    /// Returns the index of this reservation.
+    pub fn index(&self) -> usize {
+        self.1
+    }
+}
+
+impl<'a, T: ForeignOwnable> Drop for Reservation<'a, T> {
+    fn drop(&mut self) {
+        if self.0.remove(self.1).is_some() {
+            crate::pr_err!("XArray: Reservation dropped but the entry was not empty!\n");
+        }
+    }
+}
+
+/// An array which efficiently maps sparse integer indices to owned objects.
+///
+/// This is similar to a `Vec<Option<T>>`, but more efficient when there are holes in the
+/// index space, and can be efficiently grown.
+///
+/// This structure is expected to often be used with an inner type that can be efficiently
+/// cloned, such as an `Arc<T>`.
+pub struct XArray<T: ForeignOwnable> {
+    xa: Opaque<bindings::xarray>,
+    _p: PhantomData<T>,
+    _q: PhantomPinned,
+}
+
+impl<T: ForeignOwnable> XArray<T> {
+    /// The maximum supported index.
+    pub const MAX: usize = core::ffi::c_ulong::MAX as usize;
+
+    /// Creates a new `XArray` with the given flags.
+    pub fn new(flags: Flags) -> XArray<T> {
+        let xa = Opaque::uninit();
+
+        // SAFETY: We have just created `xa`. This data structure does not require
+        // pinning.
+        unsafe { bindings::xa_init_flags(xa.get(), flags) };
+
+        // INVARIANT: Initialize the `XArray` with a valid `xa`.
+        XArray {
+            xa,
+            _p: PhantomData,
+            _q: PhantomPinned,
+        }
+    }
+
+    /// Replaces an entry with a new value, returning the old value (if any).
+    pub fn replace(self: Pin<&Self>, index: usize, value: T) -> Result<Option<T>> {
+        let new = value.into_foreign();
+        // SAFETY: `new` just came from into_foreign(), and we dismiss this guard if
+        // the xa_store operation succeeds and takes ownership of the pointer.
+        let guard = ScopeGuard::new(|| unsafe {
+            T::from_foreign(new);
+        });
+
+        // SAFETY: `self.xa` is always valid by the type invariant, and we are storing a
+        // `T::into_foreign()` result, which upholds the invariant that non-NULL entries are
+        // valid `ForeignOwnable` pointers.
+        let old = unsafe {
+            bindings::xa_store(
+                self.xa.get(),
+                index.try_into()?,
+                new as *mut _,
+                bindings::GFP_KERNEL,
+            )
+        };
+
+        // SAFETY: `xa_err` is safe to call on any pointer
+        let ret = unsafe { bindings::xa_err(old) };
+        if ret != 0 {
+            Err(Error::from_errno(ret))
+        } else if old.is_null() {
+            guard.dismiss();
+            Ok(None)
+        } else {
+            guard.dismiss();
+            // SAFETY: The old value must have been stored by either this function or
+            // `alloc_limits_opt`, both of which ensure non-NULL entries are valid
+            // ForeignOwnable pointers.
+            Ok(Some(unsafe { T::from_foreign(old) }))
+        }
+    }
+
+    /// Replaces an entry with a new value, dropping the old value (if any).
+    pub fn set(self: Pin<&Self>, index: usize, value: T) -> Result {
+        self.replace(index, value)?;
+        Ok(())
+    }
+
+    /// Looks up and returns a reference to an entry in the array, returning a `Guard` if it
+    /// exists.
+    ///
+    /// This guard blocks all other actions on the `XArray`. Callers are expected to drop the
+    /// `Guard` eagerly to avoid blocking other users, such as by taking a clone of the value.
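+    ///
+    /// For example (a sketch with `T = KBox<u32>`, so that `borrow()` yields a `&u32`):
+    ///
+    /// ```ignore
+    /// let copy = xa.get(index).map(|guard| *guard.borrow());
+    /// // The `Guard` (and with it the `XArray` lock) is dropped here; `copy` is freely usable.
+    /// ```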
+    pub fn get(self: Pin<&Self>, index: usize) -> Option<Guard<'_, T>> {
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        unsafe { bindings::xa_lock(self.xa.get()) };
+
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        let guard = ScopeGuard::new(|| unsafe { bindings::xa_unlock(self.xa.get()) });
+
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        let p = unsafe { bindings::xa_load(self.xa.get(), index.try_into().ok()?) };
+
+        NonNull::new(p as *mut T).map(|p| {
+            guard.dismiss();
+            Guard(p, self)
+        })
+    }
+
+    /// Looks up and returns a reference to the lowest entry in the array between index and max,
+    /// returning a tuple of its index and a `Guard` if one exists.
+    ///
+    /// This guard blocks all other actions on the `XArray`. Callers are expected to drop the
+    /// `Guard` eagerly to avoid blocking other users, such as by taking a clone of the value.
+    pub fn find(self: Pin<&Self>, index: usize, max: usize) -> Option<(usize, Guard<'_, T>)> {
+        let mut index: usize = index;
+
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        unsafe { bindings::xa_lock(self.xa.get()) };
+
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        let guard = ScopeGuard::new(|| unsafe { bindings::xa_unlock(self.xa.get()) });
+
+        // SAFETY: `self.xa` is always valid by the type invariant.
+        let p = unsafe {
+            bindings::xa_find(
+                self.xa.get(),
+                &mut index,
+                max.try_into().ok()?,
+                bindings::BINDINGS_XA_PRESENT,
+            )
+        };
+
+        NonNull::new(p as *mut T).map(|p| {
+            guard.dismiss();
+            (index, Guard(p, self))
+        })
+    }
+
+    /// Removes and returns an entry, returning it if it existed.
+    pub fn remove(self: Pin<&Self>, index: usize) -> Option<T> {
+        // SAFETY: `self.xa` is always valid and pinned by the type invariant.
+        let p = unsafe { bindings::xa_erase(self.xa.get(), index.try_into().ok()?) };
+        if p.is_null() {
+            None
+        } else {
+            // SAFETY: Non-NULL pointers stored in the `XArray` are always valid
+            // `T::into_foreign()` results.
+            Some(unsafe { T::from_foreign(p) })
+        }
+    }
+
+    /// Allocates a new index in the array, optionally storing a new value into it, with
+    /// configurable bounds for the index range to allocate from.
+    ///
+    /// If `value` is `None`, the index is reserved: it will not be handed out by further
+    /// allocations, but no value is stored and one can still be stored into it later.
+    fn alloc_limits_opt(self: Pin<&Self>, value: Option<T>, min: u32, max: u32) -> Result<usize> {
+        let new = value.map_or(core::ptr::null(), |a| a.into_foreign());
+        let mut id: u32 = 0;
+
+        // SAFETY: `self.xa` is always valid by the type invariant. If this succeeds, it
+        // takes ownership of the passed `T` (if any). If it fails, we must drop the
+        // `T` again.
+        let ret = unsafe {
+            bindings::xa_alloc(
+                self.xa.get(),
+                &mut id,
+                new as *mut _,
+                bindings::xa_limit { min, max },
+                bindings::GFP_KERNEL,
+            )
+        };
+
+        if ret < 0 {
+            // Make sure to drop the value we failed to store
+            if !new.is_null() {
+                // SAFETY: If `new` is not NULL, it came from the `ForeignOwnable` we got
+                // from the caller.
+                unsafe { T::from_foreign(new as *mut _) };
+            }
+            Err(Error::from_errno(ret))
+        } else {
+            Ok(id as usize)
+        }
+    }
+
+    /// Allocates a new index in the array, storing a new value into it, with configurable
+    /// bounds for the index range to allocate from.
+    pub fn alloc_limits(self: Pin<&Self>, value: T, min: u32, max: u32) -> Result<usize> {
+        self.alloc_limits_opt(Some(value), min, max)
+    }
+
+    /// Allocates a new index in the array, storing a new value into it.
+    pub fn alloc(self: Pin<&Self>, value: T) -> Result<usize> {
+        self.alloc_limits(value, 0, u32::MAX)
+    }
+
+    /// Reserves a new index in the array within configurable bounds for the index.
+    ///
+    /// Returns a `Reservation` object, which can then be used to store a value at this index or
+    /// otherwise free it for reuse.
+    pub fn reserve_limits(self: Pin<&Self>, min: u32, max: u32) -> Result<Reservation<'_, T>> {
+        Ok(Reservation(
+            self,
+            self.alloc_limits_opt(None, min, max)?,
+            PhantomData,
+        ))
+    }
+
+    /// Reserves a new index in the array.
+    ///
+    /// Returns a `Reservation` object, which can then be used to store a value at this index or
+    /// otherwise free it for reuse.
+    pub fn reserve(self: Pin<&Self>) -> Result<Reservation<'_, T>> {
+        Ok(Reservation(
+            self,
+            self.alloc_limits_opt(None, 0, u32::MAX)?,
+            PhantomData,
+        ))
+    }
+}
+
+impl<T: ForeignOwnable> Drop for XArray<T> {
+    fn drop(&mut self) {
+        // SAFETY: `self.xa` is valid by the type invariant, and as we have the only reference to
+        // the `XArray` we can safely iterate its contents and drop everything.
+        unsafe {
+            let mut index: usize = 0;
+            let mut entry = bindings::xa_find(
+                self.xa.get(),
+                &mut index,
+                usize::MAX,
+                bindings::BINDINGS_XA_PRESENT,
+            );
+            while !entry.is_null() {
+                T::from_foreign(entry);
+                entry = bindings::xa_find_after(
+                    self.xa.get(),
+                    &mut index,
+                    usize::MAX,
+                    bindings::BINDINGS_XA_PRESENT,
+                );
+            }
+
+            // Locked locks are not safe to drop. Normally we would want to try_lock()/unlock()
+            // here for safety or something similar, but in this case xa_destroy() is guaranteed
+            // to acquire the lock anyway. This will deadlock if a `Guard` was leaked without
+            // running its destructor, but that is not UB, so it is sufficient for soundness.
+            bindings::xa_destroy(self.xa.get());
+        }
+    }
+}
+
+// SAFETY: XArray is thread-safe and all mutation operations are internally locked.
+unsafe impl<T: Send + ForeignOwnable> Send for XArray<T> {}
+// SAFETY: XArray is thread-safe and all mutation operations are internally locked.
+unsafe impl<T: Sync + ForeignOwnable> Sync for XArray<T> {}
diff --git a/rust/macros/export.rs b/rust/macros/export.rs
new file mode 100644
index 00000000000000..a08f6337d5c8dc
--- /dev/null
+++ b/rust/macros/export.rs
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::helpers::function_name;
+use proc_macro::TokenStream;
+
+/// Please see [`crate::export`] for documentation.
+pub(crate) fn export(_attr: TokenStream, ts: TokenStream) -> TokenStream {
+    let Some(name) = function_name(ts.clone()) else {
+        return "::core::compile_error!(\"The #[export] attribute must be used on a function.\");"
+            .parse::<TokenStream>()
+            .unwrap();
+    };
+
+    // This verifies that the function has the same signature as the declaration generated by
+    // bindgen. It makes use of the fact that all branches of an if/else must have the same type.
+    let signature_check = quote!(
+        const _: () = {
+            if true {
+                ::kernel::bindings::#name
+            } else {
+                #name
+            };
+        };
+    );
+
+    let no_mangle = quote!(#[no_mangle]);
+
+    TokenStream::from_iter([signature_check, no_mangle, ts])
+}
diff --git a/rust/macros/helpers.rs b/rust/macros/helpers.rs
index 563dcd2b7ace5e..26e29ae6591560 100644
--- a/rust/macros/helpers.rs
+++ b/rust/macros/helpers.rs
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
-use proc_macro::{token_stream, Group, TokenStream, TokenTree};
+use proc_macro::{token_stream, Group, Ident, TokenStream, TokenTree};
 
 pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
     if let Some(TokenTree::Ident(ident)) = it.next() {
@@ -10,6 +10,17 @@ pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
     }
 }
 
+pub(crate) fn try_sign(it: &mut token_stream::IntoIter) -> Option<char> {
+    let peek = it.clone().next();
+    match peek {
+        Some(TokenTree::Punct(punct)) if punct.as_char() == '-' => {
+            let _ = it.next();
+            Some(punct.as_char())
+        }
+        _ => None,
+    }
+}
+
 pub(crate) fn try_literal(it: &mut token_stream::IntoIter) -> Option<String> {
     if let Some(TokenTree::Literal(literal)) = it.next() {
         Some(literal.to_string())
@@ -215,3 +226,34 @@ pub(crate) fn parse_generics(input: TokenStream) -> (Generics, Vec<TokenTree>) {
         rest,
     )
 }
+
+/// Given a function declaration, finds the name of the function.
+pub(crate) fn function_name(input: TokenStream) -> Option<Ident> {
+    let mut input = input.into_iter();
+    while let Some(token) = input.next() {
+        match token {
+            TokenTree::Ident(i) if i.to_string() == "fn" => {
+                if let Some(TokenTree::Ident(i)) = input.next() {
+                    return Some(i);
+                }
+                return None;
+            }
+            _ => continue,
+        }
+    }
+    None
+}
+
+/// Parse a token stream of the form `expected_name: "value",` and return the
+/// string in the position of "value".
+///
+/// # Panics
+///
+/// - On parse error.
+pub(crate) fn expect_string_field(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+    assert_eq!(expect_ident(it), expected_name);
+    assert_eq!(expect_punct(it), ':');
+    let string = expect_string(it);
+    assert_eq!(expect_punct(it), ',');
+    string
+}
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index d61bc6a56425e9..ff6f7673d4b297 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -9,11 +9,13 @@
 #[macro_use]
 mod quote;
 mod concat_idents;
+mod export;
 mod helpers;
 mod module;
 mod paste;
 mod pin_data;
 mod pinned_drop;
+mod versions;
 mod vtable;
 mod zeroable;
 
@@ -24,6 +26,30 @@ use proc_macro::TokenStream;
 /// The `type` argument should be a type which implements the [`Module`]
 /// trait. Also accepts various forms of kernel metadata.
 ///
+/// The `params` field describes module parameters. Each entry has the form
+///
+/// ```ignore
+/// parameter_name: type {
+///     default: default_value,
+///     description: "Description",
+/// }
+/// ```
+///
+/// `type` may be one of
+///
+/// - [`i8`]
+/// - [`u8`]
+/// - [`i16`]
+/// - [`u16`]
+/// - [`i32`]
+/// - [`u32`]
+/// - [`i64`]
+/// - [`u64`]
+/// - [`isize`]
+/// - [`usize`]
+///
 /// C header: [`include/linux/moduleparam.h`](srctree/include/linux/moduleparam.h)
 ///
 /// [`Module`]: ../kernel/trait.Module.html
@@ -36,10 +62,16 @@ use proc_macro::TokenStream;
 /// module!{
 ///     type: MyModule,
 ///     name: "my_kernel_module",
-///     author: "Rust for Linux Contributors",
+///     authors: ["Rust for Linux Contributors"],
 ///     description: "My very own kernel module!",
 ///     license: "GPL",
 ///     alias: ["alternate_module_name"],
+///     params: {
+///         my_parameter: i64 {
+///             default: 1,
+///             description: "This parameter has a default of 1",
+///         },
+///     },
 /// }
 ///
 /// struct MyModule(i32);
@@ -48,6 +80,7 @@ use proc_macro::TokenStream;
 ///     fn init(_module: &'static ThisModule) -> Result<Self> {
 ///         let foo: i32 = 42;
 ///         pr_info!("I contain:  {}\n", foo);
+///         pr_info!("i32 param is:  {}\n", module_parameters::my_parameter.read());
 ///         Ok(Self(foo))
 ///     }
 /// }
@@ -69,7 +102,7 @@ use proc_macro::TokenStream;
 /// module!{
 ///     type: MyDeviceDriverModule,
 ///     name: "my_device_driver_module",
-///     author: "Rust for Linux Contributors",
+///     authors: ["Rust for Linux Contributors"],
 ///     description: "My device driver requires firmware",
 ///     license: "GPL",
 ///     firmware: ["my_device_firmware1.bin", "my_device_firmware2.bin"],
@@ -88,7 +121,7 @@ use proc_macro::TokenStream;
 /// # Supported argument types
 ///   - `type`: type which implements the [`Module`] trait (required).
 ///   - `name`: ASCII string literal of the name of the kernel module (required).
-///   - `author`: string literal of the author of the kernel module.
+///   - `authors`: array of ASCII string literals of the authors of the kernel module.
 ///   - `description`: string literal of the description of the kernel module.
 ///   - `license`: ASCII string literal of the license of the kernel module (required).
 ///   - `alias`: array of ASCII string literals of the alias names of the kernel module.
@@ -99,6 +132,12 @@ pub fn module(ts: TokenStream) -> TokenStream {
     module::module(ts)
 }
 
+/// Declares multiple versioned variants of a structure, impl block, or function, with
+/// individual items conditionally included via `#[ver(...)]` attributes.
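+///
+/// A rough usage sketch (the `AGX` field and version names come from the table in
+/// `rust/macros/versions.rs`; the struct and its fields are made up for illustration):
+///
+/// ```ignore
+/// #[versions(AGX)]
+/// struct InitData {
+///     always_present: u32,
+///     #[ver(V >= V13_0B4)]
+///     only_on_newer_firmware: u32,
+/// }
+///
+/// // One copy is emitted per supported version, e.g. `InitDataG13V12_3`, `InitDataG14V12_4`,
+/// // ..., with `#[ver(...)]` items kept or dropped according to each version.
+/// ```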
+#[proc_macro_attribute]
+pub fn versions(attr: TokenStream, item: TokenStream) -> TokenStream {
+    versions::versions(attr, item)
+}
+
 /// Declares or implements a vtable trait.
 ///
 /// Linux's use of pure vtables is very close to Rust traits, but they differ
@@ -174,6 +213,29 @@ pub fn vtable(attr: TokenStream, ts: TokenStream) -> TokenStream {
     vtable::vtable(attr, ts)
 }
 
+/// Export a function so that C code can call it via a header file.
+///
+/// Functions exported using this macro can be called from C code using the declaration in the
+/// appropriate header file. It should only be used in cases where C calls the function through a
+/// header file; cases where C calls into Rust via a function pointer in a vtable (such as
+/// `file_operations`) should not use this macro.
+///
+/// This macro has the following effect:
+///
+/// * Disables name mangling for this function.
+/// * Verifies at compile-time that the function signature matches the declaration in the header
+///   file.
+///
+/// You must declare the signature of the Rust function in a header file that is included by
+/// `rust/bindings/bindings_helper.h`.
+///
+/// This macro is *not* the same as the C macros `EXPORT_SYMBOL_*`. All Rust symbols are currently
+/// automatically exported with `EXPORT_SYMBOL_GPL`.
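+///
+/// # Examples
+///
+/// A sketch (the function name is made up; its C prototype, e.g.
+/// `size_t my_rust_helper(size_t x);`, must live in a header pulled in by
+/// `rust/bindings/bindings_helper.h`):
+///
+/// ```ignore
+/// #[export]
+/// pub unsafe extern "C" fn my_rust_helper(x: usize) -> usize {
+///     x.wrapping_add(1)
+/// }
+/// ```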
+#[proc_macro_attribute]
+pub fn export(attr: TokenStream, ts: TokenStream) -> TokenStream {
+    export::export(attr, ts)
+}
+
 /// Concatenate two identifiers.
 ///
 /// This is useful in macros that need to declare or reference items with names
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 3f462e71ff0ef8..815dfaf9afaf1a 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -26,6 +26,7 @@ struct ModInfoBuilder<'a> {
     module: &'a str,
     counter: usize,
     buffer: String,
+    param_buffer: String,
 }
 
 impl<'a> ModInfoBuilder<'a> {
@@ -34,10 +35,11 @@ impl<'a> ModInfoBuilder<'a> {
             module,
             counter: 0,
             buffer: String::new(),
+            param_buffer: String::new(),
         }
     }
 
-    fn emit_base(&mut self, field: &str, content: &str, builtin: bool) {
+    fn emit_base(&mut self, field: &str, content: &str, builtin: bool, param: bool) {
         let string = if builtin {
             // Built-in modules prefix their modinfo strings by `module.`.
             format!(
@@ -51,8 +53,14 @@ impl<'a> ModInfoBuilder<'a> {
             format!("{field}={content}\0")
         };
 
+        let buffer = if param {
+            &mut self.param_buffer
+        } else {
+            &mut self.buffer
+        };
+
         write!(
-            &mut self.buffer,
+            buffer,
             "
                 {cfg}
                 #[doc(hidden)]
@@ -75,29 +83,170 @@ impl<'a> ModInfoBuilder<'a> {
         self.counter += 1;
     }
 
-    fn emit_only_builtin(&mut self, field: &str, content: &str) {
-        self.emit_base(field, content, true)
+    fn emit_only_builtin(&mut self, field: &str, content: &str, param: bool) {
+        self.emit_base(field, content, true, param)
     }
 
-    fn emit_only_loadable(&mut self, field: &str, content: &str) {
-        self.emit_base(field, content, false)
+    fn emit_only_loadable(&mut self, field: &str, content: &str, param: bool) {
+        self.emit_base(field, content, false, param)
     }
 
     fn emit(&mut self, field: &str, content: &str) {
-        self.emit_only_builtin(field, content);
-        self.emit_only_loadable(field, content);
+        self.emit_internal(field, content, false);
+    }
+
+    fn emit_internal(&mut self, field: &str, content: &str, param: bool) {
+        self.emit_only_builtin(field, content, param);
+        self.emit_only_loadable(field, content, param);
+    }
+
+    fn emit_param(&mut self, field: &str, param: &str, content: &str) {
+        let content = format!("{param}:{content}");
+        self.emit_internal(field, &content, true);
+    }
+
+    fn emit_params(&mut self, info: &ModuleInfo) {
+        let Some(params) = &info.params else {
+            return;
+        };
+
+        for param in params {
+            let ops = param_ops_path(&param.ptype);
+
+            // Note: The spelling of these fields is dictated by the user space
+            // tool `modinfo`.
+            self.emit_param("parmtype", &param.name, &param.ptype);
+            self.emit_param("parm", &param.name, &param.description);
+
+            write!(
+                self.param_buffer,
+                "
+                    pub(crate) static {param_name}:
+                        ::kernel::module_param::ModuleParamAccess<{param_type}> =
+                            ::kernel::module_param::ModuleParamAccess::new({param_default});
+
+                    #[link_section = \"__param\"]
+                    #[used]
+                    static __{module_name}_{param_name}_struct:
+                        ::kernel::module_param::RacyKernelParam =
+                        ::kernel::module_param::RacyKernelParam(::kernel::bindings::kernel_param {{
+                            name: if cfg!(MODULE) {{
+                                ::kernel::c_str!(\"{param_name}\").as_bytes_with_nul()
+                            }} else {{
+                                ::kernel::c_str!(\"{module_name}.{param_name}\").as_bytes_with_nul()
+                            }}.as_ptr(),
+                            // SAFETY: `__this_module` is constructed by the kernel at load time
+                            // and will not be freed until the module is unloaded.
+                            #[cfg(MODULE)]
+                            mod_: unsafe {{
+                                (&::kernel::bindings::__this_module
+                                    as *const ::kernel::bindings::module)
+                                    .cast_mut()
+                            }},
+                            #[cfg(not(MODULE))]
+                            mod_: ::core::ptr::null_mut(),
+                            ops: &{ops} as *const ::kernel::bindings::kernel_param_ops,
+                            perm: 0, // Will not appear in sysfs
+                            level: -1,
+                            flags: 0,
+                            __bindgen_anon_1:
+                                ::kernel::bindings::kernel_param__bindgen_ty_1 {{
+                                    arg: {param_name}.as_mut_ptr().cast()
+                                }},
+                        }});
+                ",
+                module_name = info.name,
+                param_type = param.ptype,
+                param_default = param.default,
+                param_name = param.name,
+                ops = ops,
+            )
+            .unwrap();
+        }
+    }
+}
+
+fn param_ops_path(param_type: &str) -> &'static str {
+    match param_type {
+        "i8" => "::kernel::module_param::PARAM_OPS_I8",
+        "u8" => "::kernel::module_param::PARAM_OPS_U8",
+        "i16" => "::kernel::module_param::PARAM_OPS_I16",
+        "u16" => "::kernel::module_param::PARAM_OPS_U16",
+        "i32" => "::kernel::module_param::PARAM_OPS_I32",
+        "u32" => "::kernel::module_param::PARAM_OPS_U32",
+        "i64" => "::kernel::module_param::PARAM_OPS_I64",
+        "u64" => "::kernel::module_param::PARAM_OPS_U64",
+        "isize" => "::kernel::module_param::PARAM_OPS_ISIZE",
+        "usize" => "::kernel::module_param::PARAM_OPS_USIZE",
+        t => panic!("Unsupported parameter type {}", t),
     }
 }
 
+fn expect_param_default(param_it: &mut token_stream::IntoIter) -> String {
+    assert_eq!(expect_ident(param_it), "default");
+    assert_eq!(expect_punct(param_it), ':');
+    let sign = try_sign(param_it);
+    let default = try_literal(param_it).expect("Expected default param value");
+    assert_eq!(expect_punct(param_it), ',');
+    let mut value = sign.map(String::from).unwrap_or_default();
+    value.push_str(&default);
+    value
+}
+
 #[derive(Debug, Default)]
 struct ModuleInfo {
     type_: String,
     license: String,
     name: String,
     author: Option<String>,
+    authors: Option<Vec<String>>,
     description: Option<String>,
     alias: Option<Vec<String>>,
     firmware: Option<Vec<String>>,
+    params: Option<Vec<Parameter>>,
+}
+
+#[derive(Debug)]
+struct Parameter {
+    name: String,
+    ptype: String,
+    default: String,
+    description: String,
+}
+
+fn expect_params(it: &mut token_stream::IntoIter) -> Vec<Parameter> {
+    let params = expect_group(it);
+    assert_eq!(params.delimiter(), Delimiter::Brace);
+    let mut it = params.stream().into_iter();
+    let mut parsed = Vec::new();
+
+    loop {
+        let param_name = match it.next() {
+            Some(TokenTree::Ident(ident)) => ident.to_string(),
+            Some(_) => panic!("Expected Ident or end"),
+            None => break,
+        };
+
+        assert_eq!(expect_punct(&mut it), ':');
+        let param_type = expect_ident(&mut it);
+        let group = expect_group(&mut it);
+        assert_eq!(group.delimiter(), Delimiter::Brace);
+        assert_eq!(expect_punct(&mut it), ',');
+
+        let mut param_it = group.stream().into_iter();
+        let param_default = expect_param_default(&mut param_it);
+        let param_description = expect_string_field(&mut param_it, "description");
+        expect_end(&mut param_it);
+
+        parsed.push(Parameter {
+            name: param_name,
+            ptype: param_type,
+            default: param_default,
+            description: param_description,
+        })
+    }
+
+    parsed
 }
 
 impl ModuleInfo {
@@ -108,10 +257,12 @@ impl ModuleInfo {
             "type",
             "name",
             "author",
+            "authors",
             "description",
             "license",
             "alias",
             "firmware",
+            "params",
         ];
         const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
         let mut seen_keys = Vec::new();
@@ -133,10 +284,12 @@ impl ModuleInfo {
                 "type" => info.type_ = expect_ident(it),
                 "name" => info.name = expect_string_ascii(it),
                 "author" => info.author = Some(expect_string(it)),
+                "authors" => info.authors = Some(expect_string_array(it)),
                 "description" => info.description = Some(expect_string(it)),
                 "license" => info.license = expect_string_ascii(it),
                 "alias" => info.alias = Some(expect_string_array(it)),
                 "firmware" => info.firmware = Some(expect_string_array(it)),
+                "params" => info.params = Some(expect_params(it)),
                 _ => panic!("Unknown key \"{key}\". Valid keys are: {EXPECTED_KEYS:?}."),
             }
 
@@ -174,28 +327,35 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
     let info = ModuleInfo::parse(&mut it);
 
     let mut modinfo = ModInfoBuilder::new(info.name.as_ref());
-    if let Some(author) = info.author {
-        modinfo.emit("author", &author);
+    if let Some(author) = &info.author {
+        modinfo.emit("author", author);
     }
-    if let Some(description) = info.description {
-        modinfo.emit("description", &description);
+    if let Some(authors) = &info.authors {
+        for author in authors {
+            modinfo.emit("author", author);
+        }
+    }
+    if let Some(description) = &info.description {
+        modinfo.emit("description", description);
     }
     modinfo.emit("license", &info.license);
-    if let Some(aliases) = info.alias {
+    if let Some(aliases) = &info.alias {
         for alias in aliases {
-            modinfo.emit("alias", &alias);
+            modinfo.emit("alias", alias);
         }
     }
-    if let Some(firmware) = info.firmware {
+    if let Some(firmware) = &info.firmware {
         for fw in firmware {
-            modinfo.emit("firmware", &fw);
+            modinfo.emit("firmware", fw);
         }
     }
 
     // Built-in modules also export the `file` modinfo string.
     let file =
         std::env::var("RUST_MODFILE").expect("Unable to fetch RUST_MODFILE environmental variable");
-    modinfo.emit_only_builtin("file", &file);
+    modinfo.emit_only_builtin("file", &file, false);
+
+    modinfo.emit_params(&info);
 
     format!(
         "
@@ -353,14 +513,17 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
                             __MOD.assume_init_drop();
                         }}
                     }}
-
                     {modinfo}
                 }}
             }}
+            mod module_parameters {{
+                {params}
+            }}
         ",
         type_ = info.type_,
         name = info.name,
         modinfo = modinfo.buffer,
+        params = modinfo.param_buffer,
         initcall_section = ".initcall6.init"
     )
     .parse()
diff --git a/rust/macros/quote.rs b/rust/macros/quote.rs
index 33a199e4f17691..31b7ebe504f489 100644
--- a/rust/macros/quote.rs
+++ b/rust/macros/quote.rs
@@ -20,6 +20,12 @@ impl ToTokens for proc_macro::Group {
     }
 }
 
+impl ToTokens for proc_macro::Ident {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        tokens.extend([TokenTree::from(self.clone())]);
+    }
+}
+
 impl ToTokens for TokenTree {
     fn to_tokens(&self, tokens: &mut TokenStream) {
         tokens.extend([self.clone()]);
@@ -40,7 +46,7 @@ impl ToTokens for TokenStream {
 /// `quote` crate but provides only just enough functionality needed by the current `macros` crate.
 macro_rules! quote_spanned {
     ($span:expr => $($tt:tt)*) => {{
-        let mut tokens;
+        let mut tokens: ::std::vec::Vec<::proc_macro::TokenTree>;
         #[allow(clippy::vec_init_then_push)]
         {
             tokens = ::std::vec::Vec::new();
@@ -65,7 +71,8 @@ macro_rules! quote_spanned {
         quote_spanned!(@proc $v $span $($tt)*);
     };
     (@proc $v:ident $span:ident ( $($inner:tt)* ) $($tt:tt)*) => {
-        let mut tokens = ::std::vec::Vec::new();
+        #[allow(unused_mut)]
+        let mut tokens = ::std::vec::Vec::<::proc_macro::TokenTree>::new();
         quote_spanned!(@proc tokens $span $($inner)*);
         $v.push(::proc_macro::TokenTree::Group(::proc_macro::Group::new(
             ::proc_macro::Delimiter::Parenthesis,
@@ -136,6 +143,22 @@ macro_rules! quote_spanned {
         ));
         quote_spanned!(@proc $v $span $($tt)*);
     };
+    (@proc $v:ident $span:ident = $($tt:tt)*) => {
+        $v.push(::proc_macro::TokenTree::Punct(
+                ::proc_macro::Punct::new('=', ::proc_macro::Spacing::Alone)
+        ));
+        quote_spanned!(@proc $v $span $($tt)*);
+    };
+    (@proc $v:ident $span:ident # $($tt:tt)*) => {
+        $v.push(::proc_macro::TokenTree::Punct(
+                ::proc_macro::Punct::new('#', ::proc_macro::Spacing::Alone)
+        ));
+        quote_spanned!(@proc $v $span $($tt)*);
+    };
+    (@proc $v:ident $span:ident _ $($tt:tt)*) => {
+        $v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new("_", $span)));
+        quote_spanned!(@proc $v $span $($tt)*);
+    };
     (@proc $v:ident $span:ident $id:ident $($tt:tt)*) => {
         $v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new(stringify!($id), $span)));
         quote_spanned!(@proc $v $span $($tt)*);
diff --git a/rust/macros/versions.rs b/rust/macros/versions.rs
new file mode 100644
index 00000000000000..b13a5d55c0e17b
--- /dev/null
+++ b/rust/macros/versions.rs
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree};
+
+fn expect_group(it: &mut impl Iterator<Item = TokenTree>) -> Group {
+    if let Some(TokenTree::Group(group)) = it.next() {
+        group
+    } else {
+        panic!("Expected Group")
+    }
+}
+
+fn expect_punct(it: &mut impl Iterator<Item = TokenTree>) -> String {
+    if let Some(TokenTree::Punct(punct)) = it.next() {
+        punct.to_string()
+    } else {
+        panic!("Expected Punct")
+    }
+}
+
+fn drop_until_punct(it: &mut impl Iterator<Item = TokenTree>, delimiter: &str, is_struct: bool) {
+    let mut depth: isize = 0;
+    let mut colons: isize = 0;
+    for token in it.by_ref() {
+        if let TokenTree::Punct(punct) = token {
+            match punct.as_char() {
+                ':' => {
+                    colons += 1;
+                }
+                '<' => {
+                    if depth > 0 || colons == 2 || is_struct {
+                        depth += 1;
+                    }
+                    colons = 0;
+                }
+                '>' => {
+                    if depth > 0 {
+                        depth -= 1;
+                    }
+                    colons = 0;
+                }
+                _ => {
+                    colons = 0;
+                    if depth == 0 && delimiter.contains(&punct.to_string()) {
+                        break;
+                    }
+                }
+            }
+        }
+    }
+}
+
+fn drop_until_braces(it: &mut impl Iterator<Item = TokenTree>) {
+    let mut depth: isize = 0;
+    let mut colons: isize = 0;
+    for token in it.by_ref() {
+        match token {
+            TokenTree::Punct(punct) => match punct.as_char() {
+                ':' => {
+                    colons += 1;
+                }
+                '<' => {
+                    if depth > 0 || colons == 2 {
+                        depth += 1;
+                    }
+                    colons = 0;
+                }
+                '>' => {
+                    if depth > 0 {
+                        depth -= 1;
+                    }
+                    colons = 0;
+                }
+                _ => colons = 0,
+            },
+            TokenTree::Group(group) if group.delimiter() == Delimiter::Brace => {
+                if depth == 0 {
+                    break;
+                }
+            }
+            _ => (),
+        }
+    }
+}
+
+struct VersionConfig {
+    fields: &'static [&'static str],
+    enums: &'static [&'static [&'static str]],
+    versions: &'static [&'static [&'static str]],
+}
+
+static AGX_VERSIONS: VersionConfig = VersionConfig {
+    fields: &["G", "V"],
+    enums: &[
+        &["G13", "G14", "G14X"],
+        &["V12_3", "V12_4", "V13_0B4", "V13_2", "V13_3", "V13_5"],
+    ],
+    versions: &[
+        &["G13", "V12_3"],
+        &["G14", "V12_4"],
+        &["G13", "V13_5"],
+        &["G14", "V13_5"],
+        &["G14X", "V13_5"],
+    ],
+};
+
+fn check_version(
+    config: &VersionConfig,
+    ver: &[usize],
+    it: &mut impl Iterator<Item = TokenTree>,
+) -> bool {
+    let first = it.next().unwrap();
+    let val: bool = match &first {
+        TokenTree::Group(group) => check_version(config, ver, &mut group.stream().into_iter()),
+        TokenTree::Ident(ident) => {
+            let key = config
+                .fields
+                .iter()
+                .position(|&r| r == ident.to_string())
+                .unwrap_or_else(|| panic!("Unknown field {}", ident));
+            let mut operator = expect_punct(it);
+            let mut rhs_token = it.next().unwrap();
+            if let TokenTree::Punct(punct) = &rhs_token {
+                operator.extend(std::iter::once(punct.as_char()));
+                rhs_token = it.next().unwrap();
+            }
+            let rhs_name = if let TokenTree::Ident(ident) = &rhs_token {
+                ident.to_string()
+            } else {
+                panic!("Unexpected token {}", ident)
+            };
+
+            let rhs = config.enums[key]
+                .iter()
+                .position(|&r| r == rhs_name)
+                .unwrap_or_else(|| panic!("Unknown value for {}:{}", ident, rhs_name));
+            let lhs = ver[key];
+
+            match operator.as_str() {
+                "==" => lhs == rhs,
+                "!=" => lhs != rhs,
+                ">" => lhs > rhs,
+                ">=" => lhs >= rhs,
+                "<" => lhs < rhs,
+                "<=" => lhs <= rhs,
+                _ => panic!("Unknown operator {}", operator),
+            }
+        }
+        _ => {
+            panic!("Unknown token {}", first)
+        }
+    };
+
+    let boolop = it.next();
+    match boolop {
+        Some(TokenTree::Punct(punct)) => {
+            let right = expect_punct(it);
+            if right != punct.to_string() {
+                panic!("Unexpected op {}{}", punct, right);
+            }
+            match punct.as_char() {
+                '&' => val && check_version(config, ver, it),
+                '|' => val || check_version(config, ver, it),
+                _ => panic!("Unexpected op {}{}", punct, right),
+            }
+        }
+        Some(a) => panic!("Unexpected op {}", a),
+        None => val,
+    }
+}
+
+fn filter_versions(
+    config: &VersionConfig,
+    tag: &str,
+    ver: &[usize],
+    tree: impl IntoIterator<Item = TokenTree>,
+    is_struct: bool,
+) -> Vec<TokenTree> {
+    let mut out = Vec::<TokenTree>::new();
+    let mut it = tree.into_iter();
+
+    while let Some(token) = it.next() {
+        let mut tail: Option<TokenTree> = None;
+        match &token {
+            TokenTree::Punct(punct) if punct.to_string() == "#" => {
+                let group = expect_group(&mut it);
+                let mut grp_it = group.stream().into_iter();
+                let attr = grp_it.next().unwrap();
+                match attr {
+                    TokenTree::Ident(ident) if ident.to_string() == "ver" => {
+                        if check_version(config, ver, &mut grp_it) {
+                        } else if is_struct {
+                            drop_until_punct(&mut it, ",", true);
+                        } else {
+                            let first = it.next().unwrap();
+                            match &first {
+                                TokenTree::Ident(ident)
+                                    if ["while", "for", "loop", "if", "match", "unsafe", "fn"]
+                                        .contains(&ident.to_string().as_str()) =>
+                                {
+                                    drop_until_braces(&mut it);
+                                }
+                                TokenTree::Group(_) => (),
+                                _ => {
+                                    drop_until_punct(&mut it, ",;", false);
+                                }
+                            }
+                        }
+                    }
+                    _ => {
+                        out.push(token.clone());
+                        out.push(TokenTree::Group(group.clone()));
+                    }
+                }
+                continue;
+            }
+            TokenTree::Punct(punct) if punct.to_string() == ":" => {
+                let next = it.next();
+                match next {
+                    Some(TokenTree::Punct(punct)) if punct.to_string() == ":" => {
+                        let next = it.next();
+                        match next {
+                            Some(TokenTree::Ident(idtag)) if idtag.to_string() == "ver" => {
+                                let ident = match out.pop() {
+                                    Some(TokenTree::Ident(ident)) => ident,
+                                    a => panic!("$ver not following ident: {:?}", a),
+                                };
+                                let name = ident.to_string() + tag;
+                                let new_ident = Ident::new(name.as_str(), ident.span());
+                                out.push(TokenTree::Ident(new_ident));
+                                continue;
+                            }
+                            Some(a) => {
+                                out.push(token.clone());
+                                out.push(token.clone());
+                                tail = Some(a);
+                            }
+                            None => {
+                                out.push(token.clone());
+                                out.push(token.clone());
+                            }
+                        }
+                    }
+                    Some(a) => {
+                        out.push(token.clone());
+                        tail = Some(a);
+                    }
+                    None => {
+                        out.push(token.clone());
+                        continue;
+                    }
+                }
+            }
+            _ => {
+                tail = Some(token);
+            }
+        }
+        match &tail {
+            Some(TokenTree::Group(group)) => {
+                let new_body =
+                    filter_versions(config, tag, ver, group.stream().into_iter(), is_struct);
+                let mut stream = TokenStream::new();
+                stream.extend(new_body);
+                let mut filtered_group = Group::new(group.delimiter(), stream);
+                filtered_group.set_span(group.span());
+                out.push(TokenTree::Group(filtered_group));
+            }
+            Some(token) => {
+                out.push(token.clone());
+            }
+            None => {}
+        }
+    }
+
+    out
+}
+
+pub(crate) fn versions(attr: TokenStream, item: TokenStream) -> TokenStream {
+    let config = match attr.to_string().as_str() {
+        "AGX" => &AGX_VERSIONS,
+        _ => panic!("Unknown version group {}", attr),
+    };
+
+    let mut it = item.into_iter();
+    let mut out = TokenStream::new();
+    let mut body: Vec<TokenTree> = Vec::new();
+    let mut is_struct = false;
+
+    while let Some(token) = it.next() {
+        match token {
+            TokenTree::Punct(punct) if punct.to_string() == "#" => {
+                body.push(TokenTree::Punct(punct));
+                body.push(it.next().unwrap());
+            }
+            TokenTree::Ident(ident)
+                if ["struct", "enum", "union", "const", "type"]
+                    .contains(&ident.to_string().as_str()) =>
+            {
+                is_struct = ident.to_string() != "const";
+                body.push(TokenTree::Ident(ident));
+                body.push(it.next().unwrap());
+                // A `::ver` suffix is not valid syntax after the name in a type definition,
+                // so insert it on the user's behalf; the version tag gets appended to the name.
+                body.push(TokenTree::Punct(Punct::new(':', Spacing::Joint)));
+                body.push(TokenTree::Punct(Punct::new(':', Spacing::Alone)));
+                body.push(TokenTree::Ident(Ident::new("ver", Span::call_site())));
+                break;
+            }
+            TokenTree::Ident(ident) if ident.to_string() == "impl" => {
+                body.push(TokenTree::Ident(ident));
+                break;
+            }
+            TokenTree::Ident(ident) if ident.to_string() == "fn" => {
+                body.push(TokenTree::Ident(ident));
+                break;
+            }
+            _ => {
+                body.push(token);
+            }
+        }
+    }
+
+    body.extend(it);
+
+    for ver in config.versions {
+        let tag = ver.join("");
+        let mut ver_num = Vec::<usize>::new();
+        for (i, comp) in ver.iter().enumerate() {
+            let idx = config.enums[i].iter().position(|&r| r == *comp).unwrap();
+            ver_num.push(idx);
+        }
+        out.extend(filter_versions(
+            config,
+            &tag,
+            &ver_num,
+            body.clone(),
+            is_struct,
+        ));
+    }
+
+    out
+}
diff --git a/rust/uapi/uapi_helper.h b/rust/uapi/uapi_helper.h
index 76d3f103e76499..4e0836b27f38de 100644
--- a/rust/uapi/uapi_helper.h
+++ b/rust/uapi/uapi_helper.h
@@ -7,6 +7,10 @@
  */
 
 #include <uapi/asm-generic/ioctl.h>
+#include <uapi/drm/asahi_drm.h>
+#include <uapi/drm/drm.h>
+#include <uapi/linux/elf.h>
+#include <uapi/linux/elf-em.h>
 #include <uapi/linux/mdio.h>
 #include <uapi/linux/mii.h>
 #include <uapi/linux/ethtool.h>
diff --git a/samples/rust/rust_minimal.rs b/samples/rust/rust_minimal.rs
index 4aaf117bf8e3c0..c04cc07b3249c0 100644
--- a/samples/rust/rust_minimal.rs
+++ b/samples/rust/rust_minimal.rs
@@ -7,9 +7,15 @@ use kernel::prelude::*;
 module! {
     type: RustMinimal,
     name: "rust_minimal",
-    author: "Rust for Linux Contributors",
+    authors: ["Rust for Linux Contributors"],
     description: "Rust minimal sample",
     license: "GPL",
+    params: {
+        test_parameter: i64 {
+            default: 1,
+            description: "This parameter has a default of 1",
+        },
+    },
 }
 
 struct RustMinimal {
@@ -20,6 +26,10 @@ impl kernel::Module for RustMinimal {
     fn init(_module: &'static ThisModule) -> Result<Self> {
         pr_info!("Rust minimal sample (init)\n");
         pr_info!("Am I built-in? {}\n", !cfg!(MODULE));
+        pr_info!(
+            "test_parameter: {}\n",
+            *module_parameters::test_parameter.get()
+        );
 
         let mut numbers = KVec::new();
         numbers.push(72, GFP_KERNEL)?;
diff --git a/samples/rust/rust_print_main.rs b/samples/rust/rust_print_main.rs
index 7e8af5f176a339..8ea95e8c2f3647 100644
--- a/samples/rust/rust_print_main.rs
+++ b/samples/rust/rust_print_main.rs
@@ -8,7 +8,7 @@ use kernel::prelude::*;
 module! {
     type: RustPrint,
     name: "rust_print",
-    author: "Rust for Linux Contributors",
+    authors: ["Rust for Linux Contributors"],
     description: "Rust printing macros sample",
     license: "GPL",
 }
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 993708d1187459..6528cdf0c11f8b 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -252,7 +252,7 @@ rust_common_cmd = \
 # would not match each other.
 
 quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
-      cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $< $(cmd_objtool)
+      cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $(abspath $<) $(cmd_objtool)
 
 define rule_rustc_o_rs
 	$(call cmd_and_fixdep,rustc_o_rs)
@@ -264,20 +264,20 @@ $(obj)/%.o: $(obj)/%.rs FORCE
 
 quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
       cmd_rustc_rsi_rs = \
-	$(rust_common_cmd) -Zunpretty=expanded $< >$@; \
+	$(rust_common_cmd) -Zunpretty=expanded $(abspath $<) >$@; \
 	command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@
 
 $(obj)/%.rsi: $(obj)/%.rs FORCE
 	+$(call if_changed_dep,rustc_rsi_rs)
 
 quiet_cmd_rustc_s_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
-      cmd_rustc_s_rs = $(rust_common_cmd) --emit=asm=$@ $<
+      cmd_rustc_s_rs = $(rust_common_cmd) --emit=asm=$@ $(abspath $<)
 
 $(obj)/%.s: $(obj)/%.rs FORCE
 	+$(call if_changed_dep,rustc_s_rs)
 
 quiet_cmd_rustc_ll_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
-      cmd_rustc_ll_rs = $(rust_common_cmd) --emit=llvm-ir=$@ $<
+      cmd_rustc_ll_rs = $(rust_common_cmd) --emit=llvm-ir=$@ $(abspath $<)
 
 $(obj)/%.ll: $(obj)/%.rs FORCE
 	+$(call if_changed_dep,rustc_ll_rs)
diff --git a/scripts/dtc/data.c b/scripts/dtc/data.c
index 14734233ad8b7e..d12c1f0146bedf 100644
--- a/scripts/dtc/data.c
+++ b/scripts/dtc/data.c
@@ -184,6 +184,33 @@ struct data data_append_integer(struct data d, uint64_t value, int bits)
 	}
 }
 
+struct data data_append_float(struct data d, double value, int bits)
+{
+	float f32;
+	uint32_t u32;
+	double f64;
+	uint64_t u64;
+	fdt32_t value_32;
+	fdt64_t value_64;
+
+	switch (bits) {
+	case 32:
+		f32 = value;
+		memcpy(&u32, &f32, sizeof(u32));
+		value_32 = cpu_to_fdt32(u32);
+		return data_append_data(d, &value_32, 4);
+
+	case 64:
+		f64 = value;
+		memcpy(&u64, &f64, sizeof(u64));
+		value_64 = cpu_to_fdt64(u64);
+		return data_append_data(d, &value_64, 8);
+
+	default:
+		die("Invalid literal size (%d)\n", bits);
+	}
+}
+
 struct data data_append_re(struct data d, uint64_t address, uint64_t size)
 {
 	struct fdt_reserve_entry re;
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index de60a70b6bdbcb..ac0fadff20802d 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -151,6 +151,28 @@ static void PRINTF(1, 2) lexical_error(const char *fmt, ...);
 			return DT_LABEL;
 		}
 
+<V1>[-+]?(([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+))(e[-+]?[0-9]+)?f? {
+			char *e;
+			DPRINT("Floating-point Literal: '%s'\n", yytext);
+
+			errno = 0;
+			yylval.floating = strtod(yytext, &e);
+
+			if (*e && (*e != 'f' || e[1])) {
+				lexical_error("Bad floating-point literal '%s'",
+					      yytext);
+			}
+
+			if (errno == ERANGE)
+				lexical_error("Floating-point literal '%s' out of range",
+					      yytext);
+			else
+				/* ERANGE is the only strtod error triggerable
+				 *  by strings matching the pattern */
+				assert(errno == 0);
+			return DT_FP_LITERAL;
+		}
+
 <V1>([0-9]+|0[xX][0-9a-fA-F]+)(U|L|UL|LL|ULL)? {
 			char *e;
 			DPRINT("Integer Literal: '%s'\n", yytext);
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index 4d5eece5262434..225a6b41b14fcf 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -48,6 +48,7 @@ static bool is_ref_relative(const char *ref)
 	struct node *nodelist;
 	struct reserve_info *re;
 	uint64_t integer;
+	double floating;
 	unsigned int flags;
 }
 
@@ -61,6 +62,7 @@ static bool is_ref_relative(const char *ref)
 %token DT_OMIT_NO_REF
 %token <propnodename> DT_PROPNODENAME
 %token <integer> DT_LITERAL
+%token <floating> DT_FP_LITERAL
 %token <integer> DT_CHAR_LITERAL
 %token <byte> DT_BYTE
 %token <data> DT_STRING
@@ -86,6 +88,7 @@ static bool is_ref_relative(const char *ref)
 %type <node> subnode
 %type <nodelist> subnodes
 
+%type <floating> floating_prim
 %type <integer> integer_prim
 %type <integer> integer_unary
 %type <integer> integer_mul
@@ -395,6 +398,15 @@ arrayprefix:
 			$$.data = data_add_marker(empty_data, TYPE_UINT32, NULL);
 			$$.bits = 32;
 		}
+	| arrayprefix floating_prim
+		{
+			if ($1.bits < 32) {
+				ERROR(&@2, "Floating-point values must be"
+				      " 32-bit or 64-bit");
+			}
+
+			$$.data = data_append_float($1.data, $2, $1.bits);
+		}
 	| arrayprefix integer_prim
 		{
 			if ($1.bits < 64) {
@@ -439,6 +451,10 @@ arrayprefix:
 		}
 	;
 
+floating_prim:
+	DT_FP_LITERAL
+	;
+
 integer_prim:
 	  DT_LITERAL
 	| DT_CHAR_LITERAL
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index 4c4aaca1fc417c..8561e71ae45a74 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -177,6 +177,7 @@ struct data data_insert_at_marker(struct data d, struct marker *m,
 struct data data_merge(struct data d1, struct data d2);
 struct data data_append_cell(struct data d, cell_t word);
 struct data data_append_integer(struct data d, uint64_t word, int bits);
+struct data data_append_float(struct data d, double value, int bits);
 struct data data_append_re(struct data d, uint64_t address, uint64_t size);
 struct data data_append_addr(struct data d, uint64_t addr);
 struct data data_append_byte(struct data d, uint8_t byte);
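
As a usage sketch of the new dtc support (the node and property names here are made up), a .dts fragment like

	filters {
		filter-coefficients = /bits/ 64 <1.0 0.5 -0.25 1.5e-3>;
	};

would now be accepted, with each literal stored as its IEEE-754 bit pattern: 32-bit wide by default, or 64-bit wide under /bits/ 64 as above.
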
diff --git a/sound/core/control.c b/sound/core/control.c
index 0ddade871b524a..f4b4e902e027ea 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -123,10 +123,12 @@ static int snd_ctl_release(struct inode *inode, struct file *file)
 	scoped_guard(rwsem_write, &card->controls_rwsem) {
 		list_for_each_entry(control, &card->controls, list)
 			for (idx = 0; idx < control->count; idx++)
-				if (control->vd[idx].owner == ctl)
+				if (control->vd[idx].owner == ctl) {
 					control->vd[idx].owner = NULL;
+					if (control->unlock)
+						control->unlock(control);
+				}
 	}
-
 	snd_fasync_free(ctl->fasync);
 	snd_ctl_empty_read_queue(ctl);
 	put_pid(ctl->pid);
@@ -303,6 +305,8 @@ struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol,
 	kctl->info = ncontrol->info;
 	kctl->get = ncontrol->get;
 	kctl->put = ncontrol->put;
+	kctl->lock = ncontrol->lock;
+	kctl->unlock = ncontrol->unlock;
 	kctl->tlv.p = ncontrol->tlv.p;
 
 	kctl->private_value = ncontrol->private_value;
@@ -1359,6 +1363,12 @@ static int snd_ctl_elem_lock(struct snd_ctl_file *file,
 	vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)];
 	if (vd->owner)
 		return -EBUSY;
+
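+	/* Give the driver a chance to refuse (or account for) the element lock */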
+	if (kctl->lock) {
+		int err = kctl->lock(kctl, file);
+		if (err < 0)
+			return err;
+	}
 	vd->owner = file;
 	return 0;
 }
@@ -1383,6 +1393,8 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
 	if (vd->owner != file)
 		return -EPERM;
 	vd->owner = NULL;
+	if (kctl->unlock)
+		kctl->unlock(kctl);
 	return 0;
 }
 
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index b134a51b3fd587..8a53bb89095fbc 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -22,6 +22,8 @@
 struct dmaengine_pcm_runtime_data {
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
+	struct work_struct complete_wq; /* for nonatomic PCM */
+	struct snd_pcm_substream *substream;
 
 	unsigned int pos;
 };
@@ -145,6 +147,21 @@ static void dmaengine_pcm_dma_complete(void *arg)
 	snd_pcm_period_elapsed(substream);
 }
 
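+/*
+ * For nonatomic PCMs, snd_pcm_period_elapsed() takes the stream mutex and
+ * therefore must not be called from the dmaengine completion callback
+ * (non-sleepable context); bounce it through a work item instead.
+ */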
+static void dmaengine_pcm_dma_complete_nonatomic(struct work_struct *wq)
+{
+	struct dmaengine_pcm_runtime_data *prtd =
+		container_of(wq, struct dmaengine_pcm_runtime_data, complete_wq);
+	struct snd_pcm_substream *substream = prtd->substream;
+	dmaengine_pcm_dma_complete(substream);
+}
+
+static void dmaengine_pcm_dma_complete_nonatomic_callback(void *arg)
+{
+	struct snd_pcm_substream *substream = arg;
+	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	schedule_work(&prtd->complete_wq);
+}
+
 static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 {
 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
@@ -167,7 +184,11 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 	if (!desc)
 		return -ENOMEM;
 
-	desc->callback = dmaengine_pcm_dma_complete;
+	if (substream->pcm->nonatomic)
+		desc->callback = dmaengine_pcm_dma_complete_nonatomic_callback;
+	else
+		desc->callback = dmaengine_pcm_dma_complete;
+
 	desc->callback_param = substream;
 	prtd->cookie = dmaengine_submit(desc);
 
@@ -320,6 +341,10 @@ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
 	if (!prtd)
 		return -ENOMEM;
 
+	if (substream->pcm->nonatomic)
+		INIT_WORK(&prtd->complete_wq, dmaengine_pcm_dma_complete_nonatomic);
+
+	prtd->substream = substream;
 	prtd->dma_chan = chan;
 
 	substream->runtime->private_data = prtd;
@@ -374,7 +399,14 @@ static void __snd_dmaengine_pcm_close(struct snd_pcm_substream *substream,
 	if (status == DMA_PAUSED)
 		dmaengine_terminate_async(prtd->dma_chan);
 
+	/*
+	 * The PCM might have been closed while suspended, which would
+	 * skip the STOP trigger. Make sure we terminate.
+	 */
+	dmaengine_terminate_async(prtd->dma_chan);
 	dmaengine_synchronize(prtd->dma_chan);
+	if (substream->pcm->nonatomic)
+		flush_work(&prtd->complete_wq);
 	if (release_channel)
 		dma_release_channel(prtd->dma_chan);
 	kfree(prtd);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6eaa950504cfc0..d0df847152f0dd 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1149,6 +1149,43 @@ static int snd_interval_step(struct snd_interval *i, unsigned int step)
 	return changed;
 }
 
+/**
+ * snd_interval_rate_bits - refine the rate interval from a rate bitmask
+ * @i: the rate interval to refine
+ * @mask: the rate bitmask
+ *
+ * Refines the interval value, assumed to be the sample rate, according to
+ * a bitmask of available rates (an ORed combination of SNDRV_PCM_RATE_*).
+ * Only the minimum and maximum of the rates set in @mask are applied; rates
+ * in between are not excluded from the interval.
+ *
+ * Return: Positive if the value is changed, zero if it's not changed, or a
+ * negative error code.
+ */
+int snd_interval_rate_bits(struct snd_interval *i, unsigned int mask)
+{
+	unsigned int k;
+	struct snd_interval mask_range;
+
+	if (!mask)
+		return -EINVAL;
+
+	snd_interval_any(&mask_range);
+	mask_range.min = UINT_MAX;
+	mask_range.max = 0;
+	for (k = 0; k < snd_pcm_known_rates.count; k++) {
+		unsigned int rate = snd_pcm_known_rates.list[k];
+		if (!(mask & (1 << k)))
+			continue;
+
+		if (rate > mask_range.max)
+			mask_range.max = rate;
+
+		if (rate < mask_range.min)
+			mask_range.min = rate;
+	}
+	return snd_interval_refine(i, &mask_range);
+}
+EXPORT_SYMBOL(snd_interval_rate_bits);
+
 /* Info constraints helpers */
 
 /**
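
As a usage sketch of the new helper (the callback name, rate mask and setup are illustrative, not part of the patch), it slots into an ordinary hw_rule callback that would be registered with snd_pcm_hw_rule_add():

#include <sound/pcm.h>
#include <sound/pcm_params.h>

static int example_rule_rate_mask(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *rate = hw_param_interval(params,
						      SNDRV_PCM_HW_PARAM_RATE);

	/* Refine the rate interval to the span covered by a fixed rate mask */
	return snd_interval_rate_bits(rate, SNDRV_PCM_RATE_44100 |
					    SNDRV_PCM_RATE_48000 |
					    SNDRV_PCM_RATE_96000);
}
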
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 6c2b6a62d9d2f8..c76f8ee339c87c 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -922,8 +922,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
 		goto unlock;
 	result = do_hw_free(substream);
 	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
-	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
- unlock:
+	if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
+		cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
+unlock:
 	snd_pcm_buffer_access_unlock(runtime);
 	return result;
 }
@@ -2435,6 +2436,7 @@ const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
 	.count = ARRAY_SIZE(rates),
 	.list = rates,
 };
+EXPORT_SYMBOL_GPL(snd_pcm_known_rates);
 
 static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
 				struct snd_pcm_hw_rule *rule)
diff --git a/sound/soc/apple/Kconfig b/sound/soc/apple/Kconfig
index 793f7782e0d721..3efc11602c3121 100644
--- a/sound/soc/apple/Kconfig
+++ b/sound/soc/apple/Kconfig
@@ -1,3 +1,15 @@
+config SND_SOC_APPLE_AOP_AUDIO
+	tristate "AOP audio driver"
+	depends on ARCH_APPLE || COMPILE_TEST
+	depends on RUST
+	select APPLE_AOP
+	select SND_DMAENGINE_PCM
+	default m if ARCH_APPLE
+	help
+	  This option enables an ASoC driver for sound devices connected to the AOP
+	  co-processor on ARM Macs. This includes the built-in microphone on those
+	  machines.
+
 config SND_SOC_APPLE_MCA
 	tristate "Apple Silicon MCA driver"
 	depends on ARCH_APPLE || COMPILE_TEST
@@ -6,3 +18,21 @@ config SND_SOC_APPLE_MCA
 	help
 	  This option enables an ASoC platform driver for MCA peripherals found
 	  on Apple Silicon SoCs.
+
+config SND_SOC_APPLE_MACAUDIO
+	tristate "Sound support for Apple Silicon Macs"
+	depends on ARCH_APPLE || COMPILE_TEST
+	select SND_SOC_APPLE_MCA
+	select SND_SIMPLE_CARD_UTILS
+	select APPLE_ADMAC if DMADEVICES
+	select COMMON_CLK_APPLE_NCO if COMMON_CLK
+	select SND_SOC_TAS2764 if I2C
+	select SND_SOC_TAS2770 if I2C
+	select SND_SOC_CS42L83 if I2C
+	select SND_SOC_CS42L84 if I2C
+	select REGULATOR_FIXED_VOLTAGE if REGULATOR
+	default ARCH_APPLE
+	help
+	  This option enables an ASoC machine-level driver for Apple Silicon Macs.
+	  It also selects the SoC and codec drivers required for overall sound
+	  support on these machines.
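
As an illustrative config fragment (assuming the ARCH_APPLE and RUST dependencies are already satisfied), a build for these machines might carry:

CONFIG_SND_SOC_APPLE_AOP_AUDIO=m
CONFIG_SND_SOC_APPLE_MACAUDIO=m
CONFIG_SND_SOC_APPLE_MCA=m

with the codec, DMA and clock drivers pulled in through the select statements above.
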
diff --git a/sound/soc/apple/Makefile b/sound/soc/apple/Makefile
index 1eb8fbef60c617..7d4901f407014d 100644
--- a/sound/soc/apple/Makefile
+++ b/sound/soc/apple/Makefile
@@ -1,3 +1,10 @@
+snd-soc-aop-y		:= aop_audio.o
+obj-$(CONFIG_SND_SOC_APPLE_AOP_AUDIO)	+= snd-soc-aop.o
+
 snd-soc-apple-mca-y	:= mca.o
 
 obj-$(CONFIG_SND_SOC_APPLE_MCA)	+= snd-soc-apple-mca.o
+
+snd-soc-macaudio-y	:= macaudio.o
+
+obj-$(CONFIG_SND_SOC_APPLE_MACAUDIO)	+= snd-soc-macaudio.o
diff --git a/sound/soc/apple/aop_audio.rs b/sound/soc/apple/aop_audio.rs
new file mode 100644
index 00000000000000..dda95d1f5cff99
--- /dev/null
+++ b/sound/soc/apple/aop_audio.rs
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+#![recursion_limit = "2048"]
+
+//! Apple AOP audio driver
+//!
+//! Copyright (C) The Asahi Linux Contributors
+
+use core::sync::atomic::{AtomicU32, Ordering};
+use core::{mem, ptr, slice};
+
+use kernel::{
+    bindings, c_str, device,
+    error::from_err_ptr,
+    init::Zeroable,
+    module_platform_driver,
+    of::{self, Node},
+    platform,
+    prelude::*,
+    soc::apple::aop::{from_fourcc, EPICService, AOP},
+    sync::Arc,
+    types::{ARef, ForeignOwnable},
+};
+
+const EPIC_SUBTYPE_WRAPPED_CALL: u16 = 0x20;
+const CALLTYPE_AUDIO_ATTACH_DEVICE: u32 = 0xc3000002;
+const CALLTYPE_AUDIO_SET_PROP: u32 = 0xc3000005;
+const PDM_NUM_COEFFS: usize = 120;
+const DECIMATION_RATIOS: [u8; 3] = [0xf, 5, 2];
+const COEFFICIENTS: [u8; PDM_NUM_COEFFS * mem::size_of::<u32>()] = [
+    0x88, 0x03, 0x00, 0x00, 0x82, 0x08, 0x00, 0x00, 0x51, 0x12, 0x00, 0x00, 0x0a, 0x23, 0x00, 0x00,
+    0xce, 0x3d, 0x00, 0x00, 0x97, 0x66, 0x00, 0x00, 0x43, 0xa2, 0x00, 0x00, 0x9c, 0xf6, 0x00, 0x00,
+    0x53, 0x6a, 0x01, 0x00, 0xe6, 0x04, 0x02, 0x00, 0x7e, 0xce, 0x02, 0x00, 0xae, 0xcf, 0x03, 0x00,
+    0x2e, 0x11, 0x05, 0x00, 0x7d, 0x9b, 0x06, 0x00, 0x75, 0x76, 0x08, 0x00, 0xd8, 0xa8, 0x0a, 0x00,
+    0xd2, 0x37, 0x0d, 0x00, 0x82, 0x26, 0x10, 0x00, 0x86, 0x75, 0x13, 0x00, 0x97, 0x22, 0x17, 0x00,
+    0x39, 0x28, 0x1b, 0x00, 0x89, 0x7d, 0x1f, 0x00, 0x2e, 0x16, 0x24, 0x00, 0x69, 0xe2, 0x28, 0x00,
+    0x56, 0xcf, 0x2d, 0x00, 0x51, 0xc7, 0x32, 0x00, 0x80, 0xb2, 0x37, 0x00, 0x87, 0x77, 0x3c, 0x00,
+    0x4c, 0xfc, 0x40, 0x00, 0xd9, 0x26, 0x45, 0x00, 0x47, 0xde, 0x48, 0x00, 0xa0, 0x0b, 0x4c, 0x00,
+    0xc1, 0x9a, 0x4e, 0x00, 0x1f, 0x7b, 0x50, 0x00, 0x68, 0xa0, 0x51, 0x00, 0x06, 0x03, 0x52, 0x00,
+    0x4a, 0x25, 0x00, 0x00, 0x4c, 0xaf, 0x00, 0x00, 0xc0, 0x07, 0x02, 0x00, 0x45, 0x99, 0x04, 0x00,
+    0x9a, 0x84, 0x08, 0x00, 0x7d, 0x38, 0x0d, 0x00, 0x5f, 0x1a, 0x11, 0x00, 0xd9, 0x81, 0x11, 0x00,
+    0x80, 0x44, 0x0b, 0x00, 0x8e, 0xe5, 0xfb, 0xff, 0xca, 0x32, 0xe3, 0xff, 0x52, 0xc7, 0xc4, 0xff,
+    0xa6, 0xbc, 0xa8, 0xff, 0x83, 0xe6, 0x9a, 0xff, 0xb8, 0x5b, 0xa8, 0xff, 0x6b, 0xae, 0xdb, 0xff,
+    0xe7, 0xd8, 0x38, 0x00, 0x24, 0x42, 0xba, 0x00, 0x33, 0x20, 0x50, 0x01, 0x6e, 0xdc, 0xe2, 0x01,
+    0x42, 0x23, 0x58, 0x02, 0x2c, 0x50, 0x99, 0x02, 0xcf, 0xfa, 0xff, 0xff, 0x53, 0x0a, 0xff, 0xff,
+    0x66, 0x23, 0xfb, 0xff, 0xa0, 0x3e, 0xf4, 0xff, 0xe6, 0x68, 0xf0, 0xff, 0xb8, 0x35, 0xf7, 0xff,
+    0x56, 0xec, 0x04, 0x00, 0x37, 0xa3, 0x09, 0x00, 0x00, 0xd4, 0xfe, 0xff, 0x78, 0xa3, 0xf5, 0xff,
+    0x03, 0xbf, 0xfe, 0xff, 0x84, 0xd5, 0x0b, 0x00, 0xbe, 0x0b, 0x04, 0x00, 0x52, 0x54, 0xf2, 0xff,
+    0x6d, 0x3f, 0xf8, 0xff, 0xc5, 0x7f, 0x0f, 0x00, 0xe6, 0x9e, 0x0c, 0x00, 0x79, 0x03, 0xef, 0xff,
+    0xd5, 0x33, 0xed, 0xff, 0xec, 0xd1, 0x11, 0x00, 0x7d, 0x69, 0x1a, 0x00, 0xd6, 0x55, 0xee, 0xff,
+    0x88, 0x66, 0xdc, 0xff, 0x57, 0x26, 0x10, 0x00, 0xc7, 0x8d, 0x2e, 0x00, 0x82, 0x2e, 0xf3, 0xff,
+    0x63, 0x69, 0xc4, 0xff, 0xcd, 0x08, 0x07, 0x00, 0x35, 0x34, 0x4b, 0x00, 0xaf, 0x21, 0x02, 0x00,
+    0x83, 0xb6, 0xa1, 0xff, 0xe2, 0xd5, 0xef, 0xff, 0x94, 0x9b, 0x76, 0x00, 0xf3, 0xd7, 0x25, 0x00,
+    0xff, 0xfc, 0x67, 0xff, 0xe3, 0xac, 0xb6, 0xff, 0x52, 0x1b, 0xcc, 0x00, 0x3c, 0x8a, 0x8b, 0x00,
+    0x9f, 0x0c, 0xcd, 0xfe, 0x5c, 0x68, 0xcc, 0xfe, 0x4d, 0xc5, 0x98, 0x02, 0x82, 0xcf, 0xfb, 0x06,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+];
+const FILTER_LENGTHS: u32 = 0x542c47;
+const AUDIO_DEV_PDM0: u32 = from_fourcc(b"pdm0");
+const AUDIO_DEV_LPAI: u32 = from_fourcc(b"lpai");
+const AUDIO_DEV_HPAI: u32 = from_fourcc(b"hpai");
+const POWER_STATE_OFF: u32 = from_fourcc(b"idle");
+const POWER_STATE_IDLE: u32 = from_fourcc(b"pw1 ");
+const POWER_STATE_ON: u32 = from_fourcc(b"pwrd");
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default)]
+struct AudioAttachDevice {
+    _zero0: u32,
+    unk0: u32,
+    calltype: u32,
+    _zero1: u64,
+    _zero2: u64,
+    _pad0: u32,
+    len: u64,
+    dev_id: u32,
+    _pad1: u32,
+}
+
+impl AudioAttachDevice {
+    fn new(dev_id: u32) -> AudioAttachDevice {
+        AudioAttachDevice {
+            unk0: 0xFFFFFFFF,
+            calltype: CALLTYPE_AUDIO_ATTACH_DEVICE,
+            dev_id,
+            len: 0x2c,
+            ..AudioAttachDevice::default()
+        }
+    }
+}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default)]
+struct LpaiChannelConfig {
+    unk1: u32,
+    unk2: u32,
+    unk3: u32,
+    unk4: u32,
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+struct PDMConfig {
+    bytes_per_sample: u32,
+    clock_source: u32,
+    pdm_frequency: u32,
+    pdmc_frequency: u32,
+    slow_clock_speed: u32,
+    fast_clock_speed: u32,
+    channel_polarity_select: u32,
+    channel_phase_select: u32,
+    unk1: u32,
+    unk2: u16,
+    ratio1: u8,
+    ratio2: u8,
+    ratio3: u8,
+    _pad0: u8,
+    filter_lengths: u32,
+    coeff_bulk: u32,
+    coeffs: [u8; PDM_NUM_COEFFS * mem::size_of::<u32>()],
+    unk3: u32,
+    mic_turn_on_time_ms: u32,
+    _zero0: u64,
+    _zero1: u64,
+    unk4: u32,
+    mic_settle_time_ms: u32,
+    _zero2: [u8; 69], // ?????
+}
+
+unsafe impl Zeroable for PDMConfig {}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+struct DecimatorConfig {
+    latency: u32,
+    ratio1: u8,
+    ratio2: u8,
+    ratio3: u8,
+    _pad0: u8,
+    filter_lengths: u32,
+    coeff_bulk: u32,
+    coeffs: [u8; PDM_NUM_COEFFS * mem::size_of::<u32>()],
+}
+
+unsafe impl Zeroable for DecimatorConfig {}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default, Debug)]
+struct PowerSetting {
+    dev_id: u32,
+    cookie: u32,
+    _unk0: u32,
+    _zero0: u64,
+    target_pstate: u32,
+    unk1: u32,
+    _zero1: [u8; 20],
+}
+
+impl PowerSetting {
+    fn new(dev_id: u32, cookie: u32, target_pstate: u32, unk1: u32) -> PowerSetting {
+        PowerSetting {
+            dev_id,
+            cookie,
+            target_pstate,
+            unk1,
+            ..PowerSetting::default()
+        }
+    }
+}
+
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default, Debug)]
+struct AudioSetDeviceProp<T> {
+    _zero0: u32,
+    unk0: u32,
+    calltype: u32,
+    _zero1: u64,
+    _zero2: u64,
+    _pad0: u32,
+    len: u64,
+    dev_id: u32,
+    modifier: u32,
+    len2: u32,
+    data: T,
+}
+
+impl<T: Default> AudioSetDeviceProp<T> {
+    fn new(dev_id: u32, modifier: u32, data: T) -> AudioSetDeviceProp<T> {
+        AudioSetDeviceProp {
+            unk0: 0xFFFFFFFF,
+            calltype: CALLTYPE_AUDIO_SET_PROP,
+            dev_id,
+            modifier,
+            len: mem::size_of::<T>() as u64 + 0x30,
+            len2: mem::size_of::<T>() as u32,
+            data,
+            ..AudioSetDeviceProp::default()
+        }
+    }
+}
+
+unsafe impl<T> Zeroable for AudioSetDeviceProp<T> {}
+
+impl<T: Zeroable> AudioSetDeviceProp<T> {
+    fn try_init<E>(
+        dev_id: u32,
+        modifier: u32,
+        data: impl Init<T, E>,
+    ) -> impl Init<AudioSetDeviceProp<T>, Error>
+    where
+        Error: From<E>,
+    {
+        try_init!(
+            AudioSetDeviceProp {
+                unk0: 0xFFFFFFFF,
+                calltype: CALLTYPE_AUDIO_SET_PROP,
+                dev_id,
+                modifier,
+                len: mem::size_of::<T>() as u64 + 0x30,
+                len2: mem::size_of::<T>() as u32,
+                data <- data,
+                ..Zeroable::zeroed()
+            }
+        )
+    }
+}
+
+struct SndSocAopData {
+    dev: ARef<device::Device>,
+    adata: Arc<dyn AOP>,
+    service: EPICService,
+    pstate_cookie: AtomicU32,
+    of: Node,
+}
+
+impl SndSocAopData {
+    fn new(
+        dev: ARef<device::Device>,
+        adata: Arc<dyn AOP>,
+        service: EPICService,
+        of: Node,
+    ) -> Result<Arc<SndSocAopData>> {
+        Ok(Arc::new(
+            SndSocAopData {
+                dev,
+                adata,
+                service,
+                of,
+                pstate_cookie: AtomicU32::new(1),
+            },
+            GFP_KERNEL,
+        )?)
+    }
+    fn set_pdm_config(&self) -> Result<()> {
+        let pdm_cfg = init!(PDMConfig {
+            bytes_per_sample: 2,
+            clock_source: 0x706c6c20, // 'pll '
+            pdm_frequency: 2400000,
+            pdmc_frequency: 24000000,
+            slow_clock_speed: 24000000,
+            fast_clock_speed: 24000000,
+            channel_polarity_select: 256,
+            channel_phase_select: 0,
+            unk1: 0xf7600,
+            unk2: 0,
+            ratio1: DECIMATION_RATIOS[0],
+            ratio2: DECIMATION_RATIOS[1],
+            ratio3: DECIMATION_RATIOS[2],
+            filter_lengths: FILTER_LENGTHS,
+            coeff_bulk: PDM_NUM_COEFFS as u32,
+            coeffs: COEFFICIENTS,
+            unk3: 1,
+            mic_turn_on_time_ms: 20,
+            unk4: 1,
+            mic_settle_time_ms: 50,
+            ..Zeroable::zeroed()
+        });
+        let set_prop = AudioSetDeviceProp::<PDMConfig>::try_init(AUDIO_DEV_PDM0, 200, pdm_cfg);
+        let msg = KBox::try_init(set_prop, GFP_KERNEL)?;
+        let ret = self.epic_wrapped_call(msg.as_ref())?;
+        if ret != 0 {
+            dev_err!(self.dev, "Unable to set pdm config, return code {}", ret);
+            return Err(EIO);
+        } else {
+            Ok(())
+        }
+    }
+    fn set_decimator_config(&self) -> Result<()> {
+        let pdm_cfg = init!(DecimatorConfig {
+            latency: 15,
+            ratio1: DECIMATION_RATIOS[0],
+            ratio2: DECIMATION_RATIOS[1],
+            ratio3: DECIMATION_RATIOS[2],
+            filter_lengths: FILTER_LENGTHS,
+            coeff_bulk: PDM_NUM_COEFFS as u32,
+            coeffs: COEFFICIENTS,
+            ..Zeroable::zeroed()
+        });
+        let set_prop =
+            AudioSetDeviceProp::<DecimatorConfig>::try_init(AUDIO_DEV_PDM0, 210, pdm_cfg);
+        let msg = KBox::try_init(set_prop, GFP_KERNEL)?;
+        let ret = self.epic_wrapped_call(msg.as_ref())?;
+        if ret != 0 {
+            dev_err!(
+                self.dev,
+                "Unable to set decimator config, return code {}",
+                ret
+            );
+            return Err(EIO);
+        } else {
+            Ok(())
+        }
+    }
+    fn set_lpai_channel_cfg(&self) -> Result<()> {
+        let cfg = LpaiChannelConfig {
+            unk1: 7,
+            unk2: 7,
+            unk3: 1,
+            unk4: 7,
+        };
+        let msg = AudioSetDeviceProp::new(AUDIO_DEV_LPAI, 301, cfg);
+        let ret = self.epic_wrapped_call(&msg)?;
+        if ret != 0 {
+            dev_err!(
+                self.dev,
+                "Unable to set lpai channel config, return code {}",
+                ret
+            );
+            return Err(EIO);
+        } else {
+            Ok(())
+        }
+    }
+    fn audio_attach_device(&self, dev_id: u32) -> Result<()> {
+        let msg = AudioAttachDevice::new(dev_id);
+        let ret = self.epic_wrapped_call(&msg)?;
+        if ret != 0 {
+            dev_err!(
+                self.dev,
+                "Unable to attach device {:?}, return code {}",
+                dev_id,
+                ret
+            );
+            return Err(EIO);
+        } else {
+            Ok(())
+        }
+    }
+    fn set_audio_power(&self, pstate: u32, unk1: u32) -> Result<()> {
+        let set_pstate = PowerSetting::new(
+            AUDIO_DEV_HPAI,
+            self.pstate_cookie.fetch_add(1, Ordering::Relaxed),
+            pstate,
+            unk1,
+        );
+        let msg = AudioSetDeviceProp::new(AUDIO_DEV_HPAI, 202, set_pstate);
+        let ret = self.epic_wrapped_call(&msg)?;
+        if ret != 0 {
+            dev_err!(
+                self.dev,
+                "Unable to set power state {:?}, return code {}",
+                pstate,
+                ret
+            );
+            return Err(EIO);
+        } else {
+            Ok(())
+        }
+    }
+    fn epic_wrapped_call<T>(&self, data: &T) -> Result<u32> {
+        let msg_bytes =
+            unsafe { slice::from_raw_parts(data as *const T as *const u8, mem::size_of::<T>()) };
+        self.adata
+            .epic_call(&self.service, EPIC_SUBTYPE_WRAPPED_CALL, msg_bytes)
+    }
+    fn request_dma_channel(&self) -> Result<*mut bindings::dma_chan> {
+        let res = unsafe {
+            from_err_ptr(bindings::of_dma_request_slave_channel(
+                self.of.as_raw(),
+                c_str!("dma").as_ptr() as _,
+            ))
+        };
+        if res.is_err() {
+            dev_err!(self.dev, "Unable to get dma channel");
+        }
+        res
+    }
+}
+
+#[repr(transparent)]
+struct SndSocAopDriver(*mut bindings::snd_card);
+
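+/// Copies `source` into the beginning of `target`; the caller must ensure
+/// `target` (here the fixed-size name buffers of the ALSA card/PCM structs)
+/// is at least as long as `source`, otherwise the indexing below panics.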
+fn copy_str(target: &mut [u8], source: &[u8]) {
+    for i in 0..source.len() {
+        target[i] = source[i];
+    }
+}
+
+unsafe fn dmaengine_slave_config(
+    chan: *mut bindings::dma_chan,
+    config: *mut bindings::dma_slave_config,
+) -> i32 {
+    unsafe {
+        match (*(*chan).device).device_config {
+            Some(dc) => dc(chan, config),
+            None => ENOSYS.to_errno(),
+        }
+    }
+}
+
+unsafe extern "C" fn aop_hw_params(
+    substream: *mut bindings::snd_pcm_substream,
+    params: *mut bindings::snd_pcm_hw_params,
+) -> i32 {
+    let chan = unsafe { bindings::snd_dmaengine_pcm_get_chan(substream) };
+    let mut slave_config = bindings::dma_slave_config::default();
+    let ret =
+        unsafe { bindings::snd_hwparams_to_dma_slave_config(substream, params, &mut slave_config) };
+    if ret < 0 {
+        return ret;
+    }
+    slave_config.src_port_window_size = 4;
+    unsafe { dmaengine_slave_config(chan, &mut slave_config) }
+}
+
+unsafe extern "C" fn aop_pcm_open(substream: *mut bindings::snd_pcm_substream) -> i32 {
+    let data = unsafe { Arc::<SndSocAopData>::borrow((*substream).private_data) };
+    if let Err(e) = data.set_audio_power(POWER_STATE_IDLE, 0) {
+        dev_err!(data.dev, "Unable to enter 'pw1 ' state");
+        return e.to_errno();
+    }
+    let mut hwparams = bindings::snd_pcm_hardware {
+        info: bindings::SNDRV_PCM_INFO_MMAP
+            | bindings::SNDRV_PCM_INFO_MMAP_VALID
+            | bindings::SNDRV_PCM_INFO_INTERLEAVED,
+        formats: bindings::BINDINGS_SNDRV_PCM_FMTBIT_FLOAT_LE,
+        subformats: 0,
+        rates: bindings::SNDRV_PCM_RATE_48000,
+        rate_min: 48000,
+        rate_max: 48000,
+        channels_min: 3,
+        channels_max: 3,
+        periods_min: 2,
+        buffer_bytes_max: usize::MAX,
+        period_bytes_max: 0x4000,
+        periods_max: u32::MAX,
+        period_bytes_min: 256,
+        fifo_size: 16,
+    };
+    let dma_chan = match data.request_dma_channel() {
+        Ok(dc) => dc,
+        Err(e) => return e.to_errno(),
+    };
+    let ret = unsafe {
+        let mut dai_data = bindings::snd_dmaengine_dai_dma_data::default();
+        bindings::snd_dmaengine_pcm_refine_runtime_hwparams(
+            substream,
+            &mut dai_data,
+            &mut hwparams,
+            dma_chan,
+        )
+    };
+    if ret != 0 {
+        dev_err!(data.dev, "Unable to refine hwparams");
+        return ret;
+    }
+    if let Err(e) = data.set_audio_power(POWER_STATE_ON, 1) {
+        dev_err!(data.dev, "Unable to power mic on");
+        return e.to_errno();
+    }
+    unsafe {
+        (*(*substream).runtime).hw = hwparams;
+        bindings::snd_dmaengine_pcm_open(substream, dma_chan)
+    }
+}
+
+unsafe extern "C" fn aop_pcm_prepare(_: *mut bindings::snd_pcm_substream) -> i32 {
+    0
+}
+
+unsafe extern "C" fn aop_pcm_close(substream: *mut bindings::snd_pcm_substream) -> i32 {
+    let data = unsafe { Arc::<SndSocAopData>::borrow((*substream).private_data) };
+    if let Err(e) = data.set_audio_power(POWER_STATE_IDLE, 1) {
+        dev_err!(data.dev, "Unable to power mic off");
+        return e.to_errno();
+    }
+    let ret = unsafe { bindings::snd_dmaengine_pcm_close_release_chan(substream) };
+    if ret != 0 {
+        dev_err!(data.dev, "Unable to close channel");
+        return ret;
+    }
+    if let Err(e) = data.set_audio_power(POWER_STATE_OFF, 0) {
+        dev_err!(data.dev, "Unable to enter 'idle' power state");
+        return e.to_errno();
+    }
+    0
+}
+
+unsafe extern "C" fn aop_pcm_free_private(pcm: *mut bindings::snd_pcm) {
+    unsafe {
+        Arc::<SndSocAopData>::from_foreign((*pcm).private_data);
+    }
+}
+
+impl SndSocAopDriver {
+    const VTABLE: bindings::snd_pcm_ops = bindings::snd_pcm_ops {
+        open: Some(aop_pcm_open),
+        close: Some(aop_pcm_close),
+        prepare: Some(aop_pcm_prepare),
+        trigger: Some(bindings::snd_dmaengine_pcm_trigger),
+        pointer: Some(bindings::snd_dmaengine_pcm_pointer),
+        ioctl: None,
+        hw_params: Some(aop_hw_params),
+        hw_free: None,
+        sync_stop: None,
+        get_time_info: None,
+        fill_silence: None,
+        copy: None,
+        page: None,
+        mmap: None,
+        ack: None,
+    };
+    fn new(data: Arc<SndSocAopData>) -> Result<Self> {
+        let mut this = SndSocAopDriver(ptr::null_mut());
+        let ret = unsafe {
+            bindings::snd_card_new(
+                data.dev.as_raw(),
+                -1,
+                ptr::null(),
+                THIS_MODULE.as_ptr(),
+                0,
+                &mut this.0,
+            )
+        };
+        if ret < 0 {
+            dev_err!(data.dev, "Unable to allocate sound card");
+            return Err(Error::from_errno(ret));
+        }
+        let chassis = data
+            .of
+            .find_property(c_str!("apple,chassis-name"))
+            .ok_or(EIO)?;
+        let machine_kind = data
+            .of
+            .find_property(c_str!("apple,machine-kind"))
+            .ok_or(EIO)?;
+        unsafe {
+            let name = b"aop_audio\0";
+            let target = (*this.0).driver.as_mut();
+            copy_str(target, name.as_ref());
+        }
+        unsafe {
+            let prefix = b"Apple";
+            let target = (*this.0).id.as_mut();
+            copy_str(target, prefix.as_ref());
+            let mut ptr = prefix.len();
+            copy_str(&mut target[ptr..], chassis.value());
+            ptr += chassis.len() - 1;
+            let suffix = b"HPAI\0";
+            copy_str(&mut target[ptr..], suffix);
+        }
+        let longname_suffix = b"High-Power Audio Interface\0";
+        let mut machine_name = KVec::with_capacity(
+            chassis.len() + 1 + machine_kind.len() + longname_suffix.len(),
+            GFP_KERNEL,
+        )?;
+        machine_name.extend_from_slice(machine_kind.value(), GFP_KERNEL)?;
+        let last_item = machine_name.len() - 1;
+        machine_name[last_item] = b' ';
+        machine_name.extend_from_slice(chassis.value(), GFP_KERNEL)?;
+        let last_item = machine_name.len() - 1;
+        machine_name[last_item] = b' ';
+        unsafe {
+            let target = (*this.0).shortname.as_mut();
+            copy_str(target, machine_name.as_ref());
+            let ptr = machine_name.len();
+            let suffix = b"HPAI\0";
+            copy_str(&mut target[ptr..], suffix);
+        }
+        machine_name.extend_from_slice(longname_suffix, GFP_KERNEL)?;
+        unsafe {
+            let target = (*this.0).longname.as_mut();
+            copy_str(target, machine_name.as_ref());
+        }
+
+        let mut pcm = ptr::null_mut();
+        let ret =
+            unsafe { bindings::snd_pcm_new(this.0, machine_name.as_ptr() as _, 0, 0, 1, &mut pcm) };
+        if ret < 0 {
+            dev_err!(data.dev, "Unable to allocate PCM device");
+            return Err(Error::from_errno(ret));
+        }
+
+        unsafe {
+            bindings::snd_pcm_set_ops(
+                pcm,
+                bindings::SNDRV_PCM_STREAM_CAPTURE as i32,
+                &Self::VTABLE,
+            );
+        }
+        data.set_audio_power(POWER_STATE_IDLE, 0)?;
+        let dma_chan = data.request_dma_channel()?;
+        let ret = unsafe {
+            bindings::snd_pcm_set_managed_buffer_all(
+                pcm,
+                bindings::SNDRV_DMA_TYPE_DEV_IRAM as i32,
+                (*(*dma_chan).device).dev,
+                0,
+                0,
+            )
+        };
+        if ret < 0 {
+            dev_err!(data.dev, "Unable to allocate dma buffers");
+            return Err(Error::from_errno(ret));
+        }
+        unsafe {
+            bindings::dma_release_channel(dma_chan);
+        }
+        data.set_audio_power(POWER_STATE_OFF, 0)?;
+
+        unsafe {
+            (*pcm).private_data = data.clone().into_foreign() as _;
+            (*pcm).private_free = Some(aop_pcm_free_private);
+            (*pcm).info_flags = 0;
+            let name = c_str!("aop_audio");
+            copy_str((*pcm).name.as_mut(), name.as_ref());
+        }
+
+        let ret = unsafe { bindings::snd_card_register(this.0) };
+        if ret < 0 {
+            dev_err!(data.dev, "Unable to register sound card");
+            return Err(Error::from_errno(ret));
+        }
+        Ok(this)
+    }
+}
+
+impl Drop for SndSocAopDriver {
+    fn drop(&mut self) {
+        if !self.0.is_null() {
+            unsafe {
+                bindings::snd_card_free(self.0);
+            }
+        }
+    }
+}
+
+unsafe impl Send for SndSocAopDriver {}
+unsafe impl Sync for SndSocAopDriver {}
+
+kernel::of_device_table!(OF_TABLE, MODULE_OF_TABLE, (), [] as [(of::DeviceId, ()); 0]);
+
+impl platform::Driver for SndSocAopDriver {
+    type IdInfo = ();
+
+    const OF_ID_TABLE: Option<of::IdTable<()>> = Some(&OF_TABLE);
+
+    fn probe(
+        pdev: &mut platform::Device,
+        _info: Option<&()>,
+    ) -> Result<Pin<KBox<SndSocAopDriver>>> {
+        let dev = ARef::<device::Device>::from(pdev.as_ref());
+        let parent = pdev.as_ref().parent().unwrap();
+        // SAFETY: our parent is AOP, and AopDriver is repr(transparent) for Arc<dyn Aop>
+        let adata_ptr = unsafe { Pin::<KBox<Arc<dyn AOP>>>::borrow(parent.get_drvdata()) };
+        let adata = (&*adata_ptr).clone();
+        // SAFETY: AOP sets the platform data correctly
+        let svc = unsafe { *((*dev.as_raw()).platform_data as *const EPICService) };
+        let of = parent
+            .of_node()
+            .ok_or(EIO)?
+            .get_child_by_name(c_str!("audio"))
+            .ok_or(EIO)?;
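+        // Skip devices flagged apple,no-beamforming unless the user opted in
+        // via the mic_check_123 module parameter.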
+        let audio = *module_parameters::mic_check_123.get() != 0;
+        if !audio && of.property_present(c_str!("apple,no-beamforming")) {
+            return Err(ENODEV);
+        }
+        let data = SndSocAopData::new(dev, adata, svc, of)?;
+        for dev in [AUDIO_DEV_PDM0, AUDIO_DEV_HPAI, AUDIO_DEV_LPAI] {
+            data.audio_attach_device(dev)?;
+        }
+        data.set_lpai_channel_cfg()?;
+        data.set_pdm_config()?;
+        data.set_decimator_config()?;
+        Ok(Box::pin(SndSocAopDriver::new(data)?, GFP_KERNEL)?)
+    }
+}
+
+module_platform_driver! {
+    type: SndSocAopDriver,
+    name: "snd_soc_apple_aop",
+    license: "Dual MIT/GPL",
+    alias: ["platform:snd_soc_apple_aop"],
+    params: {
+        mic_check_123: u8 {
+            default: 0,
+            description: "Enable mics without user space handling",
+        },
+    },
+}
diff --git a/sound/soc/apple/macaudio.c b/sound/soc/apple/macaudio.c
new file mode 100644
index 00000000000000..31f6ec45f80979
--- /dev/null
+++ b/sound/soc/apple/macaudio.c
@@ -0,0 +1,1679 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ASoC machine driver for Apple Silicon Macs
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Based on sound/soc/qcom/{sc7180.c|common.c}
+ * Copyright (c) 2018, Linaro Limited.
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ *
+ * The platform driver has independent frontend and backend DAIs with the
+ * option of routing backends to any of the frontends. The platform
+ * driver configures the routing based on DPCM couplings in ASoC runtime
+ * structures, which in turn are determined from DAPM paths by ASoC. But the
+ * platform driver doesn't supply relevant DAPM paths and leaves that up for
+ * the machine driver to fill in. The filled-in virtual topology can be
+ * anything as long as any backend isn't connected to more than one frontend
+ * at any given time. (The limitation is due to the unsupported case of
+ * reparenting of live BEs.)
+ */
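+
+/*
+ * Sketch of the resulting routing (illustrative; the exact set of backends
+ * comes from the devicetree):
+ *
+ *   Primary FE       -> headphones BE (or a stereo speaker BE)
+ *   Secondary FE     -> multi-channel speaker-array BEs
+ *   Speaker Sense FE <- speaker sense capture BE
+ */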
+
+/* #define DEBUG */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/simple_card_utils.h>
+#include <sound/soc.h>
+#include <sound/soc-jack.h>
+#include <uapi/linux/input-event-codes.h>
+
+#define DRIVER_NAME "snd-soc-macaudio"
+
+/*
+ * CPU side is bit and frame clock provider
+ * I2S has both clocks inverted
+ */
+#define MACAUDIO_DAI_FMT	(SND_SOC_DAIFMT_I2S | \
+				 SND_SOC_DAIFMT_CBC_CFC | \
+				 SND_SOC_DAIFMT_GATED | \
+				 SND_SOC_DAIFMT_IB_IF)
+#define MACAUDIO_JACK_MASK	(SND_JACK_HEADSET | SND_JACK_HEADPHONE)
+#define MACAUDIO_SLOTWIDTH	32
+/*
+ * Maximum BCLK frequency
+ *
+ * Codec maximums:
+ *  CS42L42  26.0 MHz
+ *  TAS2770  27.1 MHz
+ *  TAS2764  24.576 MHz
+ */
+#define MACAUDIO_MAX_BCLK_FREQ	24576000
+
+#define SPEAKER_MAGIC_VALUE (s32)0xdec1be15
+/* milliseconds */
+#define SPEAKER_LOCK_TIMEOUT 250
+
+enum macaudio_amp_type {
+	AMP_NONE,
+	AMP_TAS5770,
+	AMP_SN012776,
+	AMP_SSM3515,
+};
+
+enum macaudio_spkr_config {
+	SPKR_NONE,	/* No speakers */
+	SPKR_1W,	/* 1 woofer / ch */
+	SPKR_2W,	/* 2 woofers / ch */
+	SPKR_1W1T,	/* 1 woofer + 1 tweeter / ch */
+	SPKR_2W1T,	/* 2 woofers + 1 tweeter / ch */
+};
+
+struct macaudio_platform_cfg {
+	bool enable_speakers;
+	enum macaudio_amp_type amp;
+	enum macaudio_spkr_config speakers;
+	bool stereo;
+	int amp_gain;
+	int safe_vol;
+};
+
+static const char *volume_control_names[] = {
+	[AMP_TAS5770] = "* Speaker Playback Volume",
+	[AMP_SN012776] = "* Speaker Volume",
+	[AMP_SSM3515] = "* DAC Playback Volume",
+};
+
+#define SN012776_0DB 201
+#define SN012776_DB(x) (SN012776_0DB + 2 * (x))
+/* Same as SN012776 */
+#define TAS5770_0DB SN012776_0DB
+#define TAS5770_DB(x) SN012776_DB(x)
+
+#define SSM3515_0DB (255 - 64) /* +24dB max, steps of 3/8 dB */
+#define SSM3515_DB(x) (SSM3515_0DB + (8 * (x) / 3))
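+/*
+ * For example, SN012776_DB(-18) evaluates to 165 (0.5 dB per register step)
+ * and SSM3515_DB(-18) to 143 (3/8 dB per register step).
+ */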
+
+struct macaudio_snd_data {
+	struct snd_soc_card card;
+	struct snd_soc_jack jack;
+	int jack_plugin_state;
+
+	const struct macaudio_platform_cfg *cfg;
+	bool has_speakers;
+	bool has_sense;
+	bool has_safety;
+	unsigned int max_channels;
+
+	struct macaudio_link_props {
+		/* frontend props */
+		unsigned int bclk_ratio;
+		bool is_sense;
+
+		/* backend props */
+		bool is_speakers;
+		bool is_headphones;
+		unsigned int tdm_mask;
+	} *link_props;
+
+	int speaker_sample_rate;
+	struct snd_kcontrol *speaker_sample_rate_kctl;
+
+	struct mutex volume_lock_mutex;
+	bool speaker_volume_unlocked;
+	bool speaker_volume_was_locked;
+	struct snd_kcontrol *speaker_lock_kctl;
+	struct snd_ctl_file *speaker_lock_owner;
+	u64 bes_active;
+	bool speaker_lock_timeout_enabled;
+	ktime_t speaker_lock_timeout;
+	ktime_t speaker_lock_remain;
+	struct delayed_work lock_timeout_work;
+	struct work_struct lock_update_work;
+
+};
+
+static int please_blow_up_my_speakers;
+module_param(please_blow_up_my_speakers, int, 0644);
+MODULE_PARM_DESC(please_blow_up_my_speakers, "Allow unsafe or untested operating configurations");
+
+SND_SOC_DAILINK_DEFS(primary,
+	DAILINK_COMP_ARRAY(COMP_CPU("mca-pcm-0")), // CPU
+	DAILINK_COMP_ARRAY(COMP_DUMMY()), // CODEC
+	DAILINK_COMP_ARRAY(COMP_EMPTY())); // platform (filled at runtime)
+
+SND_SOC_DAILINK_DEFS(secondary,
+	DAILINK_COMP_ARRAY(COMP_CPU("mca-pcm-1")), // CPU
+	DAILINK_COMP_ARRAY(COMP_DUMMY()), // CODEC
+	DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(sense,
+	DAILINK_COMP_ARRAY(COMP_CPU("mca-pcm-2")), // CPU
+	DAILINK_COMP_ARRAY(COMP_DUMMY()), // CODEC
+	DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link macaudio_fe_links[] = {
+	{
+		.name = "Primary",
+		.stream_name = "Primary",
+		.dynamic = 1,
+		.dpcm_merged_rate = 1,
+		.dpcm_merged_chan = 1,
+		.dpcm_merged_format = 1,
+		.dai_fmt = MACAUDIO_DAI_FMT,
+		SND_SOC_DAILINK_REG(primary),
+	},
+	{
+		.name = "Secondary",
+		.stream_name = "Secondary",
+		.dynamic = 1,
+		.dpcm_merged_rate = 1,
+		.dpcm_merged_chan = 1,
+		.dpcm_merged_format = 1,
+		.dai_fmt = MACAUDIO_DAI_FMT,
+		.playback_only = 1,
+		SND_SOC_DAILINK_REG(secondary),
+	},
+	{
+		.name = "Speaker Sense",
+		.stream_name = "Speaker Sense",
+		.capture_only = 1,
+		.dynamic = 1,
+		.dai_fmt = (SND_SOC_DAIFMT_I2S | \
+					SND_SOC_DAIFMT_CBP_CFP | \
+					SND_SOC_DAIFMT_GATED | \
+					SND_SOC_DAIFMT_IB_IF),
+		SND_SOC_DAILINK_REG(sense),
+	},
+};
+
+static struct macaudio_link_props macaudio_fe_link_props[] = {
+	{
+		/*
+		 * Primary FE
+		 *
+		 * The bclk ratio at 64 for the primary frontend is important
+		 * to ensure that the headphones codec's idea of left and right
+		 * in a stereo stream over I2S fits in nicely with everyone else's.
+		 * (This is until the headphones codec's driver supports
+		 * set_tdm_slot.)
+		 *
+		 * The low bclk ratio precludes transmitting more than two
+		 * channels over I2S, but that's okay since there is the secondary
+		 * FE for speaker arrays anyway.
+		 */
+		.bclk_ratio = 64,
+	},
+	{
+		/*
+		 * Secondary FE
+		 *
+		 * Here we want frames plenty long to be able to drive all
+		 * those fancy speaker arrays.
+		 */
+		.bclk_ratio = 256,
+	},
+	{
+		.is_sense = 1,
+	}
+};
+
+static void macaudio_vlimit_unlock(struct macaudio_snd_data *ma, bool unlock)
+{
+	int ret, max;
+	const char *name = volume_control_names[ma->cfg->amp];
+
+	if (!name) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	switch (ma->cfg->amp) {
+	case AMP_NONE:
+		WARN_ON_ONCE(1);
+		return;
+	case AMP_TAS5770:
+		if (unlock)
+			max = TAS5770_0DB;
+		else
+			max = 1; //TAS5770_DB(ma->cfg->safe_vol);
+		break;
+	case AMP_SN012776:
+		if (unlock)
+			max = SN012776_0DB;
+		else
+			max = 1; //SN012776_DB(ma->cfg->safe_vol);
+		break;
+	case AMP_SSM3515:
+		if (unlock)
+			max = SSM3515_0DB;
+		else
+			max = SSM3515_DB(ma->cfg->safe_vol);
+		break;
+	}
+
+	ret = snd_soc_limit_volume(&ma->card, name, max);
+	if (ret < 0)
+		dev_err(ma->card.dev, "Failed to %slock volume %s: %d\n",
+			unlock ? "un" : "", name, ret);
+}
+
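+/*
+ * The volume limit is lifted only while a single process holds the speaker
+ * lock control, also holds every amplifier volume control, and keeps the
+ * lock refreshed within the timeout while playback is running; if any of
+ * these conditions lapses, the safe volume cap is reapplied.
+ */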
+static void macaudio_vlimit_update(struct macaudio_snd_data *ma)
+{
+	int i;
+	bool unlock = true;
+	struct snd_kcontrol *kctl;
+	const char *reason;
+
+	/* Do nothing if there is no safety configured */
+	if (!ma->has_safety)
+		return;
+
+	/* Check that someone is holding the main lock */
+	if (!ma->speaker_lock_owner) {
+		reason = "Main control not locked";
+		unlock = false;
+	}
+
+	/* Check that the control has been pinged within the timeout */
+	if (ma->speaker_lock_remain <= 0) {
+		reason = "Lock timeout";
+		unlock = false;
+	}
+
+	/* Check that *every* limited control is locked by the same owner */
+	list_for_each_entry(kctl, &ma->card.snd_card->controls, list) {
+		if (!snd_soc_control_matches(kctl, volume_control_names[ma->cfg->amp]))
+			continue;
+
+		for (i = 0; i < kctl->count; i++) {
+			if (kctl->vd[i].owner != ma->speaker_lock_owner) {
+				reason = "Not all child controls locked by the same process";
+				unlock = false;
+			}
+		}
+	}
+
+	if (unlock != ma->speaker_volume_unlocked) {
+		if (unlock) {
+			dev_info(ma->card.dev, "Speaker volumes unlocked\n");
+		} else  {
+			dev_info(ma->card.dev, "Speaker volumes locked: %s\n", reason);
+			ma->speaker_volume_was_locked = true;
+		}
+
+		macaudio_vlimit_unlock(ma, unlock);
+		ma->speaker_volume_unlocked = unlock;
+		snd_ctl_notify(ma->card.snd_card, SNDRV_CTL_EVENT_MASK_VALUE,
+			       &ma->speaker_lock_kctl->id);
+	}
+}
+
+static void macaudio_vlimit_enable_timeout(struct macaudio_snd_data *ma)
+{
+	mutex_lock(&ma->volume_lock_mutex);
+
+	if (ma->speaker_lock_timeout_enabled) {
+		mutex_unlock(&ma->volume_lock_mutex);
+		return;
+	}
+
+	if (ma->speaker_lock_remain > 0) {
+		ma->speaker_lock_timeout = ktime_add(ktime_get(), ma->speaker_lock_remain);
+		schedule_delayed_work(&ma->lock_timeout_work, usecs_to_jiffies(ktime_to_us(ma->speaker_lock_remain)));
+		dev_dbg(ma->card.dev, "Enabling volume limit timeout: %ld us left\n",
+			(long)ktime_to_us(ma->speaker_lock_remain));
+	}
+
+	macaudio_vlimit_update(ma);
+
+	ma->speaker_lock_timeout_enabled = true;
+	mutex_unlock(&ma->volume_lock_mutex);
+}
+
+static void macaudio_vlimit_disable_timeout(struct macaudio_snd_data *ma)
+{
+	ktime_t now;
+
+	mutex_lock(&ma->volume_lock_mutex);
+
+	if (!ma->speaker_lock_timeout_enabled) {
+		mutex_unlock(&ma->volume_lock_mutex);
+		return;
+	}
+
+	now = ktime_get();
+
+	cancel_delayed_work(&ma->lock_timeout_work);
+
+	if (ktime_after(now, ma->speaker_lock_timeout))
+		ma->speaker_lock_remain = 0;
+	else if (ma->speaker_lock_remain > 0)
+		ma->speaker_lock_remain = ktime_sub(ma->speaker_lock_timeout, now);
+
+	dev_dbg(ma->card.dev, "Disabling volume limit timeout: %ld us left\n",
+		(long)ktime_to_us(ma->speaker_lock_remain));
+
+	macaudio_vlimit_update(ma);
+
+	ma->speaker_lock_timeout_enabled = false;
+
+	mutex_unlock(&ma->volume_lock_mutex);
+}
+
+static void macaudio_vlimit_timeout_work(struct work_struct *wrk)
+{
+	struct macaudio_snd_data *ma = container_of(to_delayed_work(wrk),
+						    struct macaudio_snd_data, lock_timeout_work);
+
+	mutex_lock(&ma->volume_lock_mutex);
+
+	ma->speaker_lock_remain = 0;
+	macaudio_vlimit_update(ma);
+
+	mutex_unlock(&ma->volume_lock_mutex);
+}
+
+static void macaudio_vlimit_update_work(struct work_struct *wrk)
+{
+	struct macaudio_snd_data *ma = container_of(wrk,
+						    struct macaudio_snd_data, lock_update_work);
+
+	if (ma->bes_active)
+		macaudio_vlimit_enable_timeout(ma);
+	else
+		macaudio_vlimit_disable_timeout(ma);
+}
+
+static int macaudio_copy_link(struct device *dev, struct snd_soc_dai_link *target,
+			       struct snd_soc_dai_link *source)
+{
+	memcpy(target, source, sizeof(struct snd_soc_dai_link));
+
+	target->cpus = devm_kmemdup(dev, target->cpus,
+				sizeof(*target->cpus) * target->num_cpus,
+				GFP_KERNEL);
+	target->codecs = devm_kmemdup(dev, target->codecs,
+				sizeof(*target->codecs) * target->num_codecs,
+				GFP_KERNEL);
+	target->platforms = devm_kmemdup(dev, target->platforms,
+				sizeof(*target->platforms) * target->num_platforms,
+				GFP_KERNEL);
+
+	if (!target->cpus || !target->codecs || !target->platforms)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int macaudio_parse_of_component(struct device_node *node, int index,
+				struct snd_soc_dai_link_component *comp)
+{
+	struct of_phandle_args args;
+	int ret;
+
+	ret = of_parse_phandle_with_args(node, "sound-dai", "#sound-dai-cells",
+						index, &args);
+	if (ret)
+		return ret;
+	comp->of_node = args.np;
+	return snd_soc_get_dai_name(&args, &comp->dai_name);
+}
+
+/*
+ * Parse one DPCM backend from the devicetree. This means taking one
+ * of the CPU DAIs and combining it with one or more CODEC DAIs.
+ */
+static int macaudio_parse_of_be_dai_link(struct macaudio_snd_data *ma,
+				struct snd_soc_dai_link *link,
+				int be_index, int ncodecs_per_be,
+				struct device_node *cpu,
+				struct device_node *codec)
+{
+	struct snd_soc_dai_link_component *comp;
+	struct device *dev = ma->card.dev;
+	int codec_base = be_index * ncodecs_per_be;
+	int ret, i;
+
+	link->no_pcm = 1;
+
+	link->dai_fmt = MACAUDIO_DAI_FMT;
+
+	link->num_codecs = ncodecs_per_be;
+	link->codecs = devm_kcalloc(dev, ncodecs_per_be,
+				    sizeof(*comp), GFP_KERNEL);
+	link->num_cpus = 1;
+	link->cpus = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+
+	if (!link->codecs || !link->cpus)
+		return -ENOMEM;
+
+	link->num_platforms = 0;
+
+	for_each_link_codecs(link, i, comp) {
+		ret = macaudio_parse_of_component(codec, codec_base + i, comp);
+		if (ret)
+			return dev_err_probe(ma->card.dev, ret, "parsing CODEC DAI of link '%s' at %pOF\n",
+					     link->name, codec);
+	}
+
+	ret = macaudio_parse_of_component(cpu, be_index, link->cpus);
+	if (ret)
+		return dev_err_probe(ma->card.dev, ret, "parsing CPU DAI of link '%s' at %pOF\n",
+				     link->name, codec);
+
+	link->name = link->cpus[0].dai_name;
+
+	return 0;
+}
+
+static int macaudio_parse_of(struct macaudio_snd_data *ma)
+{
+	struct device_node *codec = NULL;
+	struct device_node *cpu = NULL;
+	struct device_node *np = NULL;
+	struct device_node *platform = NULL;
+	struct snd_soc_dai_link *link = NULL;
+	struct snd_soc_card *card = &ma->card;
+	struct device *dev = card->dev;
+	struct macaudio_link_props *link_props;
+	int ret, num_links, i;
+
+	ret = snd_soc_of_parse_card_name(card, "model");
+	if (ret) {
+		dev_err_probe(dev, ret, "parsing card name\n");
+		return ret;
+	}
+
+	/* Populate links, start with the fixed number of FE links */
+	num_links = ARRAY_SIZE(macaudio_fe_links);
+
+	/* Now add together the (dynamic) number of BE links */
+	for_each_available_child_of_node(dev->of_node, np) {
+		int num_cpus;
+
+		cpu = of_get_child_by_name(np, "cpu");
+		if (!cpu) {
+			ret = dev_err_probe(dev, -EINVAL,
+				"missing CPU DAI node at %pOF\n", np);
+			goto err_free;
+		}
+
+		num_cpus = of_count_phandle_with_args(cpu, "sound-dai",
+						"#sound-dai-cells");
+
+		if (num_cpus <= 0) {
+			ret = dev_err_probe(card->dev, -EINVAL,
+				"missing sound-dai property at %pOF\n", cpu);
+			goto err_free;
+		}
+		of_node_put(cpu);
+		cpu = NULL;
+
+		/* Each CPU specified counts as one BE link */
+		num_links += num_cpus;
+	}
+
+	/* Allocate the DAI link array */
+	card->dai_link = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL);
+	ma->link_props = devm_kcalloc(dev, num_links, sizeof(*ma->link_props), GFP_KERNEL);
+	if (!card->dai_link || !ma->link_props)
+		return -ENOMEM;
+
+	link = card->dai_link;
+	link_props = ma->link_props;
+
+	for (i = 0; i < ARRAY_SIZE(macaudio_fe_links); i++) {
+		ret = macaudio_copy_link(dev, link, &macaudio_fe_links[i]);
+		if (ret)
+			goto err_free;
+
+		memcpy(link_props, &macaudio_fe_link_props[i], sizeof(struct macaudio_link_props));
+		link++; link_props++;
+	}
+
+	for (i = 0; i < num_links; i++)
+		card->dai_link[i].id = i;
+
+	/* We might disable the speakers, so count again */
+	num_links = ARRAY_SIZE(macaudio_fe_links);
+
+	/* Fill in the BEs */
+	for_each_available_child_of_node(dev->of_node, np) {
+		const char *link_name;
+		bool speakers;
+		int be_index, num_codecs, num_bes, ncodecs_per_cpu, nchannels;
+		unsigned int left_mask, right_mask;
+
+		ret = of_property_read_string(np, "link-name", &link_name);
+		if (ret) {
+			dev_err_probe(card->dev, ret, "missing link name\n");
+			goto err_free;
+		}
+
+		dev_dbg(ma->card.dev, "parsing link '%s'\n", link_name);
+
+		speakers = !strcmp(link_name, "Speaker")
+			   || !strcmp(link_name, "Speakers");
+		if (speakers) {
+			if (!ma->cfg->enable_speakers && !please_blow_up_my_speakers) {
+				dev_err(card->dev, "driver can't assure safety on this model, disabling speakers\n");
+				continue;
+			}
+			ma->has_speakers = 1;
+			if (ma->cfg->amp != AMP_SSM3515 && ma->cfg->safe_vol != 0)
+				ma->has_sense = 1;
+		}
+
+		cpu = of_get_child_by_name(np, "cpu");
+		codec = of_get_child_by_name(np, "codec");
+
+		if (!codec || !cpu) {
+			ret = dev_err_probe(dev, -EINVAL,
+				"missing DAI specifications for '%s'\n", link_name);
+			goto err_free;
+		}
+
+		num_bes = of_count_phandle_with_args(cpu, "sound-dai",
+						     "#sound-dai-cells");
+		if (num_bes <= 0) {
+			ret = dev_err_probe(card->dev, -EINVAL,
+				"missing sound-dai property at %pOF\n", cpu);
+			goto err_free;
+		}
+
+		num_codecs = of_count_phandle_with_args(codec, "sound-dai",
+							"#sound-dai-cells");
+		if (num_codecs <= 0) {
+			ret = dev_err_probe(card->dev, -EINVAL,
+				"missing sound-dai property at %pOF\n", codec);
+			goto err_free;
+		}
+
+		dev_dbg(ma->card.dev, "link '%s': %d CPUs %d CODECs\n",
+			link_name, num_bes, num_codecs);
+
+		if (num_codecs % num_bes != 0) {
+			ret = dev_err_probe(card->dev, -EINVAL,
+				"bad combination of CODEC (%d) and CPU (%d) number at %pOF\n",
+				num_codecs, num_bes, np);
+			goto err_free;
+		}
+
+		/*
+		 * Now parse the cpu/codec lists into a number of DPCM backend links.
+		 * In each link there will be one DAI from the cpu list paired with
+		 * an evenly distributed number of DAIs from the codec list, as the
+		 * binding semantics prescribe.
+		 */
+		ncodecs_per_cpu = num_codecs / num_bes;
+		nchannels = num_codecs * (speakers ? 1 : 2);
+
+		/* Save the max number of channels on the platform */
+		if (nchannels > ma->max_channels)
+			ma->max_channels = nchannels;
+
+		/*
+		 * If there is a single speaker, assign two channels to it, because
+		 * it can do downmix.
+		 */
+		if (nchannels < 2)
+			nchannels = 2;
+
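+		/*
+		 * e.g. nchannels == 4 gives left_mask == 0b0101 and
+		 * right_mask == 0b1010, i.e. alternating TDM slots.
+		 */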
+		left_mask = 0;
+		for (i = 0; i < nchannels; i += 2)
+			left_mask = left_mask << 2 | 1;
+		right_mask = left_mask << 1;
+
+		for (be_index = 0; be_index < num_bes; be_index++) {
+			/*
+			 * Set initial link name to be overwritten by a BE-specific
+			 * name later so that we can at least use the provisional
+			 * name in error messages.
+			 */
+			link->name = link_name;
+
+			ret = macaudio_parse_of_be_dai_link(ma, link, be_index,
+							    ncodecs_per_cpu, cpu, codec);
+			if (ret)
+				goto err_free;
+
+			link_props->is_speakers = speakers;
+			link_props->is_headphones = !speakers;
+
+			if (num_bes == 2)
+				/* This sound peripheral is split between left and right BE */
+				link_props->tdm_mask = be_index ? right_mask : left_mask;
+			else
+				/* One BE covers all of the peripheral */
+				link_props->tdm_mask = left_mask | right_mask;
+
+			/* Steal platform OF reference for use in FE links later */
+			platform = link->cpus->of_node;
+
+			link++; link_props++;
+		}
+
+		of_node_put(codec);
+		of_node_put(cpu);
+		cpu = codec = NULL;
+
+		num_links += num_bes;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(macaudio_fe_links); i++)
+		card->dai_link[i].platforms->of_node = platform;
+
+	/* Skip the speaker sense PCM link if this amp has no sense (or no speakers) */
+	if (!ma->has_sense) {
+		for (i = 0; i < ARRAY_SIZE(macaudio_fe_links); i++) {
+			if (ma->link_props[i].is_sense) {
+				memmove(&card->dai_link[i], &card->dai_link[i + 1],
+					(num_links - i - 1) * sizeof(struct snd_soc_dai_link));
+				num_links--;
+				break;
+			}
+		}
+	}
+
+	card->num_links = num_links;
+
+	return 0;
+
+err_free:
+	of_node_put(codec);
+	of_node_put(cpu);
+	of_node_put(np);
+
+	if (!card->dai_link)
+		return ret;
+
+	for (i = 0; i < num_links; i++) {
+		/*
+		 * TODO: If we don't go through this path are the references
+		 * freed inside ASoC?
+		 */
+		snd_soc_of_put_dai_link_codecs(&card->dai_link[i]);
+		snd_soc_of_put_dai_link_cpus(&card->dai_link[i]);
+	}
+
+	return ret;
+}
+
+static int macaudio_get_runtime_bclk_ratio(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(rtd->card);
+	struct snd_soc_dpcm *dpcm;
+
+	/*
+	 * If this is a FE, look it up in link_props directly.
+	 * If this is a BE, look it up in the respective FE.
+	 */
+	if (!rtd->dai_link->no_pcm)
+		return ma->link_props[rtd->dai_link->id].bclk_ratio;
+
+	for_each_dpcm_fe(rtd, substream->stream, dpcm) {
+		int fe_id = dpcm->fe->dai_link->id;
+
+		return ma->link_props[fe_id].bclk_ratio;
+	}
+
+	return 0;
+}
+
+static int macaudio_dpcm_hw_params(struct snd_pcm_substream *substream,
+				   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(rtd->card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+	struct snd_interval *rate = hw_param_interval(params,
+						      SNDRV_PCM_HW_PARAM_RATE);
+	int bclk_ratio = macaudio_get_runtime_bclk_ratio(substream);
+	int i;
+
+	if (props->is_sense) {
+		rate->min = rate->max = cpu_dai->symmetric_rate;
+		return 0;
+	}
+
+	/* Speakers BE */
+	if (props->is_speakers) {
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+			/* Sense PCM: keep the existing BE rate (0 if not already running) */
+			rate->min = rate->max = cpu_dai->symmetric_rate;
+
+			return 0;
+		} else {
+			/*
+			 * Set the sense PCM rate control to inform userspace of the
+			 * new sample rate.
+			 */
+			ma->speaker_sample_rate = params_rate(params);
+			snd_ctl_notify(ma->card.snd_card, SNDRV_CTL_EVENT_MASK_VALUE,
+				       &ma->speaker_sample_rate_kctl->id);
+		}
+	}
+
+	if (bclk_ratio) {
+		struct snd_soc_dai *dai;
+		int mclk = params_rate(params) * bclk_ratio;
+
+		for_each_rtd_codec_dais(rtd, i, dai) {
+			snd_soc_dai_set_sysclk(dai, 0, mclk, SND_SOC_CLOCK_IN);
+			snd_soc_dai_set_bclk_ratio(dai, bclk_ratio);
+		}
+
+		snd_soc_dai_set_sysclk(cpu_dai, 0, mclk, SND_SOC_CLOCK_OUT);
+		snd_soc_dai_set_bclk_ratio(cpu_dai, bclk_ratio);
+	}
+
+	return 0;
+}
+
+static int macaudio_fe_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(rtd->card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	int max_rate, ret;
+
+	if (props->is_sense) {
+		/*
+		 * Sense stream will not return data while playback is inactive,
+		 * so do not time out.
+		 */
+		substream->wait_time = MAX_SCHEDULE_TIMEOUT;
+		return 0;
+	}
+
+	ret = snd_pcm_hw_constraint_minmax(substream->runtime,
+					   SNDRV_PCM_HW_PARAM_CHANNELS,
+					   0, ma->max_channels);
+	if (ret < 0)
+		return ret;
+
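+	/*
+	 * e.g. 24.576 MHz / 256 = 96 kHz for the secondary FE and
+	 * 24.576 MHz / 64 = 384 kHz for the primary FE
+	 */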
+	max_rate = MACAUDIO_MAX_BCLK_FREQ / props->bclk_ratio;
+	ret = snd_pcm_hw_constraint_minmax(substream->runtime,
+					   SNDRV_PCM_HW_PARAM_RATE,
+					   0, max_rate);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int macaudio_fe_hw_params(struct snd_pcm_substream *substream,
+				   struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct snd_soc_pcm_runtime *be;
+	struct snd_soc_dpcm *dpcm;
+
+	be = NULL;
+	for_each_dpcm_be(rtd, substream->stream, dpcm) {
+		be = dpcm->be;
+		break;
+	}
+
+	if (!be) {
+		dev_err(rtd->dev, "opening PCM device '%s' with no audio route configured by the user\n",
+				rtd->dai_link->name);
+		return -EINVAL;
+	}
+
+	return macaudio_dpcm_hw_params(substream, params);
+}
+
+static void macaudio_dpcm_shutdown(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+	struct snd_soc_dai *dai;
+	int bclk_ratio = macaudio_get_runtime_bclk_ratio(substream);
+	int i;
+
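+	/* Undo the sysclk setup done in hw_params */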
+	if (bclk_ratio) {
+		for_each_rtd_codec_dais(rtd, i, dai)
+			snd_soc_dai_set_sysclk(dai, 0, 0, SND_SOC_CLOCK_IN);
+
+		snd_soc_dai_set_sysclk(cpu_dai, 0, 0, SND_SOC_CLOCK_OUT);
+	}
+}
+
+static int macaudio_be_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(rtd->card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	struct snd_soc_dai *dai;
+	int i;
+
+	if (props->is_speakers && substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		/*
+		 * Clear the DAI rates, so the next open can change the sample rate.
+		 * This won't happen automatically if the sense PCM is open.
+		 */
+		for_each_rtd_dais(rtd, i, dai) {
+			dai->symmetric_rate = 0;
+		}
+
+		/* Notify userspace that the speakers are closed */
+		ma->speaker_sample_rate = 0;
+		snd_ctl_notify(ma->card.snd_card, SNDRV_CTL_EVENT_MASK_VALUE,
+			       &ma->speaker_sample_rate_kctl->id);
+	}
+
+	return 0;
+}
+
+static int macaudio_be_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(rtd->card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+
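+	/*
+	 * Track which speaker BEs are running and let the volume-limit
+	 * worker re-evaluate the lock state outside the trigger path.
+	 */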
+	if (props->is_speakers && substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		switch (cmd) {
+		case SNDRV_PCM_TRIGGER_START:
+		case SNDRV_PCM_TRIGGER_RESUME:
+		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+			ma->bes_active |= BIT(rtd->dai_link->id);
+			break;
+		case SNDRV_PCM_TRIGGER_SUSPEND:
+		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		case SNDRV_PCM_TRIGGER_STOP:
+			ma->bes_active &= ~BIT(rtd->dai_link->id);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		schedule_work(&ma->lock_update_work);
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_ops macaudio_fe_ops = {
+	.startup	= macaudio_fe_startup,
+	.shutdown	= macaudio_dpcm_shutdown,
+	.hw_params	= macaudio_fe_hw_params,
+};
+
+static const struct snd_soc_ops macaudio_be_ops = {
+	.hw_free	= macaudio_be_hw_free,
+	.shutdown	= macaudio_dpcm_shutdown,
+	.hw_params	= macaudio_dpcm_hw_params,
+	.trigger	= macaudio_be_trigger,
+};
+
+static int macaudio_be_assign_tdm(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_card *card = rtd->card;
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	struct snd_soc_dai *dai;
+	unsigned int mask;
+	int nslots, ret, i;
+
+	if (!props->tdm_mask)
+		return 0;
+
+	mask = props->tdm_mask;
+	nslots = __fls(mask) + 1;
+
+	if (rtd->dai_link->num_codecs == 1) {
+		ret = snd_soc_dai_set_tdm_slot(snd_soc_rtd_to_codec(rtd, 0), mask,
+					       0, nslots, MACAUDIO_SLOTWIDTH);
+
+		/*
+		 * Headphones get a pass on -ENOTSUPP (see the comment
+		 * around bclk_ratio value for primary FE).
+		 */
+		if (ret == -ENOTSUPP && props->is_headphones)
+			return 0;
+
+		return ret;
+	}
+
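+	/* Multiple codecs: hand each codec DAI its own slot out of the TDM mask */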
+	for_each_rtd_codec_dais(rtd, i, dai) {
+		int slot = __ffs(mask);
+
+		mask &= ~(1 << slot);
+		ret = snd_soc_dai_set_tdm_slot(dai, 1 << slot, 0, nslots,
+					       MACAUDIO_SLOTWIDTH);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int macaudio_be_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_card *card = rtd->card;
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	struct snd_soc_dai *dai;
+	int i, ret;
+
+	ret = macaudio_be_assign_tdm(rtd);
+	if (ret < 0)
+		return ret;
+
+	if (props->is_headphones) {
+		for_each_rtd_codec_dais(rtd, i, dai)
+			snd_soc_component_set_jack(dai->component, &ma->jack, NULL);
+	}
+
+	return 0;
+}
+
+static void macaudio_be_exit(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_card *card = rtd->card;
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	struct snd_soc_dai *dai;
+	int i;
+
+	if (props->is_headphones) {
+		for_each_rtd_codec_dais(rtd, i, dai)
+			snd_soc_component_set_jack(dai->component, NULL, NULL);
+	}
+}
+
+static int macaudio_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_card *card = rtd->card;
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+	int nslots = props->bclk_ratio / MACAUDIO_SLOTWIDTH;
+
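+	/*
+	 * The sense PCM receives on all 16 slots of a fixed 16-bit TDM frame;
+	 * regular FEs use as many slots as fit into the configured bclk ratio.
+	 */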
+	if (props->is_sense)
+		return snd_soc_dai_set_tdm_slot(snd_soc_rtd_to_cpu(rtd, 0), 0, 0xffff, 16, 16);
+
+	return snd_soc_dai_set_tdm_slot(snd_soc_rtd_to_cpu(rtd, 0), (1 << nslots) - 1,
+					(1 << nslots) - 1, nslots, MACAUDIO_SLOTWIDTH);
+}
+
+static struct snd_soc_jack_pin macaudio_jack_pins[] = {
+	{
+		.pin = "Headphone",
+		.mask = SND_JACK_HEADPHONE,
+	},
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	},
+};
+
+static int macaudio_probe(struct snd_soc_card *card)
+{
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	int ret;
+
+	dev_dbg(card->dev, "%s!\n", __func__);
+
+	ret = snd_soc_card_jack_new_pins(card, "Headphone Jack",
+			SND_JACK_HEADSET | SND_JACK_HEADPHONE,
+			&ma->jack, macaudio_jack_pins,
+			ARRAY_SIZE(macaudio_jack_pins));
+	if (ret < 0) {
+		dev_err(card->dev, "jack creation failed: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int macaudio_add_backend_dai_route(struct snd_soc_card *card, struct snd_soc_dai *dai,
+					  bool is_speakers)
+{
+	struct snd_soc_dapm_route routes[2];
+	struct snd_soc_dapm_route *r;
+	int nroutes = 0;
+	int ret;
+
+	memset(routes, 0, sizeof(routes));
+
+	dev_dbg(card->dev, "adding routes for '%s'\n", dai->name);
+
+	r = &routes[nroutes++];
+	if (is_speakers)
+		r->source = "Speaker Playback";
+	else
+		r->source = "Headphone Playback";
+	r->sink = dai->stream[SNDRV_PCM_STREAM_PLAYBACK].widget->name;
+
+	/* If headphone jack, add capture path */
+	if (!is_speakers) {
+		r = &routes[nroutes++];
+		r->source = dai->stream[SNDRV_PCM_STREAM_CAPTURE].widget->name;
+		r->sink = "Headset Capture";
+	}
+
+	/* If speakers, add sense capture path */
+	if (is_speakers) {
+		r = &routes[nroutes++];
+		r->source = dai->stream[SNDRV_PCM_STREAM_CAPTURE].widget->name;
+		r->sink = "Speaker Sense Capture";
+	}
+
+	ret = snd_soc_dapm_add_routes(&card->dapm, routes, nroutes);
+	if (ret)
+		dev_err(card->dev, "failed adding dynamic DAPM routes for %s\n",
+			dai->name);
+	return ret;
+}
+
+static int macaudio_add_pin_routes(struct snd_soc_card *card, struct snd_soc_component *component,
+				   bool is_speakers)
+{
+	struct snd_soc_dapm_route routes[2];
+	struct snd_soc_dapm_route *r;
+	int nroutes = 0;
+	char buf[32];
+	int ret;
+
+	memset(routes, 0, sizeof(routes));
+
+	/* Connect the far ends of CODECs to pins */
+	if (is_speakers) {
+		r = &routes[nroutes++];
+		r->source = "OUT";
+		if (component->name_prefix) {
+			snprintf(buf, sizeof(buf) - 1, "%s OUT", component->name_prefix);
+			r->source = buf;
+		}
+		r->sink = "Speaker";
+	} else {
+		r = &routes[nroutes++];
+		r->source = "Jack HP";
+		r->sink = "Headphone";
+		r = &routes[nroutes++];
+		r->source = "Headset Mic";
+		r->sink = "Jack HS";
+	}
+
+	ret = snd_soc_dapm_add_routes(&card->dapm, routes, nroutes);
+	if (ret)
+		dev_err(card->dev, "failed adding dynamic DAPM routes for %s\n",
+			component->name);
+	return ret;
+}
+
+static int macaudio_late_probe(struct snd_soc_card *card)
+{
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_soc_dai *dai;
+	int ret, i;
+
+	/* Add the dynamic DAPM routes */
+	for_each_card_rtds(card, rtd) {
+		struct macaudio_link_props *props = &ma->link_props[rtd->dai_link->id];
+
+		if (!rtd->dai_link->no_pcm)
+			continue;
+
+		for_each_rtd_cpu_dais(rtd, i, dai) {
+			ret = macaudio_add_backend_dai_route(card, dai, props->is_speakers);
+
+			if (ret)
+				return ret;
+		}
+
+		for_each_rtd_codec_dais(rtd, i, dai) {
+			ret = macaudio_add_pin_routes(card, dai->component,
+						      props->is_speakers);
+
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (ma->has_speakers)
+		ma->speaker_sample_rate_kctl = snd_soc_card_get_kcontrol(card,
+									 "Speaker Sample Rate");
+	if (ma->has_safety) {
+		ma->speaker_lock_kctl = snd_soc_card_get_kcontrol(card,
+								  "Speaker Volume Unlock");
+
+		mutex_lock(&ma->volume_lock_mutex);
+		macaudio_vlimit_unlock(ma, false);
+		mutex_unlock(&ma->volume_lock_mutex);
+	}
+
+	return 0;
+}
+
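+/*
+ * Helpers for applying fixups to named kcontrols: a negative return from
+ * 'call', or fewer than 'min' matched controls, is treated as an error
+ * unless the user overrides it via please_blow_up_my_speakers.
+ * CHECK_CONCAT prepends the per-speaker control name prefix.
+ */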
+#define CHECK(call, pattern, value, min)                                       \
+	do {                                                                   \
+		int ret = call(card, pattern, value);                          \
+		int err = (ret >= 0 && ret < min) ? -ERANGE : ret;             \
+		if (err < 0) {                                                 \
+			dev_err(card->dev, "%s on '%s': %d\n", #call, pattern, \
+				ret);                                          \
+			if (please_blow_up_my_speakers < 2)                    \
+				return err;                                    \
+		} else {                                                       \
+			dev_dbg(card->dev, "%s on '%s': %d hits\n", #call,     \
+				pattern, ret);                                 \
+		}                                                              \
+	} while (0)
+
+#define CHECK_CONCAT(call, suffix, value) \
+	do { \
+		snprintf(buf, sizeof(buf), "%s%s", prefix, suffix); \
+		CHECK(call, buf, value, 1); \
+	} while (0)
+
+static int macaudio_set_speaker(struct snd_soc_card *card, const char *prefix, bool tweeter)
+{
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	char buf[256];
+
+	if (!ma->has_speakers)
+		return 0;
+
+	switch (ma->cfg->amp) {
+	case AMP_TAS5770:
+		if (ma->cfg->stereo) {
+			CHECK_CONCAT(snd_soc_set_enum_kctl, "ASI1 Sel", "Left");
+			CHECK_CONCAT(snd_soc_deactivate_kctl, "ASI1 Sel", 0);
+		}
+
+		CHECK_CONCAT(snd_soc_limit_volume, "Amp Gain Volume", ma->cfg->amp_gain);
+		break;
+	case AMP_SN012776:
+		if (ma->cfg->stereo) {
+			CHECK_CONCAT(snd_soc_set_enum_kctl, "ASI1 Sel", "Left");
+			CHECK_CONCAT(snd_soc_deactivate_kctl, "ASI1 Sel", 0);
+		}
+
+		CHECK_CONCAT(snd_soc_limit_volume, "Amp Gain Volume", ma->cfg->amp_gain);
+		CHECK_CONCAT(snd_soc_set_enum_kctl, "HPF Corner Frequency",
+			     tweeter ? "800 Hz" : "2 Hz");
+
+		if (please_blow_up_my_speakers < 2)
+			CHECK_CONCAT(snd_soc_deactivate_kctl, "HPF Corner Frequency", 0);
+
+		CHECK_CONCAT(snd_soc_set_enum_kctl, "OCE Handling", "Retry");
+		CHECK_CONCAT(snd_soc_deactivate_kctl, "OCE Handling", 0);
+		break;
+	case AMP_SSM3515:
+		/* TODO: check */
+		CHECK_CONCAT(snd_soc_set_enum_kctl, "DAC Analog Gain Select", "8.4 V Span");
+
+		if (please_blow_up_my_speakers < 2)
+			CHECK_CONCAT(snd_soc_deactivate_kctl, "DAC Analog Gain Select", 0);
+
+		/* TODO: HPF, needs new call to set */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int macaudio_fixup_controls(struct snd_soc_card *card)
+{
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+	const char *p;
+
+	/* Set the card ID early to avoid races with udev */
+	p = strrchr(card->name, ' ');
+	if (p) {
+		snprintf(card->snd_card->id, sizeof(card->snd_card->id),
+			 "Apple%s", p + 1);
+	}
+
+	if (!ma->has_speakers)
+		return 0;
+
+	/*
+	 * This needs some care to avoid matches against cs42l84's
+	 * "Jack HPF Corner Frequency".
+	 */
+	switch (ma->cfg->speakers) {
+	case SPKR_NONE:
+		WARN_ON(please_blow_up_my_speakers < 2);
+		return please_blow_up_my_speakers >= 2 ? 0 : -EINVAL;
+	case SPKR_1W:
+		/* the only 1W stereo system (J313) uses cs42l83 */
+		if (ma->cfg->stereo) {
+			CHECK(macaudio_set_speaker, "* ", false, 0);
+		} else {
+			CHECK(macaudio_set_speaker, "", false, 0);
+		}
+		break;
+	case SPKR_2W:
+		CHECK(macaudio_set_speaker, "* Front ", false, 0);
+		CHECK(macaudio_set_speaker, "* Rear ", false, 0);
+		break;
+	case SPKR_1W1T:
+		CHECK(macaudio_set_speaker, "* Tweeter ", true, 0);
+		CHECK(macaudio_set_speaker, "* Woofer ", false, 0);
+		break;
+	case SPKR_2W1T:
+		CHECK(macaudio_set_speaker, "* Tweeter ", true, 0);
+		CHECK(macaudio_set_speaker, "* Woofer 1 ", false, 0);
+		CHECK(macaudio_set_speaker, "* Woofer 2 ", false, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static const char * const macaudio_spk_mux_texts[] = {
+	"Primary",
+	"Secondary"
+};
+
+SOC_ENUM_SINGLE_VIRT_DECL(macaudio_spk_mux_enum, macaudio_spk_mux_texts);
+
+static const struct snd_kcontrol_new macaudio_spk_mux =
+	SOC_DAPM_ENUM("Speaker Playback Mux", macaudio_spk_mux_enum);
+
+static const char * const macaudio_hp_mux_texts[] = {
+	"Primary",
+	"Secondary"
+};
+
+SOC_ENUM_SINGLE_VIRT_DECL(macaudio_hp_mux_enum, macaudio_hp_mux_texts);
+
+static const struct snd_kcontrol_new macaudio_hp_mux =
+	SOC_DAPM_ENUM("Headphones Playback Mux", macaudio_hp_mux_enum);
+
+static const struct snd_soc_dapm_widget macaudio_snd_widgets[] = {
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+	SND_SOC_DAPM_SPK("Speaker (Static)", NULL),
+	SND_SOC_DAPM_HP("Headphone", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+
+	SND_SOC_DAPM_MUX("Speaker Playback Mux", SND_SOC_NOPM, 0, 0, &macaudio_spk_mux),
+	SND_SOC_DAPM_MUX("Headphone Playback Mux", SND_SOC_NOPM, 0, 0, &macaudio_hp_mux),
+
+	SND_SOC_DAPM_AIF_OUT("Speaker Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("Headphone Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("Headset Capture", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("Speaker Sense Capture", NULL, 0, SND_SOC_NOPM, 0, 0),
+};
+
+static int macaudio_sss_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 192000;
+
+	return 0;
+}
+
+static int macaudio_sss_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+
+	/*
+	 * TODO: Check if any locking is in order here. I would
+	 * assume there is some ALSA-level lock, but DAPM implementations
+	 * of kcontrol ops do explicit locking, so look into it.
+	 */
+	uvalue->value.integer.value[0] = ma->speaker_sample_rate;
+
+	return 0;
+}
+
+static int macaudio_slk_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = INT_MIN;
+	uinfo->value.integer.max = INT_MAX;
+
+	return 0;
+}
+
+static int macaudio_slk_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+
+	if (!ma->speaker_lock_owner)
+		return -EPERM;
+
+	if (uvalue->value.integer.value[0] != SPEAKER_MAGIC_VALUE)
+		return -EINVAL;
+
+	/* Serves as a notification that the lock was lost at some point */
+	if (ma->speaker_volume_was_locked) {
+		ma->speaker_volume_was_locked = false;
+		return -ETIMEDOUT;
+	}
+
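+	/*
+	 * Re-arm the unlock timeout; userspace must keep writing the magic
+	 * value to keep the volume limit lifted.
+	 */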
+	mutex_lock(&ma->volume_lock_mutex);
+
+	cancel_delayed_work(&ma->lock_timeout_work);
+
+	ma->speaker_lock_remain = ms_to_ktime(SPEAKER_LOCK_TIMEOUT);
+	ma->speaker_lock_timeout = ktime_add(ktime_get(), ma->speaker_lock_remain);
+	macaudio_vlimit_update(ma);
+
+	if (ma->speaker_lock_timeout_enabled) {
+		dev_dbg(ma->card.dev, "Volume limit timeout ping: %ld us left\n",
+			(long)ktime_to_us(ma->speaker_lock_remain));
+		schedule_delayed_work(&ma->lock_timeout_work, usecs_to_jiffies(ktime_to_us(ma->speaker_lock_remain)));
+	}
+
+	mutex_unlock(&ma->volume_lock_mutex);
+
+	return 0;
+}
+
+static int macaudio_slk_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+
+	uvalue->value.integer.value[0] = ma->speaker_volume_unlocked ? 1 : 0;
+
+	return 0;
+}
+
+static int macaudio_slk_lock(struct snd_kcontrol *kcontrol, struct snd_ctl_file *owner)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+
+	mutex_lock(&ma->volume_lock_mutex);
+	ma->speaker_lock_owner = owner;
+	macaudio_vlimit_update(ma);
+
+	/*
+	 * Reset the unintended lock flag when the control is first locked.
+	 * At this point the state is locked and cannot be unlocked until
+	 * userspace writes to this control, so this cannot spuriously become
+	 * true again until that point.
+	 */
+	ma->speaker_volume_was_locked = false;
+
+	mutex_unlock(&ma->volume_lock_mutex);
+
+	return 0;
+}
+
+static void macaudio_slk_unlock(struct snd_kcontrol *kcontrol)
+{
+	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+	struct macaudio_snd_data *ma = snd_soc_card_get_drvdata(card);
+
+	ma->speaker_lock_owner = NULL;
+	ma->speaker_lock_timeout = 0;
+	macaudio_vlimit_update(ma);
+}
+
+/*
+ * Speaker limit controls go last. On machines without a volume safety
+ * configuration we drop only the unlock control and keep the sample rate
+ * control, which is useful for safety bring-up before the kernel-side
+ * caps are ready.
+ */
+#define MACAUDIO_NUM_SPEAKER_LIMIT_CONTROLS 1
+/*
+ * If there are no speakers configured at all, we can drop both
+ * controls.
+ */
+#define MACAUDIO_NUM_SPEAKER_CONTROLS 2
+
+static const struct snd_kcontrol_new macaudio_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Speaker"),
+	SOC_DAPM_PIN_SWITCH("Headphone"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.access = SNDRV_CTL_ELEM_ACCESS_READ |
+			SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.name = "Speaker Sample Rate",
+		.info = macaudio_sss_info, .get = macaudio_sss_get,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.access = SNDRV_CTL_ELEM_ACCESS_READ |
+			SNDRV_CTL_ELEM_ACCESS_WRITE |
+			SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.name = "Speaker Volume Unlock",
+		.info = macaudio_slk_info,
+		.put = macaudio_slk_put, .get = macaudio_slk_get,
+		.lock = macaudio_slk_lock, .unlock = macaudio_slk_unlock,
+	},
+};
+
+static const struct snd_soc_dapm_route macaudio_dapm_routes[] = {
+	/* Playback paths */
+	{ "Speaker Playback Mux", "Primary", "PCM0 TX" },
+	{ "Speaker Playback Mux", "Secondary", "PCM1 TX" },
+	{ "Speaker Playback", NULL, "Speaker Playback Mux"},
+
+	{ "Headphone Playback Mux", "Primary", "PCM0 TX" },
+	{ "Headphone Playback Mux", "Secondary", "PCM1 TX" },
+	{ "Headphone Playback", NULL, "Headphone Playback Mux"},
+	/*
+	 * Additional paths (to specific I2S ports) are added dynamically.
+	 */
+
+	/* Capture paths */
+	{ "PCM0 RX", NULL, "Headset Capture" },
+
+	/* Sense paths */
+	{ "PCM2 RX", NULL, "Speaker Sense Capture" },
+};
+
+/*	enable	amp		speakers	stereo	gain	safe_vol */
+struct macaudio_platform_cfg macaudio_j180_cfg = {
+	false,	AMP_SN012776,	SPKR_1W1T,	false,	10,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j274_cfg = {
+	true,	AMP_TAS5770,	SPKR_1W,	false,	20,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j293_cfg = {
+	true,	AMP_TAS5770,	SPKR_2W,	true,	15,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j313_cfg = {
+	true,	AMP_TAS5770,	SPKR_1W,	true,	10,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j314_cfg = {
+	true,	AMP_SN012776,	SPKR_2W1T,	true,	15,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j316_cfg = {
+	true,	AMP_SN012776,	SPKR_2W1T,	true,	15,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j37x_j47x_cfg = {
+	true,	AMP_SN012776,	SPKR_1W,	false,	20,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j413_cfg = {
+	true,	AMP_SN012776,	SPKR_1W1T,	true,	15,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j415_cfg = {
+	true,	AMP_SN012776,	SPKR_2W1T,	true,	15,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_j45x_cfg = {
+	false,	AMP_SSM3515,	SPKR_1W1T,	true,	9,	-20, /* TODO: gain?? */
+};
+
+struct macaudio_platform_cfg macaudio_j493_cfg = {
+	true,	AMP_SN012776,	SPKR_2W,	true,	15,	-20,
+};
+
+struct macaudio_platform_cfg macaudio_fallback_cfg = {
+	false,	AMP_NONE,	SPKR_NONE,	false,	0,	0,
+};
+
+/*
+ * DT compatible/ID table rules:
+ *
+ * 1. Machines with **identical** speaker configurations (amps, models, chassis)
+ *    are allowed to declare compatibility with the first model (chronologically),
+ *    and are not enumerated in this array.
+ *
+ * 2. Machines with identical amps and speakers (=identical speaker protection
+ *    rules) but a different chassis must use different compatibles, but may share
+ *    the private data structure here. They are explicitly enumerated.
+ *
+ * 3. Machines with different amps or speaker layouts must use separate
+ *    data structures.
+ *
+ * 4. Machines with identical speaker layouts and amps (but possibly different
+ *    speaker models/chassis) may share the data structure, since only userspace
+ *    cares about that (assuming our general -20dB safe level standard holds).
+ */
+static const struct of_device_id macaudio_snd_device_id[]  = {
+	/* Model   ID      Amp         Gain    Speakers */
+	/* j180    AID19   sn012776    10      1× 1W+1T */
+	{ .compatible = "apple,j180-macaudio", .data = &macaudio_j180_cfg },
+	/* j274    AID6    tas5770     20      1× 1W */
+	{ .compatible = "apple,j274-macaudio", .data = &macaudio_j274_cfg },
+	/* j293    AID3    tas5770     15      2× 2W */
+	{ .compatible = "apple,j293-macaudio", .data = &macaudio_j293_cfg },
+	/* j313    AID4    tas5770     10      2× 1W */
+	{ .compatible = "apple,j313-macaudio", .data = &macaudio_j313_cfg },
+	/* j314    AID8    sn012776    15      2× 2W+1T */
+	{ .compatible = "apple,j314-macaudio", .data = &macaudio_j314_cfg },
+	/* j316    AID9    sn012776    15      2× 2W+1T */
+	{ .compatible = "apple,j316-macaudio", .data = &macaudio_j316_cfg },
+	/* j375    AID10   sn012776    15      1× 1W */
+	{ .compatible = "apple,j375-macaudio", .data = &macaudio_j37x_j47x_cfg },
+	/* j413    AID13   sn012776    15      2× 1W+1T */
+	{ .compatible = "apple,j413-macaudio", .data = &macaudio_j413_cfg },
+	/* j414    AID14   sn012776    15      2× 2W+1T Compat: apple,j314-macaudio */
+	/* j415    AID27   sn012776    15      2× 2W+1T */
+	{ .compatible = "apple,j415-macaudio", .data = &macaudio_j415_cfg },
+	/* j416    AID15   sn012776    15      2× 2W+1T Compat: apple,j316-macaudio */
+	/* j456    AID5    ssm3515     15      2× 1W+1T */
+	{ .compatible = "apple,j456-macaudio", .data = &macaudio_j45x_cfg },
+	/* j457    AID7    ssm3515     15      2× 1W+1T Compat: apple,j456-macaudio */
+	/* j473    AID12   sn012776    20      1× 1W */
+	{ .compatible = "apple,j473-macaudio", .data = &macaudio_j37x_j47x_cfg },
+	/* j474    AID26   sn012776    20      1× 1W    Compat: apple,j473-macaudio */
+	/* j475    AID25   sn012776    20      1× 1W    Compat: apple,j375-macaudio */
+	/* j493    AID18   sn012776    15      2× 2W */
+	{ .compatible = "apple,j493-macaudio", .data = &macaudio_j493_cfg },
+	/* Fallback, jack only */
+	{ .compatible = "apple,macaudio"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, macaudio_snd_device_id);
+
+static int macaudio_snd_platform_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card;
+	struct macaudio_snd_data *data;
+	struct device *dev = &pdev->dev;
+	struct snd_soc_dai_link *link;
+	const struct of_device_id *of_id;
+	int ret;
+	int i;
+
+	of_id = of_match_device(macaudio_snd_device_id, dev);
+	if (!of_id)
+		return -EINVAL;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	card = &data->card;
+	snd_soc_card_set_drvdata(card, data);
+	dev_set_drvdata(&pdev->dev, data);
+	mutex_init(&data->volume_lock_mutex);
+
+	card->owner = THIS_MODULE;
+	card->driver_name = "macaudio";
+	card->dev = dev;
+	card->dapm_widgets = macaudio_snd_widgets;
+	card->num_dapm_widgets = ARRAY_SIZE(macaudio_snd_widgets);
+	card->dapm_routes = macaudio_dapm_routes;
+	card->num_dapm_routes = ARRAY_SIZE(macaudio_dapm_routes);
+	card->controls = macaudio_controls;
+	card->num_controls = ARRAY_SIZE(macaudio_controls);
+	card->probe = macaudio_probe;
+	card->late_probe = macaudio_late_probe;
+	card->component_chaining = true;
+	card->fully_routed = true;
+
+	if (of_id->data)
+		data->cfg = of_id->data;
+	else
+		data->cfg = &macaudio_fallback_cfg;
+
+	card->fixup_controls = macaudio_fixup_controls;
+
+	ret = macaudio_parse_of(data);
+	if (ret)
+		return ret;
+
+	/* Remove useless controls */
+	if (!data->has_speakers) /* No speakers, remove both */
+		card->num_controls -= MACAUDIO_NUM_SPEAKER_CONTROLS;
+	else if (!data->cfg->safe_vol) /* No safety, remove unlock */
+		card->num_controls -= MACAUDIO_NUM_SPEAKER_LIMIT_CONTROLS;
+	else /* Speakers with safety, mark us as such */
+		data->has_safety = true;
+
+	for_each_card_prelinks(card, i, link) {
+		if (link->no_pcm) {
+			link->ops = &macaudio_be_ops;
+			link->init = macaudio_be_init;
+			link->exit = macaudio_be_exit;
+		} else {
+			link->ops = &macaudio_fe_ops;
+			link->init = macaudio_fe_init;
+		}
+	}
+
+	INIT_WORK(&data->lock_update_work, macaudio_vlimit_update_work);
+	INIT_DELAYED_WORK(&data->lock_timeout_work, macaudio_vlimit_timeout_work);
+
+	return devm_snd_soc_register_card(dev, card);
+}
+
+static void macaudio_snd_platform_remove(struct platform_device *pdev)
+{
+	struct macaudio_snd_data *ma = dev_get_drvdata(&pdev->dev);
+
+	cancel_delayed_work_sync(&ma->lock_timeout_work);
+}
+
+static struct platform_driver macaudio_snd_driver = {
+	.probe = macaudio_snd_platform_probe,
+	.remove = macaudio_snd_platform_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = macaudio_snd_device_id,
+		.pm = &snd_soc_pm_ops,
+	},
+};
+module_platform_driver(macaudio_snd_driver);
+
+MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
+MODULE_DESCRIPTION("Apple Silicon Macs machine-level sound driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/apple/mca.c b/sound/soc/apple/mca.c
index b4f4696809dd23..17d26faf8e244c 100644
--- a/sound/soc/apple/mca.c
+++ b/sound/soc/apple/mca.c
@@ -133,12 +133,17 @@ struct mca_cluster {
 	struct clk *clk_parent;
 	struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];
 
-	bool port_started[SNDRV_PCM_STREAM_LAST + 1];
-	int port_driver; /* The cluster driving this cluster's port */
+	bool clk_provider;
+
+	bool port_clk_started[SNDRV_PCM_STREAM_LAST + 1];
+	int port_clk_driver; /* The cluster driving this cluster's port */
 
 	bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
 	struct device_link *pd_link;
 
+	/* In case of clock consumer FE */
+	int syncgen_in_use;
+
 	unsigned int bclk_ratio;
 
 	/* Masks etc. picked up via the set_tdm_slot method */
@@ -157,7 +162,7 @@ struct mca_data {
 	struct reset_control *rstc;
 	struct device_link *pd_link;
 
-	/* Mutex for accessing port_driver of foreign clusters */
+	/* Mutex for accessing port_clk_driver of foreign clusters */
 	struct mutex port_mutex;
 
 	int nclusters;
@@ -211,15 +216,21 @@ static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
 			   SERDES_STATUS_RST);
 		/*
 		 * Experiments suggest that it takes at most ~1 us
-		 * for the bit to clear, so wait 2 us for good measure.
+		 * for the bit to clear, so wait 50 us for good measure.
 		 */
-		udelay(2);
+		udelay(50);
 		WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
 			SERDES_STATUS_RST);
 		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
 			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
 		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
 			   FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
+		/*
+		 * ADMAC is started right after this. This delay seems to be
+		 * needed for that to be reliable, presumably to let the
+		 * clock stabilize.
+		 */
+		udelay(100);
 		break;
 	default:
 		break;
@@ -256,11 +267,28 @@ static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
 	return 0;
 }
 
+static int mca_fe_get_portmask(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *fe = snd_soc_substream_to_rtd(substream);
+	struct snd_soc_dpcm *dpcm;
+	int mask = 0;
+
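+	/* OR together the cluster numbers of all BEs attached to this FE */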
+	for_each_dpcm_be(fe, substream->stream, dpcm) {
+		int no = mca_dai_to_cluster(snd_soc_rtd_to_cpu(dpcm->be, 0))->no;
+		mask |= 1 << no;
+	}
+
+	return mask;
+}
+
 static int mca_fe_enable_clocks(struct mca_cluster *cl)
 {
 	struct mca_data *mca = cl->host;
 	int ret;
 
+	if (!cl->clk_provider)
+		return -EINVAL;
+
 	ret = clk_prepare_enable(cl->clk_parent);
 	if (ret) {
 		dev_err(mca->dev,
@@ -274,6 +302,7 @@ static int mca_fe_enable_clocks(struct mca_cluster *cl)
 	 * the power state driver would error out on seeing the device
 	 * as clock-gated.
 	 */
+	WARN_ON(cl->pd_link);
 	cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
 				      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
 					      DL_FLAG_RPM_ACTIVE);
@@ -297,7 +326,11 @@ static void mca_fe_disable_clocks(struct mca_cluster *cl)
 	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
 	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);
 
-	device_link_del(cl->pd_link);
+	if (cl->pd_link) {
+		device_link_del(cl->pd_link);
+		cl->pd_link = NULL;
+	}
+
 	clk_disable_unprepare(cl->clk_parent);
 }
 
@@ -311,7 +344,7 @@ static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
 	for (i = 0; i < mca->nclusters; i++) {
 		be_cl = &mca->clusters[i];
 
-		if (be_cl->port_driver != cl->no)
+		if (be_cl->port_clk_driver != cl->no)
 			continue;
 
 		for_each_pcm_streams(stream) {
@@ -325,59 +358,55 @@ static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
 	return false;
 }
 
-static int mca_be_prepare(struct snd_pcm_substream *substream,
+static int mca_fe_prepare(struct snd_pcm_substream *substream,
 			  struct snd_soc_dai *dai)
 {
 	struct mca_cluster *cl = mca_dai_to_cluster(dai);
 	struct mca_data *mca = cl->host;
-	struct mca_cluster *fe_cl;
-	int ret;
 
-	if (cl->port_driver < 0)
-		return -EINVAL;
+	if (cl->clk_provider)
+		return 0;
 
-	fe_cl = &mca->clusters[cl->port_driver];
+	if (!cl->syncgen_in_use) {
+		int port = ffs(mca_fe_get_portmask(substream)) - 1;
 
-	/*
-	 * Typically the CODECs we are paired with will require clocks
-	 * to be present at time of unmute with the 'mute_stream' op
-	 * or at time of DAPM widget power-up. We need to enable clocks
-	 * here at the latest (frontend prepare would be too late).
-	 */
-	if (!mca_fe_clocks_in_use(fe_cl)) {
-		ret = mca_fe_enable_clocks(fe_cl);
-		if (ret < 0)
-			return ret;
-	}
+		WARN_ON(cl->pd_link);
+		cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
+					      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
+						DL_FLAG_RPM_ACTIVE);
+		if (!cl->pd_link) {
+			dev_err(mca->dev,
+				"cluster %d: unable to prop-up power domain\n", cl->no);
+			return -EINVAL;
+		}
 
-	cl->clocks_in_use[substream->stream] = true;
+		writel_relaxed(port + 6 + 1,
+			       cl->base + REG_SYNCGEN_MCLK_SEL);
+		mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
+			   SYNCGEN_STATUS_EN);
+	}
+	cl->syncgen_in_use |= 1 << substream->stream;
 
 	return 0;
 }
 
-static int mca_be_hw_free(struct snd_pcm_substream *substream,
+static int mca_fe_hw_free(struct snd_pcm_substream *substream,
 			  struct snd_soc_dai *dai)
 {
 	struct mca_cluster *cl = mca_dai_to_cluster(dai);
-	struct mca_data *mca = cl->host;
-	struct mca_cluster *fe_cl;
-
-	if (cl->port_driver < 0)
-		return -EINVAL;
 
-	/*
-	 * We are operating on a foreign cluster here, but since we
-	 * belong to the same PCM, accesses should have been
-	 * synchronized at ASoC level.
-	 */
-	fe_cl = &mca->clusters[cl->port_driver];
-	if (!mca_fe_clocks_in_use(fe_cl))
-		return 0; /* Nothing to do */
+	if (cl->clk_provider)
+		return 0;
 
-	cl->clocks_in_use[substream->stream] = false;
+	cl->syncgen_in_use &= ~(1 << substream->stream);
+	if (cl->syncgen_in_use)
+		return 0;
 
-	if (!mca_fe_clocks_in_use(fe_cl))
-		mca_fe_disable_clocks(fe_cl);
+	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
+	if (cl->pd_link) {
+		device_link_del(cl->pd_link);
+		cl->pd_link = NULL;
+	}
 
 	return 0;
 }
@@ -392,7 +421,7 @@ static unsigned int mca_crop_mask(unsigned int mask, int nchans)
 
 static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
 				unsigned int mask, int slots, int nchans,
-				int slot_width, bool is_tx, int port)
+				int slot_width, bool is_tx, int portmask)
 {
 	__iomem void *serdes_base = cl->base + serdes_unit;
 	u32 serdes_conf, serdes_conf_mask;
@@ -451,7 +480,7 @@ static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
 			       serdes_base + REG_RX_SERDES_SLOTMASK);
 		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
 			       serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
-		writel_relaxed(1 << port,
+		writel_relaxed(portmask,
 			       serdes_base + REG_RX_SERDES_PORT);
 	}
 
@@ -464,6 +493,28 @@ static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
 	return -EINVAL;
 }
 
+static int mca_fe_startup(struct snd_pcm_substream *substream,
+			  struct snd_soc_dai *dai)
+{
+	struct mca_cluster *cl = mca_dai_to_cluster(dai);
+	unsigned int mask, nchannels;
+
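+	/* Limit the channel count to the configured TDM slots (stereo by default) */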
+	if (cl->tdm_slots) {
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+			mask = cl->tdm_tx_mask;
+		else
+			mask = cl->tdm_rx_mask;
+
+		nchannels = hweight32(mask);
+	} else {
+		nchannels = 2;
+	}
+
+	return snd_pcm_hw_constraint_minmax(substream->runtime,
+					    SNDRV_PCM_HW_PARAM_CHANNELS,
+					    1, nchannels);
+}
+
 static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 			       unsigned int rx_mask, int slots, int slot_width)
 {
@@ -485,9 +536,18 @@ static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	u32 serdes_conf = 0;
 	u32 bitstart;
 
-	if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
-	    SND_SOC_DAIFMT_BP_FP)
+	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
+	case SND_SOC_DAIFMT_BP_FP:
+		cl->clk_provider = true;
+		break;
+
+	case SND_SOC_DAIFMT_BC_FC:
+		cl->clk_provider = false;
+		break;
+
+	default:
 		goto err;
+	}
 
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
@@ -544,24 +604,6 @@ static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
 	return 0;
 }
 
-static int mca_fe_get_port(struct snd_pcm_substream *substream)
-{
-	struct snd_soc_pcm_runtime *fe = snd_soc_substream_to_rtd(substream);
-	struct snd_soc_pcm_runtime *be;
-	struct snd_soc_dpcm *dpcm;
-
-	be = NULL;
-	for_each_dpcm_be(fe, substream->stream, dpcm) {
-		be = dpcm->be;
-		break;
-	}
-
-	if (!be)
-		return -EINVAL;
-
-	return mca_dai_to_cluster(snd_soc_rtd_to_cpu(be, 0))->no;
-}
-
 static int mca_fe_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
@@ -575,7 +617,7 @@ static int mca_fe_hw_params(struct snd_pcm_substream *substream,
 	unsigned long bclk_ratio;
 	unsigned int tdm_slots, tdm_slot_width, tdm_mask;
 	u32 regval, pad;
-	int ret, port, nchans_ceiled;
+	int ret, portmask, nchans_ceiled;
 
 	if (!cl->tdm_slot_width) {
 		/*
@@ -624,13 +666,13 @@ static int mca_fe_hw_params(struct snd_pcm_substream *substream,
 		tdm_mask = (1 << tdm_slots) - 1;
 	}
 
-	port = mca_fe_get_port(substream);
-	if (port < 0)
-		return port;
+	portmask = mca_fe_get_portmask(substream);
+	if (!portmask)
+		return -EINVAL;
 
 	ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
 				   tdm_mask, tdm_slots, params_channels(params),
-				   tdm_slot_width, is_tx, port);
+				   tdm_slot_width, is_tx, portmask);
 	if (ret)
 		return ret;
 
@@ -680,72 +722,129 @@ static int mca_fe_hw_params(struct snd_pcm_substream *substream,
 }
 
 static const struct snd_soc_dai_ops mca_fe_ops = {
+	.startup = mca_fe_startup,
 	.set_fmt = mca_fe_set_fmt,
 	.set_bclk_ratio = mca_set_bclk_ratio,
 	.set_tdm_slot = mca_fe_set_tdm_slot,
 	.hw_params = mca_fe_hw_params,
 	.trigger = mca_fe_trigger,
+	.prepare = mca_fe_prepare,
+	.hw_free = mca_fe_hw_free,
 };
 
-static bool mca_be_started(struct mca_cluster *cl)
+/*
+ * Is there a FE attached which will be feeding this port's clocks?
+ */
+static bool mca_be_clk_started(struct mca_cluster *cl)
 {
 	int stream;
 
 	for_each_pcm_streams(stream)
-		if (cl->port_started[stream])
+		if (cl->port_clk_started[stream])
 			return true;
 	return false;
 }
 
-static int mca_be_startup(struct snd_pcm_substream *substream,
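+/* Return the sole FE connected to this BE for @stream, or NULL if none/ambiguous */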
+static struct snd_soc_pcm_runtime *mca_be_get_fe(struct snd_soc_pcm_runtime *be,
+						 int stream)
+{
+	struct snd_soc_pcm_runtime *fe = NULL;
+	struct snd_soc_dpcm *dpcm;
+
+	for_each_dpcm_fe(be, stream, dpcm) {
+		if (fe && dpcm->fe != fe) {
+			dev_err(be->dev, "many FE per one BE unsupported\n");
+			return NULL;
+		}
+
+		fe = dpcm->fe;
+	}
+
+	return fe;
+}
+
+static int mca_be_prepare(struct snd_pcm_substream *substream,
 			  struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *be = snd_soc_substream_to_rtd(substream);
-	struct snd_soc_pcm_runtime *fe;
+	struct snd_soc_pcm_runtime *fe = mca_be_get_fe(be, substream->stream);
 	struct mca_cluster *cl = mca_dai_to_cluster(dai);
-	struct mca_cluster *fe_cl;
 	struct mca_data *mca = cl->host;
-	struct snd_soc_dpcm *dpcm;
+	struct mca_cluster *fe_cl, *fe_clk_cl;
+	int ret;
 
-	fe = NULL;
+	fe_cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(fe, 0));
 
-	for_each_dpcm_fe(be, substream->stream, dpcm) {
-		if (fe && dpcm->fe != fe) {
-			dev_err(mca->dev, "many FE per one BE unsupported\n");
-			return -EINVAL;
-		}
+	if (!fe_cl->clk_provider)
+		return 0;
 
-		fe = dpcm->fe;
+	if (cl->port_clk_driver < 0)
+		return 0;
+
+	fe_clk_cl = &mca->clusters[cl->port_clk_driver];
+
+	/*
+	 * Typically the CODECs we are paired with will require clocks
+	 * to be present at time of unmute with the 'mute_stream' op
+	 * or at time of DAPM widget power-up. We need to enable clocks
+	 * here at the latest (frontend prepare would be too late).
+	 */
+	if (!mca_fe_clocks_in_use(fe_clk_cl)) {
+		ret = mca_fe_enable_clocks(fe_clk_cl);
+		if (ret < 0)
+			return ret;
 	}
 
+	cl->clocks_in_use[substream->stream] = true;
+
+	return 0;
+}
+
+static int mca_be_startup(struct snd_pcm_substream *substream,
+			  struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *be = snd_soc_substream_to_rtd(substream);
+	struct snd_soc_pcm_runtime *fe = mca_be_get_fe(be, substream->stream);
+	struct mca_cluster *cl = mca_dai_to_cluster(dai);
+	struct mca_cluster *fe_cl;
+	struct mca_data *mca = cl->host;
+
 	if (!fe)
 		return -EINVAL;
-
 	fe_cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(fe, 0));
 
-	if (mca_be_started(cl)) {
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
+			       cl->base + REG_PORT_DATA_SEL);
+		mca_modify(cl, REG_PORT_ENABLES, PORT_ENABLES_TX_DATA,
+			   PORT_ENABLES_TX_DATA);
+	}
+
+	if (!fe_cl->clk_provider)
+		return 0;
+
+	if (mca_be_clk_started(cl)) {
 		/*
 		 * Port is already started in the other direction.
 		 * Make sure there isn't a conflict with another cluster
-		 * driving the port.
+		 * driving the port clocks.
 		 */
-		if (cl->port_driver != fe_cl->no)
+		if (cl->port_clk_driver != fe_cl->no)
 			return -EINVAL;
 
-		cl->port_started[substream->stream] = true;
+		cl->port_clk_started[substream->stream] = true;
 		return 0;
 	}
 
-	writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
-		       cl->base + REG_PORT_ENABLES);
 	writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
 		       cl->base + REG_PORT_CLOCK_SEL);
-	writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
-		       cl->base + REG_PORT_DATA_SEL);
+	mca_modify(cl, REG_PORT_ENABLES, PORT_ENABLES_CLOCKS,
+		   PORT_ENABLES_CLOCKS);
+
 	mutex_lock(&mca->port_mutex);
-	cl->port_driver = fe_cl->no;
+	cl->port_clk_driver = fe_cl->no;
 	mutex_unlock(&mca->port_mutex);
-	cl->port_started[substream->stream] = true;
+	cl->port_clk_started[substream->stream] = true;
 
 	return 0;
 }
@@ -753,27 +852,60 @@ static int mca_be_startup(struct snd_pcm_substream *substream,
 static void mca_be_shutdown(struct snd_pcm_substream *substream,
 			    struct snd_soc_dai *dai)
 {
+	struct snd_soc_pcm_runtime *be = snd_soc_substream_to_rtd(substream);
+	struct snd_soc_pcm_runtime *fe = mca_be_get_fe(be, substream->stream);
 	struct mca_cluster *cl = mca_dai_to_cluster(dai);
+	struct mca_cluster *fe_cl;
 	struct mca_data *mca = cl->host;
 
-	cl->port_started[substream->stream] = false;
+	if (cl->clocks_in_use[substream->stream] &&
+		!WARN_ON(cl->port_clk_driver < 0)) {
+		struct mca_cluster *fe_clk_cl = &mca->clusters[cl->port_clk_driver];
 
-	if (!mca_be_started(cl)) {
 		/*
-		 * Were we the last direction to shutdown?
-		 * Turn off the lights.
+		 * Typically the CODECs we are paired with will require clocks
+		 * to be present at time of mute with the 'mute_stream' op.
+		 * We need to disable the clocks here at the earliest (hw_free
+		 * would be too early).
+		 *
+		 * We are operating on a foreign cluster here, but since we
+		 * belong to the same PCM, accesses should have been
+		 * synchronized at ASoC level.
 		 */
-		writel_relaxed(0, cl->base + REG_PORT_ENABLES);
+		cl->clocks_in_use[substream->stream] = false;
+
+		if (!mca_fe_clocks_in_use(fe_clk_cl))
+			mca_fe_disable_clocks(fe_clk_cl);
+	}
+
+	if (!fe)
+		return;
+	fe_cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(fe, 0));
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		mca_modify(cl, REG_PORT_ENABLES, PORT_ENABLES_TX_DATA, 0);
 		writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
+	}
+
+	if (!fe_cl->clk_provider)
+		return;
+
+	cl->port_clk_started[substream->stream] = false;
+	if (!mca_be_clk_started(cl)) {
+		/*
+		 * Were we the last direction to shutdown?
+		 * Turn off the lights (clocks).
+		 */
+		mca_modify(cl, REG_PORT_ENABLES, PORT_ENABLES_CLOCKS, 0);
+		writel_relaxed(0, cl->base + REG_PORT_CLOCK_SEL);
 		mutex_lock(&mca->port_mutex);
-		cl->port_driver = -1;
+		cl->port_clk_driver = -1;
 		mutex_unlock(&mca->port_mutex);
 	}
 }
 
 static const struct snd_soc_dai_ops mca_be_ops = {
 	.prepare = mca_be_prepare,
-	.hw_free = mca_be_hw_free,
 	.startup = mca_be_startup,
 	.shutdown = mca_be_shutdown,
 };
@@ -997,8 +1129,10 @@ static void apple_mca_release(struct mca_data *mca)
 			dev_pm_domain_detach(cl->pd_dev, true);
 	}
 
-	if (mca->pd_link)
+	if (mca->pd_link) {
 		device_link_del(mca->pd_link);
+		mca->pd_link = NULL;
+	}
 
 	if (!IS_ERR_OR_NULL(mca->pd_dev))
 		dev_pm_domain_detach(mca->pd_dev, true);
@@ -1073,7 +1207,7 @@ static int apple_mca_probe(struct platform_device *pdev)
 		cl->host = mca;
 		cl->no = i;
 		cl->base = base + CLUSTER_STRIDE * i;
-		cl->port_driver = -1;
+		cl->port_clk_driver = -1;
 		cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
 		if (IS_ERR(cl->clk_parent)) {
 			dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index 501c951cc327c5..0e1e3fea421a52 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -1148,7 +1148,6 @@ struct snd_soc_dai_driver cs42l42_dai = {
 			.formats = CS42L42_FORMATS,
 		},
 		.symmetric_rate = 1,
-		.symmetric_sample_bits = 1,
 		.ops = &cs42l42_ops,
 };
 EXPORT_SYMBOL_NS_GPL(cs42l42_dai, "SND_SOC_CS42L42_CORE");
@@ -1676,7 +1675,7 @@ irqreturn_t cs42l42_irq_thread(int irq, void *data)
 		return IRQ_NONE;
 	}
 
-	/* Read sticky registers to clear interurpt */
+	/* Read sticky registers to clear interrupt */
 	for (i = 0; i < ARRAY_SIZE(stickies); i++) {
 		regmap_read(cs42l42->regmap, irq_params_table[i].status_addr,
 				&(stickies[i]));
@@ -2420,6 +2419,16 @@ int cs42l42_init(struct cs42l42_private *cs42l42)
 			(1 << CS42L42_ADC_PDN_SHIFT) |
 			(0 << CS42L42_PDN_ALL_SHIFT));
 
+	/*
+	 * Configure a faster digital ramp time, to avoid an audible
+	 * fade-in when streams start.
+	 */
+	regmap_update_bits(cs42l42->regmap, CS42L42_SFTRAMP_RATE,
+			   CS42L42_SFTRAMP_ASR_RATE_MASK |
+			   CS42L42_SFTRAMP_DSR_RATE_MASK,
+			   (10 << CS42L42_SFTRAMP_ASR_RATE_SHIFT) |
+			   (1 << CS42L42_SFTRAMP_DSR_RATE_SHIFT));
+
 	ret = cs42l42_handle_device_data(cs42l42->dev, cs42l42);
 	if (ret != 0)
 		goto err_shutdown;
diff --git a/sound/soc/codecs/tas2764-quirks.h b/sound/soc/codecs/tas2764-quirks.h
new file mode 100644
index 00000000000000..9cbbc2a9e5944f
--- /dev/null
+++ b/sound/soc/codecs/tas2764-quirks.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __TAS2764_QUIRKS__
+#define __TAS2764_QUIRKS__
+
+#include <linux/regmap.h>
+
+#include "tas2764.h"
+
+/*
+ * Disable the noise gate and clear a reserved bit in NS_CFG0
+ */
+#define TAS2764_NOISE_GATE_DISABLE	BIT(0)
+
+struct reg_sequence tas2764_noise_gate_dis_seq[] = {
+	REG_SEQ0(TAS2764_REG(0x0, 0x35), 0xb0)
+};
+
+/*
+ * CONV_VBAT_PVDD_MODE=1
+ */
+#define TAS2764_CONV_VBAT_PVDD_MODE	BIT(1)
+
+struct reg_sequence tas2764_conv_vbat_pvdd_mode_seq[] = {
+	REG_SEQ0(TAS2764_REG(0x0, 0x6b), 0x41)
+};
+
+/*
+ * Reset of DAC modulator when DSP is OFF
+ */
+#define TAS2764_DMOD_RST		BIT(2)
+
+struct reg_sequence tas2764_dmod_rst_seq[] = {
+	REG_SEQ0(TAS2764_REG(0x0, 0x76), 0x0)
+};
+
+/*
+ * Unknown 0x133/0x137 writes (maybe TDM related)
+ */
+#define TAS2764_UNK_SEQ0		BIT(3)
+
+struct reg_sequence tas2764_unk_seq0[] = {
+	REG_SEQ0(TAS2764_REG(0x1, 0x33), 0x80),
+	REG_SEQ0(TAS2764_REG(0x1, 0x37), 0x3a),
+};
+
+/*
+ * Unknown 0x614 - 0x61f writes
+ */
+#define TAS2764_APPLE_UNK_SEQ1		BIT(4)
+
+struct reg_sequence tas2764_unk_seq1[] = {
+	REG_SEQ0(TAS2764_REG(0x6, 0x14), 0x0),
+	REG_SEQ0(TAS2764_REG(0x6, 0x15), 0x13),
+	REG_SEQ0(TAS2764_REG(0x6, 0x16), 0x52),
+	REG_SEQ0(TAS2764_REG(0x6, 0x17), 0x0),
+	REG_SEQ0(TAS2764_REG(0x6, 0x18), 0xe4),
+	REG_SEQ0(TAS2764_REG(0x6, 0x19), 0xc),
+	REG_SEQ0(TAS2764_REG(0x6, 0x16), 0xaa),
+	REG_SEQ0(TAS2764_REG(0x6, 0x1b), 0x0),
+	REG_SEQ0(TAS2764_REG(0x6, 0x1c), 0x12),
+	REG_SEQ0(TAS2764_REG(0x6, 0x1d), 0xa0),
+	REG_SEQ0(TAS2764_REG(0x6, 0x1e), 0xd8),
+	REG_SEQ0(TAS2764_REG(0x6, 0x1f), 0x0),
+};
+
+/*
+ * Unknown writes in the 0xfd page (with secondary paging inside)
+ */
+#define TAS2764_APPLE_UNK_SEQ2		BIT(5)
+
+struct reg_sequence tas2764_unk_seq2[] = {
+	REG_SEQ0(TAS2764_REG(0xfd, 0x0d), 0xd),
+	REG_SEQ0(TAS2764_REG(0xfd, 0x6c), 0x2),
+	REG_SEQ0(TAS2764_REG(0xfd, 0x6d), 0xf),
+	REG_SEQ0(TAS2764_REG(0xfd, 0x0d), 0x0),
+};
+
+/*
+ * Disable 'Thermal Threshold 1'
+ */
+#define TAS2764_THERMAL_TH1_DISABLE	BIT(6)
+
+struct reg_sequence tas2764_thermal_th1_dis_seq[] = {
+	REG_SEQ0(TAS2764_REG(0x1, 0x47), 0x2),
+};
+
+/*
+ * Imitate Apple's shutdown dance
+ */
+#define TAS2764_SHUTDOWN_DANCE		BIT(7)
+
+struct reg_sequence tas2764_shutdown_dance_init_seq[] = {
+	/*
+	 * SDZ_MODE=01 (immediate)
+	 *
+	 * We want the shutdown to happen under the influence of
+	 * the magic writes in the 0xfdXX region, so make sure
+	 * the shutdown is immediate and no grace period is
+	 * observed by the codec.
+	 */
+	REG_SEQ0(TAS2764_REG(0x0, 0x7), 0x60),
+};
+
+struct reg_sequence tas2764_pre_shutdown_seq[] = {
+	REG_SEQ0(TAS2764_REG(0xfd, 0x0d), 0xd), /* switch hidden page */
+	REG_SEQ0(TAS2764_REG(0xfd, 0x64), 0x4), /* do write (unknown semantics) */
+	REG_SEQ0(TAS2764_REG(0xfd, 0x0d), 0x0), /* switch hidden page back */
+};
+
+struct reg_sequence tas2764_post_shutdown_seq[] = {
+	REG_SEQ0(TAS2764_REG(0xfd, 0x0d), 0xd),
+	REG_SEQ0(TAS2764_REG(0xfd, 0x64), 0x0), /* revert write from pre sequence */
+	REG_SEQ0(TAS2764_REG(0xfd, 0x0d), 0x0),
+};
+
+static int tas2764_do_quirky_pwr_ctrl_change(struct tas2764_priv *tas2764,
+					     unsigned int target)
+{
+	unsigned int curr;
+	int ret;
+
+	curr = snd_soc_component_read_field(tas2764->component,
+					       TAS2764_PWR_CTRL,
+					       TAS2764_PWR_CTRL_MASK);
+
+	if (target == curr)
+		return 0;
+
+#define TRANSITION(new, old) ((new) << 8 | (old))
+	switch (TRANSITION(target, curr)) {
+	case TRANSITION(TAS2764_PWR_CTRL_SHUTDOWN, TAS2764_PWR_CTRL_MUTE):
+	case TRANSITION(TAS2764_PWR_CTRL_SHUTDOWN, TAS2764_PWR_CTRL_ACTIVE):
+		ret = regmap_multi_reg_write(tas2764->regmap, tas2764_pre_shutdown_seq,
+					     ARRAY_SIZE(tas2764_pre_shutdown_seq));
+		if (ret < 0)
+			break;
+
+		ret = snd_soc_component_update_bits(tas2764->component,
+						    TAS2764_PWR_CTRL,
+						    TAS2764_PWR_CTRL_MASK,
+						    TAS2764_PWR_CTRL_SHUTDOWN);
+		if (ret < 0)
+			break;
+
+		ret = regmap_multi_reg_write(tas2764->regmap, tas2764_post_shutdown_seq,
+					     ARRAY_SIZE(tas2764_post_shutdown_seq));
+		fallthrough;
+	default:
+		ret = snd_soc_component_update_bits(tas2764->component, TAS2764_PWR_CTRL,
+						    TAS2764_PWR_CTRL_MASK, target);
+	}
+#undef TRANSITION
+
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/*
+ * Via devicetree (TODO):
+ *  - switch from spread spectrum to class-D switching
+ *  - disable edge control
+ *  - set BOP settings (the BOP config bits *and* BOP_SRC)
+ */
+
+/*
+ * Other setup TODOs:
+ *  - DVC ramp rate
+ */
+
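+/*
+ * Ordered so that the index of each entry matches the bit assigned to the
+ * corresponding quirk above (checked against apple_quirks at init time).
+ */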
+static struct tas2764_quirk_init_sequence {
+	struct reg_sequence *seq;
+	int len;
+} tas2764_quirk_init_sequences[] = {
+	{ tas2764_noise_gate_dis_seq, ARRAY_SIZE(tas2764_noise_gate_dis_seq) },
+	{ tas2764_conv_vbat_pvdd_mode_seq, ARRAY_SIZE(tas2764_conv_vbat_pvdd_mode_seq) },
+	{ tas2764_dmod_rst_seq, ARRAY_SIZE(tas2764_dmod_rst_seq) },
+	{ tas2764_unk_seq0, ARRAY_SIZE(tas2764_unk_seq0) },
+	{ tas2764_unk_seq1, ARRAY_SIZE(tas2764_unk_seq1) },
+	{ tas2764_unk_seq2, ARRAY_SIZE(tas2764_unk_seq2) },
+	{ tas2764_thermal_th1_dis_seq, ARRAY_SIZE(tas2764_thermal_th1_dis_seq) },
+	{ tas2764_shutdown_dance_init_seq, ARRAY_SIZE(tas2764_shutdown_dance_init_seq) },
+};
+
+#endif /* __TAS2764_QUIRKS__ */
diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
index 58315eab492a16..16a74659a75779 100644
--- a/sound/soc/codecs/tas2764.c
+++ b/sound/soc/codecs/tas2764.c
@@ -14,7 +14,9 @@
 #include <linux/regulator/consumer.h>
 #include <linux/regmap.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
+#include <linux/sysfs.h>
 #include <sound/soc.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -23,21 +25,35 @@
 
 #include "tas2764.h"
 
+enum tas2764_devid {
+	DEVID_TAS2764  = 0,
+	DEVID_SN012776 = 1
+};
+
 struct tas2764_priv {
 	struct snd_soc_component *component;
 	struct gpio_desc *reset_gpio;
 	struct gpio_desc *sdz_gpio;
+	struct regulator *sdz_reg;
 	struct regmap *regmap;
 	struct device *dev;
 	int irq;
-	
+	enum tas2764_devid devid;
+
 	int v_sense_slot;
 	int i_sense_slot;
+	u32 sdout_zero_mask;
 
 	bool dac_powered;
 	bool unmuted;
 };
 
+static int apple_quirks = 0x3f;
+module_param(apple_quirks, int, 0644);
+MODULE_PARM_DESC(apple_quirks, "Mask of quirks mimicking the behaviour of Apple's SN012776 driver");
+
+#include "tas2764-quirks.h"
+
 static const char *tas2764_int_ltch0_msgs[8] = {
 	"fault: over temperature", /* INT_LTCH0 & BIT(0) */
 	"fault: over current",
@@ -115,6 +131,9 @@ static int tas2764_update_pwr_ctrl(struct tas2764_priv *tas2764)
 	else
 		val = TAS2764_PWR_CTRL_SHUTDOWN;
 
+	if (apple_quirks & TAS2764_SHUTDOWN_DANCE)
+		return tas2764_do_quirky_pwr_ctrl_change(tas2764, val);
+
 	ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
 					    TAS2764_PWR_CTRL_MASK, val);
 	if (ret < 0)
@@ -139,9 +158,13 @@ static int tas2764_codec_suspend(struct snd_soc_component *component)
 	if (tas2764->sdz_gpio)
 		gpiod_set_value_cansleep(tas2764->sdz_gpio, 0);
 
+	regulator_disable(tas2764->sdz_reg);
+
 	regcache_cache_only(tas2764->regmap, true);
 	regcache_mark_dirty(tas2764->regmap);
 
+	usleep_range(6000, 7000);
+
 	return 0;
 }
 
@@ -150,19 +173,26 @@ static int tas2764_codec_resume(struct snd_soc_component *component)
 	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
 	int ret;
 
+	ret = regulator_enable(tas2764->sdz_reg);
+
+	if (ret) {
+		dev_err(tas2764->dev, "Failed to enable regulator\n");
+		return ret;
+	}
+
 	if (tas2764->sdz_gpio) {
 		gpiod_set_value_cansleep(tas2764->sdz_gpio, 1);
-		usleep_range(1000, 2000);
 	}
 
-	ret = tas2764_update_pwr_ctrl(tas2764);
+	usleep_range(1000, 2000);
 
+	regcache_cache_only(tas2764->regmap, false);
+
+	ret = regcache_sync(tas2764->regmap);
 	if (ret < 0)
 		return ret;
 
-	regcache_cache_only(tas2764->regmap, false);
-
-	return regcache_sync(tas2764->regmap);
+	return tas2764_update_pwr_ctrl(tas2764);
 }
 #else
 #define tas2764_codec_suspend NULL
@@ -180,33 +210,6 @@ static SOC_ENUM_SINGLE_DECL(
 static const struct snd_kcontrol_new tas2764_asi1_mux =
 	SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum);
 
-static int tas2764_dac_event(struct snd_soc_dapm_widget *w,
-			     struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
-	int ret;
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tas2764->dac_powered = true;
-		ret = tas2764_update_pwr_ctrl(tas2764);
-		break;
-	case SND_SOC_DAPM_PRE_PMD:
-		tas2764->dac_powered = false;
-		ret = tas2764_update_pwr_ctrl(tas2764);
-		break;
-	default:
-		dev_err(tas2764->dev, "Unsupported event\n");
-		return -EINVAL;
-	}
-
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 static const struct snd_kcontrol_new isense_switch =
 	SOC_DAPM_SINGLE("Switch", TAS2764_PWR_CTRL, TAS2764_ISENSE_POWER_EN, 1, 1);
 static const struct snd_kcontrol_new vsense_switch =
@@ -219,11 +222,10 @@ static const struct snd_soc_dapm_widget tas2764_dapm_widgets[] = {
 			    1, &isense_switch),
 	SND_SOC_DAPM_SWITCH("VSENSE", TAS2764_PWR_CTRL, TAS2764_VSENSE_POWER_EN,
 			    1, &vsense_switch),
-	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2764_dac_event,
-			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_OUTPUT("OUT"),
 	SND_SOC_DAPM_SIGGEN("VMON"),
-	SND_SOC_DAPM_SIGGEN("IMON")
+	SND_SOC_DAPM_SIGGEN("IMON"),
 };
 
 static const struct snd_soc_dapm_route tas2764_audio_map[] = {
@@ -241,15 +243,39 @@ static int tas2764_mute(struct snd_soc_dai *dai, int mute, int direction)
 {
 	struct tas2764_priv *tas2764 =
 			snd_soc_component_get_drvdata(dai->component);
+	int ret;
+
+	if (!mute) {
+		tas2764->dac_powered = true;
+		ret = tas2764_update_pwr_ctrl(tas2764);
+		if (ret)
+			return ret;
+	}
 
 	tas2764->unmuted = !mute;
-	return tas2764_update_pwr_ctrl(tas2764);
+	ret = tas2764_update_pwr_ctrl(tas2764);
+	if (ret)
+		return ret;
+
+	if (mute) {
+		/* Wait for ramp-down */
+		usleep_range(6000, 7000);
+
+		tas2764->dac_powered = false;
+		ret = tas2764_update_pwr_ctrl(tas2764);
+		if (ret)
+			return ret;
+
+		/* Wait a bit after shutdown */
+		usleep_range(2000, 3000);
+	}
+
+	return 0;
 }
 
 static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth)
 {
 	struct snd_soc_component *component = tas2764->component;
-	int sense_en;
 	int val;
 	int ret;
 
@@ -284,28 +310,6 @@ static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth)
 	if (val < 0)
 		return val;
 
-	if (val & (1 << TAS2764_VSENSE_POWER_EN))
-		sense_en = 0;
-	else
-		sense_en = TAS2764_TDM_CFG5_VSNS_ENABLE;
-
-	ret = snd_soc_component_update_bits(tas2764->component, TAS2764_TDM_CFG5,
-					    TAS2764_TDM_CFG5_VSNS_ENABLE,
-					    sense_en);
-	if (ret < 0)
-		return ret;
-
-	if (val & (1 << TAS2764_ISENSE_POWER_EN))
-		sense_en = 0;
-	else
-		sense_en = TAS2764_TDM_CFG6_ISNS_ENABLE;
-
-	ret = snd_soc_component_update_bits(tas2764->component, TAS2764_TDM_CFG6,
-					    TAS2764_TDM_CFG6_ISNS_ENABLE,
-					    sense_en);
-	if (ret < 0)
-		return ret;
-
 	return 0;
 }
 
@@ -361,6 +365,44 @@ static int tas2764_hw_params(struct snd_pcm_substream *substream,
 	return tas2764_set_samplerate(tas2764, params_rate(params));
 }
 
+static int tas2764_write_sdout_zero_mask(struct tas2764_priv *tas2764, int bclk_ratio)
+{
+	struct snd_soc_component *component = tas2764->component;
+	int nsense_slots = bclk_ratio / 8;
+	u32 cropped_mask;
+	int i, ret;
+
+	if (!tas2764->sdout_zero_mask)
+		return 0;
+
+	cropped_mask = tas2764->sdout_zero_mask & GENMASK(nsense_slots - 1, 0);
+
+	for (i = 0; i < 4; i++) {
+		ret = snd_soc_component_write(component, TAS2764_SDOUT_HIZ_1 + i,
+					      (cropped_mask >> (i * 8)) & 0xff);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = snd_soc_component_update_bits(component, TAS2764_SDOUT_HIZ_9,
+					    TAS2764_SDOUT_HIZ_9_FORCE_0_EN,
+					    TAS2764_SDOUT_HIZ_9_FORCE_0_EN);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int tas2764_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+	struct snd_soc_component *component = dai->component;
+	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
+
+	return tas2764_write_sdout_zero_mask(tas2764, ratio);
+}
+
 static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
 	struct snd_soc_component *component = dai->component;
@@ -435,7 +477,6 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai,
 				int slots, int slot_width)
 {
 	struct snd_soc_component *component = dai->component;
-	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
 	int left_slot, right_slot;
 	int slots_cfg;
 	int slot_size;
@@ -482,15 +523,26 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai,
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG5,
+	return 0;
+}
+
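+/* Enable I/V sense transmission on the given TDM TX slots */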
+static int tas2764_set_ivsense_transmit(struct tas2764_priv *tas2764, int i_slot, int v_slot)
+{
+	int ret;
+
+	ret = snd_soc_component_update_bits(tas2764->component, TAS2764_TDM_CFG5,
+					    TAS2764_TDM_CFG5_VSNS_ENABLE |
 					    TAS2764_TDM_CFG5_50_MASK,
-					    tas2764->v_sense_slot);
+					    TAS2764_TDM_CFG5_VSNS_ENABLE |
+					    v_slot);
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG6,
+	ret = snd_soc_component_update_bits(tas2764->component, TAS2764_TDM_CFG6,
+					    TAS2764_TDM_CFG6_ISNS_ENABLE |
 					    TAS2764_TDM_CFG6_50_MASK,
-					    tas2764->i_sense_slot);
+					    TAS2764_TDM_CFG6_ISNS_ENABLE |
+					    i_slot);
 	if (ret < 0)
 		return ret;
 
@@ -500,6 +552,7 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai,
 static const struct snd_soc_dai_ops tas2764_dai_ops = {
 	.mute_stream = tas2764_mute,
 	.hw_params  = tas2764_hw_params,
+	.set_bclk_ratio = tas2764_set_bclk_ratio,
 	.set_fmt    = tas2764_set_fmt,
 	.set_tdm_slot = tas2764_set_dai_tdm_slot,
 	.no_capture_mute = 1,
@@ -534,22 +587,94 @@ static struct snd_soc_dai_driver tas2764_dai_driver[] = {
 	},
 };
 
+static const u8 sn012776_bop_presets[] = {
+	0x01, 0x32, 0x02, 0x22, 0x83, 0x2d, 0x80, 0x02, 0x06,
+	0x32, 0x46, 0x30, 0x02, 0x06, 0x38, 0x40, 0x30, 0x02,
+	0x06, 0x3e, 0x37, 0x30, 0xff, 0xe6
+};
+
+static const struct regmap_config tas2764_i2c_regmap;
+
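+/* Apply the register init sequences selected by the apple_quirks bitmask */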
+static int tas2764_apply_init_quirks(struct tas2764_priv *tas2764)
+{
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(tas2764_quirk_init_sequences); i++) {
+		struct tas2764_quirk_init_sequence *init_seq =
+					&tas2764_quirk_init_sequences[i];
+		if (!init_seq->seq)
+			continue;
+
+		if (!(BIT(i) & apple_quirks))
+			continue;
+
+		ret = regmap_multi_reg_write(tas2764->regmap, init_seq->seq,
+					     init_seq->len);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int tas2764_read_die_temp(struct tas2764_priv *tas2764, int *result)
+{
+	int ret;
+
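+	/* TEMP reports the die temperature in degC with a 93 degC offset */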
+	ret = snd_soc_component_read(tas2764->component, TAS2764_TEMP);
+	if (ret < 0)
+		return ret;
+	*result = ret - 93;
+	return 0;
+}
+
+static ssize_t die_temp_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tas2764_priv *tas2764 = i2c_get_clientdata(to_i2c_client(dev));
+	int ret, temp;
+
+	ret = tas2764_read_die_temp(tas2764, &temp);
+
+	if (ret < 0)
+		return ret;
+
+	return sysfs_emit(buf, "%d C\n", temp);
+}
+
+static DEVICE_ATTR_RO(die_temp);
+
+static struct attribute *tas2764_sysfs_attrs[] = {
+	&dev_attr_die_temp.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(tas2764_sysfs);
+
 static int tas2764_codec_probe(struct snd_soc_component *component)
 {
 	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
-	int ret;
+	int ret, i;
 
 	tas2764->component = component;
 
+	ret = regulator_enable(tas2764->sdz_reg);
+	if (ret != 0) {
+		dev_err(tas2764->dev, "Failed to enable regulator: %d\n", ret);
+		return ret;
+	}
+
 	if (tas2764->sdz_gpio) {
 		gpiod_set_value_cansleep(tas2764->sdz_gpio, 1);
-		usleep_range(1000, 2000);
 	}
 
+	usleep_range(1000, 2000);
+
 	tas2764_reset(tas2764);
+	regmap_reinit_cache(tas2764->regmap, &tas2764_i2c_regmap);
 
 	if (tas2764->irq) {
-		ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0xff);
+		ret = snd_soc_component_write(tas2764->component, TAS2764_INT_MASK0, 0x00);
 		if (ret < 0)
 			return ret;
 
@@ -576,19 +701,52 @@ static int tas2764_codec_probe(struct snd_soc_component *component)
 			dev_warn(tas2764->dev, "failed to request IRQ: %d\n", ret);
 	}
 
-	ret = snd_soc_component_update_bits(tas2764->component, TAS2764_TDM_CFG5,
-					    TAS2764_TDM_CFG5_VSNS_ENABLE, 0);
-	if (ret < 0)
-		return ret;
+	if (tas2764->i_sense_slot != -1 && tas2764->v_sense_slot != -1) {
+		ret = tas2764_set_ivsense_transmit(tas2764, tas2764->i_sense_slot,
+						   tas2764->v_sense_slot);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	if (tas2764->devid == DEVID_SN012776) {
+		ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,
+					TAS2764_PWR_CTRL_BOP_SRC,
+					TAS2764_PWR_CTRL_BOP_SRC);
+		if (ret < 0)
+			return ret;
+
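+		/*
+		 * Load the brown-out prevention configuration into the
+		 * consecutive BOP config registers.
+		 */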
+		for (i = 0; i < ARRAY_SIZE(sn012776_bop_presets); i++) {
+			ret = snd_soc_component_write(component,
+						TAS2764_BOP_CFG0 + i,
+						sn012776_bop_presets[i]);
+
+			if (ret < 0)
+				return ret;
+		}
+
+		ret = tas2764_apply_init_quirks(tas2764);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = sysfs_create_groups(&component->dev->kobj, tas2764_sysfs_groups);
 
-	ret = snd_soc_component_update_bits(tas2764->component, TAS2764_TDM_CFG6,
-					    TAS2764_TDM_CFG6_ISNS_ENABLE, 0);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
+static void tas2764_codec_remove(struct snd_soc_component *component)
+{
+	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
+
+	regulator_disable(tas2764->sdz_reg);
+	sysfs_remove_groups(&component->dev->kobj, tas2764_sysfs_groups);
+}
+
 static DECLARE_TLV_DB_SCALE(tas2764_digital_tlv, 1100, 50, 0);
 static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10050, 50, 1);
 
@@ -601,16 +759,26 @@ static SOC_ENUM_SINGLE_DECL(
 	tas2764_hpf_enum, TAS2764_DC_BLK0,
 	TAS2764_DC_BLK0_HPF_FREQ_PB_SHIFT, tas2764_hpf_texts);
 
+static const char * const tas2764_oce_texts[] = {
+	"Disable", "Retry",
+};
+
+static SOC_ENUM_SINGLE_DECL(
+	tas2764_oce_enum, TAS2764_MISC_CFG1,
+	TAS2764_MISC_CFG1_OCE_RETRY_SHIFT, tas2764_oce_texts);
+
 static const struct snd_kcontrol_new tas2764_snd_controls[] = {
 	SOC_SINGLE_TLV("Speaker Volume", TAS2764_DVC, 0,
 		       TAS2764_DVC_MAX, 1, tas2764_playback_volume),
 	SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 1, 0x14, 0,
 		       tas2764_digital_tlv),
 	SOC_ENUM("HPF Corner Frequency", tas2764_hpf_enum),
+	SOC_ENUM("OCE Handling", tas2764_oce_enum),
 };
 
 static const struct snd_soc_component_driver soc_component_driver_tas2764 = {
 	.probe			= tas2764_codec_probe,
+	.remove			= tas2764_codec_remove,
 	.suspend		= tas2764_codec_suspend,
 	.resume			= tas2764_codec_resume,
 	.controls		= tas2764_snd_controls,
@@ -634,12 +802,13 @@ static const struct reg_default tas2764_reg_defaults[] = {
 	{ TAS2764_TDM_CFG2, 0x0a },
 	{ TAS2764_TDM_CFG3, 0x10 },
 	{ TAS2764_TDM_CFG5, 0x42 },
+	{ TAS2764_INT_CLK_CFG, 0x19 },
 };
 
 static const struct regmap_range_cfg tas2764_regmap_ranges[] = {
 	{
 		.range_min = 0,
-		.range_max = 1 * 128,
+		.range_max = 0xffff,
 		.selector_reg = TAS2764_PAGE,
 		.selector_mask = 0xff,
 		.selector_shift = 0,
@@ -651,9 +820,13 @@ static const struct regmap_range_cfg tas2764_regmap_ranges[] = {
 static bool tas2764_volatile_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
+	case TAS2764_SW_RST:
 	case TAS2764_INT_LTCH0 ... TAS2764_INT_LTCH4:
 	case TAS2764_INT_CLK_CFG:
 		return true;
+	case TAS2764_REG(0xf0, 0x0) ... TAS2764_REG(0xff, 0x0):
+		/* TI's undocumented registers for the application of quirks */
+		return true;
 	default:
 		return false;
 	}
@@ -668,13 +841,18 @@ static const struct regmap_config tas2764_i2c_regmap = {
 	.cache_type = REGCACHE_RBTREE,
 	.ranges = tas2764_regmap_ranges,
 	.num_ranges = ARRAY_SIZE(tas2764_regmap_ranges),
-	.max_register = 1 * 128,
+	.max_register = 0xffff,
 };
 
 static int tas2764_parse_dt(struct device *dev, struct tas2764_priv *tas2764)
 {
 	int ret = 0;
 
+	tas2764->sdz_reg = devm_regulator_get(dev, "SDZ");
+	if (IS_ERR(tas2764->sdz_reg))
+		return dev_err_probe(dev, PTR_ERR(tas2764->sdz_reg),
+				"Failed to get SDZ supply\n");
+
 	tas2764->reset_gpio = devm_gpiod_get_optional(tas2764->dev, "reset",
 						      GPIOD_OUT_HIGH);
 	if (IS_ERR(tas2764->reset_gpio)) {
@@ -695,16 +873,23 @@ static int tas2764_parse_dt(struct device *dev, struct tas2764_priv *tas2764)
 	ret = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
 				       &tas2764->i_sense_slot);
 	if (ret)
-		tas2764->i_sense_slot = 0;
+		tas2764->i_sense_slot = -1;
 
 	ret = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
 				       &tas2764->v_sense_slot);
 	if (ret)
-		tas2764->v_sense_slot = 2;
+		tas2764->v_sense_slot = -1;
+
+	ret = fwnode_property_read_u32(dev->fwnode, "ti,sdout-force-zero-mask",
+				       &tas2764->sdout_zero_mask);
+	if (ret)
+		tas2764->sdout_zero_mask = 0;
 
 	return 0;
 }
 
+static const struct of_device_id tas2764_of_match[];
+
 static int tas2764_i2c_probe(struct i2c_client *client)
 {
 	struct tas2764_priv *tas2764;
@@ -715,6 +900,11 @@ static int tas2764_i2c_probe(struct i2c_client *client)
 	if (!tas2764)
 		return -ENOMEM;
 
+	if (device_is_compatible(&client->dev, "ti,sn012776"))
+		tas2764->devid = DEVID_SN012776;
+	else
+		tas2764->devid = DEVID_TAS2764;
+
 	tas2764->dev = &client->dev;
 	tas2764->irq = client->irq;
 	i2c_set_clientdata(client, tas2764);
@@ -749,13 +939,12 @@ static const struct i2c_device_id tas2764_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, tas2764_i2c_id);
 
-#if defined(CONFIG_OF)
 static const struct of_device_id tas2764_of_match[] = {
-	{ .compatible = "ti,tas2764" },
+	{ .compatible = "ti,tas2764" },
+	{ .compatible = "ti,sn012776" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, tas2764_of_match);
-#endif
 
 static struct i2c_driver tas2764_i2c_driver = {
 	.driver = {
diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h
index 9490f2686e3891..4a419c11d4b08e 100644
--- a/sound/soc/codecs/tas2764.h
+++ b/sound/soc/codecs/tas2764.h
@@ -29,6 +29,7 @@
 #define TAS2764_PWR_CTRL_ACTIVE		0x0
 #define TAS2764_PWR_CTRL_MUTE		BIT(0)
 #define TAS2764_PWR_CTRL_SHUTDOWN	BIT(1)
+#define TAS2764_PWR_CTRL_BOP_SRC	BIT(7)
 
 #define TAS2764_VSENSE_POWER_EN		3
 #define TAS2764_ISENSE_POWER_EN		4
@@ -43,6 +44,10 @@
 
 #define TAS2764_CHNL_0  TAS2764_REG(0X0, 0x03)
 
+/* Miscellaneous */
+#define TAS2764_MISC_CFG1		TAS2764_REG(0x0, 0x06)
+#define TAS2764_MISC_CFG1_OCE_RETRY_SHIFT  5
+
 /* TDM Configuration Reg0 */
 #define TAS2764_TDM_CFG0		TAS2764_REG(0X0, 0x08)
 #define TAS2764_TDM_CFG0_SMP_MASK	BIT(5)
@@ -112,8 +117,24 @@
 #define TAS2764_INT_LTCH3               TAS2764_REG(0x0, 0x50)
 #define TAS2764_INT_LTCH4               TAS2764_REG(0x0, 0x51)
 
+/* Readout Registers */
+#define TAS2764_TEMP                    TAS2764_REG(0x0, 0x56)
+
 /* Clock/IRQ Settings */
 #define TAS2764_INT_CLK_CFG             TAS2764_REG(0x0, 0x5c)
 #define TAS2764_INT_CLK_CFG_IRQZ_CLR    BIT(2)
 
+#define TAS2764_BOP_CFG0                TAS2764_REG(0X0, 0x1d)
+
+#define TAS2764_SDOUT_HIZ_1		TAS2764_REG(0x1, 0x3d)
+#define TAS2764_SDOUT_HIZ_2		TAS2764_REG(0x1, 0x3e)
+#define TAS2764_SDOUT_HIZ_3		TAS2764_REG(0x1, 0x3f)
+#define TAS2764_SDOUT_HIZ_4		TAS2764_REG(0x1, 0x40)
+#define TAS2764_SDOUT_HIZ_5		TAS2764_REG(0x1, 0x41)
+#define TAS2764_SDOUT_HIZ_6		TAS2764_REG(0x1, 0x42)
+#define TAS2764_SDOUT_HIZ_7		TAS2764_REG(0x1, 0x43)
+#define TAS2764_SDOUT_HIZ_8		TAS2764_REG(0x1, 0x44)
+#define TAS2764_SDOUT_HIZ_9		TAS2764_REG(0x1, 0x45)
+#define TAS2764_SDOUT_HIZ_9_FORCE_0_EN	BIT(7)
+
 #endif /* __TAS2764__ */
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 863c3f672ba98d..867e0f25db2a8a 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -20,6 +20,7 @@
 #include <linux/regmap.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/sysfs.h>
 #include <sound/soc.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -70,23 +71,21 @@ static int tas2770_codec_suspend(struct snd_soc_component *component)
 	struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component);
 	int ret = 0;
 
-	regcache_cache_only(tas2770->regmap, true);
-	regcache_mark_dirty(tas2770->regmap);
+	ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
+					    TAS2770_PWR_CTRL_MASK,
+					    TAS2770_PWR_CTRL_SHUTDOWN);
+	if (ret < 0)
+		return ret;
 
-	if (tas2770->sdz_gpio) {
+	if (tas2770->sdz_gpio)
 		gpiod_set_value_cansleep(tas2770->sdz_gpio, 0);
-	} else {
-		ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
-						    TAS2770_PWR_CTRL_MASK,
-						    TAS2770_PWR_CTRL_SHUTDOWN);
-		if (ret < 0) {
-			regcache_cache_only(tas2770->regmap, false);
-			regcache_sync(tas2770->regmap);
-			return ret;
-		}
 
-		ret = 0;
-	}
+	regulator_disable(tas2770->sdz_reg);
+
+	regcache_cache_only(tas2770->regmap, true);
+	regcache_mark_dirty(tas2770->regmap);
+
+	usleep_range(6000, 7000);
 
 	return ret;
 }
@@ -96,18 +95,26 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
 	struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component);
 	int ret;
 
-	if (tas2770->sdz_gpio) {
-		gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
-		usleep_range(1000, 2000);
-	} else {
-		ret = tas2770_update_pwr_ctrl(tas2770);
-		if (ret < 0)
-			return ret;
+	ret = regulator_enable(tas2770->sdz_reg);
+
+	if (ret) {
+		dev_err(tas2770->dev, "Failed to enable regulator\n");
+		return ret;
 	}
 
+	if (tas2770->sdz_gpio)
+		gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+
+	usleep_range(1000, 2000);
+
 	regcache_cache_only(tas2770->regmap, false);
 
-	return regcache_sync(tas2770->regmap);
+	ret = regcache_sync(tas2770->regmap);
+	if (ret < 0)
+		return ret;
+
+	return tas2770_update_pwr_ctrl(tas2770);
 }
 #else
 #define tas2770_codec_suspend NULL
@@ -156,11 +163,41 @@ static const struct snd_kcontrol_new isense_switch =
 static const struct snd_kcontrol_new vsense_switch =
 	SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 2, 1, 1);
 
+static int sense_event(struct snd_soc_dapm_widget *w,
+                          struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+	struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component);
+	int ret = 0;
+
+	/*
+	 * Powering up ISENSE/VSENSE requires a trip through the shutdown state.
+	 * Do that here to ensure that our changes are applied properly, otherwise
+	 * we might end up with non-functional IVSENSE if playback started earlier,
+	 * which would break software speaker protection.
+	 */
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_REG:
+		ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
+						    TAS2770_PWR_CTRL_MASK,
+						    TAS2770_PWR_CTRL_SHUTDOWN);
+		break;
+	case SND_SOC_DAPM_POST_REG:
+		ret = tas2770_update_pwr_ctrl(tas2770);
+		break;
+	}
+
+	return ret;
+}
+
 static const struct snd_soc_dapm_widget tas2770_dapm_widgets[] = {
 	SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2770_asi1_mux),
-	SND_SOC_DAPM_SWITCH("ISENSE", TAS2770_PWR_CTRL, 3, 1, &isense_switch),
-	SND_SOC_DAPM_SWITCH("VSENSE", TAS2770_PWR_CTRL, 2, 1, &vsense_switch),
+	SND_SOC_DAPM_SWITCH_E("ISENSE", TAS2770_PWR_CTRL, 3, 1, &isense_switch,
+		sense_event, SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_REG),
+	SND_SOC_DAPM_SWITCH_E("VSENSE", TAS2770_PWR_CTRL, 2, 1, &vsense_switch,
+		sense_event, SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_REG),
 	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2770_dac_event,
 			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 	SND_SOC_DAPM_OUTPUT("OUT"),
@@ -189,6 +226,44 @@ static int tas2770_mute(struct snd_soc_dai *dai, int mute, int direction)
 	return tas2770_update_pwr_ctrl(tas2770);
 }
 
+static int tas2770_set_ivsense_transmit(struct tas2770_priv *tas2770,
+					int i_slot, int v_slot)
+{
+	struct snd_soc_component *component = tas2770->component;
+	int ret;
+
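+	/* Enable V and I sense transmission on the requested TDM TX slots */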
+	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG5,
+					    TAS2770_TDM_CFG_REG5_VSNS_MASK |
+					    TAS2770_TDM_CFG_REG5_50_MASK,
+					    TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
+					    v_slot);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG6,
+					    TAS2770_TDM_CFG_REG6_ISNS_MASK |
+					    TAS2770_TDM_CFG_REG6_50_MASK,
+					    TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
+					    i_slot);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int tas2770_set_pdm_transmit(struct tas2770_priv *tas2770, int slot)
+{
+	struct snd_soc_component *component = tas2770->component;
+	int ret;
+
+	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG7,
+					    TAS2770_TDM_CFG_REG7_PDM_MASK |
+					    TAS2770_TDM_CFG_REG7_50_MASK,
+					    TAS2770_TDM_CFG_REG7_PDM_ENABLE |
+					    slot);
+	return ret;
+}
+
 static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
 {
 	int ret;
@@ -199,19 +274,16 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
 		ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG2,
 						    TAS2770_TDM_CFG_REG2_RXW_MASK,
 						    TAS2770_TDM_CFG_REG2_RXW_16BITS);
-		tas2770->v_sense_slot = tas2770->i_sense_slot + 2;
 		break;
 	case SNDRV_PCM_FORMAT_S24_LE:
 		ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG2,
 						    TAS2770_TDM_CFG_REG2_RXW_MASK,
 						    TAS2770_TDM_CFG_REG2_RXW_24BITS);
-		tas2770->v_sense_slot = tas2770->i_sense_slot + 4;
 		break;
 	case SNDRV_PCM_FORMAT_S32_LE:
 		ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG2,
 						    TAS2770_TDM_CFG_REG2_RXW_MASK,
 						    TAS2770_TDM_CFG_REG2_RXW_32BITS);
-		tas2770->v_sense_slot = tas2770->i_sense_slot + 4;
 		break;
 
 	default:
@@ -221,22 +293,6 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG5,
-					    TAS2770_TDM_CFG_REG5_VSNS_MASK |
-					    TAS2770_TDM_CFG_REG5_50_MASK,
-					    TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
-		tas2770->v_sense_slot);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG6,
-					    TAS2770_TDM_CFG_REG6_ISNS_MASK |
-					    TAS2770_TDM_CFG_REG6_50_MASK,
-					    TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
-					    tas2770->i_sense_slot);
-	if (ret < 0)
-		return ret;
-
 	return 0;
 }
 
@@ -306,7 +362,7 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	struct snd_soc_component *component = dai->component;
 	struct tas2770_priv *tas2770 =
 			snd_soc_component_get_drvdata(component);
-	u8 tdm_rx_start_slot = 0, invert_fpol = 0, fpol_preinv = 0, asi_cfg_1 = 0;
+	u8 tdm_rx_start_slot = 0, invert_fpol = 0, fpol_preinv = 0, asi_cfg_1 = 0, asi_cfg_4 = 0;
 	int ret;
 
 	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
@@ -323,6 +379,7 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 		fallthrough;
 	case SND_SOC_DAIFMT_NB_NF:
 		asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING;
+		asi_cfg_4 |= TAS2770_TDM_CFG_REG4_TX_EDGE_FALLING;
 		break;
 	case SND_SOC_DAIFMT_IB_IF:
 		invert_fpol = 1;
@@ -341,6 +398,12 @@ static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	if (ret < 0)
 		return ret;
 
+	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG4,
+					    TAS2770_TDM_CFG_REG4_TX_EDGE_FALLING,
+					    asi_cfg_4);
+	if (ret < 0)
+		return ret;
+
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
 		tdm_rx_start_slot = 1;
@@ -485,26 +548,117 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
 	},
 };
 
+static int tas2770_read_die_temp(struct tas2770_priv *tas2770, int *result)
+{
+	int ret, reading;
+
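+	/*
+	 * Combine TEMP_MSB/TEMP_LSB into a 12-bit reading in 1/16 degC
+	 * steps, offset by 93 degC.
+	 */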
+	ret = snd_soc_component_read(tas2770->component, TAS2770_TEMP_MSB);
+	if (ret < 0)
+		return ret;
+	reading = ret << 4;
+
+	ret = snd_soc_component_read(tas2770->component, TAS2770_TEMP_LSB);
+	if (ret < 0)
+		return ret;
+	reading |= ret >> 4;
+
+	*result = reading - (93 * 16);
+	return 0;
+}
+
+static ssize_t die_temp_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct tas2770_priv *tas2770 = i2c_get_clientdata(to_i2c_client(dev));
+	int ret, temp;
+
+	ret = tas2770_read_die_temp(tas2770, &temp);
+
+	if (ret < 0)
+		return ret;
+
+	return sysfs_emit(buf, "%d.%03d C\n", temp / 16,
+			  (temp * 1000 / 16) % 1000);
+}
+
+static DEVICE_ATTR_RO(die_temp);
+
+static struct attribute *tas2770_sysfs_attrs[] = {
+	&dev_attr_die_temp.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(tas2770_sysfs);
+
 static const struct regmap_config tas2770_i2c_regmap;
 
 static int tas2770_codec_probe(struct snd_soc_component *component)
 {
 	struct tas2770_priv *tas2770 =
 			snd_soc_component_get_drvdata(component);
+	int ret;
 
 	tas2770->component = component;
 
+	ret = regulator_enable(tas2770->sdz_reg);
+	if (ret != 0) {
+		dev_err(tas2770->dev, "Failed to enable regulator: %d\n", ret);
+		return ret;
+	}
+
 	if (tas2770->sdz_gpio) {
 		gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
-		usleep_range(1000, 2000);
 	}
 
+	usleep_range(1000, 2000);
+
 	tas2770_reset(tas2770);
 	regmap_reinit_cache(tas2770->regmap, &tas2770_i2c_regmap);
 
+	if (tas2770->i_sense_slot != -1 && tas2770->v_sense_slot != -1) {
+		ret = tas2770_set_ivsense_transmit(tas2770, tas2770->i_sense_slot,
+						   tas2770->v_sense_slot);
+
+		if (ret < 0)
+			return ret;
+	}
+
+	if (tas2770->pdm_slot != -1) {
+		ret = tas2770_set_pdm_transmit(tas2770, tas2770->pdm_slot);
+
+		if (ret < 0)
+			return ret;
+	}
+
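+	/* Apply the SDOUT zero-fill and pull-down options from the DT */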
+	ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG4,
+					    TAS2770_TDM_CFG_REG4_TX_FILL,
+					    tas2770->sdout_zfill ? 0 :
+					    TAS2770_TDM_CFG_REG4_TX_FILL);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_component_update_bits(component, TAS2770_DIN_PD,
+					    TAS2770_DIN_PD_SDOUT,
+					    tas2770->sdout_pd ?
+					    TAS2770_DIN_PD_SDOUT : 0);
+	if (ret < 0)
+		return ret;
+
+	ret = sysfs_create_groups(&component->dev->kobj, tas2770_sysfs_groups);
+
+	if (ret < 0)
+		return ret;
+
 	return 0;
 }
 
+static void tas2770_codec_remove(struct snd_soc_component *component)
+{
+	struct tas2770_priv *tas2770 = snd_soc_component_get_drvdata(component);
+
+	sysfs_remove_groups(&component->dev->kobj, tas2770_sysfs_groups);
+	regulator_disable(tas2770->sdz_reg);
+}
+
 static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0);
 static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0);
 
@@ -517,6 +671,7 @@ static const struct snd_kcontrol_new tas2770_snd_controls[] = {
 
 static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
 	.probe			= tas2770_codec_probe,
+	.remove			= tas2770_codec_remove,
 	.suspend		= tas2770_codec_suspend,
 	.resume			= tas2770_codec_resume,
 	.controls		= tas2770_snd_controls,
@@ -629,7 +784,7 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
 		dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
 			 "ti,imon-slot-no");
 
-		tas2770->i_sense_slot = 0;
+		tas2770->i_sense_slot = -1;
 	}
 
 	rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
@@ -638,9 +793,23 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
 		dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
 			 "ti,vmon-slot-no");
 
-		tas2770->v_sense_slot = 2;
+		tas2770->v_sense_slot = -1;
 	}
 
+	rc = fwnode_property_read_u32(dev->fwnode, "ti,pdm-slot-no",
+				      &tas2770->pdm_slot);
+	if (rc) {
+		tas2770->pdm_slot = -1;
+	}
+
+	tas2770->sdout_pd = fwnode_property_read_bool(dev->fwnode, "ti,sdout-pull-down");
+	tas2770->sdout_zfill = fwnode_property_read_bool(dev->fwnode, "ti,sdout-zero-fill");
+
+	tas2770->sdz_reg = devm_regulator_get(dev, "SDZ");
+	if (IS_ERR(tas2770->sdz_reg))
+		return dev_err_probe(dev, PTR_ERR(tas2770->sdz_reg),
+				     "Failed to get SDZ supply\n");
+
 	tas2770->sdz_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
 	if (IS_ERR(tas2770->sdz_gpio)) {
 		if (PTR_ERR(tas2770->sdz_gpio) == -EPROBE_DEFER)
diff --git a/sound/soc/codecs/tas2770.h b/sound/soc/codecs/tas2770.h
index f75f40781ab136..b309d19c58e1da 100644
--- a/sound/soc/codecs/tas2770.h
+++ b/sound/soc/codecs/tas2770.h
@@ -67,6 +67,14 @@
 #define TAS2770_TDM_CFG_REG3_RXS_SHIFT 0x4
 #define TAS2770_TDM_CFG_REG3_30_MASK  GENMASK(3, 0)
 #define TAS2770_TDM_CFG_REG3_30_SHIFT 0
+    /* TDM Configuration Reg4 */
+#define TAS2770_TDM_CFG_REG4  TAS2770_REG(0X0, 0x0E)
+#define TAS2770_TDM_CFG_REG4_TX_LSB_CFG BIT(7)
+#define TAS2770_TDM_CFG_REG4_TX_KEEPER_CFG BIT(6)
+#define TAS2770_TDM_CFG_REG4_TX_KEEPER BIT(5)
+#define TAS2770_TDM_CFG_REG4_TX_FILL BIT(4)
+#define TAS2770_TDM_CFG_REG4_TX_OFFSET_MASK GENMASK(3, 1)
+#define TAS2770_TDM_CFG_REG4_TX_EDGE_FALLING BIT(0)
     /* TDM Configuration Reg5 */
 #define TAS2770_TDM_CFG_REG5  TAS2770_REG(0X0, 0x0F)
 #define TAS2770_TDM_CFG_REG5_VSNS_MASK  BIT(6)
@@ -77,6 +85,11 @@
 #define TAS2770_TDM_CFG_REG6_ISNS_MASK  BIT(6)
 #define TAS2770_TDM_CFG_REG6_ISNS_ENABLE  BIT(6)
 #define TAS2770_TDM_CFG_REG6_50_MASK  GENMASK(5, 0)
+    /* TDM Configuration Reg7 */
+#define TAS2770_TDM_CFG_REG7  TAS2770_REG(0X0, 0x11)
+#define TAS2770_TDM_CFG_REG7_PDM_MASK  BIT(6)
+#define TAS2770_TDM_CFG_REG7_PDM_ENABLE  BIT(6)
+#define TAS2770_TDM_CFG_REG7_50_MASK	GENMASK(5, 0)
     /* Brown Out Prevention Reg0 */
 #define TAS2770_BO_PRV_REG0  TAS2770_REG(0X0, 0x1B)
     /* Interrupt MASK Reg0 */
@@ -110,6 +123,9 @@
 #define TAS2770_TEMP_LSB  TAS2770_REG(0X0, 0x2A)
     /* Interrupt Configuration */
 #define TAS2770_INT_CFG  TAS2770_REG(0X0, 0x30)
+    /* Data In Pull-Down */
+#define TAS2770_DIN_PD  TAS2770_REG(0X0, 0x31)
+#define TAS2770_DIN_PD_SDOUT BIT(7)
     /* Misc IRQ */
 #define TAS2770_MISC_IRQ  TAS2770_REG(0X0, 0x32)
     /* Clock Configuration */
@@ -134,10 +150,14 @@ struct tas2770_priv {
 	struct snd_soc_component *component;
 	struct gpio_desc *reset_gpio;
 	struct gpio_desc *sdz_gpio;
+	struct regulator *sdz_reg;
 	struct regmap *regmap;
 	struct device *dev;
 	int v_sense_slot;
 	int i_sense_slot;
+	int pdm_slot;
+	bool sdout_pd;
+	bool sdout_zfill;
 	bool dac_powered;
 	bool unmuted;
 };
diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
index 2d0d04e0232da0..672d0f6945d015 100644
--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
@@ -1220,7 +1220,7 @@ static struct snd_soc_dai_link mt8188_mt6359_dai_links[] = {
 	},
 };
 
-static void mt8188_fixup_controls(struct snd_soc_card *card)
+static int mt8188_fixup_controls(struct snd_soc_card *card)
 {
 	struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(card);
 	struct mtk_platform_card_data *card_data = soc_card_data->card_data;
@@ -1242,6 +1242,8 @@ static void mt8188_fixup_controls(struct snd_soc_card *card)
 		else
 			dev_warn(card->dev, "Cannot find ctl : Headphone Switch\n");
 	}
+
+	return 0;
 }
 
 static struct snd_soc_card mt8188_mt6359_soc_card = {
diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
index e6eb71b3010a83..68e89ca06035ab 100644
--- a/sound/soc/soc-card.c
+++ b/sound/soc/soc-card.c
@@ -194,10 +194,16 @@ int snd_soc_card_late_probe(struct snd_soc_card *card)
 	return 0;
 }
 
-void snd_soc_card_fixup_controls(struct snd_soc_card *card)
+int snd_soc_card_fixup_controls(struct snd_soc_card *card)
 {
-	if (card->fixup_controls)
-		card->fixup_controls(card);
+	if (card->fixup_controls) {
+		int ret = card->fixup_controls(card);
+
+		if (ret < 0)
+			return soc_card_ret(card, ret);
+	}
+
+	return 0;
 }
 
 int snd_soc_card_remove(struct snd_soc_card *card)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 26b34b68850839..f710ce7970e6d7 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2283,7 +2283,10 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
 		goto probe_end;
 
 	snd_soc_dapm_new_widgets(card);
-	snd_soc_card_fixup_controls(card);
+
+	ret = snd_soc_card_fixup_controls(card);
+	if (ret < 0)
+		goto probe_end;
 
 	ret = snd_card_register(card->snd_card);
 	if (ret < 0) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b5116b700d7310..8b16ab5b9afa81 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2253,6 +2253,141 @@ static const struct file_operations dapm_bias_fops = {
 	.llseek = default_llseek,
 };
 
+static ssize_t dapm_graph_read_file(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct snd_soc_card *card = file->private_data;
+	struct snd_soc_dapm_context *dapm;
+	struct snd_soc_dapm_path *p;
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_soc_dapm_widget *wdone[16];
+	struct snd_soc_dai *dai;
+	int i, num_wdone = 0, cluster = 0;
+	char *buf;
+	ssize_t bufsize;
+	ssize_t ret = 0;
+
+	bufsize = 1024 * card->num_dapm_widgets;
+	buf = kmalloc(bufsize, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&card->dapm_mutex);
+
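+/* Append to 'buf'; scnprintf() clamps output to the remaining space */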
+#define bufprintf(...) \
+		ret += scnprintf(buf + ret, bufsize - ret, __VA_ARGS__)
+
+	bufprintf("digraph dapm {\n");
+
+	/*
+	 * Print the user-visible devices of the card.
+	 */
+	bufprintf("subgraph cluster_%d {\n", cluster++);
+	bufprintf("label=\"Devices\";style=filled;fillcolor=gray;\n");
+	for_each_card_rtds(card, rtd) {
+		if (rtd->dai_link->no_pcm)
+			continue;
+
+		bufprintf("w%pK [label=\"%d: %s\"];\n", rtd,
+			  rtd->pcm->device, rtd->dai_link->name);
+	}
+	bufprintf("};\n");
+
+	/*
+	 * Print the playback/capture widgets of DAIs just next to
+	 * the user-visible devices. Keep the list of already printed
+	 * widgets in 'wdone', so they will be skipped later.
+	 */
+	for_each_card_rtds(card, rtd) {
+		for_each_rtd_cpu_dais(rtd, i, dai) {
+			if (dai->stream[SNDRV_PCM_STREAM_PLAYBACK].widget) {
+				w = dai->stream[SNDRV_PCM_STREAM_PLAYBACK].widget;
+				bufprintf("w%pK [label=\"%s\"];\n", w, w->name);
+				if (!rtd->dai_link->no_pcm)
+					bufprintf("w%pK -> w%pK;\n", rtd, w);
+				if (num_wdone < ARRAY_SIZE(wdone)) {
+					wdone[num_wdone] = w;
+					num_wdone++;
+				}
+			}
+
+			if (dai->stream[SNDRV_PCM_STREAM_CAPTURE].widget) {
+				w = dai->stream[SNDRV_PCM_STREAM_CAPTURE].widget;
+				bufprintf("w%pK [label=\"%s\"];\n", w, w->name);
+				if (!rtd->dai_link->no_pcm)
+					bufprintf("w%pK -> w%pK;\n", w, rtd);
+				if (num_wdone < ARRAY_SIZE(wdone)) {
+					wdone[num_wdone] = w;
+					num_wdone++;
+				}
+			}
+		}
+	}
+
+	for_each_card_dapms(card, dapm) {
+		const char *prefix = soc_dapm_prefix(dapm);
+
+		if (dapm != &card->dapm) {
+			bufprintf("subgraph cluster_%d {\n", cluster++);
+			if (prefix)
+				bufprintf("label=\"%s\";\n", prefix);
+			else if (dapm->component)
+				bufprintf("label=\"%s\";\n",
+					  dapm->component->name);
+		}
+
+		for_each_card_widgets(dapm->card, w) {
+			const char *name = w->name;
+			bool skip = false;
+
+			if (w->dapm != dapm)
+				continue;
+
+			if (list_empty(&w->edges[0]) && list_empty(&w->edges[1]))
+				continue;
+
+			for (i = 0; i < num_wdone; i++)
+				if (wdone[i] == w)
+					skip = true;
+			if (skip)
+				continue;
+
+			if (prefix && strlen(name) > strlen(prefix) + 1)
+				name += strlen(prefix) + 1;
+
+			bufprintf("w%pK [label=\"%s\"];\n", w, name);
+		}
+
+		if (dapm != &card->dapm)
+			bufprintf("}\n");
+	}
+
+	list_for_each_entry(p, &card->paths, list) {
+		if (p->name)
+			bufprintf("w%pK -> w%pK [label=\"%s\"];\n",
+				  p->source, p->sink, p->name);
+		else
+			bufprintf("w%pK -> w%pK;\n", p->source, p->sink);
+	}
+
+	bufprintf("}\n");
+#undef bufprintf
+
+	mutex_unlock(&card->dapm_mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations dapm_graph_fops = {
+	.open = simple_open,
+	.read = dapm_graph_read_file,
+	.llseek = default_llseek,
+};
+
 void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
 	struct dentry *parent)
 {
@@ -2263,6 +2398,10 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
 
 	debugfs_create_file("bias_level", 0444, dapm->debugfs_dapm, dapm,
 			    &dapm_bias_fops);
+
+	if (dapm == &dapm->card->dapm)
+		debugfs_create_file("graph.dot", 0444, dapm->debugfs_dapm,
+				    dapm->card, &dapm_graph_fops);
 }
 
 static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index b0e4e4168f38d5..dfce92515814f4 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -177,28 +177,20 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
 {
 	struct soc_mixer_control *mc =
 		(struct soc_mixer_control *)kcontrol->private_value;
-	const char *vol_string = NULL;
-	int max;
+	int platform_max;
 
-	max = uinfo->value.integer.max = mc->max - mc->min;
-	if (mc->platform_max && mc->platform_max < max)
-		max = mc->platform_max;
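+	/* Without a platform limit, fall back to the control's own maximum */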
+	if (!mc->platform_max)
+		mc->platform_max = mc->max;
+	platform_max = mc->platform_max;
 
-	if (max == 1) {
-		/* Even two value controls ending in Volume should always be integer */
-		vol_string = strstr(kcontrol->id.name, " Volume");
-		if (vol_string && !strcmp(vol_string, " Volume"))
-			uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-		else
-			uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
-	} else {
+	if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	else
 		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-	}
 
 	uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
 	uinfo->value.integer.min = 0;
-	uinfo->value.integer.max = max;
-
+	uinfo->value.integer.max = platform_max - mc->min;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
@@ -639,37 +631,218 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
 }
 EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
 
+bool snd_soc_control_matches(struct snd_kcontrol *kctl,
+	const char *pattern)
+{
+	const char *name = kctl->id.name;
+
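+	/*
+	 * A pattern starting with '*' (optionally followed by a space)
+	 * matches the tail of the control name; anything else must
+	 * match the full name.
+	 */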
+	if (pattern[0] == '*') {
+		int namelen;
+		int patternlen;
+
+		pattern++;
+		if (pattern[0] == ' ')
+			pattern++;
+
+		namelen = strlen(name);
+		patternlen = strlen(pattern);
+
+		if (namelen > patternlen)
+			name += namelen - patternlen;
+	}
+
+	return !strcmp(name, pattern);
+}
+EXPORT_SYMBOL_GPL(snd_soc_control_matches);
+
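+/* Clamp the control's current value(s) to the configured platform_max */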
+static int soc_clip_to_platform_max(struct snd_kcontrol *kctl)
+{
+	struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+	struct snd_ctl_elem_value uctl;
+	int ret;
+
+	if (!mc->platform_max)
+		return 0;
+
+	ret = kctl->get(kctl, &uctl);
+	if (ret < 0)
+		return ret;
+
+	if (uctl.value.integer.value[0] > mc->platform_max)
+		uctl.value.integer.value[0] = mc->platform_max;
+
+	if (snd_soc_volsw_is_stereo(mc) &&
+	    uctl.value.integer.value[1] > mc->platform_max)
+		uctl.value.integer.value[1] = mc->platform_max;
+
+	ret = kctl->put(kctl, &uctl);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int soc_limit_volume(struct snd_kcontrol *kctl, int max)
+{
+	struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
+
+	if (max <= 0 || max > mc->max - mc->min)
+		return -EINVAL;
+	mc->platform_max = max;
+
+	return soc_clip_to_platform_max(kctl);
+}
+
 /**
- * snd_soc_limit_volume - Set new limit to an existing volume control.
+ * snd_soc_limit_volume - Set new limit to existing volume controls
  *
  * @card: where to look for the control
- * @name: Name of the control
+ * @name: name pattern
  * @max: new maximum limit
+ *
+ * Finds controls matching the given name (which can be either a name
+ * verbatim, or a pattern starting with the wildcard '*') and sets
+ * a platform volume limit on them.
  *
- * Return 0 for success, else error.
+ * Return number of matching controls on success, else error. At least
+ * one control needs to match the pattern.
  */
 int snd_soc_limit_volume(struct snd_soc_card *card,
 	const char *name, int max)
 {
 	struct snd_kcontrol *kctl;
-	int ret = -EINVAL;
+	int hits = 0;
+	int ret;
 
-	/* Sanity check for name and max */
-	if (unlikely(!name || max <= 0))
+	/* Sanity check for name */
+	if (unlikely(!name))
 		return -EINVAL;
 
-	kctl = snd_soc_card_get_kcontrol(card, name);
-	if (kctl) {
-		struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
-		if (max <= mc->max - mc->min) {
-			mc->platform_max = max;
-			ret = 0;
-		}
+	list_for_each_entry(kctl, &card->snd_card->controls, list) {
+		if (!snd_soc_control_matches(kctl, name))
+			continue;
+
+		ret = soc_limit_volume(kctl, max);
+		if (ret < 0)
+			return ret;
+		hits++;
 	}
-	return ret;
+
+	if (!hits)
+		return -EINVAL;
+
+	return hits;
 }
 EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
 
+/**
+ * snd_soc_deactivate_kctl - Activate/deactivate controls matching a pattern
+ *
+ * @card: where to look for the controls
+ * @name: name pattern
+ * @active: non-zero to activate, zero to deactivate
+ *
+ * Return number of matching controls on success, else error.
+ * At least one control needs to match the pattern.
+ */
+int snd_soc_deactivate_kctl(struct snd_soc_card *card,
+	const char *name, int active)
+{
+	struct snd_kcontrol *kctl;
+	int hits = 0;
+	int ret;
+
+	/* Sanity check for name */
+	if (unlikely(!name))
+		return -EINVAL;
+
+	list_for_each_entry(kctl, &card->snd_card->controls, list) {
+		if (!snd_soc_control_matches(kctl, name))
+			continue;
+
+		ret = snd_ctl_activate_id(card->snd_card, &kctl->id, active);
+		if (ret < 0)
+			return ret;
+		hits++;
+	}
+
+	if (!hits)
+		return -EINVAL;
+
+	return hits;
+}
+EXPORT_SYMBOL_GPL(snd_soc_deactivate_kctl);
+
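+/* Look up the enum item named 'strval' and set it on all channels */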
+static int soc_set_enum_kctl(struct snd_kcontrol *kctl, const char *strval)
+{
+	struct snd_ctl_elem_value value;
+	struct snd_ctl_elem_info info;
+	int sel, i, ret;
+
+	ret = kctl->info(kctl, &info);
+	if (ret < 0)
+		return ret;
+
+	if (info.type != SNDRV_CTL_ELEM_TYPE_ENUMERATED)
+		return -EINVAL;
+
+	for (sel = 0; sel < info.value.enumerated.items; sel++) {
+		info.value.enumerated.item = sel;
+		ret = kctl->info(kctl, &info);
+		if (ret < 0)
+			return ret;
+
+		if (!strcmp(strval, info.value.enumerated.name))
+			break;
+	}
+
+	if (sel == info.value.enumerated.items)
+		return -EINVAL;
+
+	for (i = 0; i < info.count; i++)
+		value.value.enumerated.item[i] = sel;
+
+	return kctl->put(kctl, &value);
+}
+
+/**
+ * snd_soc_set_enum_kctl - Set enumerated controls matching a pattern
+ *
+ * @card: where to look for the controls
+ * @name: name pattern
+ * @value: string value to set the controls to
+ *
+ * Return number of matching and set controls on success, else error.
+ * At least one control needs to match the pattern.
+ */
+int snd_soc_set_enum_kctl(struct snd_soc_card *card,
+	const char *name, const char *value)
+{
+	struct snd_kcontrol *kctl;
+	int hits = 0;
+	int ret;
+
+	/* Sanity check for name */
+	if (unlikely(!name))
+		return -EINVAL;
+
+	list_for_each_entry(kctl, &card->snd_card->controls, list) {
+		if (!snd_soc_control_matches(kctl, name))
+			continue;
+
+		ret = soc_set_enum_kctl(kctl, value);
+		if (ret < 0)
+			return ret;
+		hits++;
+	}
+
+	if (!hits)
+		return -EINVAL;
+
+	return hits;
+}
+EXPORT_SYMBOL_GPL(snd_soc_set_enum_kctl);
+
 int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
 		       struct snd_ctl_elem_info *uinfo)
 {
