mirror of https://github.com/torvalds/linux.git
Merge tag 'drm-misc-next-2025-04-09' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v6.16-rc1:

UAPI Changes:
- Add ASAHI uapi header!
- Add apple fourcc modifiers.
- Add capset virtio definitions to UAPI.
- Extend EXPORT_SYNC_FILE for timeline syncobjs.

Cross-subsystem Changes:
- Adjust DMA-BUF sg handling to not cache map on attach.
- Update drm/ci, hlcdc, virtio, maintainers.
- Update fbdev todo.
- Allow setting dma-device for dma-buf import.
- Export efi_mem_desc_lookup to make efidrm build as a module.

Core Changes:
- Update drm scheduler docs.
- Use the correct resv object in TTM delayed destroy.
- Fix compiler warning with panic qr code, and other small fixes.
- drm/ci updates.
- Add debugfs file for listing all bridges.
- Small fixes to drm/client, ttm tests.
- Add documentation to display/hdmi.
- Add kunit tests for bridges.
- Don't fail managed device probing if connector polling fails.
- Create Kconfig.debug for drm core.
- Add tests for the drm scheduler.
- Add and use new access helpers for DPCD.
- Add generic and optimized conversions for format-helper.
- Begin refcounting panel for improving lifetime handling.
- Unify simpledrm and ofdrm sysfb, and add extra features.
- Split hdmi audio in bridge to make DP audio work.

Driver Changes:
- Convert drivers to use devm_platform_ioremap_resource().
- Assorted small fixes to imx/legacy-bridge, gma500, pl111, nouveau, vc4,
  vmwgfx, ast, mxsfb, xlnx, accel/qaic, v3d, bridge/imx8qxp-ldb, ofdrm,
  bridge/fsl-ldb, udl, bridge/ti-sn65dsi86, bridge/anx7625, cirrus-qemu,
  bridge/cdns-dsi, panel/sharp, panel/himax, bridge/sil902x, renesas,
  imagination, various panels.
- Allow attaching more displays to vkms.
- Add Powertip PH128800T004-ZZA01 panel.
- Add rotation quirk for ZOTAC panel.
- Convert bridge/tc358775 to atomic.
- Remove deprecated panel calls from synaptics, novatek, samsung panels.
- Refactor shmem helper page pinning and accel drivers using it.
- Add dmabuf support to accel/amdxdna.
- Use 4k page table format for panfrost/mediatek.
- Add common powerup/down dp link helper and use it.
- Assorted compiler warning fixes.
- Support dma-buf import for renesas.

Signed-off-by: Dave Airlie <airlied@redhat.com>

# Conflicts:
#   include/drm/drm_kunit_helpers.h

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://lore.kernel.org/r/e147ff95-697b-4067-9e2e-7cbd424e162a@linux.intel.com
commit 683058df13
@@ -690,6 +690,13 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_src_buf'
   - 'v4l2_m2m_for_each_src_buf_safe'
   - 'virtio_device_for_each_vq'
+  - 'vkms_config_for_each_connector'
+  - 'vkms_config_for_each_crtc'
+  - 'vkms_config_for_each_encoder'
+  - 'vkms_config_for_each_plane'
+  - 'vkms_config_connector_for_each_possible_encoder'
+  - 'vkms_config_encoder_for_each_possible_crtc'
+  - 'vkms_config_plane_for_each_possible_crtc'
   - 'while_for_each_ftrace_op'
   - 'xa_for_each'
   - 'xa_for_each_marked'
@@ -246,6 +246,8 @@ properties:
       - osddisplays,osd070t1718-19ts
         # One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
       - osddisplays,osd101t2045-53ts
+        # POWERTIP PH128800T004-ZZA01 10.1" WXGA TFT LCD panel
+      - powertip,ph128800t004-zza01
         # POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel
       - powertip,ph128800t006-zhc01
         # POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
@@ -19,6 +19,8 @@ properties:
       - const: samsung,atna33xc20
       - items:
           - enum:
+              # Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel
+              - samsung,atna40yk20
               # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
              - samsung,atna45af01
               # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#

title: Broadcom V3D GPU

maintainers:
  - Eric Anholt <eric@anholt.net>
  - Maíra Canal <mcanal@igalia.com>
  - Nicolas Saenz Julienne <nsaenzjulienne@suse.de>

properties:

@@ -22,20 +22,12 @@ properties:
      - brcm,7278-v3d

  reg:
    items:
      - description: hub register (required)
      - description: core0 register (required)
      - description: GCA cache controller register (if GCA controller present)
      - description: bridge register (if no external reset controller)
    minItems: 2
    maxItems: 4

  reg-names:
    items:
      - const: hub
      - const: core0
      - enum: [ bridge, gca ]
      - enum: [ bridge, gca ]
    minItems: 2
    maxItems: 4

  interrupts:
    items:
@@ -58,6 +50,76 @@ required:
   - reg-names
   - interrupts
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: brcm,2711-v3d
+    then:
+      properties:
+        reg:
+          items:
+            - description: hub register
+            - description: core0 register
+        reg-names:
+          items:
+            - const: hub
+            - const: core0
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: brcm,2712-v3d
+    then:
+      properties:
+        reg:
+          items:
+            - description: hub register
+            - description: core0 register
+            - description: SMS state manager register
+        reg-names:
+          items:
+            - const: hub
+            - const: core0
+            - const: sms
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: brcm,7268-v3d
+    then:
+      properties:
+        reg:
+          items:
+            - description: hub register
+            - description: core0 register
+            - description: GCA cache controller register
+            - description: bridge register
+        reg-names:
+          items:
+            - const: hub
+            - const: core0
+            - const: gca
+            - const: bridge
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: brcm,7278-v3d
+    then:
+      properties:
+        reg:
+          items:
+            - description: hub register
+            - description: core0 register
+            - description: bridge register
+        reg-names:
+          items:
+            - const: hub
+            - const: core0
+            - const: bridge
+
 additionalProperties: false
 
 examples:

@@ -66,9 +128,9 @@ examples:
         compatible = "brcm,7268-v3d";
         reg = <0xf1200000 0x4000>,
               <0xf1208000 0x4000>,
-              <0xf1204000 0x100>,
-              <0xf1204100 0x100>;
-        reg-names = "hub", "core0", "bridge", "gca";
+              <0xf1204100 0x100>,
+              <0xf1204000 0x100>;
+        reg-names = "hub", "core0", "gca", "bridge";
         interrupts = <0 78 4>,
                      <0 77 4>;
     };
@@ -115,6 +115,10 @@ created (eg. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines)
 5. The various jobs will be run and when the pipeline is finished, all jobs
    should be green unless a regression has been found.
 
+6. Warnings in the pipeline indicate that lockdep
+   (see Documentation/locking/lockdep-design.rst) issues have been detected
+   during the tests.
+
 
 How to update test expectations
 ===============================
@@ -27,3 +27,8 @@ drm/xe uAPI
 ===========
 
 .. kernel-doc:: include/uapi/drm/xe_drm.h
+
+drm/asahi uAPI
+================
+
+.. kernel-doc:: include/uapi/drm/asahi_drm.h
@@ -233,6 +233,21 @@ Panel Self Refresh Helper Reference
 .. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c
    :export:
 
+HDMI Atomic State Helpers
+=========================
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c
+   :doc: hdmi helpers
+
+Functions Reference
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c
+   :export:
+
 HDCP Helper Functions Reference
 ===============================
@@ -27,3 +27,6 @@ GSP Support
 
 .. kernel-doc:: drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
    :doc: GSP message queue element
+
+.. kernel-doc:: drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+   :doc: GSP message handling policy
@@ -441,14 +441,15 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>
 
 Level: Intermediate
 
-Request memory regions in all drivers
--------------------------------------
+Request memory regions in all fbdev drivers
+--------------------------------------------
 
-Go through all drivers and add code to request the memory regions that the
-driver uses. This requires adding calls to request_mem_region(),
+Old/ancient fbdev drivers do not request their memory properly.
+Go through these drivers and add code to request the memory regions
+that the driver uses. This requires adding calls to request_mem_region(),
 pci_request_region() or similar functions. Use helpers for managed cleanup
-where possible.
-
+where possible. Problematic areas include hardware that has exclusive ranges
+like VGA. VGA16fb does not request the range as it is expected.
 Drivers are pretty bad at doing this and there used to be conflicts among
 DRM and fbdev drivers. Still, it's the correct thing to do.
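As a rough illustration of what this TODO item asks for (a hypothetical sketch, not part of this patch; the driver name, resource index and region label are made up), a probe function could claim its framebuffer aperture through a managed helper so the region is released automatically when the device is unbound::

    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /* Hypothetical fbdev probe: request the memory region before touching it. */
    static int example_fb_probe(struct platform_device *pdev)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    return -ENODEV;

            /* devm_request_mem_region() ties the release to device teardown. */
            if (!devm_request_mem_region(&pdev->dev, res->start,
                                         resource_size(res), "example-fb"))
                    return -EBUSY;

            /* ... ioremap and drive the hardware ... */
            return 0;
    }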
@@ -11,9 +11,9 @@ Section 7, Legacy Devices.
 
 The Resource Access Control (RAC) module inside the X server [0] existed for
 the legacy VGA arbitration task (besides other bus management tasks) when more
-than one legacy device co-exists on the same machine. But the problem happens
+than one legacy device co-exist on the same machine. But the problem happens
 when these devices are trying to be accessed by different userspace clients
-(e.g. two server in parallel). Their address assignments conflict. Moreover,
+(e.g. two servers in parallel). Their address assignments conflict. Moreover,
 ideally, being a userspace application, it is not the role of the X server to
 control bus resources. Therefore an arbitration scheme outside of the X server
 is needed to control the sharing of these resources. This document introduces

@@ -106,7 +106,7 @@ In-kernel interface
 libpciaccess
 ------------
 
-To use the vga arbiter char device it was implemented an API inside the
+To use the vga arbiter char device, an API was implemented inside the
 libpciaccess library. One field was added to struct pci_device (each device
 on the system)::
MAINTAINERS:

@@ -2306,6 +2306,7 @@ F: drivers/watchdog/apple_wdt.c
 F: include/dt-bindings/interrupt-controller/apple-aic.h
 F: include/dt-bindings/pinctrl/apple.h
 F: include/linux/soc/apple/*
+F: include/uapi/drm/asahi_drm.h
 
 ARM/ARTPEC MACHINE SUPPORT
 M: Jesper Nilsson <jesper.nilsson@axis.com>

@@ -7370,8 +7371,7 @@ M: Javier Martinez Canillas <javierm@redhat.com>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
-F: drivers/gpu/drm/tiny/ofdrm.c
-F: drivers/gpu/drm/tiny/simpledrm.c
+F: drivers/gpu/drm/sysfb/
 F: drivers/video/aperture.c
 F: drivers/video/nomodeset.c
 F: include/linux/aperture.h

@@ -7853,8 +7853,8 @@ F: drivers/gpu/drm/ci/xfails/meson*
 F: drivers/gpu/drm/meson/
 
 DRM DRIVERS FOR ATMEL HLCDC
-M: Sam Ravnborg <sam@ravnborg.org>
-M: Boris Brezillon <bbrezillon@kernel.org>
+M: Manikandan Muralidharan <manikandan.m@microchip.com>
+M: Dharma Balasubiramani <dharma.b@microchip.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git

@@ -8182,7 +8182,8 @@ F: drivers/gpu/drm/ttm/
 F: include/drm/ttm/
 
 DRM AUTOMATED TESTING
-M: Helen Koike <helen.koike@collabora.com>
+M: Helen Koike <helen.fornazier@gmail.com>
+M: Vignesh Raman <vignesh.raman@collabora.com>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git

@@ -25586,6 +25587,7 @@ F: include/uapi/linux/virtio_gpio.h
 VIRTIO GPU DRIVER
 M: David Airlie <airlied@redhat.com>
 M: Gerd Hoffmann <kraxel@redhat.com>
+M: Dmitry Osipenko <dmitry.osipenko@collabora.com>
 R: Gurchetan Singh <gurchetansingh@chromium.org>
 R: Chia-I Wu <olvaffe@gmail.com>
 L: dri-devel@lists.freedesktop.org
@@ -1,3 +1,2 @@
-- Add import and export BO support
 - Add debugfs support
 - Add debug BO support
@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
|
|||
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
struct mm_struct *mm = abo->mem.notifier.mm;
|
||||
struct hmm_range range = { 0 };
|
||||
struct amdxdna_umap *mapp;
|
||||
unsigned long timeout;
|
||||
struct mm_struct *mm;
|
||||
bool found;
|
||||
int ret;
|
||||
|
||||
XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
|
||||
abo->mem.userptr, abo->mem.size);
|
||||
range.notifier = &abo->mem.notifier;
|
||||
range.start = abo->mem.userptr;
|
||||
range.end = abo->mem.userptr + abo->mem.size;
|
||||
range.hmm_pfns = abo->mem.pfns;
|
||||
range.default_flags = HMM_PFN_REQ_FAULT;
|
||||
|
||||
if (!mmget_not_zero(mm))
|
||||
return -EFAULT;
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
|
||||
again:
|
||||
range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
|
||||
found = false;
|
||||
down_write(&xdna->notifier_lock);
|
||||
list_for_each_entry(mapp, &abo->mem.umap_list, node) {
|
||||
if (mapp->invalid) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
abo->mem.map_invalid = false;
|
||||
up_write(&xdna->notifier_lock);
|
||||
return 0;
|
||||
}
|
||||
kref_get(&mapp->refcnt);
|
||||
up_write(&xdna->notifier_lock);
|
||||
|
||||
XDNA_DBG(xdna, "populate memory range %lx %lx",
|
||||
mapp->vma->vm_start, mapp->vma->vm_end);
|
||||
mm = mapp->notifier.mm;
|
||||
if (!mmget_not_zero(mm)) {
|
||||
amdxdna_umap_put(mapp);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
|
||||
mmap_read_lock(mm);
|
||||
ret = hmm_range_fault(&range);
|
||||
ret = hmm_range_fault(&mapp->range);
|
||||
mmap_read_unlock(mm);
|
||||
if (ret) {
|
||||
if (time_after(jiffies, timeout)) {
|
||||
|
|
@ -786,21 +801,27 @@ static int aie2_populate_range(struct amdxdna_gem_obj *abo)
|
|||
goto put_mm;
|
||||
}
|
||||
|
||||
if (ret == -EBUSY)
|
||||
if (ret == -EBUSY) {
|
||||
amdxdna_umap_put(mapp);
|
||||
goto again;
|
||||
}
|
||||
|
||||
goto put_mm;
|
||||
}
|
||||
|
||||
down_read(&xdna->notifier_lock);
|
||||
if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
|
||||
up_read(&xdna->notifier_lock);
|
||||
down_write(&xdna->notifier_lock);
|
||||
if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
|
||||
up_write(&xdna->notifier_lock);
|
||||
amdxdna_umap_put(mapp);
|
||||
goto again;
|
||||
}
|
||||
abo->mem.map_invalid = false;
|
||||
up_read(&xdna->notifier_lock);
|
||||
mapp->invalid = false;
|
||||
up_write(&xdna->notifier_lock);
|
||||
amdxdna_umap_put(mapp);
|
||||
goto again;
|
||||
|
||||
put_mm:
|
||||
amdxdna_umap_put(mapp);
|
||||
mmput(mm);
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
|
|||
struct drm_gem_object *gobj = to_gobj(abo);
|
||||
long ret;
|
||||
|
||||
down_write(&xdna->notifier_lock);
|
||||
abo->mem.map_invalid = true;
|
||||
mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
|
||||
up_write(&xdna->notifier_lock);
|
||||
ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
|
||||
true, MAX_SCHEDULE_TIMEOUT);
|
||||
if (!ret || ret == -ERESTARTSYS)
|
||||
|
|
|
|||
|
|
@ -9,7 +9,10 @@
|
|||
#include <drm/drm_gem.h>
|
||||
#include <drm/drm_gem_shmem_helper.h>
|
||||
#include <drm/gpu_scheduler.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/iosys-map.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include "amdxdna_ctx.h"
|
||||
|
|
@ -18,6 +21,8 @@
|
|||
|
||||
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
|
||||
|
||||
MODULE_IMPORT_NS("DMA_BUF");
|
||||
|
||||
static int
|
||||
amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
|
||||
{
|
||||
|
|
@ -55,6 +60,306 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
|
||||
const struct mmu_notifier_range *range,
|
||||
unsigned long cur_seq)
|
||||
{
|
||||
struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
|
||||
struct amdxdna_gem_obj *abo = mapp->abo;
|
||||
struct amdxdna_dev *xdna;
|
||||
|
||||
xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
|
||||
mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
|
||||
|
||||
if (!mmu_notifier_range_blockable(range))
|
||||
return false;
|
||||
|
||||
down_write(&xdna->notifier_lock);
|
||||
abo->mem.map_invalid = true;
|
||||
mapp->invalid = true;
|
||||
mmu_interval_set_seq(&mapp->notifier, cur_seq);
|
||||
up_write(&xdna->notifier_lock);
|
||||
|
||||
xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
|
||||
|
||||
if (range->event == MMU_NOTIFY_UNMAP) {
|
||||
down_write(&xdna->notifier_lock);
|
||||
if (!mapp->unmapped) {
|
||||
queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
|
||||
mapp->unmapped = true;
|
||||
}
|
||||
up_write(&xdna->notifier_lock);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
|
||||
.invalidate = amdxdna_hmm_invalidate,
|
||||
};
|
||||
|
||||
static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
struct amdxdna_umap *mapp;
|
||||
|
||||
down_read(&xdna->notifier_lock);
|
||||
list_for_each_entry(mapp, &abo->mem.umap_list, node) {
|
||||
if (!vma || mapp->vma == vma) {
|
||||
if (!mapp->unmapped) {
|
||||
queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
|
||||
mapp->unmapped = true;
|
||||
}
|
||||
if (vma)
|
||||
break;
|
||||
}
|
||||
}
|
||||
up_read(&xdna->notifier_lock);
|
||||
}
|
||||
|
||||
static void amdxdna_umap_release(struct kref *ref)
|
||||
{
|
||||
struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
|
||||
struct vm_area_struct *vma = mapp->vma;
|
||||
struct amdxdna_dev *xdna;
|
||||
|
||||
mmu_interval_notifier_remove(&mapp->notifier);
|
||||
if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
|
||||
mapping_clear_unevictable(vma->vm_file->f_mapping);
|
||||
|
||||
xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
|
||||
down_write(&xdna->notifier_lock);
|
||||
list_del(&mapp->node);
|
||||
up_write(&xdna->notifier_lock);
|
||||
|
||||
kvfree(mapp->range.hmm_pfns);
|
||||
kfree(mapp);
|
||||
}
|
||||
|
||||
void amdxdna_umap_put(struct amdxdna_umap *mapp)
|
||||
{
|
||||
kref_put(&mapp->refcnt, amdxdna_umap_release);
|
||||
}
|
||||
|
||||
static void amdxdna_hmm_unreg_work(struct work_struct *work)
|
||||
{
|
||||
struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
|
||||
hmm_unreg_work);
|
||||
|
||||
amdxdna_umap_put(mapp);
|
||||
}
|
||||
|
||||
static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
unsigned long len = vma->vm_end - vma->vm_start;
|
||||
unsigned long addr = vma->vm_start;
|
||||
struct amdxdna_umap *mapp;
|
||||
u32 nr_pages;
|
||||
int ret;
|
||||
|
||||
if (!xdna->dev_info->ops->hmm_invalidate)
|
||||
return 0;
|
||||
|
||||
mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
|
||||
if (!mapp)
|
||||
return -ENOMEM;
|
||||
|
||||
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
|
||||
mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
|
||||
GFP_KERNEL);
|
||||
if (!mapp->range.hmm_pfns) {
|
||||
ret = -ENOMEM;
|
||||
goto free_map;
|
||||
}
|
||||
|
||||
ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
|
||||
current->mm,
|
||||
addr,
|
||||
len,
|
||||
&amdxdna_hmm_ops);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
|
||||
goto free_pfns;
|
||||
}
|
||||
|
||||
mapp->range.notifier = &mapp->notifier;
|
||||
mapp->range.start = vma->vm_start;
|
||||
mapp->range.end = vma->vm_end;
|
||||
mapp->range.default_flags = HMM_PFN_REQ_FAULT;
|
||||
mapp->vma = vma;
|
||||
mapp->abo = abo;
|
||||
kref_init(&mapp->refcnt);
|
||||
|
||||
if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
|
||||
abo->mem.userptr = addr;
|
||||
INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
|
||||
if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
|
||||
mapping_set_unevictable(vma->vm_file->f_mapping);
|
||||
|
||||
down_write(&xdna->notifier_lock);
|
||||
list_add_tail(&mapp->node, &abo->mem.umap_list);
|
||||
up_write(&xdna->notifier_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
free_pfns:
|
||||
kvfree(mapp->range.hmm_pfns);
|
||||
free_map:
|
||||
kfree(mapp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
unsigned long num_pages = vma_pages(vma);
|
||||
unsigned long offset = 0;
|
||||
int ret;
|
||||
|
||||
if (!is_import_bo(abo)) {
|
||||
ret = drm_gem_shmem_mmap(&abo->base, vma);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* The buffer is based on memory pages. Fix the flag. */
|
||||
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
|
||||
ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
|
||||
&num_pages);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Failed insert pages %d", ret);
|
||||
vma->vm_ops->close(vma);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
vma->vm_private_data = NULL;
|
||||
vma->vm_ops = NULL;
|
||||
ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
do {
|
||||
vm_fault_t fault_ret;
|
||||
|
||||
fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
|
||||
FAULT_FLAG_WRITE, NULL);
|
||||
if (fault_ret & VM_FAULT_ERROR) {
|
||||
vma->vm_ops->close(vma);
|
||||
XDNA_ERR(xdna, "Fault in page failed");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
offset += PAGE_SIZE;
|
||||
} while (--num_pages);
|
||||
|
||||
/* Drop the reference drm_gem_mmap_obj() acquired.*/
|
||||
drm_gem_object_put(to_gobj(abo));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
|
||||
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
|
||||
int ret;
|
||||
|
||||
ret = amdxdna_hmm_register(abo, vma);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = amdxdna_insert_pages(abo, vma);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
|
||||
goto hmm_unreg;
|
||||
}
|
||||
|
||||
XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
|
||||
drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
|
||||
vma->vm_start, gobj->size);
|
||||
return 0;
|
||||
|
||||
hmm_unreg:
|
||||
amdxdna_hmm_unregister(abo, vma);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_gem_object *gobj = dma_buf->priv;
|
||||
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
|
||||
unsigned long num_pages = vma_pages(vma);
|
||||
int ret;
|
||||
|
||||
vma->vm_ops = &drm_gem_shmem_vm_ops;
|
||||
vma->vm_private_data = gobj;
|
||||
|
||||
drm_gem_object_get(gobj);
|
||||
ret = drm_gem_shmem_mmap(&abo->base, vma);
|
||||
if (ret)
|
||||
goto put_obj;
|
||||
|
||||
/* The buffer is based on memory pages. Fix the flag. */
|
||||
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
|
||||
ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
|
||||
&num_pages);
|
||||
if (ret)
|
||||
goto close_vma;
|
||||
|
||||
return 0;
|
||||
|
||||
close_vma:
|
||||
vma->vm_ops->close(vma);
|
||||
put_obj:
|
||||
drm_gem_object_put(gobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct dma_buf_ops amdxdna_dmabuf_ops = {
|
||||
.attach = drm_gem_map_attach,
|
||||
.detach = drm_gem_map_detach,
|
||||
.map_dma_buf = drm_gem_map_dma_buf,
|
||||
.unmap_dma_buf = drm_gem_unmap_dma_buf,
|
||||
.release = drm_gem_dmabuf_release,
|
||||
.mmap = amdxdna_gem_dmabuf_mmap,
|
||||
.vmap = drm_gem_dmabuf_vmap,
|
||||
.vunmap = drm_gem_dmabuf_vunmap,
|
||||
};
|
||||
|
||||
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
|
||||
{
|
||||
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
|
||||
|
||||
exp_info.ops = &amdxdna_dmabuf_ops;
|
||||
exp_info.size = gobj->size;
|
||||
exp_info.flags = flags;
|
||||
exp_info.priv = gobj;
|
||||
exp_info.resv = gobj->resv;
|
||||
|
||||
return drm_gem_dmabuf_export(gobj->dev, &exp_info);
|
||||
}
|
||||
|
||||
static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
|
||||
{
|
||||
dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
|
||||
dma_buf_detach(abo->dma_buf, abo->attach);
|
||||
dma_buf_put(abo->dma_buf);
|
||||
drm_gem_object_release(to_gobj(abo));
|
||||
kfree(abo);
|
||||
}
|
||||
|
||||
static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
|
||||
|
|
@ -62,6 +367,10 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
|
|||
struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
|
||||
|
||||
XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
|
||||
|
||||
amdxdna_hmm_unregister(abo, NULL);
|
||||
flush_workqueue(xdna->notifier_wq);
|
||||
|
||||
if (abo->pinned)
|
||||
amdxdna_gem_unpin(abo);
|
||||
|
||||
|
|
@ -81,8 +390,14 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
|
|||
if (abo->type == AMDXDNA_BO_DEV_HEAP)
|
||||
drm_mm_takedown(&abo->mm);
|
||||
|
||||
drm_gem_vunmap_unlocked(gobj, &map);
|
||||
drm_gem_vunmap(gobj, &map);
|
||||
mutex_destroy(&abo->lock);
|
||||
|
||||
if (is_import_bo(abo)) {
|
||||
amdxdna_imported_obj_free(abo);
|
||||
return;
|
||||
}
|
||||
|
||||
drm_gem_shmem_free(&abo->base);
|
||||
}
|
||||
|
||||
|
|
@ -90,127 +405,6 @@ static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
|
|||
.free = amdxdna_gem_obj_free,
|
||||
};
|
||||
|
||||
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
|
||||
const struct mmu_notifier_range *range,
|
||||
unsigned long cur_seq)
|
||||
{
|
||||
struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
|
||||
mem.notifier);
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
|
||||
XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
|
||||
abo->mem.userptr, abo->mem.size, abo->type);
|
||||
|
||||
if (!mmu_notifier_range_blockable(range))
|
||||
return false;
|
||||
|
||||
xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
|
||||
.invalidate = amdxdna_hmm_invalidate,
|
||||
};
|
||||
|
||||
static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
|
||||
if (!xdna->dev_info->ops->hmm_invalidate)
|
||||
return;
|
||||
|
||||
mmu_interval_notifier_remove(&abo->mem.notifier);
|
||||
kvfree(abo->mem.pfns);
|
||||
abo->mem.pfns = NULL;
|
||||
}
|
||||
|
||||
static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
|
||||
size_t len)
|
||||
{
|
||||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
u32 nr_pages;
|
||||
int ret;
|
||||
|
||||
if (!xdna->dev_info->ops->hmm_invalidate)
|
||||
return 0;
|
||||
|
||||
if (abo->mem.pfns)
|
||||
return -EEXIST;
|
||||
|
||||
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
|
||||
abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
|
||||
GFP_KERNEL);
|
||||
if (!abo->mem.pfns)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
|
||||
current->mm,
|
||||
addr,
|
||||
len,
|
||||
&amdxdna_hmm_ops);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
|
||||
kvfree(abo->mem.pfns);
|
||||
}
|
||||
abo->mem.userptr = addr;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
|
||||
unsigned long num_pages;
|
||||
int ret;
|
||||
|
||||
ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_gem_shmem_mmap(&abo->base, vma);
|
||||
if (ret)
|
||||
goto hmm_unreg;
|
||||
|
||||
num_pages = gobj->size >> PAGE_SHIFT;
|
||||
/* Try to insert the pages */
|
||||
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
|
||||
ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
|
||||
if (ret)
|
||||
XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
|
||||
|
||||
return 0;
|
||||
|
||||
hmm_unreg:
|
||||
amdxdna_hmm_unregister(abo);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
|
||||
{
|
||||
return drm_gem_shmem_vm_ops.fault(vmf);
|
||||
}
|
||||
|
||||
static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
|
||||
{
|
||||
drm_gem_shmem_vm_ops.open(vma);
|
||||
}
|
||||
|
||||
static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_gem_object *gobj = vma->vm_private_data;
|
||||
|
||||
amdxdna_hmm_unregister(to_xdna_obj(gobj));
|
||||
drm_gem_shmem_vm_ops.close(vma);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct amdxdna_gem_vm_ops = {
|
||||
.fault = amdxdna_gem_vm_fault,
|
||||
.open = amdxdna_gem_vm_open,
|
||||
.close = amdxdna_gem_vm_close,
|
||||
};
|
||||
|
||||
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
|
||||
.free = amdxdna_gem_obj_free,
|
||||
.print_info = drm_gem_shmem_object_print_info,
|
||||
|
|
@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
|
|||
.vmap = drm_gem_shmem_object_vmap,
|
||||
.vunmap = drm_gem_shmem_object_vunmap,
|
||||
.mmap = amdxdna_gem_obj_mmap,
|
||||
.vm_ops = &amdxdna_gem_vm_ops,
|
||||
.vm_ops = &drm_gem_shmem_vm_ops,
|
||||
.export = amdxdna_gem_prime_export,
|
||||
};
|
||||
|
||||
static struct amdxdna_gem_obj *
|
||||
|
|
@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
|
|||
abo->mem.userptr = AMDXDNA_INVALID_ADDR;
|
||||
abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
|
||||
abo->mem.size = size;
|
||||
INIT_LIST_HEAD(&abo->mem.umap_list);
|
||||
|
||||
return abo;
|
||||
}
|
||||
|
|
@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
|
|||
return to_gobj(abo);
|
||||
}
|
||||
|
||||
struct drm_gem_object *
|
||||
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
|
||||
{
|
||||
struct dma_buf_attachment *attach;
|
||||
struct amdxdna_gem_obj *abo;
|
||||
struct drm_gem_object *gobj;
|
||||
struct sg_table *sgt;
|
||||
int ret;
|
||||
|
||||
get_dma_buf(dma_buf);
|
||||
|
||||
attach = dma_buf_attach(dma_buf, dev->dev);
|
||||
if (IS_ERR(attach)) {
|
||||
ret = PTR_ERR(attach);
|
||||
goto put_buf;
|
||||
}
|
||||
|
||||
sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
|
||||
if (IS_ERR(sgt)) {
|
||||
ret = PTR_ERR(sgt);
|
||||
goto fail_detach;
|
||||
}
|
||||
|
||||
gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
|
||||
if (IS_ERR(gobj)) {
|
||||
ret = PTR_ERR(gobj);
|
||||
goto fail_unmap;
|
||||
}
|
||||
|
||||
abo = to_xdna_obj(gobj);
|
||||
abo->attach = attach;
|
||||
abo->dma_buf = dma_buf;
|
||||
|
||||
return gobj;
|
||||
|
||||
fail_unmap:
|
||||
dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
|
||||
fail_detach:
|
||||
dma_buf_detach(dma_buf, attach);
|
||||
put_buf:
|
||||
dma_buf_put(dma_buf);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static struct amdxdna_gem_obj *
|
||||
amdxdna_drm_alloc_shmem(struct drm_device *dev,
|
||||
struct amdxdna_drm_create_bo *args,
|
||||
|
|
@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
|
|||
abo->type = AMDXDNA_BO_CMD;
|
||||
abo->client = filp->driver_priv;
|
||||
|
||||
ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
|
||||
ret = drm_gem_vmap(to_gobj(abo), &map);
|
||||
if (ret) {
|
||||
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
|
||||
goto release_obj;
|
||||
|
|
@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
|
|||
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
|
||||
int ret;
|
||||
|
||||
if (is_import_bo(abo))
|
||||
return 0;
|
||||
|
||||
switch (abo->type) {
|
||||
case AMDXDNA_BO_SHMEM:
|
||||
case AMDXDNA_BO_DEV_HEAP:
|
||||
|
|
@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
|
|||
|
||||
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
|
||||
{
|
||||
if (is_import_bo(abo))
|
||||
return;
|
||||
|
||||
if (abo->type == AMDXDNA_BO_DEV)
|
||||
abo = abo->dev_heap;
|
||||
|
||||
|
|
@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
|
|||
goto put_obj;
|
||||
}
|
||||
|
||||
if (abo->type == AMDXDNA_BO_DEV)
|
||||
if (is_import_bo(abo))
|
||||
drm_clflush_sg(abo->base.sgt);
|
||||
else if (abo->type == AMDXDNA_BO_DEV)
|
||||
drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
|
||||
else
|
||||
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
|
||||
|
|
|
|||
|
|
@@ -6,6 +6,20 @@
 #ifndef _AMDXDNA_GEM_H_
 #define _AMDXDNA_GEM_H_
 
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+        struct vm_area_struct *vma;
+        struct mmu_interval_notifier notifier;
+        struct hmm_range range;
+        struct work_struct hmm_unreg_work;
+        struct amdxdna_gem_obj *abo;
+        struct list_head node;
+        struct kref refcnt;
+        bool invalid;
+        bool unmapped;
+};
+
 struct amdxdna_mem {
         u64 userptr;
         void *kva;

@@ -13,8 +27,7 @@ struct amdxdna_mem {
         size_t size;
         struct page **pages;
         u32 nr_pages;
-        struct mmu_interval_notifier notifier;
-        unsigned long *pfns;
+        struct list_head umap_list;
         bool map_invalid;
 };
 

@@ -31,9 +44,12 @@
         struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
         struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
         u32 assigned_hwctx;
+        struct dma_buf *dma_buf;
+        struct dma_buf_attachment *attach;
 };
 
 #define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
 
 static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
 {

@@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
         drm_gem_object_put(to_gobj(abo));
 }
 
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
 struct drm_gem_object *
 amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
 struct amdxdna_gem_obj *
 amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
                          struct amdxdna_drm_create_bo *args,
@@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = {
         .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
 
         .gem_create_object = amdxdna_gem_create_object_cb,
+        .gem_prime_import = amdxdna_gem_prime_import,
 };
 
 static const struct amdxdna_dev_info *

@@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 fs_reclaim_release(GFP_KERNEL);
         }
 
+        xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+        if (!xdna->notifier_wq)
+                return -ENOMEM;
+
         mutex_lock(&xdna->dev_lock);
         ret = xdna->dev_info->ops->init(xdna);
         mutex_unlock(&xdna->dev_lock);
         if (ret) {
                 XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
-                return ret;
+                goto destroy_notifier_wq;
         }
 
         ret = amdxdna_sysfs_init(xdna);

@@ -301,6 +306,8 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         mutex_lock(&xdna->dev_lock);
         xdna->dev_info->ops->fini(xdna);
         mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+        destroy_workqueue(xdna->notifier_wq);
         return ret;
 }
 

@@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
         struct device *dev = &pdev->dev;
         struct amdxdna_client *client;
 
+        destroy_workqueue(xdna->notifier_wq);
+
         pm_runtime_get_noresume(dev);
         pm_runtime_forbid(dev);
@@ -6,6 +6,7 @@
 #ifndef _AMDXDNA_PCI_DRV_H_
 #define _AMDXDNA_PCI_DRV_H_
 
+#include <linux/workqueue.h>
 #include <linux/xarray.h>
 
 #define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)

@@ -98,6 +99,7 @@ struct amdxdna_dev {
         struct list_head client_list;
         struct amdxdna_fw_ver fw_ver;
         struct rw_semaphore notifier_lock; /* for mmu notifier*/
+        struct workqueue_struct *notifier_wq;
 };
 
 /*
@@ -282,7 +282,7 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
         ivpu_bo_unbind_locked(bo);
         mutex_destroy(&bo->lock);
 
-        drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+        drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
         drm_gem_shmem_free(&bo->base);
 }
 

@@ -362,7 +362,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
         if (flags & DRM_IVPU_BO_MAPPABLE) {
                 dma_resv_lock(bo->base.base.resv, NULL);
-                ret = drm_gem_shmem_vmap(&bo->base, &map);
+                ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
                 dma_resv_unlock(bo->base.base.resv);
 
                 if (ret)

@@ -387,7 +387,7 @@ void ivpu_bo_free(struct ivpu_bo *bo)
 
         if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
                 dma_resv_lock(bo->base.base.resv, NULL);
-                drm_gem_shmem_vunmap(&bo->base, &map);
+                drm_gem_shmem_vunmap_locked(&bo->base, &map);
                 dma_resv_unlock(bo->base.base.resv);
         }
 
@@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
 mhi_unprepare:
         mhi_unprepare_from_transfer(mhi_dev);
 destroy_workqueue:
-        flush_workqueue(qdev->bootlog_wq);
         destroy_workqueue(qdev->bootlog_wq);
 out:
         return ret;

@@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
         qdev = dev_get_drvdata(&mhi_dev->dev);
 
         mhi_unprepare_from_transfer(qdev->bootlog_ch);
-        flush_workqueue(qdev->bootlog_wq);
         destroy_workqueue(qdev->bootlog_wq);
         qdev->bootlog_ch = NULL;
 }
|||
|
|
@ -636,10 +636,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|
|||
|| !exp_info->ops->release))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
|
||||
(exp_info->ops->pin || exp_info->ops->unpin)))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
|
|
@ -782,7 +778,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
|
|||
|
||||
/* To catch abuse of the underlying struct page by importers mix
|
||||
* up the bits, but take care to preserve the low SG_ bits to
|
||||
* not corrupt the sgt. The mixing is undone in __unmap_dma_buf
|
||||
* not corrupt the sgt. The mixing is undone on unmap
|
||||
* before passing the sgt back to the exporter.
|
||||
*/
|
||||
for_each_sgtable_sg(sg_table, sg, i)
|
||||
|
|
@ -790,29 +786,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
|
|||
#endif
|
||||
|
||||
}
|
||||
static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
|
||||
enum dma_data_direction direction)
|
||||
|
||||
static inline bool
|
||||
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
|
||||
{
|
||||
struct sg_table *sg_table;
|
||||
signed long ret;
|
||||
return !!attach->importer_ops;
|
||||
}
|
||||
|
||||
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
|
||||
if (IS_ERR_OR_NULL(sg_table))
|
||||
return sg_table;
|
||||
|
||||
if (!dma_buf_attachment_is_dynamic(attach)) {
|
||||
ret = dma_resv_wait_timeout(attach->dmabuf->resv,
|
||||
DMA_RESV_USAGE_KERNEL, true,
|
||||
MAX_SCHEDULE_TIMEOUT);
|
||||
if (ret < 0) {
|
||||
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
|
||||
direction);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
}
|
||||
|
||||
mangle_sg_table(sg_table);
|
||||
return sg_table;
|
||||
static bool
|
||||
dma_buf_pin_on_map(struct dma_buf_attachment *attach)
|
||||
{
|
||||
return attach->dmabuf->ops->pin &&
|
||||
(!dma_buf_attachment_is_dynamic(attach) ||
|
||||
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -935,48 +921,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
|
|||
list_add(&attach->node, &dmabuf->attachments);
|
||||
dma_resv_unlock(dmabuf->resv);
|
||||
|
||||
/* When either the importer or the exporter can't handle dynamic
|
||||
* mappings we cache the mapping here to avoid issues with the
|
||||
* reservation object lock.
|
||||
*/
|
||||
if (dma_buf_attachment_is_dynamic(attach) !=
|
||||
dma_buf_is_dynamic(dmabuf)) {
|
||||
struct sg_table *sgt;
|
||||
|
||||
dma_resv_lock(attach->dmabuf->resv, NULL);
|
||||
if (dma_buf_is_dynamic(attach->dmabuf)) {
|
||||
ret = dmabuf->ops->pin(attach);
|
||||
if (ret)
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
|
||||
if (!sgt)
|
||||
sgt = ERR_PTR(-ENOMEM);
|
||||
if (IS_ERR(sgt)) {
|
||||
ret = PTR_ERR(sgt);
|
||||
goto err_unpin;
|
||||
}
|
||||
dma_resv_unlock(attach->dmabuf->resv);
|
||||
attach->sgt = sgt;
|
||||
attach->dir = DMA_BIDIRECTIONAL;
|
||||
}
|
||||
|
||||
return attach;
|
||||
|
||||
err_attach:
|
||||
kfree(attach);
|
||||
return ERR_PTR(ret);
|
||||
|
||||
err_unpin:
|
||||
if (dma_buf_is_dynamic(attach->dmabuf))
|
||||
dmabuf->ops->unpin(attach);
|
||||
|
||||
err_unlock:
|
||||
dma_resv_unlock(attach->dmabuf->resv);
|
||||
|
||||
dma_buf_detach(dmabuf, attach);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
|
||||
|
||||
|
|
@ -995,16 +944,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
|
|||
}
|
||||
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
|
||||
|
||||
static void __unmap_dma_buf(struct dma_buf_attachment *attach,
|
||||
struct sg_table *sg_table,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
/* uses XOR, hence this unmangles */
|
||||
mangle_sg_table(sg_table);
|
||||
|
||||
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
|
||||
* @dmabuf: [in] buffer to detach from.
|
||||
|
|
@ -1020,16 +959,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
|
|||
return;
|
||||
|
||||
dma_resv_lock(dmabuf->resv, NULL);
|
||||
|
||||
if (attach->sgt) {
|
||||
|
||||
__unmap_dma_buf(attach, attach->sgt, attach->dir);
|
||||
|
||||
if (dma_buf_is_dynamic(attach->dmabuf))
|
||||
dmabuf->ops->unpin(attach);
|
||||
}
|
||||
list_del(&attach->node);
|
||||
|
||||
dma_resv_unlock(dmabuf->resv);
|
||||
|
||||
if (dmabuf->ops->detach)
|
||||
|
|
@ -1058,7 +988,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
|
|||
struct dma_buf *dmabuf = attach->dmabuf;
|
||||
int ret = 0;
|
||||
|
||||
WARN_ON(!dma_buf_attachment_is_dynamic(attach));
|
||||
WARN_ON(!attach->importer_ops);
|
||||
|
||||
dma_resv_assert_held(dmabuf->resv);
|
||||
|
||||
|
|
@ -1081,7 +1011,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
|
|||
{
|
||||
struct dma_buf *dmabuf = attach->dmabuf;
|
||||
|
||||
WARN_ON(!dma_buf_attachment_is_dynamic(attach));
|
||||
WARN_ON(!attach->importer_ops);
|
||||
|
||||
dma_resv_assert_held(dmabuf->resv);
|
||||
|
||||
|
|
@ -1115,7 +1045,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
|
|||
enum dma_data_direction direction)
|
||||
{
|
||||
struct sg_table *sg_table;
|
||||
int r;
|
||||
signed long ret;
|
||||
|
||||
might_sleep();
|
||||
|
||||
|
|
@ -1124,41 +1054,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
|
|||
|
||||
dma_resv_assert_held(attach->dmabuf->resv);
|
||||
|
||||
if (attach->sgt) {
|
||||
if (dma_buf_pin_on_map(attach)) {
|
||||
ret = attach->dmabuf->ops->pin(attach);
|
||||
/*
|
||||
* Two mappings with different directions for the same
|
||||
* attachment are not allowed.
|
||||
* Catch exporters making buffers inaccessible even when
|
||||
* attachments preventing that exist.
|
||||
*/
|
||||
if (attach->dir != direction &&
|
||||
attach->dir != DMA_BIDIRECTIONAL)
|
||||
return ERR_PTR(-EBUSY);
|
||||
|
||||
return attach->sgt;
|
||||
WARN_ON_ONCE(ret == EBUSY);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
if (dma_buf_is_dynamic(attach->dmabuf)) {
|
||||
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
|
||||
r = attach->dmabuf->ops->pin(attach);
|
||||
if (r)
|
||||
return ERR_PTR(r);
|
||||
}
|
||||
}
|
||||
|
||||
sg_table = __map_dma_buf(attach, direction);
|
||||
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
|
||||
if (!sg_table)
|
||||
sg_table = ERR_PTR(-ENOMEM);
|
||||
if (IS_ERR(sg_table))
|
||||
goto error_unpin;
|
||||
|
||||
if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
|
||||
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
|
||||
attach->dmabuf->ops->unpin(attach);
|
||||
|
||||
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
|
||||
attach->sgt = sg_table;
|
||||
attach->dir = direction;
|
||||
/*
|
||||
* Importers with static attachments don't wait for fences.
|
||||
*/
|
||||
if (!dma_buf_attachment_is_dynamic(attach)) {
|
||||
ret = dma_resv_wait_timeout(attach->dmabuf->resv,
|
||||
DMA_RESV_USAGE_KERNEL, true,
|
||||
MAX_SCHEDULE_TIMEOUT);
|
||||
if (ret < 0)
|
||||
goto error_unmap;
|
||||
}
|
||||
mangle_sg_table(sg_table);
|
||||
|
||||
#ifdef CONFIG_DMA_API_DEBUG
|
||||
if (!IS_ERR(sg_table)) {
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
u64 addr;
|
||||
int len;
|
||||
|
|
@ -1175,6 +1101,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
|
|||
}
|
||||
#endif /* CONFIG_DMA_API_DEBUG */
|
||||
return sg_table;
|
||||
|
||||
error_unmap:
|
||||
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
|
||||
sg_table = ERR_PTR(ret);
|
||||
|
||||
error_unpin:
|
||||
if (dma_buf_pin_on_map(attach))
|
||||
attach->dmabuf->ops->unpin(attach);
|
||||
|
||||
return sg_table;
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
|
||||
|
||||
|
|
@ -1227,14 +1163,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
|
|||
|
||||
dma_resv_assert_held(attach->dmabuf->resv);
|
||||
|
||||
if (attach->sgt == sg_table)
|
||||
return;
|
||||
mangle_sg_table(sg_table);
|
||||
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
|
||||
|
||||
__unmap_dma_buf(attach, sg_table, direction);
|
||||
|
||||
if (dma_buf_is_dynamic(attach->dmabuf) &&
|
||||
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
|
||||
dma_buf_unpin(attach);
|
||||
if (dma_buf_pin_on_map(attach))
|
||||
attach->dmabuf->ops->unpin(attach);
|
||||
}
|
||||
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
|
||||
|
||||
|
|
|
|||
|
|
@@ -21,8 +21,6 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
-static struct dma_heap *sys_heap;
-
 struct system_heap_buffer {
         struct dma_heap *heap;
         struct list_head attachments;

@@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = {
 static int __init system_heap_create(void)
 {
         struct dma_heap_export_info exp_info;
+        struct dma_heap *sys_heap;
 
         exp_info.name = "system";
         exp_info.ops = &system_heap_ops;
@@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
         return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
 }
 
-static void timeline_fence_value_str(struct dma_fence *fence,
-                                     char *str, int size)
-{
-        snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
-                                              char *str, int size)
-{
-        struct sync_timeline *parent = dma_fence_parent(fence);
-
-        snprintf(str, size, "%d", parent->value);
-}
-
 static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
 {
         struct sync_pt *pt = dma_fence_to_sync_pt(fence);

@@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
         .get_timeline_name = timeline_fence_get_timeline_name,
         .signaled = timeline_fence_signaled,
         .release = timeline_fence_release,
-        .fence_value_str = timeline_fence_value_str,
-        .timeline_value_str = timeline_fence_timeline_value_str,
         .set_deadline = timeline_fence_set_deadline,
 };
 
@@ -82,25 +82,8 @@ static void sync_print_fence(struct seq_file *s,
                 seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
         }
 
-        if (fence->ops->timeline_value_str &&
-            fence->ops->fence_value_str) {
-                char value[64];
-                bool success;
-
-                fence->ops->fence_value_str(fence, value, sizeof(value));
-                success = strlen(value);
-
-                if (success) {
-                        seq_printf(s, ": %s", value);
-
-                        fence->ops->timeline_value_str(fence, value,
-                                                       sizeof(value));
-
-                        if (strlen(value))
-                                seq_printf(s, " / %s", value);
-                }
-        }
-
+        seq_printf(s, ": %lld", fence->seqno);
+        seq_printf(s, " / %d", parent->value);
         seq_putc(s, '\n');
 }
 
@@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
 }
 
 static const struct dma_buf_ops udmabuf_ops = {
-        .cache_sgt_mapping = true,
         .map_dma_buf = map_udmabuf,
         .unmap_dma_buf = unmap_udmabuf,
         .release = release_udmabuf,
@@ -558,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 
 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
         __weak __alias(__efi_mem_desc_lookup);
+EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
 
 /*
  * Calculate the highest address of an efi memory descriptor.
@@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
         if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
                 return false;
 
-        /*
-         * The meaning of depth and bpp for direct-color formats is
-         * inconsistent:
-         *
-         * - DRM format info specifies depth as the number of color
-         *   bits; including alpha, but not including filler bits.
-         * - Linux' EFI platform code computes lfb_depth from the
-         *   individual color channels, including the reserved bits.
-         * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
-         *   versions use 15.
-         * - On the kernel command line, 'bpp' of 32 is usually
-         *   XRGB8888 including the filler bits, but 15 is XRGB1555
-         *   not including the filler bit.
-         *
-         * It's not easily possible to fix this in struct screen_info,
-         * as this could break UAPI. The best solution is to compute
-         * bits_per_pixel from the color bits, reserved bits and
-         * reported lfb_depth, whichever is highest. In the loop below,
-         * ignore simplefb formats with alpha bits, as EFI and VESA
-         * don't specify alpha channels.
-         */
-        if (si->lfb_depth > 8) {
-                bits_per_pixel = max(max3(si->red_size + si->red_pos,
-                                          si->green_size + si->green_pos,
-                                          si->blue_size + si->blue_pos),
-                                     si->rsvd_size + si->rsvd_pos);
-                bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
-        } else {
-                bits_per_pixel = si->lfb_depth;
-        }
+        bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
 
         for (i = 0; i < ARRAY_SIZE(formats); ++i) {
                 const struct simplefb_format *f = &formats[i];
|
|||
|
|
@ -26,6 +26,11 @@ menuconfig DRM
|
|||
details. You should also select and configure AGP
|
||||
(/dev/agpgart) support if it is available for your platform.
|
||||
|
||||
menu "DRM debugging options"
|
||||
depends on DRM
|
||||
source "drivers/gpu/drm/Kconfig.debug"
|
||||
endmenu
|
||||
|
||||
if DRM
|
||||
|
||||
config DRM_MIPI_DBI
|
||||
|
|
@ -37,65 +42,6 @@ config DRM_MIPI_DSI
|
|||
bool
|
||||
depends on DRM
|
||||
|
||||
config DRM_DEBUG_MM
|
||||
bool "Insert extra checks and debug info into the DRM range managers"
|
||||
default n
|
||||
depends on DRM
|
||||
depends on STACKTRACE_SUPPORT
|
||||
select STACKDEPOT
|
||||
help
|
||||
Enable allocation tracking of memory manager and leak detection on
|
||||
shutdown.
|
||||
|
||||
Recommended for driver developers only.
|
||||
|
||||
If in doubt, say "N".
|
||||
|
||||
config DRM_USE_DYNAMIC_DEBUG
|
||||
bool "use dynamic debug to implement drm.debug"
|
||||
default n
|
||||
depends on BROKEN
|
||||
depends on DRM
|
||||
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
|
||||
depends on JUMP_LABEL
|
||||
help
|
||||
Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
|
||||
Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
|
||||
bytes per callsite, the .data costs can be substantial, and
|
||||
are therefore configurable.
|
||||
|
||||
config DRM_KUNIT_TEST_HELPERS
|
||||
tristate
|
||||
depends on DRM && KUNIT
|
||||
select DRM_KMS_HELPER
|
||||
help
|
||||
KUnit Helpers for KMS drivers.
|
||||
|
||||
config DRM_KUNIT_TEST
|
||||
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
|
||||
depends on DRM && KUNIT && MMU
|
||||
select DRM_BUDDY
|
||||
select DRM_DISPLAY_DP_HELPER
|
||||
select DRM_DISPLAY_HDMI_STATE_HELPER
|
||||
select DRM_DISPLAY_HELPER
|
||||
select DRM_EXEC
|
||||
select DRM_EXPORT_FOR_TESTS if m
|
||||
select DRM_GEM_SHMEM_HELPER
|
||||
select DRM_KUNIT_TEST_HELPERS
|
||||
select DRM_LIB_RANDOM
|
||||
select PRIME_NUMBERS
|
||||
default KUNIT_ALL_TESTS
|
||||
help
|
||||
This builds unit tests for DRM. This option is not useful for
|
||||
distributions or general kernels, but only for kernel
|
||||
developers working on DRM and associated drivers.
|
||||
|
||||
For more information on KUnit and unit tests in general,
|
||||
please refer to the KUnit documentation in
|
||||
Documentation/dev-tools/kunit/.
|
||||
|
||||
If in doubt, say "N".
|
||||
|
||||
config DRM_KMS_HELPER
|
||||
tristate
|
||||
depends on DRM
|
||||
|
|
@@ -247,23 +193,6 @@ config DRM_TTM
	  GPU memory types. Will be enabled automatically if a device driver
	  uses it.

config DRM_TTM_KUNIT_TEST
	tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
	default n
	depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
	select DRM_TTM
	select DRM_BUDDY
	select DRM_EXPORT_FOR_TESTS if m
	select DRM_KUNIT_TEST_HELPERS
	default KUNIT_ALL_TESTS
	help
	  Enables unit tests for TTM, a GPU memory manager subsystem used
	  to manage memory buffers. This option is mostly useful for kernel
	  developers. It depends on (UML || COMPILE_TEST) since no other driver
	  which uses TTM can be loaded while running the tests.

	  If in doubt, say "N".

config DRM_EXEC
	tristate
	depends on DRM
@@ -335,6 +264,8 @@ config DRM_SCHED
	tristate
	depends on DRM

source "drivers/gpu/drm/sysfb/Kconfig"

source "drivers/gpu/drm/arm/Kconfig"

source "drivers/gpu/drm/radeon/Kconfig"
@@ -474,9 +405,6 @@ config DRM_HYPERV

	  If M is selected the module will be called hyperv_drm.

config DRM_EXPORT_FOR_TESTS
	bool

# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
	tristate
@@ -489,31 +417,6 @@ config DRM_PRIVACY_SCREEN
	bool
	default n

config DRM_WERROR
	bool "Compile the drm subsystem with warnings as errors"
	depends on DRM && EXPERT
	depends on !WERROR
	default n
	help
	  A kernel build should not cause any compiler warnings, and this
	  enables the '-Werror' flag to enforce that rule in the drm subsystem.

	  The drm subsystem enables more warnings than the kernel default, so
	  this config option is disabled by default.

	  If in doubt, say N.

config DRM_HEADER_TEST
	bool "Ensure DRM headers are self-contained and pass kernel-doc"
	depends on DRM && EXPERT && BROKEN
	default n
	help
	  Ensure the DRM subsystem headers both under drivers/gpu/drm and
	  include/drm compile, are self-contained, have header guards, and have
	  no kernel-doc warnings.

	  If in doubt, say N.

endif

# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
@@ -0,0 +1,116 @@
config DRM_USE_DYNAMIC_DEBUG
	bool "use dynamic debug to implement drm.debug"
	default n
	depends on BROKEN
	depends on DRM
	depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
	depends on JUMP_LABEL
	help
	  Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
	  Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
	  bytes per callsite, the .data costs can be substantial, and
	  are therefore configurable.

config DRM_WERROR
	bool "Compile the drm subsystem with warnings as errors"
	depends on DRM && EXPERT
	depends on !WERROR
	default n
	help
	  A kernel build should not cause any compiler warnings, and this
	  enables the '-Werror' flag to enforce that rule in the drm subsystem.

	  The drm subsystem enables more warnings than the kernel default, so
	  this config option is disabled by default.

	  If in doubt, say N.

config DRM_HEADER_TEST
	bool "Ensure DRM headers are self-contained and pass kernel-doc"
	depends on DRM && EXPERT
	default n
	help
	  Ensure the DRM subsystem headers both under drivers/gpu/drm and
	  include/drm compile, are self-contained, have header guards, and have
	  no kernel-doc warnings.

	  If in doubt, say N.

config DRM_DEBUG_MM
	bool "Insert extra checks and debug info into the DRM range managers"
	default n
	depends on DRM
	depends on STACKTRACE_SUPPORT
	select STACKDEPOT
	help
	  Enable allocation tracking of memory manager and leak detection on
	  shutdown.

	  Recommended for driver developers only.

	  If in doubt, say "N".

config DRM_KUNIT_TEST_HELPERS
	tristate
	depends on DRM && KUNIT
	select DRM_KMS_HELPER
	help
	  KUnit Helpers for KMS drivers.

config DRM_KUNIT_TEST
	tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
	depends on DRM && KUNIT && MMU
	select DRM_BRIDGE_CONNECTOR
	select DRM_BUDDY
	select DRM_DISPLAY_DP_HELPER
	select DRM_DISPLAY_HDMI_STATE_HELPER
	select DRM_DISPLAY_HELPER
	select DRM_EXEC
	select DRM_EXPORT_FOR_TESTS if m
	select DRM_GEM_SHMEM_HELPER
	select DRM_KUNIT_TEST_HELPERS
	select DRM_LIB_RANDOM
	select PRIME_NUMBERS
	default KUNIT_ALL_TESTS
	help
	  This builds unit tests for DRM. This option is not useful for
	  distributions or general kernels, but only for kernel
	  developers working on DRM and associated drivers.

	  For more information on KUnit and unit tests in general,
	  please refer to the KUnit documentation in
	  Documentation/dev-tools/kunit/.

	  If in doubt, say "N".

config DRM_TTM_KUNIT_TEST
	tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
	default n
	depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
	select DRM_TTM
	select DRM_BUDDY
	select DRM_EXPORT_FOR_TESTS if m
	select DRM_KUNIT_TEST_HELPERS
	default KUNIT_ALL_TESTS
	help
	  Enables unit tests for TTM, a GPU memory manager subsystem used
	  to manage memory buffers. This option is mostly useful for kernel
	  developers. It depends on (UML || COMPILE_TEST) since no other driver
	  which uses TTM can be loaded while running the tests.

	  If in doubt, say "N".

config DRM_SCHED_KUNIT_TEST
	tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
	select DRM_SCHED
	depends on DRM && KUNIT
	default KUNIT_ALL_TESTS
	help
	  Choose this option to build unit tests for the DRM scheduler.

	  Recommended for driver developers only.

	  If in doubt, say "N".

config DRM_EXPORT_FOR_TESTS
	bool
@@ -134,6 +134,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
	drm_atomic_helper.o \
	drm_atomic_state_helper.o \
	drm_bridge_helper.o \
	drm_crtc_helper.o \
	drm_damage_helper.o \
	drm_flip_work.o \

@@ -204,6 +205,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
@@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
};

static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
				 struct drm_encoder *encoder,
				 enum drm_bridge_attach_flags flags)
{
	struct adp_mipi_drv_private *adp =
		container_of(bridge, struct adp_mipi_drv_private, bridge);

	return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
	return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
}

static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;

	if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
	    <= 0)
	if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
					 link_status) < 0)
		return false;
	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
		return false;

@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
	drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);

	if (drm_dp_dpcd_read_link_status(dp_info->aux,
					 dp_info->link_status) <= 0) {
					 dp_info->link_status) < 0) {
		DRM_ERROR("displayport link status failed\n");
		break;
	}

@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
	drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);

	if (drm_dp_dpcd_read_link_status(dp_info->aux,
					 dp_info->link_status) <= 0) {
					 dp_info->link_status) < 0) {
		DRM_ERROR("displayport link status failed\n");
		break;
	}
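Note on the amdgpu hunks above: the checks move from '<= 0' to '< 0', which assumes the reworked DPCD access helpers return 0 on success and a negative errno on failure, rather than a byte count. A minimal caller sketch under that assumption (aux is a previously initialized struct drm_dp_aux):

	u8 link_status[DP_LINK_STATUS_SIZE];
	int err;

	/* 0 means all status bytes were read; < 0 is an AUX transfer error */
	err = drm_dp_dpcd_read_link_status(aux, link_status);
	if (err < 0)
		return err;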
@ -37,6 +37,7 @@
|
|||
*/
|
||||
|
||||
/* define for signature structure */
|
||||
#define AST_HWC_SIGNATURE_SIZE SZ_32
|
||||
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
|
||||
#define AST_HWC_SIGNATURE_SizeX 0x04
|
||||
#define AST_HWC_SIGNATURE_SizeY 0x08
|
||||
|
|
@ -45,6 +46,21 @@
|
|||
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
|
||||
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
|
||||
|
||||
static unsigned long ast_cursor_vram_size(void)
|
||||
{
|
||||
return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
|
||||
}
|
||||
|
||||
long ast_cursor_vram_offset(struct ast_device *ast)
|
||||
{
|
||||
unsigned long size = ast_cursor_vram_size();
|
||||
|
||||
if (size > ast->vram_size)
|
||||
return -EINVAL;
|
||||
|
||||
return ALIGN_DOWN(ast->vram_size - size, SZ_8);
|
||||
}
|
||||
|
||||
static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
|
||||
{
|
||||
u32 csum = 0;
|
||||
|
|
@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un
|
|||
static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
|
||||
unsigned int width, unsigned int height)
|
||||
{
|
||||
u8 __iomem *dst = ast->cursor_plane.base.vaddr;
|
||||
u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
|
||||
u32 csum;
|
||||
|
||||
csum = ast_cursor_calculate_checksum(src, width, height);
|
||||
|
|
@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
|
|||
struct ast_device *ast = to_ast_device(plane->dev);
|
||||
struct drm_rect damage;
|
||||
u64 dst_off = ast_plane->offset;
|
||||
u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
|
||||
u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
|
||||
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
|
||||
unsigned int offset_x, offset_y;
|
||||
u16 x, y;
|
||||
|
|
@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast)
|
|||
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
|
||||
struct ast_plane *ast_plane = &ast_cursor_plane->base;
|
||||
struct drm_plane *cursor_plane = &ast_plane->base;
|
||||
size_t size;
|
||||
void __iomem *vaddr;
|
||||
u64 offset;
|
||||
unsigned long size;
|
||||
long offset;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate backing storage for cursors. The BOs are permanently
|
||||
* pinned to the top end of the VRAM.
|
||||
*/
|
||||
size = ast_cursor_vram_size();
|
||||
offset = ast_cursor_vram_offset(ast);
|
||||
if (offset < 0)
|
||||
return offset;
|
||||
|
||||
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
|
||||
|
||||
if (ast->vram_fb_available < size)
|
||||
return -ENOMEM;
|
||||
|
||||
vaddr = ast->vram + ast->vram_fb_available - size;
|
||||
offset = ast->vram_fb_available - size;
|
||||
|
||||
ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
|
||||
ret = ast_plane_init(dev, ast_plane, offset, size,
|
||||
0x01, &ast_cursor_plane_funcs,
|
||||
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
|
||||
NULL, DRM_PLANE_TYPE_CURSOR);
|
||||
|
|
@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast)
|
|||
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
|
||||
drm_plane_enable_fb_damage_clips(cursor_plane);
|
||||
|
||||
ast->vram_fb_available -= size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -112,12 +112,9 @@ enum ast_config_mode {
|
|||
|
||||
#define AST_MAX_HWC_WIDTH 64
|
||||
#define AST_MAX_HWC_HEIGHT 64
|
||||
|
||||
#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
|
||||
#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
|
||||
|
||||
#define AST_HWC_SIGNATURE_SIZE 32
|
||||
|
||||
/*
|
||||
* Planes
|
||||
*/
|
||||
|
|
@ -125,7 +122,6 @@ enum ast_config_mode {
|
|||
struct ast_plane {
|
||||
struct drm_plane base;
|
||||
|
||||
void __iomem *vaddr;
|
||||
u64 offset;
|
||||
unsigned long size;
|
||||
};
|
||||
|
|
@ -183,7 +179,6 @@ struct ast_device {
|
|||
void __iomem *vram;
|
||||
unsigned long vram_base;
|
||||
unsigned long vram_size;
|
||||
unsigned long vram_fb_available;
|
||||
|
||||
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
|
||||
|
||||
|
|
@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
|
|||
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
|
||||
}
|
||||
|
||||
#define AST_VIDMEM_SIZE_8M 0x00800000
|
||||
#define AST_VIDMEM_SIZE_16M 0x01000000
|
||||
#define AST_VIDMEM_SIZE_32M 0x02000000
|
||||
#define AST_VIDMEM_SIZE_64M 0x04000000
|
||||
#define AST_VIDMEM_SIZE_128M 0x08000000
|
||||
|
||||
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
|
||||
|
||||
struct ast_vbios_stdtable {
|
||||
u8 misc;
|
||||
u8 seq[4];
|
||||
|
|
@ -440,6 +427,7 @@ int ast_vga_output_init(struct ast_device *ast);
|
|||
int ast_sil164_output_init(struct ast_device *ast);
|
||||
|
||||
/* ast_cursor.c */
|
||||
long ast_cursor_vram_offset(struct ast_device *ast);
|
||||
int ast_cursor_plane_init(struct ast_device *ast);
|
||||
|
||||
/* ast dp501 */
|
||||
|
|
@ -454,11 +442,12 @@ int ast_astdp_output_init(struct ast_device *ast);
|
|||
/* ast_mode.c */
|
||||
int ast_mode_config_init(struct ast_device *ast);
|
||||
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
|
||||
void __iomem *vaddr, u64 offset, unsigned long size,
|
||||
u64 offset, unsigned long size,
|
||||
uint32_t possible_crtcs,
|
||||
const struct drm_plane_funcs *funcs,
|
||||
const uint32_t *formats, unsigned int format_count,
|
||||
const uint64_t *format_modifiers,
|
||||
enum drm_plane_type type);
|
||||
void __iomem *ast_plane_vaddr(struct ast_plane *ast);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -35,36 +35,35 @@
|
|||
|
||||
static u32 ast_get_vram_size(struct ast_device *ast)
|
||||
{
|
||||
u8 jreg;
|
||||
u32 vram_size;
|
||||
u8 vgacr99, vgacraa;
|
||||
|
||||
vram_size = AST_VIDMEM_DEFAULT_SIZE;
|
||||
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
|
||||
switch (jreg & 3) {
|
||||
vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
|
||||
switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
|
||||
case 0:
|
||||
vram_size = AST_VIDMEM_SIZE_8M;
|
||||
vram_size = SZ_8M;
|
||||
break;
|
||||
case 1:
|
||||
vram_size = AST_VIDMEM_SIZE_16M;
|
||||
vram_size = SZ_16M;
|
||||
break;
|
||||
case 2:
|
||||
vram_size = AST_VIDMEM_SIZE_32M;
|
||||
vram_size = SZ_32M;
|
||||
break;
|
||||
case 3:
|
||||
vram_size = AST_VIDMEM_SIZE_64M;
|
||||
vram_size = SZ_64M;
|
||||
break;
|
||||
}
|
||||
|
||||
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
|
||||
switch (jreg & 0x03) {
|
||||
vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
|
||||
switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
|
||||
case 1:
|
||||
vram_size -= 0x100000;
|
||||
vram_size -= SZ_1M;
|
||||
break;
|
||||
case 2:
|
||||
vram_size -= 0x200000;
|
||||
vram_size -= SZ_2M;
|
||||
break;
|
||||
case 3:
|
||||
vram_size -= 0x400000;
|
||||
vram_size -= SZ_4M;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
|
|||
|
||||
ast->vram_base = base;
|
||||
ast->vram_size = vram_size;
|
||||
ast->vram_fb_available = vram_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -51,6 +51,26 @@
|
|||
|
||||
#define AST_LUT_SIZE 256
|
||||
|
||||
#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
|
||||
|
||||
static unsigned long ast_fb_vram_offset(void)
|
||||
{
|
||||
return 0; // with shmem, the primary plane is always at offset 0
|
||||
}
|
||||
|
||||
static unsigned long ast_fb_vram_size(struct ast_device *ast)
|
||||
{
|
||||
struct drm_device *dev = &ast->base;
|
||||
unsigned long offset = ast_fb_vram_offset(); // starts at offset
|
||||
long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset
|
||||
|
||||
if (cursor_offset < 0)
|
||||
cursor_offset = ast->vram_size; // no cursor; it's all ours
|
||||
if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
|
||||
return 0; // cannot legally happen; signal error
|
||||
return cursor_offset - offset;
|
||||
}
|
||||
|
||||
static inline void ast_load_palette_index(struct ast_device *ast,
|
||||
u8 index, u8 red, u8 green,
|
||||
u8 blue)
|
||||
|
|
@ -439,7 +459,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
|
|||
*/
|
||||
|
||||
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
|
||||
void __iomem *vaddr, u64 offset, unsigned long size,
|
||||
u64 offset, unsigned long size,
|
||||
uint32_t possible_crtcs,
|
||||
const struct drm_plane_funcs *funcs,
|
||||
const uint32_t *formats, unsigned int format_count,
|
||||
|
|
@ -448,7 +468,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
|
|||
{
|
||||
struct drm_plane *plane = &ast_plane->base;
|
||||
|
||||
ast_plane->vaddr = vaddr;
|
||||
ast_plane->offset = offset;
|
||||
ast_plane->size = size;
|
||||
|
||||
|
|
@ -457,6 +476,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
|
|||
type, NULL);
|
||||
}
|
||||
|
||||
void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
|
||||
{
|
||||
struct ast_device *ast = to_ast_device(ast_plane->base.dev);
|
||||
|
||||
return ast->vram + ast_plane->offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* Primary plane
|
||||
*/
|
||||
|
|
@ -503,7 +529,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
|
|||
struct drm_framebuffer *fb,
|
||||
const struct drm_rect *clip)
|
||||
{
|
||||
struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
|
||||
struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
|
||||
|
||||
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
|
||||
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
|
||||
|
|
@ -576,12 +602,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
|
|||
{
|
||||
struct ast_plane *ast_plane = to_ast_plane(plane);
|
||||
|
||||
if (plane->state && plane->state->fb && ast_plane->vaddr) {
|
||||
if (plane->state && plane->state->fb) {
|
||||
sb->format = plane->state->fb->format;
|
||||
sb->width = plane->state->fb->width;
|
||||
sb->height = plane->state->fb->height;
|
||||
sb->pitch[0] = plane->state->fb->pitches[0];
|
||||
iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
|
||||
iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
|
||||
return 0;
|
||||
}
|
||||
return -ENODEV;
|
||||
|
|
@ -608,13 +634,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
|
|||
struct drm_device *dev = &ast->base;
|
||||
struct ast_plane *ast_primary_plane = &ast->primary_plane;
|
||||
struct drm_plane *primary_plane = &ast_primary_plane->base;
|
||||
void __iomem *vaddr = ast->vram;
|
||||
u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
|
||||
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
|
||||
unsigned long size = ast->vram_fb_available - cursor_size;
|
||||
u64 offset = ast_fb_vram_offset();
|
||||
unsigned long size = ast_fb_vram_size(ast);
|
||||
int ret;
|
||||
|
||||
ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
|
||||
ret = ast_plane_init(dev, ast_primary_plane, offset, size,
|
||||
0x01, &ast_primary_plane_funcs,
|
||||
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
|
||||
NULL, DRM_PLANE_TYPE_PRIMARY);
|
||||
|
|
@ -922,9 +946,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
|
|||
|
||||
/*
|
||||
* Concurrent operations could possibly trigger a call to
|
||||
* drm_connector_helper_funcs.get_modes by trying to read the
|
||||
* display modes. Protect access to I/O registers by acquiring
|
||||
* the I/O-register lock. Released in atomic_flush().
|
||||
* drm_connector_helper_funcs.get_modes by reading the display
|
||||
* modes. Protect access to registers by acquiring the modeset
|
||||
* lock.
|
||||
*/
|
||||
mutex_lock(&ast->modeset_lock);
|
||||
drm_atomic_helper_commit_tail(state);
|
||||
|
|
@ -938,16 +962,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
|
|||
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
|
||||
const struct drm_display_mode *mode)
|
||||
{
|
||||
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
|
||||
const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
|
||||
struct ast_device *ast = to_ast_device(dev);
|
||||
unsigned long fbsize, fbpages, max_fbpages;
|
||||
unsigned long max_fb_size = ast_fb_vram_size(ast);
|
||||
u64 pitch;
|
||||
|
||||
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
|
||||
if (drm_WARN_ON_ONCE(dev, !info))
|
||||
return MODE_ERROR; /* driver bug */
|
||||
|
||||
fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
|
||||
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
|
||||
|
||||
if (fbpages > max_fbpages)
|
||||
pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
|
||||
if (!pitch)
|
||||
return MODE_BAD_WIDTH;
|
||||
if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
|
||||
return MODE_BAD_WIDTH; /* maximum programmable pitch */
|
||||
if (pitch > max_fb_size / mode->vdisplay)
|
||||
return MODE_MEM;
|
||||
|
||||
return MODE_OK;
|
||||
|
|
@ -1018,10 +1046,7 @@ int ast_mode_config_init(struct ast_device *ast)
|
|||
return ret;
|
||||
|
||||
drm_mode_config_reset(dev);
|
||||
|
||||
ret = drmm_kms_helper_poll_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
drmm_kms_helper_poll_init(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par
|
|||
|
||||
switch (param->vram_size) {
|
||||
default:
|
||||
case AST_VIDMEM_SIZE_8M:
|
||||
case SZ_8M:
|
||||
param->dram_config |= 0x00;
|
||||
break;
|
||||
case AST_VIDMEM_SIZE_16M:
|
||||
case SZ_16M:
|
||||
param->dram_config |= 0x04;
|
||||
break;
|
||||
case AST_VIDMEM_SIZE_32M:
|
||||
case SZ_32M:
|
||||
param->dram_config |= 0x08;
|
||||
break;
|
||||
case AST_VIDMEM_SIZE_64M:
|
||||
case SZ_64M:
|
||||
param->dram_config |= 0x0c;
|
||||
break;
|
||||
}
|
||||
|
|
@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par
|
|||
|
||||
switch (param->vram_size) {
|
||||
default:
|
||||
case AST_VIDMEM_SIZE_8M:
|
||||
case SZ_8M:
|
||||
param->dram_config |= 0x00;
|
||||
break;
|
||||
case AST_VIDMEM_SIZE_16M:
|
||||
case SZ_16M:
|
||||
param->dram_config |= 0x04;
|
||||
break;
|
||||
case AST_VIDMEM_SIZE_32M:
|
||||
case SZ_32M:
|
||||
param->dram_config |= 0x08;
|
||||
break;
|
||||
case AST_VIDMEM_SIZE_64M:
|
||||
case SZ_64M:
|
||||
param->dram_config |= 0x0c;
|
||||
break;
|
||||
}
|
||||
|
|
@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast)
|
|||
switch (temp & 0x0c) {
|
||||
default:
|
||||
case 0x00:
|
||||
param.vram_size = AST_VIDMEM_SIZE_8M;
|
||||
param.vram_size = SZ_8M;
|
||||
break;
|
||||
|
||||
case 0x04:
|
||||
param.vram_size = AST_VIDMEM_SIZE_16M;
|
||||
param.vram_size = SZ_16M;
|
||||
break;
|
||||
|
||||
case 0x08:
|
||||
param.vram_size = AST_VIDMEM_SIZE_32M;
|
||||
param.vram_size = SZ_32M;
|
||||
break;
|
||||
|
||||
case 0x0c:
|
||||
param.vram_size = AST_VIDMEM_SIZE_64M;
|
||||
param.vram_size = SZ_64M;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -30,9 +30,11 @@
|
|||
|
||||
#define AST_IO_VGACRI (0x54)
|
||||
#define AST_IO_VGACR80_PASSWORD (0xa8)
|
||||
#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
|
||||
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
|
||||
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
|
||||
#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
|
||||
#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
|
||||
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
|
||||
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
|
||||
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
|
||||
|
|
|
|||
|
|
@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int adv7511_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct adv7511 *adv = bridge_to_adv7511(bridge);
|
||||
int ret = 0;
|
||||
|
||||
if (adv->next_bridge) {
|
||||
ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
|
||||
ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
|
||||
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
|||
|
|
@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
/*
|
||||
* Power up the sink (DP_SET_POWER register is only available on DPCD
|
||||
* v1.1 and later).
|
||||
*/
|
||||
if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
|
||||
err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
dpcd[0] &= ~DP_SET_POWER_MASK;
|
||||
dpcd[0] |= DP_SET_POWER_D0;
|
||||
|
||||
err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("Failed to power up DisplayPort link: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* According to the DP 1.1 specification, a "Sink Device must
|
||||
* exit the power saving state within 1 ms" (Section 2.5.3.1,
|
||||
* Table 5-52, "Sink Control Field" (register 0x600).
|
||||
*/
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
|
||||
|
||||
/* Possibly enable downspread on the sink */
|
||||
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
|
||||
|
|
@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
|
|||
};
|
||||
|
||||
static int anx6345_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
|
||||
|
|
@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
|
|||
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
err = drm_connector_attach_encoder(&anx6345->connector,
|
||||
bridge->encoder);
|
||||
encoder);
|
||||
if (err) {
|
||||
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
|
||||
goto connector_cleanup;
|
||||
|
|
|
|||
|
|
@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
/*
|
||||
* Power up the sink (DP_SET_POWER register is only available on DPCD
|
||||
* v1.1 and later).
|
||||
*/
|
||||
if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
|
||||
err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
dpcd[0] &= ~DP_SET_POWER_MASK;
|
||||
dpcd[0] |= DP_SET_POWER_D0;
|
||||
|
||||
err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
|
||||
if (err < 0) {
|
||||
DRM_ERROR("Failed to power up DisplayPort link: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* According to the DP 1.1 specification, a "Sink Device must
|
||||
* exit the power saving state within 1 ms" (Section 2.5.3.1,
|
||||
* Table 5-52, "Sink Control Field" (register 0x600).
|
||||
*/
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
|
||||
|
||||
/* Possibly enable downspread on the sink */
|
||||
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
|
||||
|
|
@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
|
|||
};
|
||||
|
||||
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
|
||||
|
|
@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
|
|||
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
err = drm_connector_attach_encoder(&anx78xx->connector,
|
||||
bridge->encoder);
|
||||
encoder);
|
||||
if (err) {
|
||||
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
|
||||
goto connector_cleanup;
|
||||
|
|
|
|||
|
|
@ -1113,10 +1113,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
|
|||
};
|
||||
|
||||
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct analogix_dp_device *dp = bridge->driver_private;
|
||||
struct drm_encoder *encoder = dp->encoder;
|
||||
struct drm_connector *connector = NULL;
|
||||
int ret = 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -2141,6 +2141,7 @@ static void hdcp_check_work_func(struct work_struct *work)
|
|||
}
|
||||
|
||||
static int anx7625_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
|
||||
|
|
@ -2159,7 +2160,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
if (ctx->pdata.panel_bridge) {
|
||||
err = drm_bridge_attach(bridge->encoder,
|
||||
err = drm_bridge_attach(encoder,
|
||||
ctx->pdata.panel_bridge,
|
||||
&ctx->bridge, flags);
|
||||
if (err)
|
||||
|
|
@ -2771,7 +2772,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
|
|||
|
||||
if (platform->hdcp_workqueue) {
|
||||
cancel_delayed_work(&platform->hdcp_work);
|
||||
flush_workqueue(platform->hdcp_workqueue);
|
||||
destroy_workqueue(platform->hdcp_workqueue);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -86,6 +86,7 @@ struct drm_aux_bridge_data {
|
|||
};
|
||||
|
||||
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct drm_aux_bridge_data *data;
|
||||
|
|
@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
|
|||
|
||||
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
|
||||
return drm_bridge_attach(encoder, data->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
|
|||
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
|
||||
|
||||
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
|
||||
|
|
|
|||
|
|
@ -425,6 +425,17 @@
|
|||
#define DSI_NULL_FRAME_OVERHEAD 6
|
||||
#define DSI_EOT_PKT_SIZE 4
|
||||
|
||||
struct cdns_dsi_bridge_state {
|
||||
struct drm_bridge_state base;
|
||||
struct cdns_dsi_cfg dsi_cfg;
|
||||
};
|
||||
|
||||
static inline struct cdns_dsi_bridge_state *
|
||||
to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
|
||||
{
|
||||
return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
|
||||
}
|
||||
|
||||
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
|
||||
{
|
||||
return container_of(input, struct cdns_dsi, input);
|
||||
|
|
@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
|
|||
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
|
||||
unsigned long dsi_hss_hsa_hse_hbp;
|
||||
unsigned int nlanes = output->dev->lanes;
|
||||
int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
|
||||
int ret;
|
||||
|
||||
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
|
||||
mipi_dsi_pixel_format_to_bpp(output->dev->format),
|
||||
nlanes, phy_cfg);
|
||||
ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
|
||||
mipi_dsi_pixel_format_to_bpp(output->dev->format),
|
||||
nlanes, phy_cfg);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
|
||||
if (ret)
|
||||
|
|
@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
|
|||
}
|
||||
|
||||
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
|
|
@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
|
|||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
|
||||
return drm_bridge_attach(encoder, output->bridge, bridge,
|
||||
flags);
|
||||
}
|
||||
|
||||
|
|
@ -655,7 +670,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
|
|||
return MODE_OK;
|
||||
}
|
||||
|
||||
static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
|
||||
static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
struct cdns_dsi *dsi = input_to_dsi(input);
|
||||
|
|
@ -675,11 +691,17 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
|
|||
pm_runtime_put(dsi->base.dev);
|
||||
}
|
||||
|
||||
static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
|
||||
static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
struct cdns_dsi *dsi = input_to_dsi(input);
|
||||
|
||||
dsi->phy_initialized = false;
|
||||
dsi->link_initialized = false;
|
||||
phy_power_off(dsi->dphy);
|
||||
phy_exit(dsi->dphy);
|
||||
|
||||
pm_runtime_put(dsi->base.dev);
|
||||
}
|
||||
|
||||
|
|
@ -752,32 +774,59 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
|
|||
dsi->link_initialized = true;
|
||||
}
|
||||
|
||||
static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
|
||||
static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
struct cdns_dsi *dsi = input_to_dsi(input);
|
||||
struct cdns_dsi_output *output = &dsi->output;
|
||||
struct drm_connector_state *conn_state;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
struct cdns_dsi_bridge_state *dsi_state;
|
||||
struct drm_bridge_state *new_bridge_state;
|
||||
struct drm_display_mode *mode;
|
||||
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
|
||||
struct drm_connector *connector;
|
||||
unsigned long tx_byte_period;
|
||||
struct cdns_dsi_cfg dsi_cfg;
|
||||
u32 tmp, reg_wakeup, div;
|
||||
u32 tmp, reg_wakeup, div, status;
|
||||
int nlanes;
|
||||
|
||||
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
|
||||
return;
|
||||
|
||||
new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
|
||||
if (WARN_ON(!new_bridge_state))
|
||||
return;
|
||||
|
||||
dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
|
||||
dsi_cfg = dsi_state->dsi_cfg;
|
||||
|
||||
if (dsi->platform_ops && dsi->platform_ops->enable)
|
||||
dsi->platform_ops->enable(dsi);
|
||||
|
||||
mode = &bridge->encoder->crtc->state->adjusted_mode;
|
||||
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
|
||||
conn_state = drm_atomic_get_new_connector_state(state, connector);
|
||||
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
|
||||
mode = &crtc_state->adjusted_mode;
|
||||
nlanes = output->dev->lanes;
|
||||
|
||||
WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
|
||||
|
||||
cdns_dsi_hs_init(dsi);
|
||||
cdns_dsi_init_link(dsi);
|
||||
|
||||
/*
|
||||
* Now that the DSI Link and DSI Phy are initialized,
|
||||
* wait for the CLK and Data Lanes to be ready.
|
||||
*/
|
||||
tmp = CLK_LANE_RDY;
|
||||
for (int i = 0; i < nlanes; i++)
|
||||
tmp |= DATA_LANE_RDY(i);
|
||||
|
||||
if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
|
||||
(tmp == (status & tmp)), 100, 500000))
|
||||
dev_err(dsi->base.dev,
|
||||
"Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
|
||||
|
||||
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
|
||||
dsi->regs + VID_HSIZE1);
|
||||
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
|
||||
|
|
@ -892,7 +941,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
|
|||
writel(tmp, dsi->regs + MCTL_MAIN_EN);
|
||||
}
|
||||
|
||||
static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
struct cdns_dsi *dsi = input_to_dsi(input);
|
||||
|
|
@ -904,13 +954,109 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
|
|||
cdns_dsi_hs_init(dsi);
|
||||
}
|
||||
|
||||
static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *bridge_state,
|
||||
struct drm_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state,
|
||||
u32 output_fmt,
|
||||
unsigned int *num_input_fmts)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
struct cdns_dsi *dsi = input_to_dsi(input);
|
||||
struct cdns_dsi_output *output = &dsi->output;
|
||||
u32 *input_fmts;
|
||||
|
||||
*num_input_fmts = 0;
|
||||
|
||||
input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
|
||||
if (!input_fmts)
|
||||
return NULL;
|
||||
|
||||
input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
|
||||
if (!input_fmts[0])
|
||||
return NULL;
|
||||
|
||||
*num_input_fmts = 1;
|
||||
|
||||
return input_fmts;
|
||||
}
|
||||
|
||||
static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *bridge_state,
|
||||
struct drm_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
|
||||
struct cdns_dsi *dsi = input_to_dsi(input);
|
||||
struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
|
||||
const struct drm_display_mode *mode = &crtc_state->mode;
|
||||
struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
|
||||
|
||||
return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
|
||||
}
|
||||
|
||||
static struct drm_bridge_state *
|
||||
cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
|
||||
{
|
||||
struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
|
||||
struct drm_bridge_state *bridge_state;
|
||||
|
||||
if (WARN_ON(!bridge->base.state))
|
||||
return NULL;
|
||||
|
||||
bridge_state = drm_priv_to_bridge_state(bridge->base.state);
|
||||
old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
|
||||
|
||||
dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
|
||||
if (!dsi_state)
|
||||
return NULL;
|
||||
|
||||
__drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
|
||||
|
||||
memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
|
||||
sizeof(dsi_state->dsi_cfg));
|
||||
|
||||
return &dsi_state->base;
|
||||
}
|
||||
|
||||
static void
|
||||
cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
|
||||
struct drm_bridge_state *state)
|
||||
{
|
||||
struct cdns_dsi_bridge_state *dsi_state;
|
||||
|
||||
dsi_state = to_cdns_dsi_bridge_state(state);
|
||||
|
||||
kfree(dsi_state);
|
||||
}
|
||||
|
||||
static struct drm_bridge_state *
|
||||
cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
|
||||
{
|
||||
struct cdns_dsi_bridge_state *dsi_state;
|
||||
|
||||
dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
|
||||
if (!dsi_state)
|
||||
return NULL;
|
||||
|
||||
memset(dsi_state, 0, sizeof(*dsi_state));
|
||||
dsi_state->base.bridge = bridge;
|
||||
|
||||
return &dsi_state->base;
|
||||
}
|
||||
|
||||
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
|
||||
.attach = cdns_dsi_bridge_attach,
|
||||
.mode_valid = cdns_dsi_bridge_mode_valid,
|
||||
.disable = cdns_dsi_bridge_disable,
|
||||
.pre_enable = cdns_dsi_bridge_pre_enable,
|
||||
.enable = cdns_dsi_bridge_enable,
|
||||
.post_disable = cdns_dsi_bridge_post_disable,
|
||||
.atomic_disable = cdns_dsi_bridge_atomic_disable,
|
||||
.atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
|
||||
.atomic_enable = cdns_dsi_bridge_atomic_enable,
|
||||
.atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
|
||||
.atomic_check = cdns_dsi_bridge_atomic_check,
|
||||
.atomic_reset = cdns_dsi_bridge_atomic_reset,
|
||||
.atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
|
||||
.atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
|
||||
.atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
|
||||
};
|
||||
|
||||
static int cdns_dsi_attach(struct mipi_dsi_host *host,
|
||||
|
|
@ -920,8 +1066,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
|
|||
struct cdns_dsi_output *output = &dsi->output;
|
||||
struct cdns_dsi_input *input = &dsi->input;
|
||||
struct drm_bridge *bridge;
|
||||
struct drm_panel *panel;
|
||||
struct device_node *np;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
|
|
@ -939,26 +1083,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
|
|||
/*
|
||||
* The host <-> device link might be described using an OF-graph
|
||||
* representation, in this case we extract the device of_node from
|
||||
* this representation, otherwise we use dsidev->dev.of_node which
|
||||
* should have been filled by the core.
|
||||
* this representation.
|
||||
*/
|
||||
np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
|
||||
dev->channel);
|
||||
if (!np)
|
||||
np = of_node_get(dev->dev.of_node);
|
||||
|
||||
panel = of_drm_find_panel(np);
|
||||
if (!IS_ERR(panel)) {
|
||||
bridge = drm_panel_bridge_add_typed(panel,
|
||||
DRM_MODE_CONNECTOR_DSI);
|
||||
} else {
|
||||
bridge = of_drm_find_bridge(dev->dev.of_node);
|
||||
if (!bridge)
|
||||
bridge = ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
of_node_put(np);
|
||||
|
||||
bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
|
||||
DSI_OUTPUT_PORT, dev->channel);
|
||||
if (IS_ERR(bridge)) {
|
||||
ret = PTR_ERR(bridge);
|
||||
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
|
||||
|
|
@ -968,7 +1096,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
|
|||
|
||||
output->dev = dev;
|
||||
output->bridge = bridge;
|
||||
output->panel = panel;
|
||||
|
||||
/*
|
||||
* The DSI output has been properly configured, we can now safely
|
||||
|
|
@ -984,12 +1111,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
|
|||
struct mipi_dsi_device *dev)
|
||||
{
|
||||
struct cdns_dsi *dsi = to_cdns_dsi(host);
|
||||
struct cdns_dsi_output *output = &dsi->output;
|
||||
struct cdns_dsi_input *input = &dsi->input;
|
||||
|
||||
drm_bridge_remove(&input->bridge);
|
||||
if (output->panel)
|
||||
drm_panel_bridge_remove(output->bridge);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1152,7 +1276,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
|
|||
clk_disable_unprepare(dsi->dsi_sys_clk);
|
||||
clk_disable_unprepare(dsi->dsi_p_clk);
|
||||
reset_control_assert(dsi->dsi_p_rst);
|
||||
dsi->link_initialized = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@
|
|||
|
||||
#include <drm/drm_bridge.h>
|
||||
#include <drm/drm_mipi_dsi.h>
|
||||
#include <drm/drm_panel.h>
|
||||
|
||||
#include <linux/bits.h>
|
||||
#include <linux/completion.h>
|
||||
|
|
@ -21,7 +20,6 @@ struct reset_control;
|
|||
|
||||
struct cdns_dsi_output {
|
||||
struct mipi_dsi_device *dev;
|
||||
struct drm_panel *panel;
|
||||
struct drm_bridge *bridge;
|
||||
union phy_configure_opts phy_opts;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -545,76 +545,6 @@ int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdns_mhdp_link_power_up() - power up a DisplayPort link
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @link: pointer to a structure containing the link configuration
|
||||
*
|
||||
* Returns 0 on success or a negative error code on failure.
|
||||
*/
|
||||
static
|
||||
int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
|
||||
{
|
||||
u8 value;
|
||||
int err;
|
||||
|
||||
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
|
||||
if (link->revision < 0x11)
|
||||
return 0;
|
||||
|
||||
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
value &= ~DP_SET_POWER_MASK;
|
||||
value |= DP_SET_POWER_D0;
|
||||
|
||||
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/*
|
||||
* According to the DP 1.1 specification, a "Sink Device must exit the
|
||||
* power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
|
||||
* Control Field" (register 0x600).
|
||||
*/
|
||||
usleep_range(1000, 2000);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdns_mhdp_link_power_down() - power down a DisplayPort link
|
||||
* @aux: DisplayPort AUX channel
|
||||
* @link: pointer to a structure containing the link configuration
|
||||
*
|
||||
* Returns 0 on success or a negative error code on failure.
|
||||
*/
|
||||
static
|
||||
int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
|
||||
struct cdns_mhdp_link *link)
|
||||
{
|
||||
u8 value;
|
||||
int err;
|
||||
|
||||
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
|
||||
if (link->revision < 0x11)
|
||||
return 0;
|
||||
|
||||
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
value &= ~DP_SET_POWER_MASK;
|
||||
value |= DP_SET_POWER_D3;
|
||||
|
||||
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cdns_mhdp_link_configure() - configure a DisplayPort link
|
||||
* @aux: DisplayPort AUX channel
|
||||
|
|
@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
|
|||
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
|
||||
|
||||
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
|
||||
cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
|
||||
drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
|
||||
|
||||
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
|
||||
|
||||
|
|
@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
|
|||
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
|
||||
|
||||
if (mhdp->plugged)
|
||||
cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
|
||||
drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
|
||||
|
||||
mhdp->link_up = false;
|
||||
}
|
||||
|
|
@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
|
|||
}
|
||||
|
||||
static int cdns_mhdp_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
|
||||
|
|
@ -2305,7 +2236,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
|
|||
* If everything looks fine, just return, as we don't handle
|
||||
* DP IRQs.
|
||||
*/
|
||||
if (ret > 0 &&
|
||||
if (!ret &&
|
||||
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
|
||||
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
|
||||
goto out;
|
||||
|
|
|
|||
|
|
@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
|
||||
static int chipone_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct chipone *icn = bridge_to_chipone(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
|
||||
return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
|
||||
}
|
||||
|
||||
#define MAX_INPUT_SEL_FORMATS 1
|
||||
|
|
|
|||
|
|
@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
|
|||
}
|
||||
|
||||
static int ch7033_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
|
||||
struct drm_connector *connector = &priv->connector;
|
||||
int ret;
|
||||
|
||||
ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
|
||||
ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
|
|||
return ret;
|
||||
}
|
||||
|
||||
return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
|
||||
return drm_connector_attach_encoder(&priv->connector, encoder);
|
||||
}
|
||||
|
||||
static void ch7033_bridge_detach(struct drm_bridge *bridge)
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int display_connector_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
|
||||
|
|
|
|||
|
|
@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
|
|||
}
|
||||
|
||||
static int fsl_ldb_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
|
||||
return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
|
|||
|
||||
configured_link_freq = clk_get_rate(fsl_ldb->clk);
|
||||
if (configured_link_freq != requested_link_freq)
|
||||
dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
|
||||
configured_link_freq,
|
||||
requested_link_freq);
|
||||
dev_warn(fsl_ldb->dev,
|
||||
"Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
|
||||
fsl_ldb->clk, configured_link_freq, requested_link_freq);
|
||||
|
||||
clk_prepare_enable(fsl_ldb->clk);
|
||||
|
||||
|
|
|
|||
|
|
@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
|
||||
|
||||
int ldb_bridge_attach_helper(struct drm_bridge *bridge,
|
||||
int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct ldb_channel *ldb_ch = bridge->driver_private;
|
||||
|
|
@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
return drm_bridge_attach(bridge->encoder,
|
||||
ldb_ch->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
|
||||
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
|
|||
|
||||
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
|
||||
|
||||
int ldb_bridge_attach_helper(struct drm_bridge *bridge,
|
||||
int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags);
|
||||
|
||||
int ldb_init_helper(struct ldb *ldb);
|
||||
|
|
|
|||
|
|
@ -23,7 +23,8 @@ struct imx_legacy_bridge {
|
|||
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
|
||||
|
||||
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
|
||||
return -EINVAL;
|
||||
|
|
@ -76,9 +77,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
|
|||
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
|
||||
imx_bridge->base.type = type;
|
||||
|
||||
ret = devm_drm_bridge_add(dev, &imx_bridge->base);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
ret = devm_drm_bridge_add(dev, &imx_bridge->base);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
return &imx_bridge->base;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
|
||||
return drm_bridge_attach(encoder, pvi->next_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -662,7 +662,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
|
|||
|
||||
ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void imx8qxp_ldb_remove(struct platform_device *pdev)
|
||||
|
|
|
|||
|
|
@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct imx8qxp_pc_channel *ch = bridge->driver_private;
|
||||
|
|
@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
return drm_bridge_attach(bridge->encoder,
|
||||
return drm_bridge_attach(encoder,
|
||||
ch->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
|
|||
}
|
||||
|
||||
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct imx8qxp_pixel_link *pl = bridge->driver_private;
|
||||
|
|
@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
return drm_bridge_attach(bridge->encoder,
|
||||
return drm_bridge_attach(encoder,
|
||||
pl->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
 #define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)

 static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+					 struct drm_encoder *encoder,
					 enum drm_bridge_attach_flags flags)
 {
	struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;

@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
		return -EINVAL;
	}

-	return drm_bridge_attach(bridge->encoder,
+	return drm_bridge_attach(encoder,
				 p2d->next_bridge, bridge,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 }
@@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
 }

 static int it6263_bridge_attach(struct drm_bridge *bridge,
+				struct drm_encoder *encoder,
				enum drm_bridge_attach_flags flags)
 {
	struct it6263 *it = bridge_to_it6263(bridge);
	struct drm_connector *connector;
	int ret;

-	ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+	ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
				flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		return ret;

@@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

-	connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+	connector = drm_bridge_connector_init(bridge->dev, encoder);
	if (IS_ERR(connector)) {
		ret = PTR_ERR(connector);
		dev_err(it->dev, "failed to initialize bridge connector: %d\n",

@@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
		return ret;
	}

-	drm_connector_attach_encoder(connector, bridge->encoder);
+	drm_connector_attach_encoder(connector, encoder);

	return 0;
 }
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
		      DRM_MODE_ARG(&it6505->video_info));
 }

-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
-					 struct it6505_drm_dp_link *link,
-					 u8 mode)
-{
-	u8 value;
-	int err;
-
-	/* DP_SET_POWER register is only available on DPCD v1.1 and later */
-	if (link->revision < DPCD_V_1_1)
-		return 0;
-
-	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
-	if (err < 0)
-		return err;
-
-	value &= ~DP_SET_POWER_MASK;
-	value |= mode;
-
-	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
-	if (err < 0)
-		return err;
-
-	if (mode == DP_SET_POWER_D0) {
-		/*
-		 * According to the DP 1.1 specification, a "Sink Device must
-		 * exit the power saving state within 1 ms" (Section 2.5.3.1,
-		 * Table 5-52, "Sink Control Field" (register 0x600).
-		 */
-		usleep_range(1000, 2000);
-	}
-
-	return 0;
-}
-
 static void it6505_clear_int(struct it6505 *it6505)
 {
	it6505_write(it6505, INT_STATUS_01, 0xFF);

@@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
	}
	it6505->auto_train_retry = AUTO_TRAIN_RETRY;

-	it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
-				     DP_SET_POWER_D0);
+	drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
	dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
	it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);

@@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
	}

	if (it6505->hpd_state) {
-		it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
-					     DP_SET_POWER_D0);
+		drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
		dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
		it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
		DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",

@@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
 }

 static int it6505_bridge_attach(struct drm_bridge *bridge,
+				struct drm_encoder *encoder,
				enum drm_bridge_attach_flags flags)
 {
	struct it6505 *it6505 = bridge_to_it6505(bridge);

@@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
	it6505_int_mask_enable(it6505);
	it6505_video_reset(it6505);

-	it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
-				     DP_SET_POWER_D0);
+	drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
 }

 static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,

@@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
	DRM_DEV_DEBUG_DRIVER(dev, "start");

	if (it6505->powered) {
-		it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
-					     DP_SET_POWER_D3);
+		drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
		it6505_video_disable(it6505);
	}
 }
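The it6505 hunks above drop the driver-private DP_SET_POWER helper in favour of the shared drm_dp_link_power_up()/drm_dp_link_power_down() helpers added by this series. Their exact prototypes are not shown in this diff; judging from the call sites they take the AUX channel and the sink's DPCD revision, so a caller looks roughly like the sketch below (the wrapper function name is illustrative):

#include <drm/display/drm_dp_helper.h>

/*
 * Sketch only: prototypes inferred from the it6505 call sites above,
 * i.e. drm_dp_link_power_up(&aux, revision).  The helpers are expected to
 * do what the removed it6505_drm_dp_link_set_power() did: skip sinks older
 * than DPCD 1.1 and wait ~1 ms after programming DP_SET_POWER_D0.
 */
static int my_dp_bridge_set_sink_power(struct drm_dp_aux *aux,
					u8 dpcd_rev, bool on)
{
	if (on)
		return drm_dp_link_power_up(aux, dpcd_rev);

	return drm_dp_link_power_down(aux, dpcd_rev);
}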
@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
|
|||
}
|
||||
|
||||
static int it66121_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
|
||||
|
|
@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
|
|||
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
|
||||
return -EINVAL;
|
||||
|
||||
ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
|
||||
ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
|||
|
|
@ -543,12 +543,13 @@ static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int lt8912_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct lt8912 *lt = bridge_to_lt8912(bridge);
|
||||
int ret;
|
||||
|
||||
ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
|
||||
ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret < 0) {
|
||||
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
|
||||
|
|
|
|||
|
|
@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int lt9211_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct lt9211 *ctx = bridge_to_lt9211(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
|
||||
return drm_bridge_attach(encoder, ctx->panel_bridge,
|
||||
&ctx->bridge, flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
|
|||
}
|
||||
|
||||
static int lt9611_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
|
||||
return drm_bridge_attach(encoder, lt9611->next_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
@ -1130,7 +1131,7 @@ static int lt9611_probe(struct i2c_client *client)
|
|||
lt9611->bridge.of_node = client->dev.of_node;
|
||||
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
|
||||
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
|
||||
DRM_BRIDGE_OP_HDMI;
|
||||
DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
|
||||
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
|
||||
lt9611->bridge.vendor = "Lontium";
|
||||
lt9611->bridge.product = "LT9611";
|
||||
|
|
|
|||
|
|
@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
|
|||
}
|
||||
|
||||
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
|
||||
return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int lvds_codec_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
|
||||
return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct i2c_client *stdp4028_i2c
|
||||
|
|
|
|||
|
|
@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
|
|||
}
|
||||
|
||||
static int mchp_lvds_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
|
||||
return drm_bridge_attach(encoder, lvds->panel_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
|
||||
|
|
@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
|
|||
if (IS_ERR(panel_bridge))
|
||||
return PTR_ERR(panel_bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
|
||||
return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
|
||||
}
|
||||
|
||||
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
|
||||
|
|
|
|||
|
|
@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
|
|||
};
|
||||
|
||||
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
|
||||
int ret;
|
||||
|
||||
/* Let this driver create connector if requested */
|
||||
ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
|
||||
ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
|
||||
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
|
|||
&ptn3460_connector_helper_funcs);
|
||||
drm_connector_register(&ptn_bridge->connector);
|
||||
drm_connector_attach_encoder(&ptn_bridge->connector,
|
||||
bridge->encoder);
|
||||
encoder);
|
||||
|
||||
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
|
||||
|
||||
|
|
|
|||
|
|
@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
|
|||
};
|
||||
|
||||
static int panel_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
|
||||
|
|
@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
|
|||
drm_panel_bridge_set_orientation(connector, bridge);
|
||||
|
||||
drm_connector_attach_encoder(&panel_bridge->connector,
|
||||
bridge->encoder);
|
||||
encoder);
|
||||
|
||||
if (bridge->dev->registered) {
|
||||
if (connector->funcs->reset)
|
||||
|
|
|
|||
|
|
@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int ps8622_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
|
||||
|
|
|
|||
|
|
@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int ps8640_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
|
||||
|
|
@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
/* Attach the panel-bridge to the dsi bridge */
|
||||
ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
|
||||
ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
|
||||
&ps_bridge->bridge, flags);
|
||||
if (ret)
|
||||
goto err_bridge_attach;
|
||||
|
|
|
|||
|
|
@@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
 }

 static int samsung_dsim_attach(struct drm_bridge *bridge,
+			       struct drm_encoder *encoder,
			       enum drm_bridge_attach_flags flags)
 {
	struct samsung_dsim *dsi = bridge_to_dsi(bridge);

-	return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+	return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
				 flags);
 }

@@ -1935,9 +1936,9 @@ int samsung_dsim_probe(struct platform_device *pdev)
	struct samsung_dsim *dsi;
	int ret, i;

-	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
-	if (!dsi)
-		return -ENOMEM;
+	dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+	if (IS_ERR(dsi))
+		return PTR_ERR(dsi);

	init_completion(&dsi->completed);
	spin_lock_init(&dsi->transfer_lock);

@@ -2007,7 +2008,6 @@ int samsung_dsim_probe(struct platform_device *pdev)

	pm_runtime_enable(dev);

-	dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
	dsi->bridge.of_node = dev->of_node;
	dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
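samsung-dsim here (and sn65dsi83 further down) also move from devm_kzalloc() plus a manual bridge.funcs assignment to devm_drm_bridge_alloc(), which allocates the driver structure around the embedded drm_bridge and wires up the funcs pointer in one step. A probe-time sketch, with the struct and funcs names invented for illustration:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct my_bridge {
	struct drm_bridge bridge;
};

/* Would normally carry .attach and the atomic hooks; only referenced here,
 * exactly as the converted drivers reference their funcs table. */
static const struct drm_bridge_funcs my_bridge_funcs;

static int my_bridge_probe(struct platform_device *pdev)
{
	struct my_bridge *mb;

	/* Allocates struct my_bridge and initialises mb->bridge.funcs,
	 * replacing the devm_kzalloc() + "bridge.funcs = ..." pair. */
	mb = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
				   &my_bridge_funcs);
	if (IS_ERR(mb))
		return PTR_ERR(mb);

	mb->bridge.of_node = pdev->dev.of_node;
	mb->bridge.type = DRM_MODE_CONNECTOR_DSI;

	return devm_drm_bridge_add(&pdev->dev, &mb->bridge);
}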
@ -416,6 +416,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int sii902x_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct sii902x *sii902x = bridge_to_sii902x(bridge);
|
||||
|
|
@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
|
|||
int ret;
|
||||
|
||||
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
|
||||
return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
|
||||
return drm_bridge_attach(encoder, sii902x->next_bridge,
|
||||
bridge, flags);
|
||||
|
||||
drm_connector_helper_add(&sii902x->connector,
|
||||
|
|
@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
|
||||
drm_connector_attach_encoder(&sii902x->connector, encoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1138,6 +1139,7 @@ static int sii902x_init(struct sii902x *sii902x)
|
|||
sii902x->bridge.of_node = dev->of_node;
|
||||
sii902x->bridge.timings = &default_sii902x_timings;
|
||||
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
|
||||
sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
|
||||
|
||||
if (sii902x->i2c->irq > 0)
|
||||
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
|
||||
|
|
|
|||
|
|
@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int sii8620_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct sii8620 *ctx = bridge_to_sii8620(bridge);
|
||||
|
|
|
|||
|
|
@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
|
|||
};
|
||||
|
||||
static int simple_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
|
||||
int ret;
|
||||
|
||||
ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
|
||||
ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
|
|||
return ret;
|
||||
}
|
||||
|
||||
drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
|
||||
drm_connector_attach_encoder(&sbridge->connector, encoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1077,6 +1077,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
|
|||
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
|
||||
DRM_BRIDGE_OP_EDID |
|
||||
DRM_BRIDGE_OP_HDMI |
|
||||
DRM_BRIDGE_OP_HDMI_AUDIO |
|
||||
DRM_BRIDGE_OP_HPD;
|
||||
hdmi->bridge.of_node = pdev->dev.of_node;
|
||||
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
|
||||
|
|
|
|||
|
|
@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct dw_hdmi *hdmi = bridge->driver_private;
|
||||
|
||||
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
|
||||
return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
|
||||
return drm_bridge_attach(encoder, hdmi->next_bridge,
|
||||
bridge, flags);
|
||||
|
||||
return dw_hdmi_connector_create(hdmi);
|
||||
|
|
|
|||
|
|
@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
|
||||
|
||||
/* Set the encoder type as caller does not know it */
|
||||
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
|
||||
encoder->encoder_type = DRM_MODE_ENCODER_DSI;
|
||||
|
||||
/* Attach the panel-bridge to the dsi bridge */
|
||||
return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
|
||||
return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
|
||||
flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
|
||||
|
||||
/* Set the encoder type as caller does not know it */
|
||||
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
|
||||
encoder->encoder_type = DRM_MODE_ENCODER_DSI;
|
||||
|
||||
/* Attach the panel-bridge to the dsi bridge */
|
||||
return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
|
||||
return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
|
||||
flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int tc358762_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tc358762 *ctx = bridge_to_tc358762(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
|
||||
return drm_bridge_attach(encoder, ctx->panel_bridge,
|
||||
bridge, flags);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int tc358764_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tc358764 *ctx = bridge_to_tc358764(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
|
||||
return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
|
||||
}
|
||||
|
||||
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
|
||||
|
|
|
|||
|
|
@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
|
|||
};
|
||||
|
||||
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tc_data *tc = bridge_to_tc(bridge);
|
||||
|
|
@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
|
||||
|
|
|
|||
|
|
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
 };

 static int tc358768_bridge_attach(struct drm_bridge *bridge,
+				  struct drm_encoder *encoder,
				  enum drm_bridge_attach_flags flags)
 {
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);

@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
		return -ENOTSUPP;
	}

-	return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+	return drm_bridge_attach(encoder, priv->output.bridge, bridge,
				 flags);
 }

@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
	return MODE_OK;
 }

-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+					    struct drm_atomic_state *state)
 {
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	int ret;

@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
		dev_warn(priv->dev, "Software disable failed: %d\n", ret);
 }

-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+						 struct drm_atomic_state *state)
 {
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);

@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
	return (u32)div_u64(m, n);
 }

-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+					       struct drm_atomic_state *state)
 {
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	struct mipi_dsi_device *dsi_dev = priv->output.dev;
	unsigned long mode_flags = dsi_dev->mode_flags;
	u32 val, val2, lptxcnt, hact, data_type;
	s32 raw_val;
+	struct drm_crtc_state *crtc_state;
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
	const struct drm_display_mode *mode;
	u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
	u32 dsiclk, hsbyteclk;

@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
		return;
	}

-	mode = &bridge->encoder->crtc->state->adjusted_mode;
+	connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+	conn_state = drm_atomic_get_new_connector_state(state, connector);
+	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+	mode = &crtc_state->adjusted_mode;
	ret = tc358768_setup_pll(priv, mode);
	if (ret) {
		dev_err(dev, "PLL setup failed: %d\n", ret);

@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
	tc358768_write(priv, TC358768_DSI_CONFW, val);

	ret = tc358768_clear_error(priv);
-	if (ret) {
+	if (ret)
		dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
-		tc358768_bridge_disable(bridge);
-		tc358768_bridge_post_disable(bridge);
-	}
 }

-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+					   struct drm_atomic_state *state)
 {
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	int ret;

@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));

	ret = tc358768_clear_error(priv);
-	if (ret) {
+	if (ret)
		dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
-		tc358768_bridge_disable(bridge);
-		tc358768_bridge_post_disable(bridge);
-	}
 }

 #define MAX_INPUT_SEL_FORMATS	1

@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
	.attach = tc358768_bridge_attach,
	.mode_valid = tc358768_bridge_mode_valid,
	.mode_fixup = tc358768_mode_fixup,
-	.pre_enable = tc358768_bridge_pre_enable,
-	.enable = tc358768_bridge_enable,
-	.disable = tc358768_bridge_disable,
-	.post_disable = tc358768_bridge_post_disable,
+	.atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+	.atomic_enable = tc358768_bridge_atomic_enable,
+	.atomic_disable = tc358768_bridge_atomic_disable,
+	.atomic_post_disable = tc358768_bridge_atomic_post_disable,

	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
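Since the atomic bridge hooks receive the drm_atomic_state instead of relying on bridge->encoder->crtc->state, the converted tc358768 code walks from the encoder to the new connector, connector state and CRTC state to find the adjusted mode. That lookup, isolated into a small helper for clarity (the ti-sn65dsi86 hunks later in this diff add essentially the same thing as get_new_adjusted_display_mode()):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

/* Sketch of the state lookup used by the atomic_pre_enable/atomic_enable
 * conversions above; every call here appears verbatim in the hunks, only
 * the helper name is invented. */
static const struct drm_display_mode *
my_bridge_get_adjusted_mode(struct drm_bridge *bridge,
			    struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	return &crtc_state->adjusted_mode;
}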
@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
|
|||
return container_of(b, struct tc_data, bridge);
|
||||
}
|
||||
|
||||
static void tc_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct tc_data *tc = bridge_to_tc(bridge);
|
||||
struct device *dev = &tc->dsi->dev;
|
||||
|
|
@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
|
|||
usleep_range(10, 20);
|
||||
}
|
||||
|
||||
static void tc_bridge_post_disable(struct drm_bridge *bridge)
|
||||
static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct tc_data *tc = bridge_to_tc(bridge);
|
||||
struct device *dev = &tc->dsi->dev;
|
||||
|
|
@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
|
|||
ret, addr);
|
||||
}
|
||||
|
||||
/* helper function to access bus_formats */
|
||||
static struct drm_connector *get_connector(struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_connector *connector;
|
||||
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
|
||||
if (connector->encoder == encoder)
|
||||
return connector;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void tc_bridge_enable(struct drm_bridge *bridge)
|
||||
static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct tc_data *tc = bridge_to_tc(bridge);
|
||||
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
|
||||
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
|
||||
u32 val = 0;
|
||||
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
|
||||
struct drm_display_mode *mode;
|
||||
struct drm_connector *connector = get_connector(bridge->encoder);
|
||||
|
||||
mode = &bridge->encoder->crtc->state->adjusted_mode;
|
||||
struct drm_connector *connector =
|
||||
drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
|
||||
struct drm_connector_state *conn_state =
|
||||
drm_atomic_get_new_connector_state(state, connector);
|
||||
struct drm_crtc_state *crtc_state =
|
||||
drm_atomic_get_new_crtc_state(state, conn_state->crtc);
|
||||
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
|
||||
|
||||
hback_porch = mode->htotal - mode->hsync_end;
|
||||
hsync_len = mode->hsync_end - mode->hsync_start;
|
||||
|
|
@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
|
|||
}
|
||||
|
||||
static int tc_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tc_data *tc = bridge_to_tc(bridge);
|
||||
|
||||
/* Attach the panel-bridge to the dsi bridge */
|
||||
return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
|
||||
return drm_bridge_attach(encoder, tc->panel_bridge,
|
||||
&tc->bridge, flags);
|
||||
}
|
||||
|
||||
static const struct drm_bridge_funcs tc_bridge_funcs = {
|
||||
.attach = tc_bridge_attach,
|
||||
.pre_enable = tc_bridge_pre_enable,
|
||||
.enable = tc_bridge_enable,
|
||||
.atomic_pre_enable = tc_bridge_atomic_pre_enable,
|
||||
.atomic_enable = tc_bridge_atomic_enable,
|
||||
.mode_valid = tc_mode_valid,
|
||||
.post_disable = tc_bridge_post_disable,
|
||||
.atomic_post_disable = tc_bridge_atomic_post_disable,
|
||||
.atomic_reset = drm_atomic_helper_bridge_reset,
|
||||
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
|
||||
};
|
||||
|
||||
static int tc_attach_host(struct tc_data *tc)
|
||||
|
|
|
|||
|
|
@ -1365,6 +1365,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
|
|||
/* DRM bridge functions */
|
||||
|
||||
static int tda998x_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
|
||||
|
|
|
|||
|
|
@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int thc63_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct thc63_dev *thc63 = to_thc63(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
|
||||
return drm_bridge_attach(encoder, thc63->next, bridge, flags);
|
||||
}
|
||||
|
||||
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
|
||||
|
|
|
|||
|
|
@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
|
|||
drm_mode_copy(&dlpc->mode, adjusted_mode);
|
||||
}
|
||||
|
||||
static int dlpc_attach(struct drm_bridge *bridge,
|
||||
static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct dlpc *dlpc = bridge_to_dlpc(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
|
||||
return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
|
||||
}
|
||||
|
||||
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
|
||||
|
|
|
|||
|
|
@@ -40,7 +40,7 @@

 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
-#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
+#include <drm/drm_bridge_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_print.h>

@@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
 }

 static int sn65dsi83_attach(struct drm_bridge *bridge,
+			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
 {
	struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);

-	return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+	return drm_bridge_attach(encoder, ctx->panel_bridge,
				 &ctx->bridge, flags);
 }

@@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)

 static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
 {
-	struct drm_device *dev = sn65dsi83->bridge.dev;
	struct drm_modeset_acquire_ctx ctx;
	int err;

@@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
	 * Keep the lock during the whole operation to be atomic.
	 */

-	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
-
-	if (!sn65dsi83->bridge.encoder->crtc) {
-		/*
-		 * No CRTC attached -> No CRTC active outputs to reset
-		 * This can happen when the SN65DSI83 is reset. Simply do
-		 * nothing without returning any errors.
-		 */
-		err = 0;
-		goto end;
-	}
+	drm_modeset_acquire_init(&ctx, 0);

	dev_warn(sn65dsi83->dev, "reset the pipe\n");

-	err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+retry:
+	err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+	if (err == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}

-end:
-	DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);

-	return err;
+	return 0;
 }

 static void sn65dsi83_reset_work(struct work_struct *ws)

@@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client)
	struct sn65dsi83 *ctx;
	int ret;

-	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
+	ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);

	ctx->dev = dev;
	INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);

@@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client)
	dev_set_drvdata(dev, ctx);
	i2c_set_clientdata(client, ctx);

-	ctx->bridge.funcs = &sn65dsi83_funcs;
	ctx->bridge.of_node = dev->of_node;
	ctx->bridge.pre_enable_prev_first = true;
	ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
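The sn65dsi83 reset path above switches to the new drm_bridge_helper_reset_crtc() helper: instead of DRM_MODESET_LOCK_ALL_BEGIN/END it owns a plain acquire context and retries on -EDEADLK. The same pattern reduced to a stand-alone sketch (only the wrapper name is invented; the helper, header and locking calls are taken from the hunk above):

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_helper.h>
#include <drm/drm_modeset_lock.h>

static int my_bridge_reset_pipe(struct drm_bridge *bridge)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Resets the CRTC currently feeding this bridge, taking the modeset
	 * locks through @ctx; back off and retry on lock contention. */
	err = drm_bridge_helper_reset_crtc(bridge, &ctx);
	if (err == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return err;
}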
@ -35,6 +35,7 @@
|
|||
#include <drm/drm_print.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
|
||||
#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
|
||||
#define SN_DEVICE_REV_REG 0x08
|
||||
#define SN_DPPLL_SRC_REG 0x0A
|
||||
#define DPPLL_CLK_SRC_DSICLK BIT(0)
|
||||
|
|
@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
|
|||
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
|
||||
}
|
||||
|
||||
static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
|
||||
static struct drm_display_mode *
|
||||
get_new_adjusted_display_mode(struct drm_bridge *bridge,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_connector *connector =
|
||||
drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
|
||||
struct drm_connector_state *conn_state =
|
||||
drm_atomic_get_new_connector_state(state, connector);
|
||||
struct drm_crtc_state *crtc_state =
|
||||
drm_atomic_get_new_crtc_state(state, conn_state->crtc);
|
||||
|
||||
return &crtc_state->adjusted_mode;
|
||||
}
|
||||
|
||||
static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
u32 bit_rate_khz, clk_freq_khz;
|
||||
struct drm_display_mode *mode =
|
||||
&pdata->bridge.encoder->crtc->state->adjusted_mode;
|
||||
get_new_adjusted_display_mode(&pdata->bridge, state);
|
||||
|
||||
bit_rate_khz = mode->clock *
|
||||
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
|
||||
|
|
@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
|
|||
460800000,
|
||||
};
|
||||
|
||||
static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
|
||||
static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
int i;
|
||||
u32 refclk_rate;
|
||||
|
|
@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
|
|||
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
|
||||
clk_prepare_enable(pdata->refclk);
|
||||
} else {
|
||||
refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
|
||||
refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
|
||||
refclk_lut = ti_sn_bridge_dsiclk_lut;
|
||||
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
|
||||
}
|
||||
|
|
@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
|
|||
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
|
||||
}
|
||||
|
||||
static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
|
||||
static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
mutex_lock(&pdata->comms_mutex);
|
||||
|
||||
/* configure bridge ref_clk */
|
||||
ti_sn_bridge_set_refclk_freq(pdata);
|
||||
ti_sn_bridge_set_refclk_freq(pdata, state);
|
||||
|
||||
/*
|
||||
* HPD on this bridge chip is a bit useless. This is an eDP bridge
|
||||
|
|
@ -376,7 +394,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
|
|||
* clock so reading early doesn't work.
|
||||
*/
|
||||
if (pdata->refclk)
|
||||
ti_sn65dsi86_enable_comms(pdata);
|
||||
ti_sn65dsi86_enable_comms(pdata, NULL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -423,36 +441,8 @@ static int status_show(struct seq_file *s, void *data)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SHOW_ATTRIBUTE(status);
|
||||
|
||||
static void ti_sn65dsi86_debugfs_remove(void *data)
|
||||
{
|
||||
debugfs_remove_recursive(data);
|
||||
}
|
||||
|
||||
static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
|
||||
{
|
||||
struct device *dev = pdata->dev;
|
||||
struct dentry *debugfs;
|
||||
int ret;
|
||||
|
||||
debugfs = debugfs_create_dir(dev_name(dev), NULL);
|
||||
|
||||
/*
|
||||
* We might get an error back if debugfs wasn't enabled in the kernel
|
||||
* so let's just silently return upon failure.
|
||||
*/
|
||||
if (IS_ERR_OR_NULL(debugfs))
|
||||
return;
|
||||
|
||||
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
|
||||
}
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
* Auxiliary Devices (*not* AUX)
|
||||
*/
|
||||
|
|
@ -732,6 +722,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
|
|||
}
|
||||
|
||||
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
|
||||
|
|
@ -748,7 +739,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
|
|||
* Attach the next bridge.
|
||||
* We never want the next bridge to *also* create a connector.
|
||||
*/
|
||||
ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
|
||||
ret = drm_bridge_attach(encoder, pdata->next_bridge,
|
||||
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret < 0)
|
||||
goto err_initted_aux;
|
||||
|
|
@ -821,12 +812,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
|
|||
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
|
||||
}
|
||||
|
||||
static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
|
||||
static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
unsigned int bit_rate_mhz, clk_freq_mhz;
|
||||
unsigned int val;
|
||||
struct drm_display_mode *mode =
|
||||
&pdata->bridge.encoder->crtc->state->adjusted_mode;
|
||||
get_new_adjusted_display_mode(&pdata->bridge, state);
|
||||
|
||||
/* set DSIA clk frequency */
|
||||
bit_rate_mhz = (mode->clock / 1000) *
|
||||
|
|
@ -856,12 +848,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
|
|||
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
|
||||
};
|
||||
|
||||
static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
|
||||
static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
|
||||
struct drm_atomic_state *state,
|
||||
unsigned int bpp)
|
||||
{
|
||||
unsigned int bit_rate_khz, dp_rate_mhz;
|
||||
unsigned int i;
|
||||
struct drm_display_mode *mode =
|
||||
&pdata->bridge.encoder->crtc->state->adjusted_mode;
|
||||
get_new_adjusted_display_mode(&pdata->bridge, state);
|
||||
|
||||
/* Calculate minimum bit rate based on our pixel clock. */
|
||||
bit_rate_khz = mode->clock * bpp;
|
||||
|
|
@ -960,10 +954,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
|
|||
return valid_rates;
|
||||
}
|
||||
|
||||
static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
|
||||
static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_display_mode *mode =
|
||||
&pdata->bridge.encoder->crtc->state->adjusted_mode;
|
||||
get_new_adjusted_display_mode(&pdata->bridge, state);
|
||||
u8 hsync_polarity = 0, vsync_polarity = 0;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
|
||||
|
|
@ -1105,7 +1100,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
|
|||
pdata->ln_polrs << LN_POLRS_OFFSET);
|
||||
|
||||
/* set dsi clk frequency value */
|
||||
ti_sn_bridge_set_dsi_rate(pdata);
|
||||
ti_sn_bridge_set_dsi_rate(pdata, state);
|
||||
|
||||
/*
|
||||
* The SN65DSI86 only supports ASSR Display Authentication method and
|
||||
|
|
@ -1140,7 +1135,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
|
|||
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
|
||||
|
||||
/* Train until we run out of rates */
|
||||
for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
|
||||
for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
|
||||
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
|
||||
dp_rate_idx++) {
|
||||
if (!(valid_rates & BIT(dp_rate_idx)))
|
||||
|
|
@ -1156,7 +1151,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
|
|||
}
|
||||
|
||||
/* config video parameters */
|
||||
ti_sn_bridge_set_video_timings(pdata);
|
||||
ti_sn_bridge_set_video_timings(pdata, state);
|
||||
|
||||
/* enable video stream */
|
||||
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
|
||||
|
|
@ -1171,7 +1166,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
|
|||
pm_runtime_get_sync(pdata->dev);
|
||||
|
||||
if (!pdata->refclk)
|
||||
ti_sn65dsi86_enable_comms(pdata);
|
||||
ti_sn65dsi86_enable_comms(pdata, state);
|
||||
|
||||
/* td7: min 100 us after enable before DSI data */
|
||||
usleep_range(100, 110);
|
||||
|
|
@ -1216,6 +1211,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
|
|||
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
|
||||
}
|
||||
|
||||
static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
|
||||
{
|
||||
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
|
||||
struct dentry *debugfs;
|
||||
|
||||
debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
|
||||
debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
|
||||
}
|
||||
|
||||
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
|
||||
.attach = ti_sn_bridge_attach,
|
||||
.detach = ti_sn_bridge_detach,
|
||||
|
|
@ -1229,6 +1233,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
|
|||
.atomic_reset = drm_atomic_helper_bridge_reset,
|
||||
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
|
||||
.debugfs_init = ti_sn65dsi86_debugfs_init,
|
||||
};
|
||||
|
||||
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
|
||||
|
|
@ -1894,6 +1899,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
|
|||
{
|
||||
struct device *dev = &client->dev;
|
||||
struct ti_sn65dsi86 *pdata;
|
||||
u8 id_buf[8];
|
||||
int ret;
|
||||
|
||||
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
|
||||
|
|
@ -1937,7 +1943,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ti_sn65dsi86_debugfs_init(pdata);
|
||||
pm_runtime_get_sync(dev);
|
||||
ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
if (ret)
|
||||
return dev_err_probe(dev, ret, "failed to read device id\n");
|
||||
|
||||
/* The ID string is stored backwards */
|
||||
if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
|
||||
return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
|
||||
|
||||
/*
|
||||
* Break ourselves up into a collection of aux devices. The only real
|
||||
|
|
|
|||
|
|
@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge,
|
|||
regulator_disable(tdp158->vcc);
|
||||
}
|
||||
|
||||
static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
|
||||
static int tdp158_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tdp158 *tdp158 = bridge->driver_private;
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
|
||||
return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
|
||||
}
|
||||
|
||||
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
|
||||
|
|
|
|||
|
|
@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
|
|||
}
|
||||
|
||||
static int tfp410_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
|
||||
int ret;
|
||||
|
||||
ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
|
||||
ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
|
|||
drm_display_info_set_bus_formats(&dvi->connector.display_info,
|
||||
&dvi->bus_format, 1);
|
||||
|
||||
drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
|
||||
drm_connector_attach_encoder(&dvi->connector, encoder);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
|
|||
}
|
||||
|
||||
static int tpd12s015_attach(struct drm_bridge *bridge,
|
||||
struct drm_encoder *encoder,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
|
||||
|
|
@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
|
|||
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
|
||||
return -EINVAL;
|
||||
|
||||
ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
|
||||
ret = drm_bridge_attach(encoder, tpd->next_bridge,
|
||||
bridge, flags);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
|
|
|||
|
|
@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
|
|||
CONFIG_MTK_CMDQ=y
|
||||
CONFIG_REGULATOR_DA9211=y
|
||||
CONFIG_DRM_ANALOGIX_ANX7625=y
|
||||
CONFIG_PHY_MTK_HDMI=y
|
||||
CONFIG_PHY_MTK_MIPI_DSI=y
|
||||
|
||||
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
|
||||
CONFIG_ARCH_TEGRA=y
|
||||
|
|
|
|||
|
|
@ -98,14 +98,14 @@ done
|
|||
|
||||
make ${KERNEL_IMAGE_NAME}
|
||||
|
||||
mkdir -p /lava-files/
|
||||
mkdir -p /kernel/
|
||||
for image in ${KERNEL_IMAGE_NAME}; do
|
||||
cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
|
||||
cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
|
||||
done
|
||||
|
||||
if [[ -n ${DEVICE_TREES} ]]; then
|
||||
make dtbs
|
||||
cp ${DEVICE_TREES} /lava-files/.
|
||||
cp ${DEVICE_TREES} /kernel/.
|
||||
fi
|
||||
|
||||
make modules
|
||||
|
|
@ -121,11 +121,11 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
|
|||
-d arch/arm64/boot/Image.lzma \
|
||||
-C lzma\
|
||||
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
|
||||
/lava-files/cheza-kernel
|
||||
/kernel/cheza-kernel
|
||||
KERNEL_IMAGE_NAME+=" cheza-kernel"
|
||||
|
||||
# Make a gzipped copy of the Image for db410c.
|
||||
gzip -k /lava-files/Image
|
||||
gzip -k /kernel/Image
|
||||
KERNEL_IMAGE_NAME+=" Image.gz"
|
||||
fi
|
||||
|
||||
|
|
@ -139,7 +139,7 @@ cp -rfv drivers/gpu/drm/ci/* install/.
|
|||
. .gitlab-ci/container/container_post_build.sh
|
||||
|
||||
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
|
||||
xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
|
||||
xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
|
||||
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
|
||||
|
||||
if [[ -n $DEVICE_TREES ]]; then
|
||||
|
|
@ -148,7 +148,7 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
|
|||
|
||||
ls -l "${S3_JWT_FILE}"
|
||||
for f in $FILES_TO_UPLOAD; do
|
||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
|
||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" /kernel/$f \
|
||||
https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
|
||||
done
|
||||
|
||||
|
|
@ -165,7 +165,7 @@ ln -s common artifacts/install/ci-common
|
|||
cp .config artifacts/${CI_JOB_NAME}_config
|
||||
|
||||
for image in ${KERNEL_IMAGE_NAME}; do
|
||||
cp /lava-files/$image artifacts/install/.
|
||||
cp /kernel/$image artifacts/install/.
|
||||
done
|
||||
|
||||
tar -C artifacts -cf artifacts/install.tar install
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ testing:arm32:
|
|||
#
|
||||
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
|
||||
# becoming too big for their bootloaders.
|
||||
ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
|
||||
ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
|
||||
UPLOAD_TO_MINIO: 1
|
||||
MERGE_FRAGMENT: arm.config
|
||||
|
||||
|
|
@ -79,7 +79,7 @@ testing:arm64:
|
|||
#
|
||||
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
|
||||
# becoming too big for their bootloaders.
|
||||
ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
|
||||
ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
|
||||
UPLOAD_TO_MINIO: 1
|
||||
MERGE_FRAGMENT: arm64.config
|
||||
|
||||
|
|
@ -91,7 +91,7 @@ testing:x86_64:
|
|||
#
|
||||
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
|
||||
# becoming too big for their bootloaders.
|
||||
ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
|
||||
ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
|
||||
UPLOAD_TO_MINIO: 1
|
||||
MERGE_FRAGMENT: x86_64.config
|
||||
|
||||
|
|
@ -143,6 +143,10 @@ debian-arm64-release:
|
|||
rules:
|
||||
- when: never
|
||||
|
||||
debian-arm64-ubsan:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian-build-testing:
|
||||
rules:
|
||||
- when: never
|
||||
|
|
@ -183,6 +187,10 @@ debian-testing-msan:
|
|||
rules:
|
||||
- when: never
|
||||
|
||||
debian-testing-ubsan:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian-vulkan:
|
||||
rules:
|
||||
- when: never
|
||||
|
|
|
|||
|
|
@ -24,6 +24,18 @@ alpine/x86_64_build:
|
|||
rules:
|
||||
- when: never
|
||||
|
||||
debian/arm32_test-base:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian/arm32_test-gl:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian/arm32_test-vk:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian/arm64_test-gl:
|
||||
rules:
|
||||
- when: never
|
||||
|
|
@ -32,6 +44,10 @@ debian/arm64_test-vk:
|
|||
rules:
|
||||
- when: never
|
||||
|
||||
debian/baremetal_arm32_test:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian/ppc64el_build:
|
||||
rules:
|
||||
- when: never
|
||||
|
|
@ -40,6 +56,14 @@ debian/s390x_build:
|
|||
rules:
|
||||
- when: never
|
||||
|
||||
debian/x86_32_build:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian/x86_64_test-android:
|
||||
rules:
|
||||
- when: never
|
||||
|
||||
debian/x86_64_test-vk:
|
||||
rules:
|
||||
- when: never
|
||||
|
|
|
|||
|
|
@@ -1,11 +1,11 @@
 variables:
   DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
-  DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
+  DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 82ab58f6c6f94fa80ca7e1615146f08356e3ba69

   UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
   TARGET_BRANCH: drm-next

-  IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
+  IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af

   DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
   DEQP_RUNNER_GIT_TAG: v0.20.0
@ -143,11 +143,11 @@ stages:
|
|||
# Pre-merge pipeline
|
||||
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
# Push to a branch on a fork
|
||||
- if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
|
||||
- if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
|
||||
# nightly pipeline
|
||||
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
|
||||
# pipeline for direct pushes that bypassed the CI
|
||||
- if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
|
||||
- if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
|
||||
|
||||
|
||||
# Rules applied to every job in the pipeline
|
||||
|
|
@ -170,26 +170,15 @@ stages:
|
|||
- !reference [.disable-farm-mr-rules, rules]
|
||||
# Never run immediately after merging, as we just ran everything
|
||||
- !reference [.never-post-merge-rules, rules]
|
||||
# Build everything in merge pipelines, if any files affecting the pipeline
|
||||
# were changed
|
||||
# Build everything in merge pipelines
|
||||
- if: *is-merge-attempt
|
||||
changes: &all_paths
|
||||
- drivers/gpu/drm/ci/**/*
|
||||
when: on_success
|
||||
# Same as above, but for pre-merge pipelines
|
||||
- if: *is-pre-merge
|
||||
changes:
|
||||
*all_paths
|
||||
when: manual
|
||||
# Skip everything for pre-merge and merge pipelines which don't change
|
||||
# anything in the build
|
||||
- if: *is-merge-attempt
|
||||
when: never
|
||||
- if: *is-pre-merge
|
||||
when: never
|
||||
# Build everything after someone bypassed the CI
|
||||
- if: *is-direct-push
|
||||
when: on_success
|
||||
when: manual
|
||||
# Build everything in scheduled pipelines
|
||||
- if: *is-scheduled-pipeline
|
||||
when: on_success
|
||||
|
|
@ -198,6 +187,36 @@ stages:
|
|||
- when: manual
|
||||
|
||||
|
||||
# Repeat of the above but with `when: on_success` replaced with
|
||||
# `when: delayed` + `start_in:`, for build-only jobs.
|
||||
# Note: make sure the branches in this list are the same as in
|
||||
# `.container+build-rules` above.
|
||||
.build-only-delayed-rules:
|
||||
rules:
|
||||
- !reference [.common-rules, rules]
|
||||
# Run when re-enabling a disabled farm, but not when disabling it
|
||||
- !reference [.disable-farm-mr-rules, rules]
|
||||
# Never run immediately after merging, as we just ran everything
|
||||
- !reference [.never-post-merge-rules, rules]
|
||||
# Build everything in merge pipelines
|
||||
- if: *is-merge-attempt
|
||||
when: delayed
|
||||
start_in: &build-delay 5 minutes
|
||||
# Same as above, but for pre-merge pipelines
|
||||
- if: *is-pre-merge
|
||||
when: manual
|
||||
# Build everything after someone bypassed the CI
|
||||
- if: *is-direct-push
|
||||
when: manual
|
||||
# Build everything in scheduled pipelines
|
||||
- if: *is-scheduled-pipeline
|
||||
when: delayed
|
||||
start_in: *build-delay
|
||||
# Allow building everything in fork pipelines, but build nothing unless
|
||||
# manually triggered
|
||||
- when: manual
|
||||
|
||||
|
||||
.ci-deqp-artifacts:
|
||||
artifacts:
|
||||
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
|
||||
|
|
|
|||