From 34c06254ff82a815fdccdfae7517a06c9b768cee Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 5 Nov 2015 00:12:24 -0500 Subject: cgroup: fix cftype->file_offset handling 6f60eade2433 ("cgroup: generalize obtaining the handles of and notifying cgroup files") introduced cftype->file_offset so that the handles for per-css file instances can be recorded. These handles can then be used, for example, to generate file modified notifications. Unfortunately, it made the wrong assumption that files are created once for a given css and removed on its destruction. Due to the dependencies among subsystems, a css may be hidden from userland and then later shown again. This is implemented by removing and re-creating the affected files, so the associated kernfs_node for a given cgroup file may change over time. This incorrect assumption led to the corruption of css->files lists. Reimplement cftype->file_offset handling so that cgroup_file->kn is protected by a lock and updated as files are created and destroyed. This also makes keeping them on a per-cgroup list unnecessary. Signed-off-by: Tejun Heo Reported-by: James Sedgwick Fixes: 6f60eade2433 ("cgroup: generalize obtaining the handles of and notifying cgroup files") Acked-by: Johannes Weiner Acked-by: Zefan Li --- include/linux/cgroup-defs.h | 4 ---- include/linux/cgroup.h | 14 +------------- 2 files changed, 1 insertion(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 60d44b26276d..869fd4a3d28e 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -90,7 +90,6 @@ enum { */ struct cgroup_file { /* do not access any fields from outside cgroup core */ - struct list_head node; /* anchored at css->files */ struct kernfs_node *kn; }; @@ -134,9 +133,6 @@ struct cgroup_subsys_state { */ u64 serial_nr; - /* all cgroup_files associated with this css */ - struct list_head files; - /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 22e3754f89c5..f64083030ad5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_rm_cftypes(struct cftype *cfts); +void cgroup_file_notify(struct cgroup_file *cfile); char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); @@ -516,19 +517,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } -/** - * cgroup_file_notify - generate a file modified event for a cgroup_file - * @cfile: target cgroup_file - * - * @cfile must have been obtained by setting cftype->file_offset. - */ -static inline void cgroup_file_notify(struct cgroup_file *cfile) -{ - /* might not have been created due to one of the CFTYPE selector flags */ - if (cfile->kn) - kernfs_notify(cfile->kn); -} - #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state;
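
Since this listing is limited to include/, the kernel/cgroup.c side of the reimplementation is not shown. As a rough sketch only (the lock name is an assumption based on the description above, not taken from this listing), the out-of-line cgroup_file_notify() serializes cgroup_file->kn access with a dedicated spinlock:

	/* sketch: kn is updated under the same lock as files are created/removed */
	static DEFINE_SPINLOCK(cgroup_file_kn_lock);

	void cgroup_file_notify(struct cgroup_file *cfile)
	{
		unsigned long flags;

		spin_lock_irqsave(&cgroup_file_kn_lock, flags);
		/* may be NULL while the file is hidden or not yet created */
		if (cfile->kn)
			kernfs_notify(cfile->kn);
		spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
	}
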
-- cgit v1.2.3 From ec26d9e9382f432225d76b3ff1c7f72e21192f7f Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Fri, 20 Nov 2015 16:15:30 +0800 Subject: drm/dsi: Add a helper to get bits per pixel of MIPI DSI pixel format Add a helper that can be used to obtain the number of bits per pixel corresponding to a given MIPI DSI pixel format. This is useful in bandwidth calculations, for example. Signed-off-by: Liu Ying Acked-by: Thierry Reding Signed-off-by: Chris Zhong [treding@nvidia.com: add kerneldoc comment and commit message] Signed-off-by: Thierry Reding --- include/drm/drm_mipi_dsi.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'include') diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index f1d8d0dbb4f1..4396c9f22af5 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -163,6 +163,31 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) return container_of(dev, struct mipi_dsi_device, dev); } +/** + * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any + * given pixel format defined by the MIPI DSI + * specification + * @fmt: MIPI DSI pixel format + * + * Returns: The number of bits per pixel of the given pixel format. + */ +static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + case MIPI_DSI_FMT_RGB666: + return 24; + + case MIPI_DSI_FMT_RGB666_PACKED: + return 18; + + case MIPI_DSI_FMT_RGB565: + return 16; + } + + return -EINVAL; +} + struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); int mipi_dsi_attach(struct mipi_dsi_device *dsi); int mipi_dsi_detach(struct mipi_dsi_device *dsi);
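
A typical bandwidth calculation using the new helper might look like the following (hypothetical driver code, not part of the patch; example_dsi_link_rate() is an assumed name, while the format and lanes fields of struct mipi_dsi_device are real):

	/* sketch: per-lane HS bit rate needed to carry a given pixel clock */
	static unsigned long example_dsi_link_rate(struct mipi_dsi_device *dsi,
						   unsigned long pixclk_hz)
	{
		int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

		if (bpp < 0)
			return 0;	/* unrecognized pixel format */

		/* total bandwidth split across the configured data lanes
		 * (overflow handling omitted for brevity)
		 */
		return pixclk_hz * bpp / dsi->lanes;
	}
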
-- cgit v1.2.3 From 614e4c4ebc75517295bccd29b20ddbc5b52af6fc Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 12 Nov 2015 11:00:04 +0100 Subject: perf/core: Robustify the perf_cgroup_from_task() RCU checks This patch reinforces the lockdep checks performed by perf_cgroup_from_task() by passing the perf_event_context whenever possible. It is okay to not hold the RCU read lock when we know we hold the ctx->lock. This patch makes sure this property holds. In some functions, such as perf_cgroup_sched_in(), we do not pass the context because we are sure we are holding the RCU read lock. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vince Weaver Cc: edumazet@google.com Link: http://lkml.kernel.org/r/1447322404-10920-3-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_cqm.c | 2 +- include/linux/perf_event.h | 6 ++++-- kernel/events/core.c | 20 +++++++++++++------- 3 files changed, 18 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c index 377e8f8ed391..a316ca96f1b6 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c @@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b) static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) { if (event->attach_state & PERF_ATTACH_TASK) - return perf_cgroup_from_task(event->hw.target); + return perf_cgroup_from_task(event->hw.target, event->ctx); return event->cgrp; } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d841d33bcdc9..f9828a48f16a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -697,9 +697,11 @@ struct perf_cgroup { * if there is no cgroup event for the current CPU context. */ static inline struct perf_cgroup * -perf_cgroup_from_task(struct task_struct *task) +perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) { - return container_of(task_css(task, perf_event_cgrp_id), + return container_of(task_css_check(task, perf_event_cgrp_id, + ctx ? lockdep_is_held(&ctx->lock) + : true), struct perf_cgroup, css); } #endif /* CONFIG_CGROUP_PERF */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 60e71ca42c22..1ac857aff7b0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) if (!is_cgroup_event(event)) return; - cgrp = perf_cgroup_from_task(current); + cgrp = perf_cgroup_from_task(current, event->ctx); /* * Do not update time when cgroup is not active */ @@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task, if (!task || !ctx->nr_cgroups) return; - cgrp = perf_cgroup_from_task(task); + cgrp = perf_cgroup_from_task(task, ctx); info = this_cpu_ptr(cgrp->info); info->timestamp = ctx->timestamp; } @@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode) * set cgrp before ctxsw in to allow * event_filter_match() to not have to pass * task around + * we pass the cpuctx->ctx to perf_cgroup_from_task() + * because cgroup events are only per-cpu */ - cpuctx->cgrp = perf_cgroup_from_task(task); + cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx); cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); } perf_pmu_enable(cpuctx->ctx.pmu); @@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task, rcu_read_lock(); /* * we come here when we know perf_cgroup_events > 0 + * we do not need to pass the ctx here because we know + * we are holding the rcu lock */ - cgrp1 = perf_cgroup_from_task(task); + cgrp1 = perf_cgroup_from_task(task, NULL); /* * next is NULL when called from perf_event_enable_on_exec() * that will systematically cause a cgroup_switch() */ if (next) - cgrp2 = perf_cgroup_from_task(next); + cgrp2 = perf_cgroup_from_task(next, NULL); /* * only schedule out current cgroup events if we know @@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev, rcu_read_lock(); /* * we come here
when we know perf_cgroup_events > 0 + * we do not need to pass the ctx here because we know + * we are holding the rcu lock */ - cgrp1 = perf_cgroup_from_task(task); + cgrp1 = perf_cgroup_from_task(task, NULL); /* prev can never be NULL */ - cgrp2 = perf_cgroup_from_task(prev); + cgrp2 = perf_cgroup_from_task(prev, NULL); /* * only need to schedule in cgroup events if we are changing -- cgit v1.2.3 From 90eec103b96e30401c0b846045bf8a1c7159b6da Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 16 Nov 2015 11:08:45 +0100 Subject: treewide: Remove old email address There were still a number of references to my old Red Hat email address in the kernel source. Remove these while keeping the Red Hat copyright notices intact. Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Signed-off-by: Ingo Molnar --- arch/blackfin/kernel/perf_event.c | 2 +- arch/sh/kernel/perf_event.c | 2 +- arch/sparc/kernel/perf_event.c | 2 +- arch/tile/kernel/perf_event.c | 2 +- arch/x86/kernel/cpu/perf_event.c | 2 +- arch/x86/kernel/cpu/perf_event.h | 2 +- arch/x86/kernel/irq_work.c | 2 +- include/asm-generic/tlb.h | 2 +- include/linux/jump_label.h | 2 +- include/linux/lockdep.h | 2 +- include/linux/proportions.h | 2 +- include/linux/uprobes.h | 2 +- kernel/events/callchain.c | 2 +- kernel/events/core.c | 2 +- kernel/events/ring_buffer.c | 2 +- kernel/events/uprobes.c | 2 +- kernel/irq_work.c | 2 +- kernel/jump_label.c | 2 +- kernel/locking/lockdep.c | 2 +- kernel/locking/lockdep_proc.c | 2 +- kernel/sched/clock.c | 2 +- kernel/sched/fair.c | 2 +- kernel/trace/trace_event_perf.c | 2 +- lib/btree.c | 2 +- lib/proportions.c | 2 +- mm/page-writeback.c | 2 +- 26 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c index 1e9c8b0bf486..170d786807c4 100644 --- a/arch/blackfin/kernel/perf_event.c +++ b/arch/blackfin/kernel/perf_event.c @@ -14,7 +14,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright (C) 2009 Intel Corporation, * * ppc: diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 7cfd7f153966..4dca18347ee9 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -10,7 +10,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright (C) 2009 Intel Corporation, * * ppc: diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index b0da5aedb336..3091267c5cc3 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -9,7 +9,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra */ #include diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c index 
bb509cee3b59..8767060d70fb 100644 --- a/arch/tile/kernel/perf_event.c +++ b/arch/tile/kernel/perf_event.c @@ -21,7 +21,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright (C) 2009 Intel Corporation, * Copyright (C) 2009 Google, Inc., Stephane Eranian */ diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 4562cf070c27..2bf79d7c97df 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -5,7 +5,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright (C) 2009 Intel Corporation, * Copyright (C) 2009 Google, Inc., Stephane Eranian * diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index ffa7a92025e1..ab18b8a91583 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -5,7 +5,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright (C) 2009 Intel Corporation, * Copyright (C) 2009 Google, Inc., Stephane Eranian * diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c index dc5fa6a1e8d6..3512ba607361 100644 --- a/arch/x86/kernel/irq_work.c +++ b/arch/x86/kernel/irq_work.c @@ -1,7 +1,7 @@ /* * x86 specific code for irq_work * - * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra */ #include diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index db284bff29dc..9dbb739cafa0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -5,7 +5,7 @@ * Copyright 2001 Red Hat, Inc. * Based on code from mm/memory.c Copyright Linus Torvalds and others. * - * Copyright 2011 Red Hat, Inc., Peter Zijlstra + * Copyright 2011 Red Hat, Inc., Peter Zijlstra * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 8dde55974f18..0536524bb9eb 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -5,7 +5,7 @@ * Jump label support * * Copyright (C) 2009-2012 Jason Baron - * Copyright (C) 2011-2012 Peter Zijlstra + * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra * * DEPRECATED API: * diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 70400dc7660f..c57e424d914b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -2,7 +2,7 @@ * Runtime locking correctness validator * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * see Documentation/locking/lockdep-design.txt for more details. 
*/ diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 5440f64d2942..21221338ad18 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h @@ -1,7 +1,7 @@ /* * FLoating proportions * - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * This file contains the public data structure and API definitions. */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 0bdc72f36905..4a29c75b146e 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -21,7 +21,7 @@ * Authors: * Srikar Dronamraju * Jim Keniston - * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra */ #include diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index d659487254d5..9c418002b8c1 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Thomas Gleixner * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. * * For licensing details see kernel-base/COPYING diff --git a/kernel/events/core.c b/kernel/events/core.c index 1ac857aff7b0..5854fcf7f05a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Thomas Gleixner * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. * * For licensing details see kernel-base/COPYING diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index b5d1ea79c595..adfdc0536117 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Thomas Gleixner * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. * * For licensing details see kernel-base/COPYING diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 4e5e9798aa0c..7dad84913abf 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -19,7 +19,7 @@ * Authors: * Srikar Dronamraju * Jim Keniston - * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra */ #include diff --git a/kernel/irq_work.c b/kernel/irq_work.c index cbf9fb899d92..bcf107ce0854 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra * * Provides a framework for enqueueing and running callbacks from hardirq * context. The enqueueing is NMI-safe. 
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index f7dd15d537f9..05254eeb4b4e 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -2,7 +2,7 @@ * jump label support * * Copyright (C) 2009 Jason Baron - * Copyright (C) 2011 Peter Zijlstra + * Copyright (C) 2011 Peter Zijlstra * */ #include diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index deae3907ac1e..60ace56618f6 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -6,7 +6,7 @@ * Started by Ingo Molnar: * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * this code maps all the lock dependencies as they occur in a live kernel * and will warn about the following classes of locking bugs: diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index d83d798bef95..dbb61a302548 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -6,7 +6,7 @@ * Started by Ingo Molnar: * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * Code for /proc/lockdep and /proc/lockdep_stats: * diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index c0a205101c23..caf4041f5b0a 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -1,7 +1,7 @@ /* * sched_clock for unstable cpu clocks * - * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra * * Updates and enhancements: * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f04fda8f669c..90e26b11deaa 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -17,7 +17,7 @@ * Copyright (C) 2007, Thomas Gleixner * * Adaptive scheduling granularity, math enhancements by Peter Zijlstra - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ #include diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index abfc903e741e..cc9f7a9319be 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -1,7 +1,7 @@ /* * trace event based perf event profiling/tracing * - * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra + * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra * Copyright (C) 2009-2010 Frederic Weisbecker */ diff --git a/lib/btree.c b/lib/btree.c index 4264871ea1a0..f93a945274af 100644 --- a/lib/btree.c +++ b/lib/btree.c @@ -5,7 +5,7 @@ * * Copyright (c) 2007-2008 Joern Engel * Bits and pieces stolen from Peter Zijlstra's code, which is - * Copyright 2007, Red Hat Inc. Peter Zijlstra + * Copyright 2007, Red Hat Inc. Peter Zijlstra * GPLv2 * * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch diff --git a/lib/proportions.c b/lib/proportions.c index 6f724298f67a..efa54f259ea9 100644 --- a/lib/proportions.c +++ b/lib/proportions.c @@ -1,7 +1,7 @@ /* * Floating proportions * - * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * Description: * diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 3e4d65445fa7..d15d88c8efa1 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2,7 +2,7 @@ * mm/page-writeback.c * * Copyright (C) 2002, Linus Torvalds. 
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * Contains functions related to writing back dirty pages at the * address_space level. -- cgit v1.2.3 From 6e8c9e33760d0d0f8450f5ac55d10b085e07e136 Mon Sep 17 00:00:00 2001 From: Werner Johansson Date: Fri, 30 Oct 2015 17:38:26 -0700 Subject: drm/dsi: Add Turn On/Shutdown Peripheral command helpers The MIPI_DSI_TURN_ON_PERIPHERAL and MIPI_DSI_SHUTDOWN_PERIPHERAL packets are required for some panels, for example the Panasonic VVX10F034N00. Signed-off-by: Werner Johansson Signed-off-by: Bjorn Andersson Signed-off-by: Thierry Reding --- drivers/gpu/drm/drm_mipi_dsi.c | 38 ++++++++++++++++++++++++++++++++++++++ include/drm/drm_mipi_dsi.h | 2 ++ 2 files changed, 40 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 2d5ca8eec13a..6e6a9c58d404 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -365,6 +365,44 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, } EXPORT_SYMBOL(mipi_dsi_create_packet); +/** + * mipi_dsi_shutdown_peripheral() - sends a Shutdown Peripheral command + * @dsi: DSI peripheral device + * + * Return: 0 on success or a negative error code on failure. + */ +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi) +{ + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_SHUTDOWN_PERIPHERAL, + .tx_buf = (u8 [2]) { 0, 0 }, + .tx_len = 2, + }; + + return mipi_dsi_device_transfer(dsi, &msg); +} +EXPORT_SYMBOL(mipi_dsi_shutdown_peripheral); + +/** + * mipi_dsi_turn_on_peripheral() - sends a Turn On Peripheral command + * @dsi: DSI peripheral device + * + * Return: 0 on success or a negative error code on failure. + */ +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi) +{ + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_TURN_ON_PERIPHERAL, + .tx_buf = (u8 [2]) { 0, 0 }, + .tx_len = 2, + }; + + return mipi_dsi_device_transfer(dsi, &msg); +} +EXPORT_SYMBOL(mipi_dsi_turn_on_peripheral); + /* * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the * the payload in a long packet transmitted from the peripheral back to the diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 4396c9f22af5..1b3b1f8c8cdf 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -191,6 +191,8 @@ static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); int mipi_dsi_attach(struct mipi_dsi_device *dsi); int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, u16 value); -- cgit v1.2.3 From 1f7dd3e5a6e4f093017fff12232572ee1aa4639b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 3 Dec 2015 10:18:21 -0500 Subject: cgroup: fix handling of multi-destination migration from subtree_control enabling Consider the following v2 hierarchy. P0 (+memory) --- P1 (-memory) --- A \- B P0 has memory enabled in its subtree_control while P1 doesn't. If both A and B contain processes, they would belong to the memory css of P1. 
Now if memory is enabled on P1's subtree_control, memory csses should be created on both A and B and A's processes should be moved to the former and B's processes to the latter. IOW, enabling controllers can cause atomic migrations into different csses. The core cgroup migration logic has been updated accordingly but the controller migration methods haven't and still assume that all tasks migrate to a single target css; furthermore, the methods were fed the css in which subtree_control was updated, which is the parent of the target csses. The pids controller depends on the migration methods to move charges, and this made the controller attribute charges to the wrong csses, often triggering the following warning by driving a counter negative. WARNING: CPU: 1 PID: 1 at kernel/cgroup_pids.c:97 pids_cancel.constprop.6+0x31/0x40() Modules linked in: CPU: 1 PID: 1 Comm: systemd Not tainted 4.4.0-rc1+ #29 ... ffffffff81f65382 ffff88007c043b90 ffffffff81551ffc 0000000000000000 ffff88007c043bc8 ffffffff810de202 ffff88007a752000 ffff88007a29ab00 ffff88007c043c80 ffff88007a1d8400 0000000000000001 ffff88007c043bd8 Call Trace: [] dump_stack+0x4e/0x82 [] warn_slowpath_common+0x82/0xc0 [] warn_slowpath_null+0x1a/0x20 [] pids_cancel.constprop.6+0x31/0x40 [] pids_can_attach+0x6d/0xf0 [] cgroup_taskset_migrate+0x6c/0x330 [] cgroup_migrate+0xf5/0x190 [] cgroup_attach_task+0x176/0x200 [] __cgroup_procs_write+0x2ad/0x460 [] cgroup_procs_write+0x14/0x20 [] cgroup_file_write+0x35/0x1c0 [] kernfs_fop_write+0x141/0x190 [] __vfs_write+0x28/0xe0 [] vfs_write+0xac/0x1a0 [] SyS_write+0x49/0xb0 [] entry_SYSCALL_64_fastpath+0x12/0x76 This patch fixes the bug by removing the @css parameter from the three migration methods, ->can_attach(), ->cancel_attach() and ->attach(), and updating the cgroup_taskset iteration helpers to also return the destination css in addition to the task being migrated. All controllers are updated accordingly. * Controllers which don't care whether there is a single target css or multiple can be converted trivially. cpu, io, freezer, perf, netclassid and netprio fall in this category. * cpuset's current implementation assumes that there's a single source and destination and thus doesn't support the v2 hierarchy to begin with. The only change made by this patchset is how that single destination css is obtained. * The memory migration path already doesn't do anything on v2. How the single destination css is obtained is updated and the prep stage of mem_cgroup_can_attach() is reordered to accommodate the change. * pids is the only controller which was affected by this bug. It now correctly handles multi-destination migrations and no longer causes counter underflow from incorrect accounting.
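
Distilled into a minimal sketch (illustrative only; struct example_cgroup and css_example() are assumed names, not from the patch), every converted method now takes the shape below, with the destination css produced per task by the iterator instead of being passed in once:

	static void example_attach(struct cgroup_taskset *tset)
	{
		struct task_struct *task;
		struct cgroup_subsys_state *dst_css;

		cgroup_taskset_for_each(task, dst_css, tset) {
			/* on the v2 hierarchy, dst_css may differ per task */
			struct example_cgroup *ex = css_example(dst_css);

			/* ... move or charge @task against @ex ... */
		}
	}
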
Signed-off-by: Tejun Heo Reported-and-tested-by: Daniel Wagner Cc: Aleksa Sarai --- block/blk-cgroup.c | 6 +++--- include/linux/cgroup-defs.h | 9 +++------ include/linux/cgroup.h | 33 +++++++++++++++++++++----------- kernel/cgroup.c | 43 +++++++++++++++++++++++++++++++++--------- kernel/cgroup_freezer.c | 6 +++--- kernel/cgroup_pids.c | 16 ++++++++-------- kernel/cpuset.c | 33 ++++++++++++++++++++------------ kernel/events/core.c | 6 +++--- kernel/sched/core.c | 12 ++++++------ mm/memcontrol.c | 45 ++++++++++++++++++++++---------------------- net/core/netclassid_cgroup.c | 11 ++++++----- net/core/netprio_cgroup.c | 9 +++++---- 12 files changed, 137 insertions(+), 92 deletions(-) (limited to 'include') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5bcdfc10c23a..5a37188b559f 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q) * of the main cic data structures. For now we allow a task to change * its cgroup only if it's the only owner of its ioc. */ -static int blkcg_can_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static int blkcg_can_attach(struct cgroup_taskset *tset) { struct task_struct *task; + struct cgroup_subsys_state *dst_css; struct io_context *ioc; int ret = 0; /* task_lock() is needed to avoid races with exit_io_context() */ - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, dst_css, tset) { task_lock(task); ioc = task->io_context; if (ioc && atomic_read(&ioc->nr_tasks) > 1) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 869fd4a3d28e..06b77f9dd3f2 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -422,12 +422,9 @@ struct cgroup_subsys { void (*css_reset)(struct cgroup_subsys_state *css); void (*css_e_css_changed)(struct cgroup_subsys_state *css); - int (*can_attach)(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset); - void (*cancel_attach)(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset); - void (*attach)(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset); + int (*can_attach)(struct cgroup_taskset *tset); + void (*cancel_attach)(struct cgroup_taskset *tset); + void (*attach)(struct cgroup_taskset *tset); int (*can_fork)(struct task_struct *task, void **priv_p); void (*cancel_fork)(struct task_struct *task, void *priv); void (*fork)(struct task_struct *task, void *priv); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f64083030ad5..cb91b44f5f78 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -120,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos, struct cgroup_subsys_state *css); -struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); -struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); +struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, + struct cgroup_subsys_state **dst_cssp); +struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, + struct cgroup_subsys_state **dst_cssp); void css_task_iter_start(struct cgroup_subsys_state *css, struct css_task_iter *it); @@ -236,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it); /** * cgroup_taskset_for_each - iterate cgroup_taskset * @task: the loop cursor + * @dst_css: the destination css * @tset: taskset to iterate * * @tset may contain multiple tasks and they may 
belong to multiple - * processes. When there are multiple tasks in @tset, if a task of a - * process is in @tset, all tasks of the process are in @tset. Also, all - * are guaranteed to share the same source and destination csses. + * processes. + * + * On the v2 hierarchy, there may be tasks from multiple processes and they + * may not share the source or destination csses. + * + * On traditional hierarchies, when there are multiple tasks in @tset, if a + * task of a process is in @tset, all tasks of the process are in @tset. + * Also, all are guaranteed to share the same source and destination csses. * * Iteration is not in any specific order. */ -#define cgroup_taskset_for_each(task, tset) \ - for ((task) = cgroup_taskset_first((tset)); (task); \ - (task) = cgroup_taskset_next((tset))) +#define cgroup_taskset_for_each(task, dst_css, tset) \ + for ((task) = cgroup_taskset_first((tset), &(dst_css)); \ + (task); \ + (task) = cgroup_taskset_next((tset), &(dst_css))) /** * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset * @leader: the loop cursor + * @dst_css: the destination css * @tset: takset to iterate * * Iterate threadgroup leaders of @tset. For single-task migrations, @tset * may not contain any. */ -#define cgroup_taskset_for_each_leader(leader, tset) \ - for ((leader) = cgroup_taskset_first((tset)); (leader); \ - (leader) = cgroup_taskset_next((tset))) \ +#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \ + for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \ + (leader); \ + (leader) = cgroup_taskset_next((tset), &(dst_css))) \ if ((leader) != (leader)->group_leader) \ ; \ else diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5cea63fe4095..470f6536b9e8 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2237,6 +2237,9 @@ struct cgroup_taskset { struct list_head src_csets; struct list_head dst_csets; + /* the subsys currently being processed */ + int ssid; + /* * Fields for cgroup_taskset_*() iteration. * @@ -2299,25 +2302,29 @@ static void cgroup_taskset_add(struct task_struct *task, /** * cgroup_taskset_first - reset taskset and return the first task * @tset: taskset of interest + * @dst_cssp: output variable for the destination css * * @tset iteration is initialized and the first task is returned. */ -struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) +struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, + struct cgroup_subsys_state **dst_cssp) { tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); tset->cur_task = NULL; - return cgroup_taskset_next(tset); + return cgroup_taskset_next(tset, dst_cssp); } /** * cgroup_taskset_next - iterate to the next task in taskset * @tset: taskset of interest + * @dst_cssp: output variable for the destination css * * Return the next task in @tset. Iteration must have been initialized * with cgroup_taskset_first(). */ -struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) +struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, + struct cgroup_subsys_state **dst_cssp) { struct css_set *cset = tset->cur_cset; struct task_struct *task = tset->cur_task; @@ -2332,6 +2339,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) if (&task->cg_list != &cset->mg_tasks) { tset->cur_cset = cset; tset->cur_task = task; + + /* + * This function may be called both before and + * after cgroup_taskset_migrate(). The two cases + * can be distinguished by looking at whether @cset + * has its ->mg_dst_cset set. 
+ */ + if (cset->mg_dst_cset) + *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid]; + else + *dst_cssp = cset->subsys[tset->ssid]; + return task; } @@ -2367,7 +2386,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset, /* check that we can legitimately attach to the cgroup */ for_each_e_css(css, i, dst_cgrp) { if (css->ss->can_attach) { - ret = css->ss->can_attach(css, tset); + tset->ssid = i; + ret = css->ss->can_attach(tset); if (ret) { failed_css = css; goto out_cancel_attach; @@ -2400,9 +2420,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset, */ tset->csets = &tset->dst_csets; - for_each_e_css(css, i, dst_cgrp) - if (css->ss->attach) - css->ss->attach(css, tset); + for_each_e_css(css, i, dst_cgrp) { + if (css->ss->attach) { + tset->ssid = i; + css->ss->attach(tset); + } + } ret = 0; goto out_release_tset; @@ -2411,8 +2434,10 @@ out_cancel_attach: for_each_e_css(css, i, dst_cgrp) { if (css == failed_css) break; - if (css->ss->cancel_attach) - css->ss->cancel_attach(css, tset); + if (css->ss->cancel_attach) { + tset->ssid = i; + css->ss->cancel_attach(tset); + } } out_release_tset: spin_lock_bh(&css_set_lock); diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index ff02a8e51bb3..2d3df82c54f2 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -155,10 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css) * @freezer->lock. freezer_attach() makes the new tasks conform to the * current state and all following state changes can see the new tasks. */ -static void freezer_attach(struct cgroup_subsys_state *new_css, - struct cgroup_taskset *tset) +static void freezer_attach(struct cgroup_taskset *tset) { struct task_struct *task; + struct cgroup_subsys_state *new_css; mutex_lock(&freezer_mutex); @@ -172,7 +172,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, * current state before executing the following - !frozen tasks may * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. 
*/ - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, new_css, tset) { struct freezer *freezer = css_freezer(new_css); if (!(freezer->state & CGROUP_FREEZING)) { diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c index de3359a48dbb..8e27fc5dbb20 100644 --- a/kernel/cgroup_pids.c +++ b/kernel/cgroup_pids.c @@ -162,13 +162,13 @@ revert: return -EAGAIN; } -static int pids_can_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static int pids_can_attach(struct cgroup_taskset *tset) { - struct pids_cgroup *pids = css_pids(css); struct task_struct *task; + struct cgroup_subsys_state *dst_css; - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, dst_css, tset) { + struct pids_cgroup *pids = css_pids(dst_css); struct cgroup_subsys_state *old_css; struct pids_cgroup *old_pids; @@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css, return 0; } -static void pids_cancel_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void pids_cancel_attach(struct cgroup_taskset *tset) { - struct pids_cgroup *pids = css_pids(css); struct task_struct *task; + struct cgroup_subsys_state *dst_css; - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, dst_css, tset) { + struct pids_cgroup *pids = css_pids(dst_css); struct cgroup_subsys_state *old_css; struct pids_cgroup *old_pids; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 10ae73611d80..02a8ea5c9963 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1429,15 +1429,16 @@ static int fmeter_getrate(struct fmeter *fmp) static struct cpuset *cpuset_attach_old_cs; /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ -static int cpuset_can_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static int cpuset_can_attach(struct cgroup_taskset *tset) { - struct cpuset *cs = css_cs(css); + struct cgroup_subsys_state *css; + struct cpuset *cs; struct task_struct *task; int ret; /* used later by cpuset_attach() */ - cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset)); + cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); + cs = css_cs(css); mutex_lock(&cpuset_mutex); @@ -1447,7 +1448,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) goto out_unlock; - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, css, tset) { ret = task_can_attach(task, cs->cpus_allowed); if (ret) goto out_unlock; @@ -1467,9 +1468,14 @@ out_unlock: return ret; } -static void cpuset_cancel_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void cpuset_cancel_attach(struct cgroup_taskset *tset) { + struct cgroup_subsys_state *css; + struct cpuset *cs; + + cgroup_taskset_first(tset, &css); + cs = css_cs(css); + mutex_lock(&cpuset_mutex); css_cs(css)->attach_in_progress--; mutex_unlock(&cpuset_mutex); @@ -1482,16 +1488,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css, */ static cpumask_var_t cpus_attach; -static void cpuset_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void cpuset_attach(struct cgroup_taskset *tset) { /* static buf protected by cpuset_mutex */ static nodemask_t cpuset_attach_nodemask_to; struct task_struct *task; struct task_struct *leader; - struct cpuset *cs = css_cs(css); + struct cgroup_subsys_state *css; + struct cpuset *cs; struct cpuset *oldcs = cpuset_attach_old_cs; + 
cgroup_taskset_first(tset, &css); + cs = css_cs(css); + mutex_lock(&cpuset_mutex); /* prepare for attach */ @@ -1502,7 +1511,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, guarantee_online_mems(cs, &cpuset_attach_nodemask_to); - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, css, tset) { /* * can_attach beforehand should guarantee that this doesn't * fail. TODO: have a better way to handle failure here @@ -1518,7 +1527,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, * sleep and should be moved outside migration path proper. */ cpuset_attach_nodemask_to = cs->effective_mems; - cgroup_taskset_for_each_leader(leader, tset) { + cgroup_taskset_for_each_leader(leader, css, tset) { struct mm_struct *mm = get_task_mm(leader); if (mm) { diff --git a/kernel/events/core.c b/kernel/events/core.c index 36babfd20648..026305dfe523 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9456,12 +9456,12 @@ static int __perf_cgroup_move(void *info) return 0; } -static void perf_cgroup_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void perf_cgroup_attach(struct cgroup_taskset *tset) { struct task_struct *task; + struct cgroup_subsys_state *css; - cgroup_taskset_for_each(task, tset) + cgroup_taskset_for_each(task, css, tset) task_function_call(task, __perf_cgroup_move, task); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4d568ac9319e..a9db4819e586 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8217,12 +8217,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private) sched_move_task(task); } -static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) { struct task_struct *task; + struct cgroup_subsys_state *css; - cgroup_taskset_for_each(task, tset) { + cgroup_taskset_for_each(task, css, tset) { #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; @@ -8235,12 +8235,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, return 0; } -static void cpu_cgroup_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void cpu_cgroup_attach(struct cgroup_taskset *tset) { struct task_struct *task; + struct cgroup_subsys_state *css; - cgroup_taskset_for_each(task, tset) + cgroup_taskset_for_each(task, css, tset) sched_move_task(task); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9acfb165eb52..c92a65b2b4ab 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4779,23 +4779,18 @@ static void mem_cgroup_clear_mc(void) spin_unlock(&mc.lock); } -static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static int mem_cgroup_can_attach(struct cgroup_taskset *tset) { - struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct cgroup_subsys_state *css; + struct mem_cgroup *memcg; struct mem_cgroup *from; struct task_struct *leader, *p; struct mm_struct *mm; unsigned long move_flags; int ret = 0; - /* - * We are now commited to this value whatever it is. Changes in this - * tunable will only affect upcoming migrations, not the current one. - * So we need to save it, and keep it going. 
- */ - move_flags = READ_ONCE(memcg->move_charge_at_immigrate); - if (!move_flags) + /* charge immigration isn't supported on the default hierarchy */ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) return 0; /* @@ -4805,13 +4800,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, * multiple. */ p = NULL; - cgroup_taskset_for_each_leader(leader, tset) { + cgroup_taskset_for_each_leader(leader, css, tset) { WARN_ON_ONCE(p); p = leader; + memcg = mem_cgroup_from_css(css); } if (!p) return 0; + /* + * We are now commited to this value whatever it is. Changes in this + * tunable will only affect upcoming migrations, not the current one. + * So we need to save it, and keep it going. + */ + move_flags = READ_ONCE(memcg->move_charge_at_immigrate); + if (!move_flags) + return 0; + from = mem_cgroup_from_task(p); VM_BUG_ON(from == memcg); @@ -4842,8 +4847,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, return ret; } -static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { if (mc.to) mem_cgroup_clear_mc(); @@ -4985,10 +4989,10 @@ retry: atomic_dec(&mc.from->moving_account); } -static void mem_cgroup_move_task(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void mem_cgroup_move_task(struct cgroup_taskset *tset) { - struct task_struct *p = cgroup_taskset_first(tset); + struct cgroup_subsys_state *css; + struct task_struct *p = cgroup_taskset_first(tset, &css); struct mm_struct *mm = get_task_mm(p); if (mm) { @@ -5000,17 +5004,14 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css, mem_cgroup_clear_mc(); } #else /* !CONFIG_MMU */ -static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static int mem_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; } -static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { } -static void mem_cgroup_move_task(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void mem_cgroup_move_task(struct cgroup_taskset *tset) { } #endif diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 6441f47b1a8f..81cb3c72efe8 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -67,14 +67,15 @@ static int update_classid(const void *v, struct file *file, unsigned n) return 0; } -static void cgrp_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void cgrp_attach(struct cgroup_taskset *tset) { - struct cgroup_cls_state *cs = css_cls_state(css); - void *v = (void *)(unsigned long)cs->classid; struct task_struct *p; + struct cgroup_subsys_state *css; + + cgroup_taskset_for_each(p, css, tset) { + struct cgroup_cls_state *cs = css_cls_state(css); + void *v = (void *)(unsigned long)cs->classid; - cgroup_taskset_for_each(p, tset) { task_lock(p); iterate_fd(p->files, 0, update_classid, v); task_unlock(p); diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index cbd0a199bf52..40fd09fe06ae 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -218,13 +218,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n) return 0; } -static void net_prio_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) +static void net_prio_attach(struct cgroup_taskset *tset) { struct task_struct 
*p; - void *v = (void *)(unsigned long)css->cgroup->id; + struct cgroup_subsys_state *css; + + cgroup_taskset_for_each(p, css, tset) { + void *v = (void *)(unsigned long)css->cgroup->id; - cgroup_taskset_for_each(p, tset) { task_lock(p); iterate_fd(p->files, 0, update_netprio, v); task_unlock(p); -- cgit v1.2.3 From ae5515d66362b9d96cdcfce504567f0b8b7bd83e Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 4 Dec 2015 08:38:42 -0700 Subject: Revert: "vfio: Include No-IOMMU mode" Revert commit 033291eccbdb ("vfio: Include No-IOMMU mode") due to lack of a user. This was originally intended to fill a need for the DPDK driver, but uptake has been slow so rather than support an unproven kernel interface revert it and revisit when userspace catches up. Signed-off-by: Alex Williamson --- drivers/vfio/Kconfig | 15 ---- drivers/vfio/pci/vfio_pci.c | 8 +- drivers/vfio/vfio.c | 186 ++------------------------------------------ include/linux/vfio.h | 3 - include/uapi/linux/vfio.h | 7 -- 5 files changed, 10 insertions(+), 209 deletions(-) (limited to 'include') diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index da6e2ce77495..850d86ca685b 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -31,21 +31,6 @@ menuconfig VFIO If you don't know what to do here, say N. -menuconfig VFIO_NOIOMMU - bool "VFIO No-IOMMU support" - depends on VFIO - help - VFIO is built on the ability to isolate devices using the IOMMU. - Only with an IOMMU can userspace access to DMA capable devices be - considered secure. VFIO No-IOMMU mode enables IOMMU groups for - devices without IOMMU backing for the purpose of re-using the VFIO - infrastructure in a non-secure mode. Use of this mode will result - in an unsupportable kernel and will therefore taint the kernel. - Device assignment to virtual machines is also not possible with - this mode since there is no IOMMU to provide DMA translation. - - If you don't know what to do here, say N. 
- source "drivers/vfio/pci/Kconfig" source "drivers/vfio/platform/Kconfig" source "virt/lib/Kconfig" diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 2760a7ba3f30..56bf6dbb93db 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -940,13 +940,13 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) return -EINVAL; - group = vfio_iommu_group_get(&pdev->dev); + group = iommu_group_get(&pdev->dev); if (!group) return -EINVAL; vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); if (!vdev) { - vfio_iommu_group_put(group, &pdev->dev); + iommu_group_put(group); return -ENOMEM; } @@ -957,7 +957,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { - vfio_iommu_group_put(group, &pdev->dev); + iommu_group_put(group); kfree(vdev); return ret; } @@ -993,7 +993,7 @@ static void vfio_pci_remove(struct pci_dev *pdev) if (!vdev) return; - vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); + iommu_group_put(pdev->dev.iommu_group); kfree(vdev); if (vfio_pci_is_vga(pdev)) { diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 9da0703e09d0..6070b793cbcb 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -62,7 +62,6 @@ struct vfio_container { struct rw_semaphore group_lock; struct vfio_iommu_driver *iommu_driver; void *iommu_data; - bool noiommu; }; struct vfio_unbound_dev { @@ -85,7 +84,6 @@ struct vfio_group { struct list_head unbound_list; struct mutex unbound_lock; atomic_t opened; - bool noiommu; }; struct vfio_device { @@ -97,147 +95,6 @@ struct vfio_device { void *device_data; }; -#ifdef CONFIG_VFIO_NOIOMMU -static bool noiommu __read_mostly; -module_param_named(enable_unsafe_noiommu_support, - noiommu, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)"); -#endif - -/* - * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe - * and remove functions, any use cases other than acquiring the first - * reference for the purpose of calling vfio_add_group_dev() or removing - * that symmetric reference after vfio_del_group_dev() should use the raw - * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put() - * removes the device from the dummy group and cannot be nested. - */ -struct iommu_group *vfio_iommu_group_get(struct device *dev) -{ - struct iommu_group *group; - int __maybe_unused ret; - - group = iommu_group_get(dev); - -#ifdef CONFIG_VFIO_NOIOMMU - /* - * With noiommu enabled, an IOMMU group will be created for a device - * that doesn't already have one and doesn't have an iommu_ops on their - * bus. We use iommu_present() again in the main code to detect these - * fake groups. - */ - if (group || !noiommu || iommu_present(dev->bus)) - return group; - - group = iommu_group_alloc(); - if (IS_ERR(group)) - return NULL; - - iommu_group_set_name(group, "vfio-noiommu"); - ret = iommu_group_add_device(group, dev); - iommu_group_put(group); - if (ret) - return NULL; - - /* - * Where to taint? 
At this point we've added an IOMMU group for a - * device that is not backed by iommu_ops, therefore any iommu_ - * callback using iommu_ops can legitimately Oops. So, while we may - * be about to give a DMA capable device to a user without IOMMU - * protection, which is clearly taint-worthy, let's go ahead and do - * it here. - */ - add_taint(TAINT_USER, LOCKDEP_STILL_OK); - dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); -#endif - - return group; -} -EXPORT_SYMBOL_GPL(vfio_iommu_group_get); - -void vfio_iommu_group_put(struct iommu_group *group, struct device *dev) -{ -#ifdef CONFIG_VFIO_NOIOMMU - if (!iommu_present(dev->bus)) - iommu_group_remove_device(dev); -#endif - - iommu_group_put(group); -} -EXPORT_SYMBOL_GPL(vfio_iommu_group_put); - -#ifdef CONFIG_VFIO_NOIOMMU -static void *vfio_noiommu_open(unsigned long arg) -{ - if (arg != VFIO_NOIOMMU_IOMMU) - return ERR_PTR(-EINVAL); - if (!capable(CAP_SYS_RAWIO)) - return ERR_PTR(-EPERM); - - return NULL; -} - -static void vfio_noiommu_release(void *iommu_data) -{ -} - -static long vfio_noiommu_ioctl(void *iommu_data, - unsigned int cmd, unsigned long arg) -{ - if (cmd == VFIO_CHECK_EXTENSION) - return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0; - - return -ENOTTY; -} - -static int vfio_iommu_present(struct device *dev, void *unused) -{ - return iommu_present(dev->bus) ? 1 : 0; -} - -static int vfio_noiommu_attach_group(void *iommu_data, - struct iommu_group *iommu_group) -{ - return iommu_group_for_each_dev(iommu_group, NULL, - vfio_iommu_present) ? -EINVAL : 0; -} - -static void vfio_noiommu_detach_group(void *iommu_data, - struct iommu_group *iommu_group) -{ -} - -static struct vfio_iommu_driver_ops vfio_noiommu_ops = { - .name = "vfio-noiommu", - .owner = THIS_MODULE, - .open = vfio_noiommu_open, - .release = vfio_noiommu_release, - .ioctl = vfio_noiommu_ioctl, - .attach_group = vfio_noiommu_attach_group, - .detach_group = vfio_noiommu_detach_group, -}; - -static struct vfio_iommu_driver vfio_noiommu_driver = { - .ops = &vfio_noiommu_ops, -}; - -/* - * Wrap IOMMU drivers, the noiommu driver is the one and only driver for - * noiommu groups (and thus containers) and not available for normal groups. - */ -#define vfio_for_each_iommu_driver(con, pos) \ - for (pos = con->noiommu ? &vfio_noiommu_driver : \ - list_first_entry(&vfio.iommu_drivers_list, \ - struct vfio_iommu_driver, vfio_next); \ - (con->noiommu ? pos != NULL : \ - &pos->vfio_next != &vfio.iommu_drivers_list); \ - pos = con->noiommu ? 
NULL : list_next_entry(pos, vfio_next)) -#else -#define vfio_for_each_iommu_driver(con, pos) \ - list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next) -#endif - - /** * IOMMU driver registration */ @@ -342,8 +199,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group) /** * Group objects - create, release, get, put, search */ -static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, - bool noiommu) +static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) { struct vfio_group *group, *tmp; struct device *dev; @@ -361,7 +217,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, atomic_set(&group->container_users, 0); atomic_set(&group->opened, 0); group->iommu_group = iommu_group; - group->noiommu = noiommu; group->nb.notifier_call = vfio_iommu_group_notifier; @@ -397,8 +252,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.group_devt), minor), - group, "%s%d", noiommu ? "noiommu-" : "", - iommu_group_id(iommu_group)); + group, "%d", iommu_group_id(iommu_group)); if (IS_ERR(dev)) { vfio_free_group_minor(minor); vfio_group_unlock_and_free(group); @@ -786,8 +640,7 @@ int vfio_add_group_dev(struct device *dev, group = vfio_group_get_from_iommu(iommu_group); if (!group) { - group = vfio_create_group(iommu_group, - !iommu_present(dev->bus)); + group = vfio_create_group(iommu_group); if (IS_ERR(group)) { iommu_group_put(iommu_group); return PTR_ERR(group); @@ -999,7 +852,8 @@ static long vfio_ioctl_check_extension(struct vfio_container *container, */ if (!driver) { mutex_lock(&vfio.iommu_drivers_lock); - vfio_for_each_iommu_driver(container, driver) { + list_for_each_entry(driver, &vfio.iommu_drivers_list, + vfio_next) { if (!try_module_get(driver->ops->owner)) continue; @@ -1068,7 +922,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container, } mutex_lock(&vfio.iommu_drivers_lock); - vfio_for_each_iommu_driver(container, driver) { + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { void *data; if (!try_module_get(driver->ops->owner)) @@ -1333,9 +1187,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd) if (atomic_read(&group->container_users)) return -EINVAL; - if (group->noiommu && !capable(CAP_SYS_RAWIO)) - return -EPERM; - f = fdget(container_fd); if (!f.file) return -EBADF; @@ -1351,13 +1202,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd) down_write(&container->group_lock); - /* Real groups and fake groups cannot mix */ - if (!list_empty(&container->group_list) && - container->noiommu != group->noiommu) { - ret = -EPERM; - goto unlock_out; - } - driver = container->iommu_driver; if (driver) { ret = driver->ops->attach_group(container->iommu_data, @@ -1367,7 +1211,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd) } group->container = container; - container->noiommu = group->noiommu; list_add(&group->container_next, &container->group_list); /* Get a reference on the container and mark a user within the group */ @@ -1398,9 +1241,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) !group->container->iommu_driver || !vfio_group_viable(group)) return -EINVAL; - if (group->noiommu && !capable(CAP_SYS_RAWIO)) - return -EPERM; - device = vfio_device_get_from_name(group, buf); if (!device) return -ENODEV; @@ -1443,10 +1283,6 @@ static int vfio_group_get_device_fd(struct 
vfio_group *group, char *buf) fd_install(ret, filep); - if (group->noiommu) - dev_warn(device->dev, "vfio-noiommu device opened by user " - "(%s:%d)\n", current->comm, task_pid_nr(current)); - return ret; } @@ -1535,11 +1371,6 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep) if (!group) return -ENODEV; - if (group->noiommu && !capable(CAP_SYS_RAWIO)) { - vfio_group_put(group); - return -EPERM; - } - /* Do we need multiple instances of the group open? Seems not. */ opened = atomic_cmpxchg(&group->opened, 0, 1); if (opened) { @@ -1702,11 +1533,6 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep) if (!atomic_inc_not_zero(&group->container_users)) return ERR_PTR(-EINVAL); - if (group->noiommu) { - atomic_dec(&group->container_users); - return ERR_PTR(-EPERM); - } - if (!group->container->iommu_driver || !vfio_group_viable(group)) { atomic_dec(&group->container_users); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 610a86a892b8..ddb440975382 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -44,9 +44,6 @@ struct vfio_device_ops { void (*request)(void *device_data, unsigned int count); }; -extern struct iommu_group *vfio_iommu_group_get(struct device *dev); -extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); - extern int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops, void *device_data); diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 751b69f858c8..9fd7b5d8df2f 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -38,13 +38,6 @@ #define VFIO_SPAPR_TCE_v2_IOMMU 7 -/* - * The No-IOMMU IOMMU offers no translation or isolation for devices and - * supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU - * code will taint the host kernel and should be used with extreme caution. - */ -#define VFIO_NOIOMMU_IOMMU 8 - /* * The IOCTL interface is designed for extensibility by embedding the * structure length (argsz) and flags into structures passed between -- cgit v1.2.3 From 3cf92222a39cc7842c373dd90a0c204fa7d7cced Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 3 Dec 2015 20:41:29 +0800 Subject: rhashtable: Prevent spurious EBUSY errors on insertion Thomas and Phil observed that under stress rhashtable insertion sometimes failed with EBUSY, even though this error should only ever be seen when we're under attack and our hash chain length has grown to an unacceptable level, even after a rehash. It turns out that the logic for detecting whether there is an existing rehash is faulty. In particular, when two threads both try to grow the same table at the same time, one of them may see the newly grown table and thus erroneously conclude that it had been rehashed. This is what leads to the EBUSY error. This patch fixes this by remembering the current last table we used during insertion so that rhashtable_insert_rehash can detect when another thread has also done a resize/rehash. When this is detected we will give up our resize/rehash and simply retry the insertion with the new table. Reported-by: Thomas Graf Reported-by: Phil Sutter Signed-off-by: Herbert Xu Tested-by: Phil Sutter Signed-off-by: David S.
Miller --- include/linux/rhashtable.h | 18 +++++++++++------- lib/rhashtable.c | 45 ++++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 843ceca9a21e..e50b31d18462 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, int rhashtable_init(struct rhashtable *ht, const struct rhashtable_params *params); -int rhashtable_insert_slow(struct rhashtable *ht, const void *key, - struct rhash_head *obj, - struct bucket_table *old_tbl); -int rhashtable_insert_rehash(struct rhashtable *ht); +struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, + const void *key, + struct rhash_head *obj, + struct bucket_table *old_tbl); +int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); @@ -598,9 +600,11 @@ restart: new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (unlikely(new_tbl)) { - err = rhashtable_insert_slow(ht, key, obj, new_tbl); - if (err == -EAGAIN) + tbl = rhashtable_insert_slow(ht, key, obj, new_tbl); + if (!IS_ERR_OR_NULL(tbl)) goto slow_path; + + err = PTR_ERR(tbl); goto out; } @@ -611,7 +615,7 @@ restart: if (unlikely(rht_grow_above_100(ht, tbl))) { slow_path: spin_unlock_bh(lock); - err = rhashtable_insert_rehash(ht); + err = rhashtable_insert_rehash(ht, tbl); rcu_read_unlock(); if (err) return err; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a54ff8949f91..2ff7ed91663a 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht, return false; } -int rhashtable_insert_rehash(struct rhashtable *ht) +int rhashtable_insert_rehash(struct rhashtable *ht, + struct bucket_table *tbl) { struct bucket_table *old_tbl; struct bucket_table *new_tbl; - struct bucket_table *tbl; unsigned int size; int err; old_tbl = rht_dereference_rcu(ht->tbl, ht); - tbl = rhashtable_last_table(ht, old_tbl); size = tbl->size; + err = -EBUSY; + if (rht_grow_above_75(ht, tbl)) size *= 2; /* Do not schedule more than one rehash */ else if (old_tbl != tbl) - return -EBUSY; + goto fail; + + err = -ENOMEM; new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); - if (new_tbl == NULL) { - /* Schedule async resize/rehash to try allocation - * non-atomic context. - */ - schedule_work(&ht->run_work); - return -ENOMEM; - } + if (new_tbl == NULL) + goto fail; err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { @@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht) schedule_work(&ht->run_work); return err; + +fail: + /* Do not fail the insert if someone else did a rehash. */ + if (likely(rcu_dereference_raw(tbl->future_tbl))) + return 0; + + /* Schedule async rehash to retry allocation in process context. 
*/ + if (err == -ENOMEM) + schedule_work(&ht->run_work); + + return err; } EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); -int rhashtable_insert_slow(struct rhashtable *ht, const void *key, - struct rhash_head *obj, - struct bucket_table *tbl) +struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, + const void *key, + struct rhash_head *obj, + struct bucket_table *tbl) { struct rhash_head *head; unsigned int hash; @@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key, exit: spin_unlock(rht_bucket_lock(tbl, hash)); - return err; + if (err == 0) + return NULL; + else if (err == -EAGAIN) + return tbl; + else + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(rhashtable_insert_slow); -- cgit v1.2.3 From c5fb8caaf91ea6a92920cf24db10cfc94d58de0f Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Fri, 4 Dec 2015 13:54:03 +0100 Subject: vxlan: fix incorrect RCO bit in VXLAN header Commit 3511494ce2f3d ("vxlan: Group Policy extension") changed the definition of VXLAN_HF_RCO from 0x00200000 to BIT(24). This is obviously incorrect. It's also in violation of the RFC draft. Fixes: 3511494ce2f3d ("vxlan: Group Policy extension") Cc: Thomas Graf Cc: Tom Herbert Signed-off-by: Jiri Benc Acked-by: Tom Herbert Signed-off-by: David S. Miller --- include/net/vxlan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/vxlan.h b/include/net/vxlan.h index c1c899c3a51b..e289ada6adf6 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -79,7 +79,7 @@ struct vxlanhdr { }; /* VXLAN header flags. */ -#define VXLAN_HF_RCO BIT(24) +#define VXLAN_HF_RCO BIT(21) #define VXLAN_HF_VNI BIT(27) #define VXLAN_HF_GBP BIT(31) -- cgit v1.2.3 From 01ce63c90170283a9855d1db4fe81934dddce648 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Fri, 4 Dec 2015 15:14:04 -0200 Subject: sctp: update the netstamp_needed counter when copying sockets Dmitry Vyukov reported that SCTP was triggering a WARN on socket destroy related to disabling sock timestamp. When SCTP accepts an association or peels one off, it copies sock flags but forgets to call net_enable_timestamp() if a packet timestamping flag was copied, leading to extra calls to net_disable_timestamp() whenever such clones were closed. The fix is to call net_enable_timestamp() whenever we copy a sock with that flag on, like tcp does. Reported-by: Dmitry Vyukov Signed-off-by: Marcelo Ricardo Leitner Acked-by: Vlad Yasevich Signed-off-by: David S.
Miller --- include/net/sock.h | 2 ++ net/core/sock.c | 2 -- net/sctp/socket.c | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 52d27ee924f4..b1d475b5db68 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -740,6 +740,8 @@ enum sock_flags { SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ }; +#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) + static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) { nsk->sk_flags = osk->sk_flags; diff --git a/net/core/sock.c b/net/core/sock.c index e31dfcee1729..d01c8f42dbb2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -433,8 +433,6 @@ static bool sock_needs_netstamp(const struct sock *sk) } } -#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) - static void sock_disable_timestamp(struct sock *sk, unsigned long flags) { if (sk->sk_flags & flags) { diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 03c8256063ec..4c9282bdd067 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7199,6 +7199,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; + + if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) + net_enable_timestamp(); } static inline void sctp_copy_descendant(struct sock *sk_to, -- cgit v1.2.3 From 8a0d19c5ed417c78d03f4e0fa7215e58c40896d8 Mon Sep 17 00:00:00 2001 From: lucien Date: Sat, 5 Dec 2015 15:35:36 +0800 Subject: sctp: start t5 timer only when peer rwnd is 0 and local state is SHUTDOWN_PENDING When A sends data to B and then A calls close() and enters the SHUTDOWN_PENDING state, if B neither claims its rwnd is 0 nor sends a SACK for this data, A will keep retransmitting the data until the t5 timer expires, so the Max.Retrans limit no longer takes effect, which is bad. If B's rwnd is not 0, A should send an abort after Max.Retrans attempts; only when B's rwnd == 0 and A's retransmissions exceed Max.Retrans should A start the t5 timer. That is also what commit f8d960524328 ("sctp: Enforce retransmission limit during shutdown") intended, but it lacks the condition peer rwnd == 0. So fix it by adding a bit (zero_window_announced) in the peer to record whether the last announced rwnd was 0; if it was, zero_window_announced will be set. Use this bit to decide whether to start the t5 timer when the local state is SHUTDOWN_PENDING. Fixes: commit f8d960524328 ("sctp: Enforce retransmission limit during shutdown") Signed-off-by: Xin Long Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 3 ++- net/sctp/outqueue.c | 1 + net/sctp/sm_statefuns.c | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 7bbb71081aeb..eea9bdeecba2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1493,7 +1493,8 @@ struct sctp_association { * : SACK's are not delayed (see Section 6). */ __u8 sack_needed:1, /* Do we need to sack the peer? */ - sack_generation:1; + sack_generation:1, + zero_window_announced:1; __u32 sack_cnt; __u32 adaptation_ind; /* Adaptation Code point.
*/ diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 0b3d8189f140..c0380cfb16ae 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1252,6 +1252,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) */ sack_a_rwnd = ntohl(sack->a_rwnd); + asoc->peer.zero_window_announced = !sack_a_rwnd; outstanding = q->outstanding_bytes; if (outstanding < sack_a_rwnd) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 6f46aa16cb76..cd34a4a34065 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -5412,7 +5412,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); if (asoc->overall_error_count >= asoc->max_retrans) { - if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { + if (asoc->peer.zero_window_announced && + asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { /* * We are here likely because the receiver had its rwnd * closed for a while and we have not been able to -- cgit v1.2.3 From 326fcfa5acca446b3f71e99f6d19881145556e5c Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 5 Dec 2015 13:58:11 +0100 Subject: net: remove unnecessary semicolon in netdev_alloc_pcpu_stats() This semicolon causes a build error if the function call is wrapped in parentheses. Fixes: aabc92bbe3cf ("net: add __netdev_alloc_pcpu_stats() to indicate gfp flags") Reported-by: Imre Kaloz Signed-off-by: Felix Fietkau Acked-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3b5d134e945a..3143c847bddb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2084,7 +2084,7 @@ struct pcpu_sw_netstats { }) #define netdev_alloc_pcpu_stats(type) \ - __netdev_alloc_pcpu_stats(type, GFP_KERNEL); + __netdev_alloc_pcpu_stats(type, GFP_KERNEL) #include -- cgit v1.2.3 From 7c23b7c1996597dd9d60bb282fb5fa1be6ebd18b Mon Sep 17 00:00:00 2001 From: "Lu, Han" Date: Mon, 7 Dec 2015 15:59:13 +0800 Subject: ALSA: hda - Fix playback noise with 24/32 bit sample size on BXT In BXT-P A0, HD-Audio DMA requests are later than expected, which makes an audio stream sensitive to system latencies when 24/32-bit samples are playing. Adjust the DMA FIFO threshold to force the DMA request sooner, improving latency tolerance at the expense of power.
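For illustration, the register update this boils down to is a single read-modify-write of the SKL_EM4L extended-mode register. A minimal C sketch follows; the assumption (mine, not stated by the patch) is that the DMA FIFO threshold field sits in bits 21:20 and that clearing the remaining bits requests the earliest DMA request point:

/* Hedged sketch of the masking applied in the diff below; the meaning
 * of the individual EM4L bits is an assumption for illustration.
 */
#include <stdint.h>

static inline uint32_t bxt_em4l_reduce_latency(uint32_t em4l)
{
	/* keep only bits 21:20 (0x3 << 20), clear everything else */
	return em4l & (UINT32_C(0x3) << 20);
}

The diff below applies the same masking through azx_readl()/azx_writel().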
v2: move Intel specific code to hda_intel.c Signed-off-by: Lu, Han Signed-off-by: Takashi Iwai --- include/sound/hda_register.h | 3 +++ sound/pci/hda/hda_intel.c | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'include') diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 2ae8812d7b1a..94dc6a9772e0 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h @@ -93,6 +93,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_REG_HSW_EM4 0x100c #define AZX_REG_HSW_EM5 0x1010 +/* Skylake/Broxton display HD-A controller Extended Mode registers */ +#define AZX_REG_SKL_EM4L 0x1040 + /* PCI space */ #define AZX_PCIREG_TCSEL 0x44 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 963f82430938..bff5c8b329d1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -355,6 +355,8 @@ enum { ((pci)->device == 0x0d0c) || \ ((pci)->device == 0x160c)) +#define IS_BROXTON(pci) ((pci)->device == 0x5a98) + static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", [AZX_DRIVER_PCH] = "HDA Intel PCH", @@ -506,15 +508,36 @@ static void azx_init_pci(struct azx *chip) } } +/* + * In BXT-P A0, HD-Audio DMA requests are later than expected, + * and make an audio stream sensitive to system latencies when + * 24/32 bits are playing. + * Adjusting threshold of DMA fifo to force the DMA request + * sooner to improve latency tolerance at the expense of power. + */ +static void bxt_reduce_dma_latency(struct azx *chip) +{ + u32 val; + + val = azx_readl(chip, SKL_EM4L); + val &= (0x3 << 20); + azx_writel(chip, SKL_EM4L, val); +} + static void hda_intel_init_chip(struct azx *chip, bool full_reset) { struct hdac_bus *bus = azx_bus(chip); struct pci_dev *pci = chip->pci; if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) snd_hdac_set_codec_wakeup(bus, true); azx_init_chip(chip, full_reset); if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) snd_hdac_set_codec_wakeup(bus, false); + + /* reduce dma latency to avoid noise */ + if (IS_BROXTON(pci)) + bxt_reduce_dma_latency(chip); } /* calculate runtime delay from LPIB */ -- cgit v1.2.3 From ea013a9b205b47b1fcbc72522146fad560af0712 Mon Sep 17 00:00:00 2001 From: Andreas Werner Date: Fri, 4 Dec 2015 18:12:49 +0100 Subject: libata-eh.c: Introduce new ata port flag for controllers which lock up on read log page Some controllers lock up on an ata_read_log_page. Add a new ata port flag, ATA_FLAG_NO_LOG_PAGE, which can be used to blacklist a controller. If this flag is set, any attempt to read a log page returns an error without actually issuing the command. Signed-off-by: Andreas Werner Signed-off-by: Tejun Heo --- drivers/ata/libata-eh.c | 8 ++++++++ include/linux/libata.h | 1 + 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index cb0508af1459..961acc788f44 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask) unsigned int ata_read_log_page(struct ata_device *dev, u8 log, u8 page, void *buf, unsigned int sectors) { + unsigned long ap_flags = dev->link->ap->flags; struct ata_taskfile tf; unsigned int err_mask; bool dma = false; DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); + /* + * Return error without actually issuing the command on controllers + * which e.g. lock up on a read log page.
+ */ + if (ap_flags & ATA_FLAG_NO_LOG_PAGE) + return AC_ERR_DEV; + retry: ata_tf_init(dev, &tf); if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && diff --git a/include/linux/libata.h b/include/linux/libata.h index 83577f8fd15b..600c1e0626a5 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -210,6 +210,7 @@ enum { ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ /* (doesn't imply presence) */ ATA_FLAG_SATA = (1 << 1), + ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ -- cgit v1.2.3 From 57b4bd06ff0372fe1e3617889c4b37fbd500364a Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Sun, 6 Dec 2015 11:25:47 +0100 Subject: lightnvm: comments on constants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is not obvious what NVM_IO_* and NVM_BLK_T_* are used for. Make sure to comment them appropriately, like the other constants. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- include/linux/lightnvm.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index c6916aec43b6..935ef3844c05 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -50,9 +50,16 @@ enum { NVM_IO_DUAL_ACCESS = 0x1, NVM_IO_QUAD_ACCESS = 0x2, + /* NAND Access Modes */ NVM_IO_SUSPEND = 0x80, NVM_IO_SLC_MODE = 0x100, NVM_IO_SCRAMBLE_DISABLE = 0x200, + + /* Block Types */ + NVM_BLK_T_FREE = 0x0, + NVM_BLK_T_BAD = 0x1, + NVM_BLK_T_DEV = 0x2, + NVM_BLK_T_HOST = 0x4, }; struct nvm_id_group { -- cgit v1.2.3 From 16f26c3aa9b9c36a9d1092ae3258461d1008481e Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Sun, 6 Dec 2015 11:25:48 +0100 Subject: lightnvm: replace req queue with nvmdev for lld MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the case where a request queue is passed to the low level lightnvm device driver integration, the device driver might pass its admin commands through another queue. Instead, pass nvm_dev and let the low level driver use the appropriate queue.
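To make the new callback shape concrete, here is a minimal sketch of a low-level driver hook after this change, mirroring the null_blk and nvme conversions in the diff below; my_lnvm_submit_io() and my_queue_submit() are hypothetical names:

/* Hedged sketch: the LLD now receives the nvm_dev and derives the
 * queue itself, instead of being handed a request_queue directly.
 */
static int my_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;	/* driver's choice of queue */

	return my_queue_submit(q, rqd);		/* hypothetical helper */
}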
Reported-by: Christoph Hellwig Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 9 +++++---- drivers/lightnvm/core.c | 7 +++---- drivers/lightnvm/gennvm.c | 8 ++++---- drivers/lightnvm/rrpc.c | 2 +- drivers/nvme/host/lightnvm.c | 24 +++++++++++++----------- include/linux/lightnvm.h | 14 +++++++------- 6 files changed, 33 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 0c3940ec5e62..7981b7407305 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -444,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error) blk_put_request(rq); } -static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) +static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) { + struct request_queue *q = dev->q; struct request *rq; struct bio *bio = rqd->bio; @@ -470,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) return 0; } -static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) +static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) { sector_t size = gb * 1024 * 1024 * 1024ULL; sector_t blksize; @@ -523,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) return 0; } -static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) +static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name) { mempool_t *virtmem_pool; @@ -541,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool) mempool_destroy(pool); } -static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, +static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool, gfp_t mem_flags, dma_addr_t *dma_handler) { return mempool_alloc(pool, mem_flags); diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 86ce887b2ed6..4a8d1fe34c4e 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target); void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags, dma_addr_t *dma_handler) { - return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags, + return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags, dma_handler); } EXPORT_SYMBOL(nvm_dev_dma_alloc); @@ -246,7 +246,7 @@ static int nvm_init(struct nvm_dev *dev) if (!dev->q || !dev->ops) return ret; - if (dev->ops->identity(dev->q, &dev->identity)) { + if (dev->ops->identity(dev, &dev->identity)) { pr_err("nvm: device could not be identified\n"); goto err; } @@ -326,8 +326,7 @@ int nvm_register(struct request_queue *q, char *disk_name, } if (dev->ops->max_phys_sect > 1) { - dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, - "ppalist"); + dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist"); if (!dev->ppalist_pool) { pr_err("nvm: could not create ppa pool\n"); ret = -ENOMEM; diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ce6025487a5c..52b513a65946 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -195,7 +195,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) } if (dev->ops->get_l2p_tbl) { - ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages, + ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages, gennvm_block_map, dev); if (ret) { pr_err("gennvm: could not read L2P table.\n"); @@ -346,7 +346,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) gennvm_generic_to_addr_mode(dev, rqd); rqd->dev = 
dev; - return dev->ops->submit_io(dev->q, rqd); + return dev->ops->submit_io(dev, rqd); } static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, @@ -382,7 +382,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) if (!dev->ops->set_bb_tbl) return; - if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) + if (dev->ops->set_bb_tbl(dev, rqd, 1)) return; gennvm_addr_to_generic_mode(dev, rqd); @@ -450,7 +450,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, gennvm_generic_to_addr_mode(dev, &rqd); - ret = dev->ops->erase_block(dev->q, &rqd); + ret = dev->ops->erase_block(dev, &rqd); if (plane_cnt) nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list); diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index cf1a4a515b76..134e4faba482 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -1016,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc) return 0; /* Bring up the mapping table from device */ - ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages, + ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages, rrpc_l2p_update, rrpc); if (ret) { pr_err("nvm: rrpc: could not read L2P table.\n"); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 762c9a7cbfa6..15f2acb4d5cd 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -271,9 +271,9 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) return 0; } -static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) +static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) { - struct nvme_ns *ns = q->queuedata; + struct nvme_ns *ns = nvmdev->q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_id *nvme_nvm_id; struct nvme_nvm_command c = {}; @@ -308,10 +308,10 @@ out: return ret; } -static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, +static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb, nvm_l2p_update_fn *update_l2p, void *priv) { - struct nvme_ns *ns = q->queuedata; + struct nvme_ns *ns = nvmdev->q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; u32 len = queue_max_hw_sectors(dev->admin_q) << 9; @@ -415,10 +415,10 @@ out: return ret; } -static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, +static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd, int type) { - struct nvme_ns *ns = q->queuedata; + struct nvme_ns *ns = nvmdev->q->queuedata; struct nvme_dev *dev = ns->dev; struct nvme_nvm_command c = {}; int ret = 0; @@ -463,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error) blk_mq_free_request(rq); } -static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) +static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) { + struct request_queue *q = dev->q; struct nvme_ns *ns = q->queuedata; struct request *rq; struct bio *bio = rqd->bio; @@ -502,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) return 0; } -static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd) +static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd) { + struct request_queue *q = dev->q; struct nvme_ns *ns = q->queuedata; struct nvme_nvm_command c = {}; @@ -515,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd) return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0); } -static void 
*nvme_nvm_create_dma_pool(struct request_queue *q, char *name) +static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name) { - struct nvme_ns *ns = q->queuedata; + struct nvme_ns *ns = nvmdev->q->queuedata; struct nvme_dev *dev = ns->dev; return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0); @@ -530,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool) dma_pool_destroy(dma_pool); } -static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool, +static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool, gfp_t mem_flags, dma_addr_t *dma_handler) { return dma_pool_alloc(pool, mem_flags, dma_handler); } diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 935ef3844c05..034117b3be5f 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -183,17 +183,17 @@ struct nvm_block; typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); -typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); -typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, +typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); +typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, nvm_l2p_update_fn *, void *); typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, nvm_bb_update_fn *, void *); -typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); -typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); -typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *); -typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *); +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); typedef void (nvm_destroy_dma_pool_fn)(void *); -typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t, +typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, dma_addr_t *); typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); -- cgit v1.2.3 From 4639d60d2bfb7f5007b5d93788fd93c19b63f000 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 7 Dec 2015 06:25:56 -0500 Subject: qed: Fix corner case for chain in-between pages The number of chain next pointer elements between the producer and the consumer indices depends on which pages they currently point to. The current calculation is based only on their difference, and it can lead to a number of free elements which is higher by 1 than the actual value. Signed-off-by: Tomer Tayar Signed-off-by: Manish Chopra Signed-off-by: David S.
Miller --- include/linux/qed/qed_chain.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index b920c3605c46..41b9049b57e2 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - (u32)p_chain->cons_idx; if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= (used / p_chain->elem_per_page); + used -= p_chain->prod_idx / p_chain->elem_per_page - + p_chain->cons_idx / p_chain->elem_per_page; return p_chain->capacity - used; } -- cgit v1.2.3 From 76a9a3642a0b72d5687d680150580d55b6ea9804 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 7 Dec 2015 06:25:57 -0500 Subject: qed: fix handling of concurrent ramrods. Concurrent non-blocking slowpath ramrods can be completed out-of-order on the completion chain. Recycling completed elements, while previously sent elements are still completion pending, can lead to overriding of active elements on the chain. Furthermore, sending pending slowpath ramrods currently lacks the update of the chain element physical pointer. This patch: * Ensures that ramrods are sent to the FW with consecutive echo values. * Handles out-of-order completions by freeing only the first successive completed entries. * Updates the chain element physical pointer when copying a pending element into a free element for sending. Signed-off-by: Tomer Tayar Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sp.h | 8 +++- drivers/net/ethernet/qlogic/qed/qed_spq.c | 63 +++++++++++++++++++++++-------- include/linux/qed/common_hsi.h | 2 + 3 files changed, 56 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 31a1f1eb4f56..287fadfab52d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -124,8 +124,12 @@ struct qed_spq { dma_addr_t p_phys; struct qed_spq_entry *p_virt; - /* Used as index for completions (returns on EQ by FW) */ - u16 echo_idx; +#define SPQ_RING_SIZE \ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) + /* Bitmap for handling out-of-order completions */ + DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); + u8 comp_bitmap_idx; /* Statistics */ u32 unlimited_pending_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 7c0b8459666e..3dd548ab8df1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -112,8 +112,6 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { - p_ent->elem.hdr.echo = 0; - p_hwfn->p_spq->echo_idx++; p_ent->flags = 0; switch (p_ent->comp_mode) { @@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { - struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; struct core_db_data db; + p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); if (!elem) { DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); @@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_spq->comp_count = 0; p_spq->comp_sent_count = 0;
p_spq->unlimited_pending_count = 0; - p_spq->echo_idx = 0; + + bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); + p_spq->comp_bitmap_idx = 0; /* SPQ cid, cannot fail */ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); @@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { - struct qed_spq_entry *p_en2; if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; return 0; - } + } else { + struct qed_spq_entry *p_en2; - p_en2 = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); - list_del(&p_en2->list); + p_en2 = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, + list); + list_del(&p_en2->list); + + /* Copy the ring element physical pointer to the new + * entry, since we are about to override the entire ring + * entry and don't want to lose the pointer. + */ + p_ent->elem.data_ptr = p_en2->elem.data_ptr; - /* Strcut assignment */ - *p_en2 = *p_ent; + *p_en2 = *p_ent; - kfree(p_ent); + kfree(p_ent); - p_ent = p_en2; + p_ent = p_en2; + } } /* entry is to be placed in 'pending' queue */ @@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + list_del(&p_ent->list); - qed_chain_return_produced(&p_spq->chain); + /* Avoid overriding of SPQ entries when getting + * out-of-order completions, by marking the completions + * in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ + bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); + + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + bitmap_clear(p_spq->p_comp_bitmap, + p_spq->comp_bitmap_idx, + SPQ_RING_SIZE); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } + p_spq->comp_count++; found = p_ent; break; } + + /* This is relatively uncommon - depends on scenarios + * which have multiple per-PF sent ramrods. + */ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", + le16_to_cpu(echo), + le16_to_cpu(p_ent->elem.hdr.echo)); } /* Release lock before callback, as callback may post diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 6a4347639c03..1d1ba2c5ee7a 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -9,6 +9,8 @@ #ifndef __COMMON_HSI__ #define __COMMON_HSI__ +#define CORE_SPQE_PAGE_SIZE_BYTES 4096 + #define FW_MAJOR_VERSION 8 #define FW_MINOR_VERSION 4 #define FW_REVISION_VERSION 2 -- cgit v1.2.3 From d144da8c6f51f48ec39d891ea9dff80169c45f3b Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 2 Nov 2015 12:13:25 -0500 Subject: IB/core: use RCU for uverbs id lookup The current implementation gets a spin_lock, and at any scale with qib and hfi1 post send, the lock contention grows exponentially with the number of QPs. idr_find() is RCU compatible, so reads don't need the lock. Change to use rcu_read_lock() and rcu_read_unlock() in __idr_get_uobj(). kfree_rcu() is used to ensure a grace period between the idr removal and the actual free.
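Sketched generically, this is the standard RCU-plus-kref lookup idiom; the sketch below is not a verbatim copy of the patch, and kref_get_unless_zero() is my choice for illustrating the "take the kref before rcu_read_unlock()" rule:

/* Hedged sketch of an RCU-protected idr lookup with kref lifetime. */
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
	struct rcu_head rcu;
};

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, ref);

	/* grace period between idr removal and the actual free */
	kfree_rcu(o, rcu);
}

static struct obj *obj_lookup(struct idr *idr, int id)
{
	struct obj *o;

	rcu_read_lock();		/* idr_find() is RCU-safe */
	o = idr_find(idr, id);
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;		/* raced with the final put */
	rcu_read_unlock();
	return o;
}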
Reviewed-by: Ira Weiny Signed-off-by: Mike Marciniszyn Reviewed-By: Jason Gunthorpe Signed-off-by: Doug Ledford --- drivers/infiniband/core/uverbs_cmd.c | 12 +++++++----- include/rdma/ib_verbs.h | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4cb8e9d9966c..1c02deab068f 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; * The ib_uobject locking scheme is as follows: * * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it - * needs to be held during all idr operations. When an object is + * needs to be held during all idr write operations. When an object is * looked up, a reference must be taken on the object's kref before - * dropping this lock. + * dropping this lock. For read operations, rcu_read_lock() + * is used instead, but similarly the kref reference is grabbed + * before the rcu_read_unlock(). * * - Each object also has an rwsem. This rwsem must be held for * reading while an operation that uses the object is performed. @@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle, static void release_uobj(struct kref *kref) { - kfree(container_of(kref, struct ib_uobject, ref)); + kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu); } static void put_uobj(struct ib_uobject *uobj) @@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, { struct ib_uobject *uobj; - spin_lock(&ib_uverbs_idr_lock); + rcu_read_lock(); uobj = idr_find(idr, id); if (uobj) { if (uobj->context == context) @@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, else uobj = NULL; } - spin_unlock(&ib_uverbs_idr_lock); + rcu_read_unlock(); return uobj; } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9a68a19532ba..120da1d7f57e 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1271,6 +1271,7 @@ struct ib_uobject { int id; /* index into kernel idr */ struct kref ref; struct rw_semaphore mutex; /* protects .live */ + struct rcu_head rcu; /* kfree_rcu() overhead */ int live; }; -- cgit v1.2.3 From bd5eb35f16a9c55afcf5eb1c920cbbaf09747369 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 7 Dec 2015 08:53:17 -0800 Subject: xfrm: take care of request sockets TCP SYNACK messages might now be attached to request sockets. XFRM needs to get back to a listener socket. Add new helpers that might be used elsewhere: sk_to_full_sk() and sk_const_to_full_sk(). Note: We also need to add RCU protection for xfrm lookups, now that TCP/DCCP have lockless listener processing. This will be addressed in separate patches. Fixes: ca6fb0651883 ("tcp: attach SYNACK messages to request sockets instead of listener") Reported-by: Dave Jones Signed-off-by: Eric Dumazet Cc: Steffen Klassert Signed-off-by: David S. Miller --- include/net/inet_sock.h | 27 +++++++++++++++++++++++---- net/xfrm/xfrm_policy.c | 2 ++ 2 files changed, 25 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 2134e6d815bc..625bdf95d673 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -210,18 +210,37 @@ struct inet_sock { #define IP_CMSG_ORIGDSTADDR BIT(6) #define IP_CMSG_CHECKSUM BIT(7) -/* SYNACK messages might be attached to request sockets.
+/** + * sk_to_full_sk - Access to a full socket + * @sk: pointer to a socket + * + * SYNACK messages might be attached to request sockets. * Some places want to reach the listener in this case. */ -static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) +static inline struct sock *sk_to_full_sk(struct sock *sk) { - struct sock *sk = skb->sk; - +#ifdef CONFIG_INET if (sk && sk->sk_state == TCP_NEW_SYN_RECV) sk = inet_reqsk(sk)->rsk_listener; +#endif + return sk; +} + +/* sk_to_full_sk() variant with a const argument */ +static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) +{ +#ifdef CONFIG_INET + if (sk && sk->sk_state == TCP_NEW_SYN_RECV) + sk = ((const struct request_sock *)sk)->rsk_listener; +#endif return sk; } +static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) +{ + return sk_to_full_sk(skb->sk); +} + static inline struct inet_sock *inet_sk(const struct sock *sk) { return (struct inet_sock *)sk; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 09bfcbac63bb..18276f0cc32b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2198,6 +2198,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, xdst = NULL; route = NULL; + sk = sk_const_to_full_sk(sk); if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { num_pols = 1; pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); @@ -2477,6 +2478,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, } pol = NULL; + sk = sk_to_full_sk(sk); if (sk && sk->sk_policy[dir]) { pol = xfrm_sk_policy_lookup(sk, dir, &fl); if (IS_ERR(pol)) { -- cgit v1.2.3 From 10028c5ab107d3765c7fc282b6c45324d1602155 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 30 Nov 2015 10:55:13 -0800 Subject: drm: Create a driver hook for allocating GEM object structs. The CMA helpers had no way for a driver to extend the struct with its own fields. Since the CMA helpers are mostly "Allocate a drm_gem_cma_object, then fill in a few fields", it's hard to write as pure helpers without passing in a driver callback for the allocate step. Signed-off-by: Eric Anholt Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_gem_cma_helper.c | 10 ++++++---- include/drm/drmP.h | 7 +++++++ 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index e109b49cd25d..0f7b00ba57da 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -59,11 +59,13 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size) struct drm_gem_object *gem_obj; int ret; - cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); - if (!cma_obj) + if (drm->driver->gem_create_object) + gem_obj = drm->driver->gem_create_object(drm, size); + else + gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); + if (!gem_obj) return ERR_PTR(-ENOMEM); - - gem_obj = &cma_obj->base; + cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); ret = drm_gem_object_init(drm, gem_obj, size); if (ret) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0b921ae06cd8..22ff162c61f2 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -580,6 +580,13 @@ struct drm_driver { int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); + /** + * Hook for allocating the GEM object struct, for use by core + * helpers. 
+ */ + struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, + size_t size); + /* prime: */ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, -- cgit v1.2.3 From d5bc60f6ad05b3c676b057bec662cfafc3ee24dd Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Sun, 18 Jan 2015 09:33:17 +1300 Subject: drm/vc4: Add create and map BO ioctls. While there exist dumb APIs for creating and mapping BOs, one of the rules is that drivers doing 3D acceleration have to provide their own APIs for buffer allocation (besides, the pitch/height parameters of the dumb alloc don't really make sense for a lot of 3D allocations). v2: Use __u32-style types, use "drm.h" instead of . Signed-off-by: Eric Anholt --- drivers/gpu/drm/vc4/vc4_bo.c | 41 ++++++++++++++++++++++++++ drivers/gpu/drm/vc4/vc4_drv.c | 3 ++ drivers/gpu/drm/vc4/vc4_drv.h | 4 +++ include/uapi/drm/Kbuild | 1 + include/uapi/drm/vc4_drm.h | 68 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 117 insertions(+) create mode 100644 include/uapi/drm/vc4_drm.h (limited to 'include') diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 18faa5ba37b7..06cba268e17a 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -19,6 +19,7 @@ */ #include "vc4_drv.h" +#include "uapi/drm/vc4_drm.h" static void vc4_bo_stats_dump(struct vc4_dev *vc4) { @@ -346,6 +347,46 @@ static void vc4_bo_cache_time_timer(unsigned long data) schedule_work(&vc4->bo_cache.time_work); } +int vc4_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vc4_create_bo *args = data; + struct vc4_bo *bo = NULL; + int ret; + + /* + * We can't allocate from the BO cache, because the BOs don't + * get zeroed, and that might leak data between users. + */ + bo = vc4_bo_create(dev, args->size, false); + if (!bo) + return -ENOMEM; + + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); + drm_gem_object_unreference_unlocked(&bo->base.base); + + return ret; +} + +int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vc4_mmap_bo *args = data; + struct drm_gem_object *gem_obj; + + gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (!gem_obj) { + DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + return -EINVAL; + } + + /* The mmap offset was set up at BO allocation time. 
*/ + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + + drm_gem_object_unreference_unlocked(gem_obj); + return 0; +} + void vc4_bo_cache_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index da041fac0731..5fa468864ec8 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -16,6 +16,7 @@ #include #include "drm_fb_cma_helper.h" +#include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_regs.h" @@ -73,6 +74,8 @@ static const struct file_operations vc4_drm_fops = { }; static const struct drm_ioctl_desc vc4_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), }; static struct drm_driver vc4_drm_driver = { diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 39a1ff5c8f28..fddb0a06ed3a 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -155,6 +155,10 @@ int vc4_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); struct dma_buf *vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags); +int vc4_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); void vc4_bo_cache_init(struct drm_device *dev); void vc4_bo_cache_destroy(struct drm_device *dev); int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index 38d437096c35..974fcd54e36f 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild @@ -17,4 +17,5 @@ header-y += tegra_drm.h header-y += via_drm.h header-y += vmwgfx_drm.h header-y += msm_drm.h +header-y += vc4_drm.h header-y += virtgpu_drm.h diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h new file mode 100644 index 000000000000..219d34c42272 --- /dev/null +++ b/include/uapi/drm/vc4_drm.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2014-2015 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _UAPI_VC4_DRM_H_ +#define _UAPI_VC4_DRM_H_ + +#include "drm.h" + +#define DRM_VC4_CREATE_BO 0x03 +#define DRM_VC4_MMAP_BO 0x04 + +#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) +#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) + +/** + * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_vc4_create_bo { + __u32 size; + __u32 flags; + /** Returned GEM handle for the BO. */ + __u32 handle; + __u32 pad; +}; + +/** + * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs. + * + * This doesn't actually perform an mmap. Instead, it returns the + * offset you need to use in an mmap on the DRM device node. This + * means that tools like valgrind end up knowing about the mapped + * memory. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_vc4_mmap_bo { + /** Handle for the object being mapped. */ + __u32 handle; + __u32 flags; + /** offset into the drm node to use for subsequent mmap call. */ + __u64 offset; +}; + +#endif /* _UAPI_VC4_DRM_H_ */ -- cgit v1.2.3 From 463873d5701427f2964a0b4b72c45f1f14b6df87 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 30 Nov 2015 11:41:40 -0800 Subject: drm/vc4: Add an API for creating GPU shaders in GEM BOs. Since we have no MMU, the kernel needs to validate that the submitted shader code won't make any accesses to memory that the user doesn't control, which involves banning some operations (general purpose DMA writes), and tracking where we need to write out pointers for other operations (texture sampling). Once it's validated, we return a GEM BO containing the shader, which doesn't allow mapping for write or exporting to other subsystems. v2: Use __u32-style types. 
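A hedged userspace sketch of driving the new ioctl; the argument fields follow the kernel handler added below, while the exact uapi struct layout (added to vc4_drm.h by this patch) and the helper name are assumptions of the example:

/* Hedged sketch: creating a validated shader BO from userspace. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>	/* for DRM_IOCTL_VC4_CREATE_SHADER_BO */

static int create_shader_bo(int drm_fd, const uint64_t *qpu_insts,
			    uint32_t size_bytes, uint32_t *handle)
{
	struct drm_vc4_create_shader_bo args;

	memset(&args, 0, sizeof(args));	/* flags and pad must be zero */
	args.size = size_bytes;		/* must be a multiple of sizeof(u64) */
	args.data = (uintptr_t)qpu_insts;
	if (ioctl(drm_fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &args) != 0)
		return -1;		/* validation failures surface as EINVAL */
	*handle = args.handle;
	return 0;
}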
Signed-off-by: Eric Anholt --- drivers/gpu/drm/vc4/Makefile | 3 +- drivers/gpu/drm/vc4/vc4_bo.c | 140 ++++++++ drivers/gpu/drm/vc4/vc4_drv.c | 9 +- drivers/gpu/drm/vc4/vc4_drv.h | 50 +++ drivers/gpu/drm/vc4/vc4_qpu_defines.h | 264 +++++++++++++++ drivers/gpu/drm/vc4/vc4_validate_shaders.c | 513 +++++++++++++++++++++++++++++ include/uapi/drm/vc4_drm.h | 25 ++ 7 files changed, 999 insertions(+), 5 deletions(-) create mode 100644 drivers/gpu/drm/vc4/vc4_qpu_defines.h create mode 100644 drivers/gpu/drm/vc4/vc4_validate_shaders.c (limited to 'include') diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile index 32b4f9cd8f52..eb776a6a9899 100644 --- a/drivers/gpu/drm/vc4/Makefile +++ b/drivers/gpu/drm/vc4/Makefile @@ -10,7 +10,8 @@ vc4-y := \ vc4_kms.o \ vc4_hdmi.o \ vc4_hvs.o \ - vc4_plane.o + vc4_plane.o \ + vc4_validate_shaders.o vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 06cba268e17a..18dfe3ec9a62 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -79,6 +79,12 @@ static void vc4_bo_destroy(struct vc4_bo *bo) struct drm_gem_object *obj = &bo->base.base; struct vc4_dev *vc4 = to_vc4_dev(obj->dev); + if (bo->validated_shader) { + kfree(bo->validated_shader->texture_samples); + kfree(bo->validated_shader); + bo->validated_shader = NULL; + } + vc4->bo_stats.num_allocated--; vc4->bo_stats.size_allocated -= obj->size; drm_gem_cma_free_object(obj); @@ -315,6 +321,12 @@ void vc4_free_object(struct drm_gem_object *gem_bo) goto out; } + if (bo->validated_shader) { + kfree(bo->validated_shader->texture_samples); + kfree(bo->validated_shader); + bo->validated_shader = NULL; + } + bo->free_time = jiffies; list_add(&bo->size_head, cache_list); list_add(&bo->unref_head, &vc4->bo_cache.time_list); @@ -347,6 +359,78 @@ static void vc4_bo_cache_time_timer(unsigned long data) schedule_work(&vc4->bo_cache.time_work); } +struct dma_buf * +vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) +{ + struct vc4_bo *bo = to_vc4_bo(obj); + + if (bo->validated_shader) { + DRM_ERROR("Attempting to export shader BO\n"); + return ERR_PTR(-EINVAL); + } + + return drm_gem_prime_export(dev, obj, flags); +} + +int vc4_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_object *gem_obj; + struct vc4_bo *bo; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + gem_obj = vma->vm_private_data; + bo = to_vc4_bo(gem_obj); + + if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { + DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); + return -EINVAL; + } + + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
+ */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_pgoff = 0; + + ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma, + bo->base.vaddr, bo->base.paddr, + vma->vm_end - vma->vm_start); + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct vc4_bo *bo = to_vc4_bo(obj); + + if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { + DRM_ERROR("mmaping of shader BOs for writing not allowed.\n"); + return -EINVAL; + } + + return drm_gem_cma_prime_mmap(obj, vma); +} + +void *vc4_prime_vmap(struct drm_gem_object *obj) +{ + struct vc4_bo *bo = to_vc4_bo(obj); + + if (bo->validated_shader) { + DRM_ERROR("mmaping of shader BOs not allowed.\n"); + return ERR_PTR(-EINVAL); + } + + return drm_gem_cma_prime_vmap(obj); +} + int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -387,6 +471,62 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, return 0; } +int +vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vc4_create_shader_bo *args = data; + struct vc4_bo *bo = NULL; + int ret; + + if (args->size == 0) + return -EINVAL; + + if (args->size % sizeof(u64) != 0) + return -EINVAL; + + if (args->flags != 0) { + DRM_INFO("Unknown flags set: 0x%08x\n", args->flags); + return -EINVAL; + } + + if (args->pad != 0) { + DRM_INFO("Pad set: 0x%08x\n", args->pad); + return -EINVAL; + } + + bo = vc4_bo_create(dev, args->size, true); + if (!bo) + return -ENOMEM; + + ret = copy_from_user(bo->base.vaddr, + (void __user *)(uintptr_t)args->data, + args->size); + if (ret != 0) + goto fail; + /* Clear the rest of the memory, which may be left over from + * allocating from the BO cache. + */ + memset(bo->base.vaddr + args->size, 0, + bo->base.base.size - args->size); + + bo->validated_shader = vc4_validate_shader(&bo->base); + if (!bo->validated_shader) { + ret = -EINVAL; + goto fail; + } + + /* We have to create the handle after validation, to avoid + * races for users doing things like mmap the shader BO.
+ */ + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); + + fail: + drm_gem_object_unreference_unlocked(&bo->base.base); + + return ret; +} + void vc4_bo_cache_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 5fa468864ec8..da4be9c8f70d 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -64,7 +64,7 @@ static const struct file_operations vc4_drm_fops = { .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, - .mmap = drm_gem_cma_mmap, + .mmap = vc4_mmap, .poll = drm_poll, .read = drm_read, #ifdef CONFIG_COMPAT @@ -76,6 +76,7 @@ static const struct file_operations vc4_drm_fops = { static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), }; static struct drm_driver vc4_drm_driver = { @@ -102,12 +103,12 @@ static struct drm_driver vc4_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = drm_gem_prime_import, - .gem_prime_export = drm_gem_prime_export, + .gem_prime_export = vc4_prime_export, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, - .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vmap = vc4_prime_vmap, .gem_prime_vunmap = drm_gem_cma_prime_vunmap, - .gem_prime_mmap = drm_gem_cma_prime_mmap, + .gem_prime_mmap = vc4_prime_mmap, .dumb_create = vc4_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index fddb0a06ed3a..bd77d5574e30 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -69,6 +69,11 @@ struct vc4_bo { /* List entry for the BO's position in vc4_dev->bo_cache.size_list */ struct list_head size_head; + + /* Struct for shader validation state, if created by + * DRM_IOCTL_VC4_CREATE_SHADER_BO. + */ + struct vc4_validated_shader_info *validated_shader; }; static inline struct vc4_bo * @@ -117,6 +122,42 @@ to_vc4_encoder(struct drm_encoder *encoder) #define HVS_READ(offset) readl(vc4->hvs->regs + offset) #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) +/** + * struct vc4_texture_sample_info - saves the offsets into the UBO for texture + * setup parameters. + * + * This will be used at draw time to relocate the reference to the texture + * contents in p0, and validate that the offset combined with + * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO. + * Note that the hardware treats unprovided config parameters as 0, so not all + * of them need to be set up for every texture sample, and we'll store ~0 as + * the offset to mark the unused ones. + * + * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit + * Setup") for definitions of the texture parameters. + */ +struct vc4_texture_sample_info { + bool is_direct; + uint32_t p_offset[4]; +}; + +/** + * struct vc4_validated_shader_info - information about validated shaders that + * needs to be used from command list validation.
+ * + * For a given shader, each time a shader state record references it, we need + * to verify that the shader doesn't read more uniforms than the shader state + * record's uniform BO pointer can provide, and we need to apply relocations + * and validate the shader state record's uniforms that define the texture + * samples. + */ +struct vc4_validated_shader_info { + uint32_t uniforms_size; + uint32_t uniforms_src_size; + uint32_t num_texture_samples; + struct vc4_texture_sample_info *texture_samples; +}; + /** * _wait_for - magic (register) wait macro * @@ -157,8 +198,13 @@ struct dma_buf *vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags); int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int vc4_mmap(struct file *filp, struct vm_area_struct *vma); +int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +void *vc4_prime_vmap(struct drm_gem_object *obj); void vc4_bo_cache_init(struct drm_device *dev); void vc4_bo_cache_destroy(struct drm_device *dev); int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); @@ -194,3 +240,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, enum drm_plane_type type); u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); u32 vc4_plane_dlist_size(struct drm_plane_state *state); + +/* vc4_validate_shader.c */ +struct vc4_validated_shader_info * +vc4_validate_shader(struct drm_gem_cma_object *shader_obj); diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h new file mode 100644 index 000000000000..d5c2f3c85ebb --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h @@ -0,0 +1,264 @@ +/* + * Copyright © 2014 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef VC4_QPU_DEFINES_H +#define VC4_QPU_DEFINES_H + +enum qpu_op_add { + QPU_A_NOP, + QPU_A_FADD, + QPU_A_FSUB, + QPU_A_FMIN, + QPU_A_FMAX, + QPU_A_FMINABS, + QPU_A_FMAXABS, + QPU_A_FTOI, + QPU_A_ITOF, + QPU_A_ADD = 12, + QPU_A_SUB, + QPU_A_SHR, + QPU_A_ASR, + QPU_A_ROR, + QPU_A_SHL, + QPU_A_MIN, + QPU_A_MAX, + QPU_A_AND, + QPU_A_OR, + QPU_A_XOR, + QPU_A_NOT, + QPU_A_CLZ, + QPU_A_V8ADDS = 30, + QPU_A_V8SUBS = 31, +}; + +enum qpu_op_mul { + QPU_M_NOP, + QPU_M_FMUL, + QPU_M_MUL24, + QPU_M_V8MULD, + QPU_M_V8MIN, + QPU_M_V8MAX, + QPU_M_V8ADDS, + QPU_M_V8SUBS, +}; + +enum qpu_raddr { + QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */ + /* 0-31 are the plain regfile a or b fields */ + QPU_R_UNIF = 32, + QPU_R_VARY = 35, + QPU_R_ELEM_QPU = 38, + QPU_R_NOP, + QPU_R_XY_PIXEL_COORD = 41, + QPU_R_MS_REV_FLAGS = 41, + QPU_R_VPM = 48, + QPU_R_VPM_LD_BUSY, + QPU_R_VPM_LD_WAIT, + QPU_R_MUTEX_ACQUIRE, +}; + +enum qpu_waddr { + /* 0-31 are the plain regfile a or b fields */ + QPU_W_ACC0 = 32, /* aka r0 */ + QPU_W_ACC1, + QPU_W_ACC2, + QPU_W_ACC3, + QPU_W_TMU_NOSWAP, + QPU_W_ACC5, + QPU_W_HOST_INT, + QPU_W_NOP, + QPU_W_UNIFORMS_ADDRESS, + QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */ + QPU_W_MS_FLAGS = 42, + QPU_W_REV_FLAG = 42, + QPU_W_TLB_STENCIL_SETUP = 43, + QPU_W_TLB_Z, + QPU_W_TLB_COLOR_MS, + QPU_W_TLB_COLOR_ALL, + QPU_W_TLB_ALPHA_MASK, + QPU_W_VPM, + QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */ + QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */ + QPU_W_MUTEX_RELEASE, + QPU_W_SFU_RECIP, + QPU_W_SFU_RECIPSQRT, + QPU_W_SFU_EXP, + QPU_W_SFU_LOG, + QPU_W_TMU0_S, + QPU_W_TMU0_T, + QPU_W_TMU0_R, + QPU_W_TMU0_B, + QPU_W_TMU1_S, + QPU_W_TMU1_T, + QPU_W_TMU1_R, + QPU_W_TMU1_B, +}; + +enum qpu_sig_bits { + QPU_SIG_SW_BREAKPOINT, + QPU_SIG_NONE, + QPU_SIG_THREAD_SWITCH, + QPU_SIG_PROG_END, + QPU_SIG_WAIT_FOR_SCOREBOARD, + QPU_SIG_SCOREBOARD_UNLOCK, + QPU_SIG_LAST_THREAD_SWITCH, + QPU_SIG_COVERAGE_LOAD, + QPU_SIG_COLOR_LOAD, + QPU_SIG_COLOR_LOAD_END, + QPU_SIG_LOAD_TMU0, + QPU_SIG_LOAD_TMU1, + QPU_SIG_ALPHA_MASK_LOAD, + QPU_SIG_SMALL_IMM, + QPU_SIG_LOAD_IMM, + QPU_SIG_BRANCH +}; + +enum qpu_mux { + /* hardware mux values */ + QPU_MUX_R0, + QPU_MUX_R1, + QPU_MUX_R2, + QPU_MUX_R3, + QPU_MUX_R4, + QPU_MUX_R5, + QPU_MUX_A, + QPU_MUX_B, + + /* non-hardware mux values */ + QPU_MUX_IMM, +}; + +enum qpu_cond { + QPU_COND_NEVER, + QPU_COND_ALWAYS, + QPU_COND_ZS, + QPU_COND_ZC, + QPU_COND_NS, + QPU_COND_NC, + QPU_COND_CS, + QPU_COND_CC, +}; + +enum qpu_pack_mul { + QPU_PACK_MUL_NOP, + /* replicated to each 8 bits of the 32-bit dst. */ + QPU_PACK_MUL_8888 = 3, + QPU_PACK_MUL_8A, + QPU_PACK_MUL_8B, + QPU_PACK_MUL_8C, + QPU_PACK_MUL_8D, +}; + +enum qpu_pack_a { + QPU_PACK_A_NOP, + /* convert to 16 bit float if float input, or to int16. */ + QPU_PACK_A_16A, + QPU_PACK_A_16B, + /* replicated to each 8 bits of the 32-bit dst. */ + QPU_PACK_A_8888, + /* Convert to 8-bit unsigned int. */ + QPU_PACK_A_8A, + QPU_PACK_A_8B, + QPU_PACK_A_8C, + QPU_PACK_A_8D, + + /* Saturating variants of the previous instructions. 
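+	 * These clamp the result to the limits of the destination type
+	 * instead of truncating.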
*/ + QPU_PACK_A_32_SAT, /* int-only */ + QPU_PACK_A_16A_SAT, /* int or float */ + QPU_PACK_A_16B_SAT, + QPU_PACK_A_8888_SAT, + QPU_PACK_A_8A_SAT, + QPU_PACK_A_8B_SAT, + QPU_PACK_A_8C_SAT, + QPU_PACK_A_8D_SAT, +}; + +enum qpu_unpack_r4 { + QPU_UNPACK_R4_NOP, + QPU_UNPACK_R4_F16A_TO_F32, + QPU_UNPACK_R4_F16B_TO_F32, + QPU_UNPACK_R4_8D_REP, + QPU_UNPACK_R4_8A, + QPU_UNPACK_R4_8B, + QPU_UNPACK_R4_8C, + QPU_UNPACK_R4_8D, +}; + +#define QPU_MASK(high, low) \ + ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low)) + +#define QPU_GET_FIELD(word, field) \ + ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT)) + +#define QPU_SIG_SHIFT 60 +#define QPU_SIG_MASK QPU_MASK(63, 60) + +#define QPU_UNPACK_SHIFT 57 +#define QPU_UNPACK_MASK QPU_MASK(59, 57) + +/** + * If set, the pack field means PACK_MUL or R4 packing, instead of normal + * regfile a packing. + */ +#define QPU_PM ((uint64_t)1 << 56) + +#define QPU_PACK_SHIFT 52 +#define QPU_PACK_MASK QPU_MASK(55, 52) + +#define QPU_COND_ADD_SHIFT 49 +#define QPU_COND_ADD_MASK QPU_MASK(51, 49) +#define QPU_COND_MUL_SHIFT 46 +#define QPU_COND_MUL_MASK QPU_MASK(48, 46) + +#define QPU_SF ((uint64_t)1 << 45) + +#define QPU_WADDR_ADD_SHIFT 38 +#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38) +#define QPU_WADDR_MUL_SHIFT 32 +#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32) + +#define QPU_OP_MUL_SHIFT 29 +#define QPU_OP_MUL_MASK QPU_MASK(31, 29) + +#define QPU_RADDR_A_SHIFT 18 +#define QPU_RADDR_A_MASK QPU_MASK(23, 18) +#define QPU_RADDR_B_SHIFT 12 +#define QPU_RADDR_B_MASK QPU_MASK(17, 12) +#define QPU_SMALL_IMM_SHIFT 12 +#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12) + +#define QPU_ADD_A_SHIFT 9 +#define QPU_ADD_A_MASK QPU_MASK(11, 9) +#define QPU_ADD_B_SHIFT 6 +#define QPU_ADD_B_MASK QPU_MASK(8, 6) +#define QPU_MUL_A_SHIFT 3 +#define QPU_MUL_A_MASK QPU_MASK(5, 3) +#define QPU_MUL_B_SHIFT 0 +#define QPU_MUL_B_MASK QPU_MASK(2, 0) + +#define QPU_WS ((uint64_t)1 << 44) + +#define QPU_OP_ADD_SHIFT 24 +#define QPU_OP_ADD_MASK QPU_MASK(28, 24) + +#endif /* VC4_QPU_DEFINES_H */ diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c new file mode 100644 index 000000000000..f67124b4c534 --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -0,0 +1,513 @@ +/* + * Copyright © 2014 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/** + * DOC: Shader validator for VC4. 
+ * + * The VC4 has no IOMMU between it and system memory, so a user with + * access to execute shaders could escalate privilege by overwriting + * system memory (using the VPM write address register in the + * general-purpose DMA mode) or reading system memory it shouldn't + * (reading it as a texture, or uniform data, or vertex data). + * + * This walks over a shader BO, ensuring that its accesses are + * appropriately bounded, and recording how many texture accesses are + * made and where so that we can do relocations for them in the + * uniform stream. + */ + +#include "vc4_drv.h" +#include "vc4_qpu_defines.h" + +struct vc4_shader_validation_state { + struct vc4_texture_sample_info tmu_setup[2]; + int tmu_write_count[2]; + + /* For registers that were last written to by a MIN instruction with + * one argument being a uniform, the address of the uniform. + * Otherwise, ~0. + * + * This is used for the validation of direct address memory reads. + */ + uint32_t live_min_clamp_offsets[32 + 32 + 4]; + bool live_max_clamp_regs[32 + 32 + 4]; +}; + +static uint32_t +waddr_to_live_reg_index(uint32_t waddr, bool is_b) +{ + if (waddr < 32) { + if (is_b) + return 32 + waddr; + else + return waddr; + } else if (waddr <= QPU_W_ACC3) { + return 64 + waddr - QPU_W_ACC0; + } else { + return ~0; + } +} + +static uint32_t +raddr_add_a_to_live_reg_index(uint64_t inst) +{ + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); + uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A); + uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A); + uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B); + + if (add_a == QPU_MUX_A) + return raddr_a; + else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM) + return 32 + raddr_b; + else if (add_a <= QPU_MUX_R3) + return 64 + add_a; + else + return ~0; +} + +static bool +is_tmu_submit(uint32_t waddr) +{ + return (waddr == QPU_W_TMU0_S || + waddr == QPU_W_TMU1_S); +} + +static bool +is_tmu_write(uint32_t waddr) +{ + return (waddr >= QPU_W_TMU0_S && + waddr <= QPU_W_TMU1_B); +} + +static bool +record_texture_sample(struct vc4_validated_shader_info *validated_shader, + struct vc4_shader_validation_state *validation_state, + int tmu) +{ + uint32_t s = validated_shader->num_texture_samples; + int i; + struct vc4_texture_sample_info *temp_samples; + + temp_samples = krealloc(validated_shader->texture_samples, + (s + 1) * sizeof(*temp_samples), + GFP_KERNEL); + if (!temp_samples) + return false; + + memcpy(&temp_samples[s], + &validation_state->tmu_setup[tmu], + sizeof(*temp_samples)); + + validated_shader->num_texture_samples = s + 1; + validated_shader->texture_samples = temp_samples; + + for (i = 0; i < 4; i++) + validation_state->tmu_setup[tmu].p_offset[i] = ~0; + + return true; +} + +static bool +check_tmu_write(uint64_t inst, + struct vc4_validated_shader_info *validated_shader, + struct vc4_shader_validation_state *validation_state, + bool is_mul) +{ + uint32_t waddr = (is_mul ? 
+			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
+			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+	int tmu = waddr > QPU_W_TMU0_B;
+	bool submit = is_tmu_submit(waddr);
+	bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
+	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+	if (is_direct) {
+		uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+		uint32_t clamp_reg, clamp_offset;
+
+		if (sig == QPU_SIG_SMALL_IMM) {
+			DRM_ERROR("direct TMU read used small immediate\n");
+			return false;
+		}
+
+		/* Make sure that this texture load is an add of the base
+		 * address of the UBO to a clamped offset within the UBO.
+		 */
+		if (is_mul ||
+		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+			DRM_ERROR("direct TMU load wasn't an add\n");
+			return false;
+		}
+
+		/* We assert that the clamped address is the first
+		 * argument, and the UBO base address is the second argument.
+		 * This is arbitrary, but simpler than supporting flipping the
+		 * two either way.
+		 */
+		clamp_reg = raddr_add_a_to_live_reg_index(inst);
+		if (clamp_reg == ~0) {
+			DRM_ERROR("direct TMU load wasn't clamped\n");
+			return false;
+		}
+
+		clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
+		if (clamp_offset == ~0) {
+			DRM_ERROR("direct TMU load wasn't clamped\n");
+			return false;
+		}
+
+		/* Store the clamp value's offset in p1 (see reloc_tex() in
+		 * vc4_validate.c).
+		 */
+		validation_state->tmu_setup[tmu].p_offset[1] =
+			clamp_offset;
+
+		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+			DRM_ERROR("direct TMU load didn't add to a uniform\n");
+			return false;
+		}
+
+		validation_state->tmu_setup[tmu].is_direct = true;
+	} else {
+		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
+					      raddr_b == QPU_R_UNIF)) {
+			DRM_ERROR("uniform read in the same instruction as "
+				  "texture setup.\n");
+			return false;
+		}
+	}
+
+	if (validation_state->tmu_write_count[tmu] >= 4) {
+		DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+			  tmu);
+		return false;
+	}
+	validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
+		validated_shader->uniforms_size;
+	validation_state->tmu_write_count[tmu]++;
+	/* Since direct uses a RADDR uniform reference, it will get counted in
+	 * check_instruction_reads()
+	 */
+	if (!is_direct)
+		validated_shader->uniforms_size += 4;
+
+	if (submit) {
+		if (!record_texture_sample(validated_shader,
+					   validation_state, tmu)) {
+			return false;
+		}
+
+		validation_state->tmu_write_count[tmu] = 0;
+	}
+
+	return true;
+}
+
+static bool
+check_reg_write(uint64_t inst,
+		struct vc4_validated_shader_info *validated_shader,
+		struct vc4_shader_validation_state *validation_state,
+		bool is_mul)
+{
+	uint32_t waddr = (is_mul ?
+			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
+			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+
+	switch (waddr) {
+	case QPU_W_UNIFORMS_ADDRESS:
+		/* XXX: We'll probably need to support this for reladdr, but
+		 * it's definitely a security-related one.
+		 */
+		DRM_ERROR("uniforms address load unsupported\n");
+		return false;
+
+	case QPU_W_TLB_COLOR_MS:
+	case QPU_W_TLB_COLOR_ALL:
+	case QPU_W_TLB_Z:
+		/* These only interact with the tile buffer, not main memory,
+		 * so they're safe.
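+		 * Tile-buffer contents only reach system memory through the
+		 * stores in the render control list, which are validated
+		 * separately (see vc4_get_rcl()).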
+		 */
+		return true;
+
+	case QPU_W_TMU0_S:
+	case QPU_W_TMU0_T:
+	case QPU_W_TMU0_R:
+	case QPU_W_TMU0_B:
+	case QPU_W_TMU1_S:
+	case QPU_W_TMU1_T:
+	case QPU_W_TMU1_R:
+	case QPU_W_TMU1_B:
+		return check_tmu_write(inst, validated_shader, validation_state,
+				       is_mul);
+
+	case QPU_W_HOST_INT:
+	case QPU_W_TMU_NOSWAP:
+	case QPU_W_TLB_ALPHA_MASK:
+	case QPU_W_MUTEX_RELEASE:
+		/* XXX: I haven't thought about these, so don't support them
+		 * for now.
+		 */
+		DRM_ERROR("Unsupported waddr %d\n", waddr);
+		return false;
+
+	case QPU_W_VPM_ADDR:
+		DRM_ERROR("General VPM DMA unsupported\n");
+		return false;
+
+	case QPU_W_VPM:
+	case QPU_W_VPMVCD_SETUP:
+		/* We allow VPM setup in general, even including VPM DMA
+		 * configuration setup, because the (unsafe) DMA can only be
+		 * triggered by QPU_W_VPM_ADDR writes.
+		 */
+		return true;
+
+	case QPU_W_TLB_STENCIL_SETUP:
+		return true;
+	}
+
+	return true;
+}
+
+static void
+track_live_clamps(uint64_t inst,
+		  struct vc4_validated_shader_info *validated_shader,
+		  struct vc4_shader_validation_state *validation_state)
+{
+	uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
+	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+	uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+	uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
+	uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+	bool ws = inst & QPU_WS;
+	uint32_t lri_add_a, lri_add, lri_mul;
+	bool add_a_is_min_0;
+
+	/* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
+	 * before we clear previous live state.
+	 */
+	lri_add_a = raddr_add_a_to_live_reg_index(inst);
+	add_a_is_min_0 = (lri_add_a != ~0 &&
+			  validation_state->live_max_clamp_regs[lri_add_a]);
+
+	/* Clear live state for registers written by our instruction. */
+	lri_add = waddr_to_live_reg_index(waddr_add, ws);
+	lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
+	if (lri_mul != ~0) {
+		validation_state->live_max_clamp_regs[lri_mul] = false;
+		validation_state->live_min_clamp_offsets[lri_mul] = ~0;
+	}
+	if (lri_add != ~0) {
+		validation_state->live_max_clamp_regs[lri_add] = false;
+		validation_state->live_min_clamp_offsets[lri_add] = ~0;
+	} else {
+		/* Nothing further to do for live tracking, since only ADDs
+		 * generate new live clamp registers.
+		 */
+		return;
+	}
+
+	/* Now, handle remaining live clamp tracking for the ADD operation. */
+
+	if (cond_add != QPU_COND_ALWAYS)
+		return;
+
+	if (op_add == QPU_A_MAX) {
+		/* Track live clamps of a value to a minimum of 0 (in either
+		 * arg).
+		 */
+		if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
+		    (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
+			return;
+		}
+
+		validation_state->live_max_clamp_regs[lri_add] = true;
+	} else if (op_add == QPU_A_MIN) {
+		/* Track live clamps of a value clamped to a minimum of 0 and
+		 * a maximum of some uniform's offset.
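+		 *
+		 * A hypothetical instruction sequence this tracking is meant
+		 * to accept (QPU pseudo-asm, small immediate 0 in raddr_b):
+		 *
+		 *   max r0, ra0, 0     ; r0 now known to be >= 0
+		 *   min r1, r0, unif   ; r1 in [0, unif]; uniform offset recorded
+		 *   add t0s, r1, unif  ; direct TMU read, clamped offset + UBO base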
+ */ + if (!add_a_is_min_0) + return; + + if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) && + !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF && + sig != QPU_SIG_SMALL_IMM)) { + return; + } + + validation_state->live_min_clamp_offsets[lri_add] = + validated_shader->uniforms_size; + } +} + +static bool +check_instruction_writes(uint64_t inst, + struct vc4_validated_shader_info *validated_shader, + struct vc4_shader_validation_state *validation_state) +{ + uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); + uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); + bool ok; + + if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) { + DRM_ERROR("ADD and MUL both set up textures\n"); + return false; + } + + ok = (check_reg_write(inst, validated_shader, validation_state, + false) && + check_reg_write(inst, validated_shader, validation_state, + true)); + + track_live_clamps(inst, validated_shader, validation_state); + + return ok; +} + +static bool +check_instruction_reads(uint64_t inst, + struct vc4_validated_shader_info *validated_shader) +{ + uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A); + uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B); + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); + + if (raddr_a == QPU_R_UNIF || + (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) { + /* This can't overflow the uint32_t, because we're reading 8 + * bytes of instruction to increment by 4 here, so we'd + * already be OOM. + */ + validated_shader->uniforms_size += 4; + } + + return true; +} + +struct vc4_validated_shader_info * +vc4_validate_shader(struct drm_gem_cma_object *shader_obj) +{ + bool found_shader_end = false; + int shader_end_ip = 0; + uint32_t ip, max_ip; + uint64_t *shader; + struct vc4_validated_shader_info *validated_shader; + struct vc4_shader_validation_state validation_state; + int i; + + memset(&validation_state, 0, sizeof(validation_state)); + + for (i = 0; i < 8; i++) + validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0; + for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++) + validation_state.live_min_clamp_offsets[i] = ~0; + + shader = shader_obj->vaddr; + max_ip = shader_obj->base.size / sizeof(uint64_t); + + validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL); + if (!validated_shader) + return NULL; + + for (ip = 0; ip < max_ip; ip++) { + uint64_t inst = shader[ip]; + uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); + + switch (sig) { + case QPU_SIG_NONE: + case QPU_SIG_WAIT_FOR_SCOREBOARD: + case QPU_SIG_SCOREBOARD_UNLOCK: + case QPU_SIG_COLOR_LOAD: + case QPU_SIG_LOAD_TMU0: + case QPU_SIG_LOAD_TMU1: + case QPU_SIG_PROG_END: + case QPU_SIG_SMALL_IMM: + if (!check_instruction_writes(inst, validated_shader, + &validation_state)) { + DRM_ERROR("Bad write at ip %d\n", ip); + goto fail; + } + + if (!check_instruction_reads(inst, validated_shader)) + goto fail; + + if (sig == QPU_SIG_PROG_END) { + found_shader_end = true; + shader_end_ip = ip; + } + + break; + + case QPU_SIG_LOAD_IMM: + if (!check_instruction_writes(inst, validated_shader, + &validation_state)) { + DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); + goto fail; + } + break; + + default: + DRM_ERROR("Unsupported QPU signal %d at " + "instruction %d\n", sig, ip); + goto fail; + } + + /* There are two delay slots after program end is signaled + * that are still executed, then we're finished. 
+		 */
+		if (found_shader_end && ip == shader_end_ip + 2)
+			break;
+	}
+
+	if (ip == max_ip) {
+		DRM_ERROR("shader failed to terminate before "
+			  "shader BO end at %zd\n",
+			  shader_obj->base.size);
+		goto fail;
+	}
+
+	/* Again, no chance of integer overflow here because the worst case
+	 * scenario is 8 bytes of uniforms plus handles per 8-byte
+	 * instruction.
+	 */
+	validated_shader->uniforms_src_size =
+		(validated_shader->uniforms_size +
+		 4 * validated_shader->num_texture_samples);
+
+	return validated_shader;
+
+fail:
+	if (validated_shader) {
+		kfree(validated_shader->texture_samples);
+		kfree(validated_shader);
+	}
+	return NULL;
+}
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index 219d34c42272..74de18416be9 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -28,9 +28,11 @@
 
 #define DRM_VC4_CREATE_BO                         0x03
 #define DRM_VC4_MMAP_BO                           0x04
+#define DRM_VC4_CREATE_SHADER_BO                  0x05
 
 #define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
 #define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
+#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
 
 /**
  * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
@@ -65,4 +67,27 @@ struct drm_vc4_mmap_bo {
 	__u64 offset;
 };
 
+/**
+ * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
+ * shader BOs.
+ *
+ * Since allowing a shader to be overwritten while it's also being
+ * executed from would allow privilege escalation, shaders must be
+ * created using this ioctl, and they can't be mmapped later.
+ */
+struct drm_vc4_create_shader_bo {
+	/* Size of the data argument. */
+	__u32 size;
+	/* Flags, currently must be 0. */
+	__u32 flags;
+
+	/* Pointer to the data. */
+	__u64 data;
+
+	/** Returned GEM handle for the BO. */
+	__u32 handle;
+	/* Pad, must be 0. */
+	__u32 pad;
+};
+
 #endif /* _UAPI_VC4_DRM_H_ */
--
cgit v1.2.3


From d5b1a78a772f1e31a94f8babfa964152ec5e9aa5 Mon Sep 17 00:00:00 2001
From: Eric Anholt
Date: Mon, 30 Nov 2015 12:13:37 -0800
Subject: drm/vc4: Add support for drawing 3D frames.

The user submission is basically a pointer to a command list and a
pointer to uniforms.  We copy those into the kernel, validate and
relocate them, and store the result in a GPU BO which we queue for
execution.

v2: Drop support for NV shader recs (not necessary for GL), simplify
    vc4_use_bo(), improve bin flush/semaphore checks, use __u32 style
    types.
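For orientation, the userspace side of a submission reduces to filling
in the new struct and waiting on the returned seqno.  A rough sketch
(not part of this patch; names abbreviated, error handling omitted):

    struct drm_vc4_submit_cl submit = { 0 };

    submit.bin_cl = (uintptr_t)bin_cl;           /* binner control list */
    submit.bin_cl_size = bin_cl_size;
    submit.shader_rec = (uintptr_t)shader_recs;  /* shader records */
    submit.shader_rec_size = shader_rec_size;
    submit.shader_rec_count = shader_rec_count;
    submit.uniforms = (uintptr_t)uniforms;       /* uniform stream */
    submit.uniforms_size = uniforms_size;
    submit.bo_handles = (uintptr_t)handles;      /* GEM handles the job uses */
    submit.bo_handle_count = handle_count;

    drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
    /* submit.seqno can then be passed to DRM_IOCTL_VC4_WAIT_SEQNO. */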
Signed-off-by: Eric Anholt --- drivers/gpu/drm/vc4/Makefile | 7 + drivers/gpu/drm/vc4/vc4_drv.c | 15 +- drivers/gpu/drm/vc4/vc4_drv.h | 182 +++++++ drivers/gpu/drm/vc4/vc4_gem.c | 642 +++++++++++++++++++++++ drivers/gpu/drm/vc4/vc4_irq.c | 210 ++++++++ drivers/gpu/drm/vc4/vc4_packet.h | 399 +++++++++++++++ drivers/gpu/drm/vc4/vc4_render_cl.c | 634 +++++++++++++++++++++++ drivers/gpu/drm/vc4/vc4_trace.h | 63 +++ drivers/gpu/drm/vc4/vc4_trace_points.c | 14 + drivers/gpu/drm/vc4/vc4_v3d.c | 37 ++ drivers/gpu/drm/vc4/vc4_validate.c | 900 +++++++++++++++++++++++++++++++++ include/uapi/drm/vc4_drm.h | 141 ++++++ 12 files changed, 3243 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/vc4/vc4_gem.c create mode 100644 drivers/gpu/drm/vc4/vc4_irq.c create mode 100644 drivers/gpu/drm/vc4/vc4_packet.h create mode 100644 drivers/gpu/drm/vc4/vc4_render_cl.c create mode 100644 drivers/gpu/drm/vc4/vc4_trace.h create mode 100644 drivers/gpu/drm/vc4/vc4_trace_points.c create mode 100644 drivers/gpu/drm/vc4/vc4_validate.c (limited to 'include') diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile index e87a6f2f5916..4c6a99f0398c 100644 --- a/drivers/gpu/drm/vc4/Makefile +++ b/drivers/gpu/drm/vc4/Makefile @@ -8,12 +8,19 @@ vc4-y := \ vc4_crtc.o \ vc4_drv.o \ vc4_kms.o \ + vc4_gem.o \ vc4_hdmi.o \ vc4_hvs.o \ + vc4_irq.o \ vc4_plane.o \ + vc4_render_cl.o \ + vc4_trace_points.o \ vc4_v3d.o \ + vc4_validate.o \ vc4_validate_shaders.o vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o obj-$(CONFIG_DRM_VC4) += vc4.o + +CFLAGS_vc4_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index db58d74efe95..2cfee5959455 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -74,6 +74,9 @@ static const struct file_operations vc4_drm_fops = { }; static const struct drm_ioctl_desc vc4_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), @@ -83,10 +86,16 @@ static struct drm_driver vc4_drm_driver = { .driver_features = (DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM | + DRIVER_HAVE_IRQ | DRIVER_PRIME), .lastclose = vc4_lastclose, .preclose = vc4_drm_preclose, + .irq_handler = vc4_irq, + .irq_preinstall = vc4_irq_preinstall, + .irq_postinstall = vc4_irq_postinstall, + .irq_uninstall = vc4_irq_uninstall, + .enable_vblank = vc4_enable_vblank, .disable_vblank = vc4_disable_vblank, .get_vblank_counter = drm_vblank_count, @@ -181,9 +190,11 @@ static int vc4_drm_bind(struct device *dev) if (ret) goto unref; + vc4_gem_init(drm); + ret = component_bind_all(dev, drm); if (ret) - goto unref; + goto gem_destroy; ret = drm_dev_register(drm, 0); if (ret < 0) @@ -207,6 +218,8 @@ unregister: drm_dev_unregister(drm); unbind_all: component_unbind_all(dev, drm); +gem_destroy: + vc4_gem_destroy(drm); unref: drm_dev_unref(drm); vc4_bo_cache_destroy(drm); diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 8945463e70b6..0bc8c57196ac 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -49,6 +49,48 @@ struct vc4_dev { /* Protects bo_cache and the BO stats. */ struct mutex bo_lock; + + /* Sequence number for the last job queued in job_list. + * Starts at 0 (no jobs emitted). 
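+	 *
+	 * A job with seqno S is known to have completed once
+	 * finished_seqno >= S (see vc4_wait_for_seqno()).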
+ */ + uint64_t emit_seqno; + + /* Sequence number for the last completed job on the GPU. + * Starts at 0 (no jobs completed). + */ + uint64_t finished_seqno; + + /* List of all struct vc4_exec_info for jobs to be executed. + * The first job in the list is the one currently programmed + * into ct0ca/ct1ca for execution. + */ + struct list_head job_list; + /* List of the finished vc4_exec_infos waiting to be freed by + * job_done_work. + */ + struct list_head job_done_list; + /* Spinlock used to synchronize the job_list and seqno + * accesses between the IRQ handler and GEM ioctls. + */ + spinlock_t job_lock; + wait_queue_head_t job_wait_queue; + struct work_struct job_done_work; + + /* The binner overflow memory that's currently set up in + * BPOA/BPOS registers. When overflow occurs and a new one is + * allocated, the previous one will be moved to + * vc4->current_exec's free list. + */ + struct vc4_bo *overflow_mem; + struct work_struct overflow_mem_work; + + struct { + uint32_t last_ct0ca, last_ct1ca; + struct timer_list timer; + struct work_struct reset_work; + } hangcheck; + + struct semaphore async_modeset; }; static inline struct vc4_dev * @@ -60,6 +102,9 @@ to_vc4_dev(struct drm_device *dev) struct vc4_bo { struct drm_gem_cma_object base; + /* seqno of the last job to render to this BO. */ + uint64_t seqno; + /* List entry for the BO's position in either * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list */ @@ -130,6 +175,101 @@ to_vc4_encoder(struct drm_encoder *encoder) #define HVS_READ(offset) readl(vc4->hvs->regs + offset) #define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset) +struct vc4_exec_info { + /* Sequence number for this bin/render job. */ + uint64_t seqno; + + /* Kernel-space copy of the ioctl arguments */ + struct drm_vc4_submit_cl *args; + + /* This is the array of BOs that were looked up at the start of exec. + * Command validation will use indices into this array. + */ + struct drm_gem_cma_object **bo; + uint32_t bo_count; + + /* Pointers for our position in vc4->job_list */ + struct list_head head; + + /* List of other BOs used in the job that need to be released + * once the job is complete. + */ + struct list_head unref_list; + + /* Current unvalidated indices into @bo loaded by the non-hardware + * VC4_PACKET_GEM_HANDLES. + */ + uint32_t bo_index[2]; + + /* This is the BO where we store the validated command lists, shader + * records, and uniforms. + */ + struct drm_gem_cma_object *exec_bo; + + /** + * This tracks the per-shader-record state (packet 64) that + * determines the length of the shader record and the offset + * it's expected to be found at. It gets read in from the + * command lists. + */ + struct vc4_shader_state { + uint32_t addr; + /* Maximum vertex index referenced by any primitive using this + * shader state. + */ + uint32_t max_index; + } *shader_state; + + /** How many shader states the user declared they were using. */ + uint32_t shader_state_size; + /** How many shader state records the validator has seen. */ + uint32_t shader_state_count; + + bool found_tile_binning_mode_config_packet; + bool found_start_tile_binning_packet; + bool found_increment_semaphore_packet; + bool found_flush; + uint8_t bin_tiles_x, bin_tiles_y; + struct drm_gem_cma_object *tile_bo; + uint32_t tile_alloc_offset; + + /** + * Computed addresses pointing into exec_bo where we start the + * bin thread (ct0) and render thread (ct1). + */ + uint32_t ct0ca, ct0ea; + uint32_t ct1ca, ct1ea; + + /* Pointer to the unvalidated bin CL (if present). 
+	 */
+	void *bin_u;
+
+	/* Pointers to the shader recs.  The paddr (p) gets incremented as CL
+	 * packets are relocated in validate_gl_shader_state, and the vaddrs
+	 * (u and v) get incremented and size decremented as the shader recs
+	 * themselves are validated.
+	 */
+	void *shader_rec_u;
+	void *shader_rec_v;
+	uint32_t shader_rec_p;
+	uint32_t shader_rec_size;
+
+	/* Pointers to the uniform data.  These pointers are incremented, and
+	 * size decremented, as each batch of uniforms is uploaded.
+	 */
+	void *uniforms_u;
+	void *uniforms_v;
+	uint32_t uniforms_p;
+	uint32_t uniforms_size;
+};
+
+static inline struct vc4_exec_info *
+vc4_first_job(struct vc4_dev *vc4)
+{
+	if (list_empty(&vc4->job_list))
+		return NULL;
+	return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+}
+
 /**
  * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
  * setup parameters.
@@ -231,10 +371,31 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
 /* vc4_drv.c */
 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
 
+/* vc4_gem.c */
+void vc4_gem_init(struct drm_device *dev);
+void vc4_gem_destroy(struct drm_device *dev);
+int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+void vc4_submit_next_job(struct drm_device *dev);
+int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
+		       uint64_t timeout_ns, bool interruptible);
+void vc4_job_handle_completed(struct vc4_dev *vc4);
+
 /* vc4_hdmi.c */
 extern struct platform_driver vc4_hdmi_driver;
 int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
 
+/* vc4_irq.c */
+irqreturn_t vc4_irq(int irq, void *arg);
+void vc4_irq_preinstall(struct drm_device *dev);
+int vc4_irq_postinstall(struct drm_device *dev);
+void vc4_irq_uninstall(struct drm_device *dev);
+void vc4_irq_reset(struct drm_device *dev);
+
 /* vc4_hvs.c */
 extern struct platform_driver vc4_hvs_driver;
 void vc4_hvs_dump_state(struct drm_device *dev);
@@ -253,6 +414,27 @@ u32 vc4_plane_dlist_size(struct drm_plane_state *state);
 extern struct platform_driver vc4_v3d_driver;
 int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
 int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
+
+/* vc4_validate.c */
+int
+vc4_validate_bin_cl(struct drm_device *dev,
+		    void *validated,
+		    void *unvalidated,
+		    struct vc4_exec_info *exec);
+
+int
+vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
+
+struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+				      uint32_t hindex);
+
+int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
+
+bool vc4_check_tex_size(struct vc4_exec_info *exec,
+			struct drm_gem_cma_object *fbo,
+			uint32_t offset, uint8_t tiling_format,
+			uint32_t width, uint32_t height, uint8_t cpp);
 
 /* vc4_validate_shader.c */
 struct vc4_validated_shader_info *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
new file mode 100644
index 000000000000..936dddfa890f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+#include "vc4_trace.h"
+
+static void
+vc4_queue_hangcheck(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	mod_timer(&vc4->hangcheck.timer,
+		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
+}
+
+static void
+vc4_reset(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	DRM_INFO("Resetting GPU.\n");
+	vc4_v3d_set_power(vc4, false);
+	vc4_v3d_set_power(vc4, true);
+
+	vc4_irq_reset(dev);
+
+	/* Rearm the hangcheck -- another job might have been waiting
+	 * for our hung one to get kicked off, and vc4_irq_reset()
+	 * would have started it.
+	 */
+	vc4_queue_hangcheck(dev);
+}
+
+static void
+vc4_reset_work(struct work_struct *work)
+{
+	struct vc4_dev *vc4 =
+		container_of(work, struct vc4_dev, hangcheck.reset_work);
+
+	vc4_reset(vc4->dev);
+}
+
+static void
+vc4_hangcheck_elapsed(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *)data;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	uint32_t ct0ca, ct1ca;
+
+	/* If idle, we can stop watching for hangs. */
+	if (list_empty(&vc4->job_list))
+		return;
+
+	ct0ca = V3D_READ(V3D_CTNCA(0));
+	ct1ca = V3D_READ(V3D_CTNCA(1));
+
+	/* If we've made any progress in execution, rearm the timer
+	 * and wait.
+	 */
+	if (ct0ca != vc4->hangcheck.last_ct0ca ||
+	    ct1ca != vc4->hangcheck.last_ct1ca) {
+		vc4->hangcheck.last_ct0ca = ct0ca;
+		vc4->hangcheck.last_ct1ca = ct1ca;
+		vc4_queue_hangcheck(dev);
+		return;
+	}
+
+	/* We've gone too long with no progress, reset.  This has to
+	 * be done from a work struct, since resetting can sleep and
+	 * this timer hook isn't allowed to.
+	 */
+	schedule_work(&vc4->hangcheck.reset_work);
+}
+
+static void
+submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Set the current and end address of the control list.
+	 * Writing the end register is what starts the job.
+	 */
+	V3D_WRITE(V3D_CTNCA(thread), start);
+	V3D_WRITE(V3D_CTNEA(thread), end);
+}
+
+int
+vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
+		   bool interruptible)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	int ret = 0;
+	unsigned long timeout_expire;
+	DEFINE_WAIT(wait);
+
+	if (vc4->finished_seqno >= seqno)
+		return 0;
+
+	if (timeout_ns == 0)
+		return -ETIME;
+
+	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
+
+	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
+	for (;;) {
+		prepare_to_wait(&vc4->job_wait_queue, &wait,
+				interruptible ?
TASK_INTERRUPTIBLE :
+				TASK_UNINTERRUPTIBLE);
+
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (vc4->finished_seqno >= seqno)
+			break;
+
+		if (timeout_ns != ~0ull) {
+			if (time_after_eq(jiffies, timeout_expire)) {
+				ret = -ETIME;
+				break;
+			}
+			schedule_timeout(timeout_expire - jiffies);
+		} else {
+			schedule();
+		}
+	}
+
+	finish_wait(&vc4->job_wait_queue, &wait);
+	trace_vc4_wait_for_seqno_end(dev, seqno);
+
+	if (ret && ret != -ERESTARTSYS) {
+		DRM_ERROR("timeout waiting for render thread idle\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+vc4_flush_caches(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Flush the GPU L2 caches.  These caches sit on top of system
+	 * L3 (the 128kb or so shared with the CPU), and are
+	 * non-allocating in the L3.
+	 */
+	V3D_WRITE(V3D_L2CACTL,
+		  V3D_L2CACTL_L2CCLR);
+
+	V3D_WRITE(V3D_SLCACTL,
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
+}
+
+/* Sets the registers for the next job to actually be executed in
+ * the hardware.
+ *
+ * The job_lock should be held during this.
+ */
+void
+vc4_submit_next_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+	if (!exec)
+		return;
+
+	vc4_flush_caches(dev);
+
+	/* Disable the binner's pre-loaded overflow memory address */
+	V3D_WRITE(V3D_BPOA, 0);
+	V3D_WRITE(V3D_BPOS, 0);
+
+	if (exec->ct0ca != exec->ct0ea)
+		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
+}
+
+static void
+vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+{
+	struct vc4_bo *bo;
+	unsigned i;
+
+	for (i = 0; i < exec->bo_count; i++) {
+		bo = to_vc4_bo(&exec->bo[i]->base);
+		bo->seqno = seqno;
+	}
+
+	list_for_each_entry(bo, &exec->unref_list, unref_head) {
+		bo->seqno = seqno;
+	}
+}
+
+/* Queues a struct vc4_exec_info for execution.  If no job is
+ * currently executing, then submits it.
+ *
+ * Unlike most GPUs, our hardware only handles one command list at a
+ * time.  To queue multiple jobs at once, we'd need to edit the
+ * previous command list to have a jump to the new one at the end, and
+ * then bump the end address.  That's a change for a later date,
+ * though.
+ */
+static void
+vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	uint64_t seqno;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+
+	seqno = ++vc4->emit_seqno;
+	exec->seqno = seqno;
+	vc4_update_bo_seqnos(exec, seqno);
+
+	list_add_tail(&exec->head, &vc4->job_list);
+
+	/* If no job was executing, kick ours off.  Otherwise, it'll
+	 * get started when the previous job's frame done interrupt
+	 * occurs.
+	 */
+	if (vc4_first_job(vc4) == exec) {
+		vc4_submit_next_job(dev);
+		vc4_queue_hangcheck(dev);
+	}
+
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/**
+ * Looks up a bunch of GEM handles for BOs and stores the array for
+ * use in the command validator that actually writes relocated
+ * addresses pointing to them.
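+ *
+ * The references taken on the BOs here are dropped again in
+ * vc4_complete_exec() once the job is retired.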
+ */
+static int
+vc4_cl_lookup_bos(struct drm_device *dev,
+		  struct drm_file *file_priv,
+		  struct vc4_exec_info *exec)
+{
+	struct drm_vc4_submit_cl *args = exec->args;
+	uint32_t *handles;
+	int ret = 0;
+	int i;
+
+	exec->bo_count = args->bo_handle_count;
+
+	if (!exec->bo_count) {
+		/* See comment on bo_index for why we have to check
+		 * this.
+		 */
+		DRM_ERROR("Rendering requires BOs to validate\n");
+		return -EINVAL;
+	}
+
+	exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
+			   GFP_KERNEL);
+	if (!exec->bo) {
+		DRM_ERROR("Failed to allocate validated BO pointers\n");
+		return -ENOMEM;
+	}
+
+	handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+	if (!handles) {
+		DRM_ERROR("Failed to allocate incoming GEM handles\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (copy_from_user(handles,
+			   (void __user *)(uintptr_t)args->bo_handles,
+			   exec->bo_count * sizeof(uint32_t))) {
+		DRM_ERROR("Failed to copy in GEM handles\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	spin_lock(&file_priv->table_lock);
+	for (i = 0; i < exec->bo_count; i++) {
+		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+						     handles[i]);
+		if (!bo) {
+			DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+				  i, handles[i]);
+			ret = -EINVAL;
+			spin_unlock(&file_priv->table_lock);
+			goto fail;
+		}
+		drm_gem_object_reference(bo);
+		exec->bo[i] = (struct drm_gem_cma_object *)bo;
+	}
+	spin_unlock(&file_priv->table_lock);
+
+fail:
+	kfree(handles);
+	return ret;
+}
+
+static int
+vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	struct drm_vc4_submit_cl *args = exec->args;
+	void *temp = NULL;
+	void *bin;
+	int ret = 0;
+	uint32_t bin_offset = 0;
+	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
+					     16);
+	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
+	uint32_t exec_size = uniforms_offset + args->uniforms_size;
+	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
+					  args->shader_rec_count);
+	struct vc4_bo *bo;
+
+	if (uniforms_offset < shader_rec_offset ||
+	    exec_size < uniforms_offset ||
+	    args->shader_rec_count >= (UINT_MAX /
+					  sizeof(struct vc4_shader_state)) ||
+	    temp_size < exec_size) {
+		DRM_ERROR("overflow in exec arguments\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Allocate space where we'll store the copied in user command lists
+	 * and shader records.
+	 *
+	 * We don't just copy directly into the BOs because we need to
+	 * read the contents back for validation, and I think the
+	 * bo->vaddr is uncached access.
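+	 * If so, reading the data back through that mapping for
+	 * validation would be needlessly slow.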
+	 */
+	temp = kmalloc(temp_size, GFP_KERNEL);
+	if (!temp) {
+		DRM_ERROR("Failed to allocate storage for copying "
+			  "in bin/render CLs.\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	bin = temp + bin_offset;
+	exec->shader_rec_u = temp + shader_rec_offset;
+	exec->uniforms_u = temp + uniforms_offset;
+	exec->shader_state = temp + exec_size;
+	exec->shader_state_size = args->shader_rec_count;
+
+	if (copy_from_user(bin,
+			   (void __user *)(uintptr_t)args->bin_cl,
+			   args->bin_cl_size)) {
+		DRM_ERROR("Failed to copy in bin cl\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (copy_from_user(exec->shader_rec_u,
+			   (void __user *)(uintptr_t)args->shader_rec,
+			   args->shader_rec_size)) {
+		DRM_ERROR("Failed to copy in shader recs\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (copy_from_user(exec->uniforms_u,
+			   (void __user *)(uintptr_t)args->uniforms,
+			   args->uniforms_size)) {
+		DRM_ERROR("Failed to copy in uniforms cl\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	bo = vc4_bo_create(dev, exec_size, true);
+	if (!bo) {
+		DRM_ERROR("Couldn't allocate BO for binning\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	exec->exec_bo = &bo->base;
+
+	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
+		      &exec->unref_list);
+
+	exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+
+	exec->bin_u = bin;
+
+	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
+	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+	exec->shader_rec_size = args->shader_rec_size;
+
+	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
+	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+	exec->uniforms_size = args->uniforms_size;
+
+	ret = vc4_validate_bin_cl(dev,
+				  exec->exec_bo->vaddr + bin_offset,
+				  bin,
+				  exec);
+	if (ret)
+		goto fail;
+
+	ret = vc4_validate_shader_recs(dev, exec);
+
+fail:
+	kfree(temp);
+	return ret;
+}
+
+static void
+vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	unsigned i;
+
+	/* Need the struct lock for drm_gem_object_unreference(). */
+	mutex_lock(&dev->struct_mutex);
+	if (exec->bo) {
+		for (i = 0; i < exec->bo_count; i++)
+			drm_gem_object_unreference(&exec->bo[i]->base);
+		kfree(exec->bo);
+	}
+
+	while (!list_empty(&exec->unref_list)) {
+		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
+						     struct vc4_bo, unref_head);
+		list_del(&bo->unref_head);
+		drm_gem_object_unreference(&bo->base.base);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(exec);
+}
+
+void
+vc4_job_handle_completed(struct vc4_dev *vc4)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	while (!list_empty(&vc4->job_done_list)) {
+		struct vc4_exec_info *exec =
+			list_first_entry(&vc4->job_done_list,
+					 struct vc4_exec_info, head);
+		list_del(&exec->head);
+
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+		vc4_complete_exec(vc4->dev, exec);
+		spin_lock_irqsave(&vc4->job_lock, irqflags);
+	}
+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+/* Scheduled when any job has been completed, this walks the list of
+ * jobs that had completed and unrefs their BOs and frees their exec
+ * structs.
+ */ +static void +vc4_job_done_work(struct work_struct *work) +{ + struct vc4_dev *vc4 = + container_of(work, struct vc4_dev, job_done_work); + + vc4_job_handle_completed(vc4); +} + +static int +vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev, + uint64_t seqno, + uint64_t *timeout_ns) +{ + unsigned long start = jiffies; + int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true); + + if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) { + uint64_t delta = jiffies_to_nsecs(jiffies - start); + + if (*timeout_ns >= delta) + *timeout_ns -= delta; + } + + return ret; +} + +int +vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vc4_wait_seqno *args = data; + + return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, + &args->timeout_ns); +} + +int +vc4_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_vc4_wait_bo *args = data; + struct drm_gem_object *gem_obj; + struct vc4_bo *bo; + + gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (!gem_obj) { + DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); + return -EINVAL; + } + bo = to_vc4_bo(gem_obj); + + ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, + &args->timeout_ns); + + drm_gem_object_unreference_unlocked(gem_obj); + return ret; +} + +/** + * Submits a command list to the VC4. + * + * This is what is called batchbuffer emitting on other hardware. + */ +int +vc4_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_vc4_submit_cl *args = data; + struct vc4_exec_info *exec; + int ret; + + if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { + DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); + return -EINVAL; + } + + exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); + if (!exec) { + DRM_ERROR("malloc failure on exec struct\n"); + return -ENOMEM; + } + + exec->args = args; + INIT_LIST_HEAD(&exec->unref_list); + + ret = vc4_cl_lookup_bos(dev, file_priv, exec); + if (ret) + goto fail; + + if (exec->args->bin_cl_size != 0) { + ret = vc4_get_bcl(dev, exec); + if (ret) + goto fail; + } else { + exec->ct0ca = 0; + exec->ct0ea = 0; + } + + ret = vc4_get_rcl(dev, exec); + if (ret) + goto fail; + + /* Clear this out of the struct we'll be putting in the queue, + * since it's part of our stack. + */ + exec->args = NULL; + + vc4_queue_submit(dev, exec); + + /* Return the seqno for our job. */ + args->seqno = vc4->emit_seqno; + + return 0; + +fail: + vc4_complete_exec(vc4->dev, exec); + + return ret; +} + +void +vc4_gem_init(struct drm_device *dev) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + INIT_LIST_HEAD(&vc4->job_list); + INIT_LIST_HEAD(&vc4->job_done_list); + spin_lock_init(&vc4->job_lock); + + INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work); + setup_timer(&vc4->hangcheck.timer, + vc4_hangcheck_elapsed, + (unsigned long)dev); + + INIT_WORK(&vc4->job_done_work, vc4_job_done_work); +} + +void +vc4_gem_destroy(struct drm_device *dev) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + /* Waiting for exec to finish would need to be done before + * unregistering V3D. + */ + WARN_ON(vc4->emit_seqno != vc4->finished_seqno); + + /* V3D should already have disabled its interrupt and cleared + * the overflow allocation registers. Now free the object. 
+	 */
+	if (vc4->overflow_mem) {
+		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+		vc4->overflow_mem = NULL;
+	}
+
+	vc4_bo_cache_destroy(dev);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
new file mode 100644
index 000000000000..b68060e758db
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/** DOC: Interrupt management for the V3D engine.
+ *
+ * We have an interrupt status register (V3D_INTCTL) which reports
+ * interrupts, and where writing 1 bits clears those interrupts.
+ * There are also a pair of interrupt registers
+ * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
+ * disables that specific interrupt, and 0s written are ignored
+ * (reading either one returns the set of enabled interrupts).
+ *
+ * When we take a render frame interrupt, we need to wake the
+ * processes waiting for some frame to be done, and get the next frame
+ * submitted ASAP (so the hardware doesn't sit idle when there's work
+ * to do).
+ *
+ * When we take the binner out of memory interrupt, we need to
+ * allocate some new memory and pass it to the binner so that the
+ * current job can make progress.
+ */
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+			 V3D_INT_FRDONE)
+
+DECLARE_WAIT_QUEUE_HEAD(render_wait);
+
+static void
+vc4_overflow_mem_work(struct work_struct *work)
+{
+	struct vc4_dev *vc4 =
+		container_of(work, struct vc4_dev, overflow_mem_work);
+	struct drm_device *dev = vc4->dev;
+	struct vc4_bo *bo;
+
+	bo = vc4_bo_create(dev, 256 * 1024, true);
+	if (!bo) {
+		DRM_ERROR("Couldn't allocate binner overflow mem\n");
+		return;
+	}
+
+	/* If there's a job executing currently, then our previous
+	 * overflow allocation is getting used in that job and we need
+	 * to queue it to be released when the job is done.  But if no
+	 * job is executing at all, then we can free the old overflow
+	 * object directly.
+	 *
+	 * No lock necessary for this pointer since we're the only
+	 * ones that update the pointer, and our workqueue won't
+	 * reenter.
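+	 *
+	 * Jobs retire in seqno order, so the job on the hardware right
+	 * now is finished_seqno + 1; tagging the old BO with that seqno
+	 * below makes waiters treat it like any other BO that job uses.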
+	 */
+	if (vc4->overflow_mem) {
+		struct vc4_exec_info *current_exec;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&vc4->job_lock, irqflags);
+		current_exec = vc4_first_job(vc4);
+		if (current_exec) {
+			vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+			list_add_tail(&vc4->overflow_mem->unref_head,
+				      &current_exec->unref_list);
+			vc4->overflow_mem = NULL;
+		}
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+	}
+
+	if (vc4->overflow_mem)
+		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
+	vc4->overflow_mem = bo;
+
+	V3D_WRITE(V3D_BPOA, bo->base.paddr);
+	V3D_WRITE(V3D_BPOS, bo->base.base.size);
+	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
+	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
+}
+
+static void
+vc4_irq_finish_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_job(vc4);
+
+	if (!exec)
+		return;
+
+	vc4->finished_seqno++;
+	list_move_tail(&exec->head, &vc4->job_done_list);
+	vc4_submit_next_job(dev);
+
+	wake_up_all(&vc4->job_wait_queue);
+	schedule_work(&vc4->job_done_work);
+}
+
+irqreturn_t
+vc4_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	uint32_t intctl;
+	irqreturn_t status = IRQ_NONE;
+
+	barrier();
+	intctl = V3D_READ(V3D_INTCTL);
+
+	/* Acknowledge the interrupts we're handling here.  The render
+	 * frame done interrupt will be cleared, while OUTOMEM will
+	 * stay high until the underlying cause is cleared.
+	 */
+	V3D_WRITE(V3D_INTCTL, intctl);
+
+	if (intctl & V3D_INT_OUTOMEM) {
+		/* Disable OUTOMEM until the work is done. */
+		V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
+		schedule_work(&vc4->overflow_mem_work);
+		status = IRQ_HANDLED;
+	}
+
+	if (intctl & V3D_INT_FRDONE) {
+		spin_lock(&vc4->job_lock);
+		vc4_irq_finish_job(dev);
+		spin_unlock(&vc4->job_lock);
+		status = IRQ_HANDLED;
+	}
+
+	return status;
+}
+
+void
+vc4_irq_preinstall(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	init_waitqueue_head(&vc4->job_wait_queue);
+	INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
+
+	/* Clear any pending interrupts someone might have left around
+	 * for us.
+	 */
+	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+}
+
+int
+vc4_irq_postinstall(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Enable both the render done and out of memory interrupts. */
+	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+	return 0;
+}
+
+void
+vc4_irq_uninstall(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	/* Disable sending interrupts for our driver's IRQs. */
+	V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
+
+	/* Clear any pending interrupts we might have left. */
+	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+	cancel_work_sync(&vc4->overflow_mem_work);
+}
+
+/** Reinitializes interrupt registers when a GPU reset is performed. */
+void vc4_irq_reset(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	unsigned long irqflags;
+
+	/* Acknowledge any stale IRQs. */
+	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+
+	/*
+	 * Turn all our interrupts on.  Binner out of memory is the
+	 * only one we expect to trigger at this point, since we've
+	 * just come from poweron and haven't supplied any overflow
+	 * memory yet.
+ */
+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ vc4_irq_finish_job(dev);
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_packet.h b/drivers/gpu/drm/vc4/vc4_packet.h
new file mode 100644
index 000000000000..0f31cc06500f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_packet.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef VC4_PACKET_H
+#define VC4_PACKET_H
+
+#include "vc4_regs.h" /* for VC4_MASK, VC4_GET_FIELD, VC4_SET_FIELD */
+
+enum vc4_packet {
+ VC4_PACKET_HALT = 0,
+ VC4_PACKET_NOP = 1,
+
+ VC4_PACKET_FLUSH = 4,
+ VC4_PACKET_FLUSH_ALL = 5,
+ VC4_PACKET_START_TILE_BINNING = 6,
+ VC4_PACKET_INCREMENT_SEMAPHORE = 7,
+ VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
+
+ VC4_PACKET_BRANCH = 16,
+ VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
+
+ VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
+ VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
+ VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
+ VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
+ VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
+ VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
+
+ VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
+ VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
+
+ VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
+ VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
+
+ VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
+
+ VC4_PACKET_GL_SHADER_STATE = 64,
+ VC4_PACKET_NV_SHADER_STATE = 65,
+ VC4_PACKET_VG_SHADER_STATE = 66,
+
+ VC4_PACKET_CONFIGURATION_BITS = 96,
+ VC4_PACKET_FLAT_SHADE_FLAGS = 97,
+ VC4_PACKET_POINT_SIZE = 98,
+ VC4_PACKET_LINE_WIDTH = 99,
+ VC4_PACKET_RHT_X_BOUNDARY = 100,
+ VC4_PACKET_DEPTH_OFFSET = 101,
+ VC4_PACKET_CLIP_WINDOW = 102,
+ VC4_PACKET_VIEWPORT_OFFSET = 103,
+ VC4_PACKET_Z_CLIPPING = 104,
+ VC4_PACKET_CLIPPER_XY_SCALING = 105,
+ VC4_PACKET_CLIPPER_Z_SCALING = 106,
+
+ VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
+ VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
+ VC4_PACKET_CLEAR_COLORS = 114,
+ VC4_PACKET_TILE_COORDINATES = 115,
+
+ /* Not an actual hardware packet -- this is what we use to put
+ * references to GEM bos in the command stream, since we need the u32
+ * in the actual address packet in order to store the offset from the
+ * start of the BO.
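+ *
+ * A GEM_HANDLES packet carries two BO indices; the validator
+ * records them (see validate_gem_handles()), drops the packet
+ * from the output stream, and patches the following packet's
+ * address fields against the named BOs instead.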
+ */
+ VC4_PACKET_GEM_HANDLES = 254,
+} __attribute__ ((__packed__));
+
+#define VC4_PACKET_HALT_SIZE 1
+#define VC4_PACKET_NOP_SIZE 1
+#define VC4_PACKET_FLUSH_SIZE 1
+#define VC4_PACKET_FLUSH_ALL_SIZE 1
+#define VC4_PACKET_START_TILE_BINNING_SIZE 1
+#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
+#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
+#define VC4_PACKET_BRANCH_SIZE 5
+#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
+#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
+#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
+#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
+#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
+#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
+#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
+#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
+#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
+#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
+#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
+#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
+#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
+#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
+#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
+#define VC4_PACKET_POINT_SIZE_SIZE 5
+#define VC4_PACKET_LINE_WIDTH_SIZE 5
+#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
+#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
+#define VC4_PACKET_CLIP_WINDOW_SIZE 9
+#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
+#define VC4_PACKET_Z_CLIPPING_SIZE 9
+#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
+#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
+#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
+#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
+#define VC4_PACKET_CLEAR_COLORS_SIZE 14
+#define VC4_PACKET_TILE_COORDINATES_SIZE 3
+#define VC4_PACKET_GEM_HANDLES_SIZE 9
+
+/* Number of multisamples supported. */
+#define VC4_MAX_SAMPLES 4
+/* Size of a full resolution color or Z tile buffer load/store. */
+#define VC4_TILE_BUFFER_SIZE (64 * 64 * 4)
+
+/** @{
+ * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
+*/
+#define VC4_TILING_FORMAT_LINEAR 0
+#define VC4_TILING_FORMAT_T 1
+#define VC4_TILING_FORMAT_LT 2
+/** @} */
+
+/** @{
+ *
+ * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
+ * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
+ */
+#define VC4_LOADSTORE_FULL_RES_EOF BIT(3)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL BIT(2)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS BIT(1)
+#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR BIT(0)
+
+/** @{
+ *
+ * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
+ */
+
+#define VC4_LOADSTORE_TILE_BUFFER_EOF BIT(3)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK BIT(2)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS BIT(1)
+#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR BIT(0)
+
+/** @} */
+
+/** @{
+ *
+ * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR BIT(15)
+#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR BIT(14)
+#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR BIT(13)
+#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP BIT(12)
+
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
+#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
+#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
+#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
+/** @} */
+
+/** @{
+ *
+ * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
+ * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
+ */
+#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
+#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
+#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
+#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
+
+/** The values of the field are VC4_TILING_FORMAT_* */
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
+#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
+
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
+#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
+#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
+#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
+#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
+#define VC4_LOADSTORE_TILE_BUFFER_Z 3
+#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
+#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
+/** @} */
+
+#define VC4_INDEX_BUFFER_U8 (0 << 4)
+#define VC4_INDEX_BUFFER_U16 (1 << 4)
+
+/* This flag is only present in NV shader state. */
+#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS BIT(3)
+#define VC4_SHADER_FLAG_ENABLE_CLIPPING BIT(2)
+#define VC4_SHADER_FLAG_VS_POINT_SIZE BIT(1)
+#define VC4_SHADER_FLAG_FS_SINGLE_THREAD BIT(0)
+
+/** @{ byte 2 of config bits. */
+#define VC4_CONFIG_BITS_EARLY_Z_UPDATE BIT(1)
+#define VC4_CONFIG_BITS_EARLY_Z BIT(0)
+/** @} */
+
+/** @{ byte 1 of config bits. */
+#define VC4_CONFIG_BITS_Z_UPDATE BIT(7)
+/** same values in this 3-bit field as PIPE_FUNC_* */
+#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
+#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE BIT(3)
+
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
+#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
+
+#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT BIT(0)
+/** @} */
+
+/** @{ byte 0 of config bits.
*/ +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6) +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6) +#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6) + +#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES BIT(4) +#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET BIT(3) +#define VC4_CONFIG_BITS_CW_PRIMITIVES BIT(2) +#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK BIT(1) +#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT BIT(0) +/** @} */ + +/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */ +#define VC4_BIN_CONFIG_DB_NON_MS BIT(7) + +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5) +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2 +#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3 + +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3) +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2 +#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3 + +#define VC4_BIN_CONFIG_AUTO_INIT_TSDA BIT(2) +#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT BIT(1) +#define VC4_BIN_CONFIG_MS_MODE_4X BIT(0) +/** @} */ + +/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */ +#define VC4_RENDER_CONFIG_DB_NON_MS BIT(12) +#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE BIT(11) +#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G BIT(10) +#define VC4_RENDER_CONFIG_COVERAGE_MODE BIT(9) +#define VC4_RENDER_CONFIG_ENABLE_VG_MASK BIT(8) + +/** The values of the field are VC4_TILING_FORMAT_* */ +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6) +#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6 + +#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4) +#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4) +#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4) + +#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2) +#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2 +#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0 +#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1 +#define VC4_RENDER_CONFIG_FORMAT_BGR565 2 + +#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT BIT(1) +#define VC4_RENDER_CONFIG_MS_MODE_4X BIT(0) + +#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4) +#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4) +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0) +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0) +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0) +#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0) + +enum vc4_texture_data_type { + VC4_TEXTURE_TYPE_RGBA8888 = 0, + VC4_TEXTURE_TYPE_RGBX8888 = 1, + VC4_TEXTURE_TYPE_RGBA4444 = 2, + VC4_TEXTURE_TYPE_RGBA5551 = 3, + VC4_TEXTURE_TYPE_RGB565 = 4, + VC4_TEXTURE_TYPE_LUMINANCE = 5, + VC4_TEXTURE_TYPE_ALPHA = 6, + VC4_TEXTURE_TYPE_LUMALPHA = 7, + VC4_TEXTURE_TYPE_ETC1 = 8, + VC4_TEXTURE_TYPE_S16F = 9, + VC4_TEXTURE_TYPE_S8 = 10, + VC4_TEXTURE_TYPE_S16 = 11, + VC4_TEXTURE_TYPE_BW1 = 12, + VC4_TEXTURE_TYPE_A4 = 13, + VC4_TEXTURE_TYPE_A1 = 14, + VC4_TEXTURE_TYPE_RGBA64 = 15, + VC4_TEXTURE_TYPE_RGBA32R = 16, + VC4_TEXTURE_TYPE_YUV422R = 17, +}; + +#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12) +#define VC4_TEX_P0_OFFSET_SHIFT 12 +#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10) +#define VC4_TEX_P0_CSWIZ_SHIFT 10 +#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9) +#define VC4_TEX_P0_CMMODE_SHIFT 9 +#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8) 
+#define VC4_TEX_P0_FLIPY_SHIFT 8 +#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4) +#define VC4_TEX_P0_TYPE_SHIFT 4 +#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0) +#define VC4_TEX_P0_MIPLVLS_SHIFT 0 + +#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31) +#define VC4_TEX_P1_TYPE4_SHIFT 31 +#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20) +#define VC4_TEX_P1_HEIGHT_SHIFT 20 +#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19) +#define VC4_TEX_P1_ETCFLIP_SHIFT 19 +#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8) +#define VC4_TEX_P1_WIDTH_SHIFT 8 + +#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7) +#define VC4_TEX_P1_MAGFILT_SHIFT 7 +# define VC4_TEX_P1_MAGFILT_LINEAR 0 +# define VC4_TEX_P1_MAGFILT_NEAREST 1 + +#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4) +#define VC4_TEX_P1_MINFILT_SHIFT 4 +# define VC4_TEX_P1_MINFILT_LINEAR 0 +# define VC4_TEX_P1_MINFILT_NEAREST 1 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2 +# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3 +# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4 +# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5 + +#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2) +#define VC4_TEX_P1_WRAP_T_SHIFT 2 +#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0) +#define VC4_TEX_P1_WRAP_S_SHIFT 0 +# define VC4_TEX_P1_WRAP_REPEAT 0 +# define VC4_TEX_P1_WRAP_CLAMP 1 +# define VC4_TEX_P1_WRAP_MIRROR 2 +# define VC4_TEX_P1_WRAP_BORDER 3 + +#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30) +#define VC4_TEX_P2_PTYPE_SHIFT 30 +# define VC4_TEX_P2_PTYPE_IGNORED 0 +# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2 +# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3 + +/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */ +#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12) +#define VC4_TEX_P2_CMST_SHIFT 12 +#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0) +#define VC4_TEX_P2_BSLOD_SHIFT 0 + +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */ +#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12) +#define VC4_TEX_P2_CHEIGHT_SHIFT 12 +#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0) +#define VC4_TEX_P2_CWIDTH_SHIFT 0 + +/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */ +#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12) +#define VC4_TEX_P2_CYOFF_SHIFT 12 +#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0) +#define VC4_TEX_P2_CXOFF_SHIFT 0 + +#endif /* VC4_PACKET_H */ diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c new file mode 100644 index 000000000000..8a2a312e2c1b --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -0,0 +1,634 @@ +/* + * Copyright © 2014-2015 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Render command list generation
+ *
+ * In the VC4 driver, render command list generation is performed by the
+ * kernel instead of userspace. We do this because validating a
+ * user-submitted command list is hard to get right and has high CPU overhead,
+ * while the number of valid configurations for render command lists is
+ * actually fairly low.
+ */
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_packet.h"
+
+struct vc4_rcl_setup {
+ struct drm_gem_cma_object *color_read;
+ struct drm_gem_cma_object *color_write;
+ struct drm_gem_cma_object *zs_read;
+ struct drm_gem_cma_object *zs_write;
+ struct drm_gem_cma_object *msaa_color_write;
+ struct drm_gem_cma_object *msaa_zs_write;
+
+ struct drm_gem_cma_object *rcl;
+ u32 next_offset;
+};
+
+static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
+{
+ *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 1;
+}
+
+static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val)
+{
+ *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 2;
+}
+
+static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val)
+{
+ *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
+ setup->next_offset += 4;
+}
+
+/*
+ * Emits a no-op STORE_TILE_BUFFER_GENERAL.
+ *
+ * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
+ * some sort before another load is triggered.
+ */
+static void vc4_store_before_load(struct vc4_rcl_setup *setup)
+{
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup,
+ VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE,
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER) |
+ VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
+ VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
+ VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR);
+ rcl_u32(setup, 0); /* no address, since we're in None mode */
+}
+
+/*
+ * Calculates the physical address of the start of a tile in an RCL surface.
+ *
+ * Unlike the other load/store packets,
+ * VC4_PACKET_LOAD/STORE_FULL_RES_TILE_BUFFER don't look at the tile
+ * coordinates packet, and instead just store to the address given.
+ */
+static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object *bo,
+ struct drm_vc4_submit_rcl_surface *surf,
+ uint8_t x, uint8_t y)
+{
+ return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
+ (DIV_ROUND_UP(exec->args->width, 32) * y + x);
+}
+
+/*
+ * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
+ *
+ * The tile coordinates packet triggers a pending load if there is one, is
+ * used for clipping during rendering, and determines where loads/stores
+ * happen relative to their base address.
+ */
+static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
+ uint32_t x, uint32_t y)
+{
+ rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
+ rcl_u8(setup, x);
+ rcl_u8(setup, y);
+}
+
+static void emit_tile(struct vc4_exec_info *exec,
+ struct vc4_rcl_setup *setup,
+ uint8_t x, uint8_t y, bool first, bool last)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ bool has_bin = args->bin_cl_size != 0;
+
+ /* Note that the load doesn't actually occur until the
+ * tile coords packet is processed, and only one load
+ * may be outstanding at a time.
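+ * When both a color and a Z/S general load are requested, we
+ * therefore emit a tile coordinates packet and a no-op store
+ * (vc4_store_before_load()) between them, so the first load is
+ * retired before the second one is armed.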
+ */ + if (setup->color_read) { + if (args->color_read.flags & + VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { + rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER); + rcl_u32(setup, + vc4_full_res_offset(exec, setup->color_read, + &args->color_read, x, y) | + VC4_LOADSTORE_FULL_RES_DISABLE_ZS); + } else { + rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL); + rcl_u16(setup, args->color_read.bits); + rcl_u32(setup, setup->color_read->paddr + + args->color_read.offset); + } + } + + if (setup->zs_read) { + if (args->zs_read.flags & + VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { + rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER); + rcl_u32(setup, + vc4_full_res_offset(exec, setup->zs_read, + &args->zs_read, x, y) | + VC4_LOADSTORE_FULL_RES_DISABLE_COLOR); + } else { + if (setup->color_read) { + /* Exec previous load. */ + vc4_tile_coordinates(setup, x, y); + vc4_store_before_load(setup); + } + + rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL); + rcl_u16(setup, args->zs_read.bits); + rcl_u32(setup, setup->zs_read->paddr + + args->zs_read.offset); + } + } + + /* Clipping depends on tile coordinates having been + * emitted, so we always need one here. + */ + vc4_tile_coordinates(setup, x, y); + + /* Wait for the binner before jumping to the first + * tile's lists. + */ + if (first && has_bin) + rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE); + + if (has_bin) { + rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST); + rcl_u32(setup, (exec->tile_bo->paddr + + exec->tile_alloc_offset + + (y * exec->bin_tiles_x + x) * 32)); + } + + if (setup->msaa_color_write) { + bool last_tile_write = (!setup->msaa_zs_write && + !setup->zs_write && + !setup->color_write); + uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_ZS; + + if (!last_tile_write) + bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL; + else if (last) + bits |= VC4_LOADSTORE_FULL_RES_EOF; + rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER); + rcl_u32(setup, + vc4_full_res_offset(exec, setup->msaa_color_write, + &args->msaa_color_write, x, y) | + bits); + } + + if (setup->msaa_zs_write) { + bool last_tile_write = (!setup->zs_write && + !setup->color_write); + uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_COLOR; + + if (setup->msaa_color_write) + vc4_tile_coordinates(setup, x, y); + if (!last_tile_write) + bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL; + else if (last) + bits |= VC4_LOADSTORE_FULL_RES_EOF; + rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER); + rcl_u32(setup, + vc4_full_res_offset(exec, setup->msaa_zs_write, + &args->msaa_zs_write, x, y) | + bits); + } + + if (setup->zs_write) { + bool last_tile_write = !setup->color_write; + + if (setup->msaa_color_write || setup->msaa_zs_write) + vc4_tile_coordinates(setup, x, y); + + rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL); + rcl_u16(setup, args->zs_write.bits | + (last_tile_write ? + 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR)); + rcl_u32(setup, + (setup->zs_write->paddr + args->zs_write.offset) | + ((last && last_tile_write) ? 
+ VC4_LOADSTORE_TILE_BUFFER_EOF : 0)); + } + + if (setup->color_write) { + if (setup->msaa_color_write || setup->msaa_zs_write || + setup->zs_write) { + vc4_tile_coordinates(setup, x, y); + } + + if (last) + rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF); + else + rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER); + } +} + +static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec, + struct vc4_rcl_setup *setup) +{ + struct drm_vc4_submit_cl *args = exec->args; + bool has_bin = args->bin_cl_size != 0; + uint8_t min_x_tile = args->min_x_tile; + uint8_t min_y_tile = args->min_y_tile; + uint8_t max_x_tile = args->max_x_tile; + uint8_t max_y_tile = args->max_y_tile; + uint8_t xtiles = max_x_tile - min_x_tile + 1; + uint8_t ytiles = max_y_tile - min_y_tile + 1; + uint8_t x, y; + uint32_t size, loop_body_size; + + size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE; + loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE; + + if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) { + size += VC4_PACKET_CLEAR_COLORS_SIZE + + VC4_PACKET_TILE_COORDINATES_SIZE + + VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; + } + + if (setup->color_read) { + if (args->color_read.flags & + VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { + loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE; + } else { + loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE; + } + } + if (setup->zs_read) { + if (args->zs_read.flags & + VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) { + loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE; + } else { + if (setup->color_read && + !(args->color_read.flags & + VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) { + loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE; + loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; + } + loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE; + } + } + + if (has_bin) { + size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE; + loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE; + } + + if (setup->msaa_color_write) + loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE; + if (setup->msaa_zs_write) + loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE; + + if (setup->zs_write) + loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE; + if (setup->color_write) + loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE; + + /* We need a VC4_PACKET_TILE_COORDINATES in between each store. */ + loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE * + ((setup->msaa_color_write != NULL) + + (setup->msaa_zs_write != NULL) + + (setup->color_write != NULL) + + (setup->zs_write != NULL) - 1); + + size += xtiles * ytiles * loop_body_size; + + setup->rcl = &vc4_bo_create(dev, size, true)->base; + if (!setup->rcl) + return -ENOMEM; + list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, + &exec->unref_list); + + rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG); + rcl_u32(setup, + (setup->color_write ? (setup->color_write->paddr + + args->color_write.offset) : + 0)); + rcl_u16(setup, args->width); + rcl_u16(setup, args->height); + rcl_u16(setup, args->color_write.bits); + + /* The tile buffer gets cleared when the previous tile is stored. If + * the clear values changed between frames, then the tile buffer has + * stale clear values in it, so we have to do a store in None mode (no + * writes) so that we trigger the tile buffer clear. 
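+ *
+ * That's the CLEAR_COLORS / TILE_COORDINATES (0, 0) / None-mode
+ * store sequence emitted below: nothing is written to memory,
+ * but the store retires the dummy tile and triggers the clear
+ * with the new values before any real tile is rendered.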
+ */
+ if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
+ rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
+ rcl_u32(setup, args->clear_color[0]);
+ rcl_u32(setup, args->clear_color[1]);
+ rcl_u32(setup, args->clear_z);
+ rcl_u8(setup, args->clear_s);
+
+ vc4_tile_coordinates(setup, 0, 0);
+
+ rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
+ rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
+ rcl_u32(setup, 0); /* no address, since we're in None mode */
+ }
+
+ for (y = min_y_tile; y <= max_y_tile; y++) {
+ for (x = min_x_tile; x <= max_x_tile; x++) {
+ bool first = (x == min_x_tile && y == min_y_tile);
+ bool last = (x == max_x_tile && y == max_y_tile);
+
+ emit_tile(exec, setup, x, y, first, last);
+ }
+ }
+
+ BUG_ON(setup->next_offset != size);
+ exec->ct1ca = setup->rcl->paddr;
+ exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+
+ return 0;
+}
+
+static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object *obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ struct drm_vc4_submit_cl *args = exec->args;
+ u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
+
+ if (surf->offset > obj->base.size) {
+ DRM_ERROR("surface offset %d > BO size %zd\n",
+ surf->offset, obj->base.size);
+ return -EINVAL;
+ }
+
+ if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
+ render_tiles_stride * args->max_y_tile + args->max_x_tile) {
+ DRM_ERROR("MSAA tile %d, %d out of bounds "
+ "(bo size %zd, offset %d).\n",
+ args->max_x_tile, args->max_y_tile,
+ obj->base.size,
+ surf->offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ if (surf->flags != 0 || surf->bits != 0) {
+ DRM_ERROR("MSAA surface had nonzero flags/bits\n");
+ return -EINVAL;
+ }
+
+ if (surf->hindex == ~0)
+ return 0;
+
+ *obj = vc4_use_bo(exec, surf->hindex);
+ if (!*obj)
+ return -EINVAL;
+
+ if (surf->offset & 0xf) {
+ DRM_ERROR("MSAA write must be 16b aligned.\n");
+ return -EINVAL;
+ }
+
+ return vc4_full_res_bounds_check(exec, *obj, surf);
+}
+
+static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
+ struct drm_gem_cma_object **obj,
+ struct drm_vc4_submit_rcl_surface *surf)
+{
+ uint8_t tiling = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_TILING);
+ uint8_t buffer = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER);
+ uint8_t format = VC4_GET_FIELD(surf->bits,
+ VC4_LOADSTORE_TILE_BUFFER_FORMAT);
+ int cpp;
+ int ret;
+
+ if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ DRM_ERROR("Extra flags set\n");
+ return -EINVAL;
+ }
+
+ if (surf->hindex == ~0)
+ return 0;
+
+ *obj = vc4_use_bo(exec, surf->hindex);
+ if (!*obj)
+ return -EINVAL;
+
+ if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
+ if (surf == &exec->args->zs_write) {
+ DRM_ERROR("general zs write may not be a full-res.\n");
+ return -EINVAL;
+ }
+
+ if (surf->bits != 0) {
+ DRM_ERROR("load/store general bits set with "
+ "full res load/store.\n");
+ return -EINVAL;
+ }
+
+ ret = vc4_full_res_bounds_check(exec, *obj, surf);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
+ if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
+ VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
+ VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
+ DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
+ surf->bits);
+ return -EINVAL;
+ }
+
+ if (tiling > VC4_TILING_FORMAT_LT) {
+ DRM_ERROR("Bad tiling format\n");
+ return -EINVAL;
+ }
+
+ if (buffer ==
VC4_LOADSTORE_TILE_BUFFER_ZS) { + if (format != 0) { + DRM_ERROR("No color format should be set for ZS\n"); + return -EINVAL; + } + cpp = 4; + } else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) { + switch (format) { + case VC4_LOADSTORE_TILE_BUFFER_BGR565: + case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER: + cpp = 2; + break; + case VC4_LOADSTORE_TILE_BUFFER_RGBA8888: + cpp = 4; + break; + default: + DRM_ERROR("Bad tile buffer format\n"); + return -EINVAL; + } + } else { + DRM_ERROR("Bad load/store buffer %d.\n", buffer); + return -EINVAL; + } + + if (surf->offset & 0xf) { + DRM_ERROR("load/store buffer must be 16b aligned.\n"); + return -EINVAL; + } + + if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling, + exec->args->width, exec->args->height, cpp)) { + return -EINVAL; + } + + return 0; +} + +static int +vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, + struct vc4_rcl_setup *setup, + struct drm_gem_cma_object **obj, + struct drm_vc4_submit_rcl_surface *surf) +{ + uint8_t tiling = VC4_GET_FIELD(surf->bits, + VC4_RENDER_CONFIG_MEMORY_FORMAT); + uint8_t format = VC4_GET_FIELD(surf->bits, + VC4_RENDER_CONFIG_FORMAT); + int cpp; + + if (surf->flags != 0) { + DRM_ERROR("No flags supported on render config.\n"); + return -EINVAL; + } + + if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK | + VC4_RENDER_CONFIG_FORMAT_MASK | + VC4_RENDER_CONFIG_MS_MODE_4X | + VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) { + DRM_ERROR("Unknown bits in render config: 0x%04x\n", + surf->bits); + return -EINVAL; + } + + if (surf->hindex == ~0) + return 0; + + *obj = vc4_use_bo(exec, surf->hindex); + if (!*obj) + return -EINVAL; + + if (tiling > VC4_TILING_FORMAT_LT) { + DRM_ERROR("Bad tiling format\n"); + return -EINVAL; + } + + switch (format) { + case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED: + case VC4_RENDER_CONFIG_FORMAT_BGR565: + cpp = 2; + break; + case VC4_RENDER_CONFIG_FORMAT_RGBA8888: + cpp = 4; + break; + default: + DRM_ERROR("Bad tile buffer format\n"); + return -EINVAL; + } + + if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling, + exec->args->width, exec->args->height, cpp)) { + return -EINVAL; + } + + return 0; +} + +int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) +{ + struct vc4_rcl_setup setup = {0}; + struct drm_vc4_submit_cl *args = exec->args; + bool has_bin = args->bin_cl_size != 0; + int ret; + + if (args->min_x_tile > args->max_x_tile || + args->min_y_tile > args->max_y_tile) { + DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n", + args->min_x_tile, args->min_y_tile, + args->max_x_tile, args->max_y_tile); + return -EINVAL; + } + + if (has_bin && + (args->max_x_tile > exec->bin_tiles_x || + args->max_y_tile > exec->bin_tiles_y)) { + DRM_ERROR("Render tiles (%d,%d) outside of bin config " + "(%d,%d)\n", + args->max_x_tile, args->max_y_tile, + exec->bin_tiles_x, exec->bin_tiles_y); + return -EINVAL; + } + + ret = vc4_rcl_render_config_surface_setup(exec, &setup, + &setup.color_write, + &args->color_write); + if (ret) + return ret; + + ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read); + if (ret) + return ret; + + ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read); + if (ret) + return ret; + + ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write); + if (ret) + return ret; + + ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_color_write, + &args->msaa_color_write); + if (ret) + return ret; + + ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_zs_write, + &args->msaa_zs_write); + if (ret) + return ret; + 
+
+ /* We shouldn't even have the job submitted to us if there's no
+ * surface to write out.
+ */
+ if (!setup.color_write && !setup.zs_write &&
+ !setup.msaa_color_write && !setup.msaa_zs_write) {
+ DRM_ERROR("RCL requires color or Z/S write\n");
+ return -EINVAL;
+ }
+
+ return vc4_create_rcl_bo(dev, exec, &setup);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h
new file mode 100644
index 000000000000..ad7b1ea720c2
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(_VC4_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VC4_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vc4
+#define TRACE_INCLUDE_FILE vc4_trace
+
+TRACE_EVENT(vc4_wait_for_seqno_begin,
+ TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
+ TP_ARGS(dev, seqno, timeout),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ __field(u64, timeout)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ __entry->timeout = timeout;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu, timeout=%llu",
+ __entry->dev, __entry->seqno, __entry->timeout)
+);
+
+TRACE_EVENT(vc4_wait_for_seqno_end,
+ TP_PROTO(struct drm_device *dev, uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev, __entry->seqno)
+);
+
+#endif /* _VC4_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/vc4/vc4_trace_points.c b/drivers/gpu/drm/vc4/vc4_trace_points.c
new file mode 100644
index 000000000000..e6278f25716b
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_trace_points.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "vc4_drv.h"
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "vc4_trace.h"
+#endif
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 040ad0d8b8a1..424d515ffcda 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -144,6 +144,21 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+/*
+ * Asks the firmware to turn on power to the V3D engine.
+ *
+ * This may be doable with just the clocks interface, though this
+ * packet does some other register setup from the firmware, too.
+ */
+int
+vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
+{
+ if (on)
+ return pm_generic_resume(&vc4->v3d->pdev->dev);
+ else
+ return pm_generic_poweroff(&vc4->v3d->pdev->dev);
+}
+
 static void vc4_v3d_init_hw(struct drm_device *dev)
 {
 struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -161,6 +176,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 struct drm_device *drm = dev_get_drvdata(master);
 struct vc4_dev *vc4 = to_vc4_dev(drm);
 struct vc4_v3d *v3d = NULL;
+ int ret;
 
 v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
 if (!v3d)
@@ -180,8 +196,20 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 return -EINVAL;
 }
 
+ /* Reset the binner overflow address/size at setup, to be sure
+ * we don't reuse an old one.
+ */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
 vc4_v3d_init_hw(drm);
 
+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+ if (ret) {
+ DRM_ERROR("Failed to install IRQ handler\n");
+ return ret;
+ }
+
 return 0;
 }
 
@@ -191,6 +219,15 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
 struct drm_device *drm = dev_get_drvdata(master);
 struct vc4_dev *vc4 = to_vc4_dev(drm);
 
+ drm_irq_uninstall(drm);
+
+ /* Disable the binner's overflow memory address, so the next
+ * driver probe (if any) doesn't try to reuse our old
+ * allocation.
+ */
+ V3D_WRITE(V3D_BPOA, 0);
+ V3D_WRITE(V3D_BPOS, 0);
+
 vc4->v3d = NULL;
 }
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
new file mode 100644
index 000000000000..0fb5b994b9dd
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright © 2014 Broadcom
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/**
+ * Command list validator for VC4.
+ *
+ * The VC4 has no IOMMU between it and system memory. So, a user with
+ * access to execute command lists could escalate privilege by
+ * overwriting system memory (drawing to it as a framebuffer) or
+ * reading system memory it shouldn't (reading it as a texture, or
+ * uniform data, or vertex data).
+ *
+ * This validates command lists to ensure that all accesses are within
+ * the bounds of the GEM objects referenced. It explicitly whitelists
+ * packets, and looks at the offsets in any address fields to make
+ * sure they're constrained within the BOs they reference.
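+ *
+ * For example, the index buffer named by a GL_INDEXED_PRIMITIVE
+ * packet is only accepted if offset + length * index_size fits
+ * inside the BO it references (see validate_indexed_prim_list()).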
+ *
+ * Note that because of the validation that's happening anyway, this
+ * is where GEM relocation processing happens.
+ */
+
+#include "uapi/drm/vc4_drm.h"
+#include "vc4_drv.h"
+#include "vc4_packet.h"
+
+#define VALIDATE_ARGS \
+ struct vc4_exec_info *exec, \
+ void *validated, \
+ void *untrusted
+
+/** Return the width in pixels of a 64-byte microtile. */
+static uint32_t
+utile_width(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ case 2:
+ return 8;
+ case 4:
+ return 4;
+ case 8:
+ return 2;
+ default:
+ DRM_ERROR("unknown cpp: %d\n", cpp);
+ return 1;
+ }
+}
+
+/** Return the height in pixels of a 64-byte microtile. */
+static uint32_t
+utile_height(int cpp)
+{
+ switch (cpp) {
+ case 1:
+ return 8;
+ case 2:
+ case 4:
+ case 8:
+ return 4;
+ default:
+ DRM_ERROR("unknown cpp: %d\n", cpp);
+ return 1;
+ }
+}
+
+/**
+ * The texture unit decides what tiling format a particular miplevel is
+ * using via this function, so we lay out our miptrees accordingly.
+ */
+static bool
+size_is_lt(uint32_t width, uint32_t height, int cpp)
+{
+ return (width <= 4 * utile_width(cpp) ||
+ height <= 4 * utile_height(cpp));
+}
+
+struct drm_gem_cma_object *
+vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
+{
+ struct drm_gem_cma_object *obj;
+ struct vc4_bo *bo;
+
+ if (hindex >= exec->bo_count) {
+ DRM_ERROR("BO index %d greater than BO count %d\n",
+ hindex, exec->bo_count);
+ return NULL;
+ }
+ obj = exec->bo[hindex];
+ bo = to_vc4_bo(&obj->base);
+
+ if (bo->validated_shader) {
+ DRM_ERROR("Trying to use shader BO as something other than "
+ "a shader\n");
+ return NULL;
+ }
+
+ return obj;
+}
+
+static struct drm_gem_cma_object *
+vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
+{
+ return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
+}
+
+static bool
+validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
+{
+ /* Note that the untrusted pointer passed to these functions is
+ * incremented past the packet byte.
+ */
+ return (untrusted - 1 == exec->bin_u + pos);
+}
+
+static uint32_t
+gl_shader_rec_size(uint32_t pointer_bits)
+{
+ uint32_t attribute_count = pointer_bits & 7;
+ bool extended = pointer_bits & 8;
+
+ if (attribute_count == 0)
+ attribute_count = 8;
+
+ if (extended)
+ return 100 + attribute_count * 4;
+ else
+ return 36 + attribute_count * 8;
+}
+
+bool
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+ uint32_t offset, uint8_t tiling_format,
+ uint32_t width, uint32_t height, uint8_t cpp)
+{
+ uint32_t aligned_width, aligned_height, stride, size;
+ uint32_t utile_w = utile_width(cpp);
+ uint32_t utile_h = utile_height(cpp);
+
+ /* The shaded vertex format stores signed 12.4 fixed point
+ * (-2048,2047) offsets from the viewport center, so we should
+ * never have a render target larger than 4096. The texture
+ * unit can only sample from 2048x2048, so it's even more
+ * restricted. This lets us avoid worrying about overflow in
+ * our math.
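+ * (The worst case below is a 4096-wide, 4096-tall buffer at 8
+ * bytes per pixel: 4096 * 8 * 4096 = 128MB, comfortably within
+ * a u32.)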
+ */
+ if (width > 4096 || height > 4096) {
+ DRM_ERROR("Surface dimensions (%d,%d) too large", width, height);
+ return false;
+ }
+
+ switch (tiling_format) {
+ case VC4_TILING_FORMAT_LINEAR:
+ aligned_width = round_up(width, utile_w);
+ aligned_height = height;
+ break;
+ case VC4_TILING_FORMAT_T:
+ aligned_width = round_up(width, utile_w * 8);
+ aligned_height = round_up(height, utile_h * 8);
+ break;
+ case VC4_TILING_FORMAT_LT:
+ aligned_width = round_up(width, utile_w);
+ aligned_height = round_up(height, utile_h);
+ break;
+ default:
+ DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
+ return false;
+ }
+
+ stride = aligned_width * cpp;
+ size = stride * aligned_height;
+
+ if (size + offset < size ||
+ size + offset > fbo->base.size) {
+ DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
+ width, height,
+ aligned_width, aligned_height,
+ size, offset, fbo->base.size);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+validate_flush(VALIDATE_ARGS)
+{
+ if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
+ DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
+ return -EINVAL;
+ }
+ exec->found_flush = true;
+
+ return 0;
+}
+
+static int
+validate_start_tile_binning(VALIDATE_ARGS)
+{
+ if (exec->found_start_tile_binning_packet) {
+ DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
+ return -EINVAL;
+ }
+ exec->found_start_tile_binning_packet = true;
+
+ if (!exec->found_tile_binning_mode_config_packet) {
+ DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+validate_increment_semaphore(VALIDATE_ARGS)
+{
+ if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
+ DRM_ERROR("Bin CL must end with "
+ "VC4_PACKET_INCREMENT_SEMAPHORE\n");
+ return -EINVAL;
+ }
+ exec->found_increment_semaphore_packet = true;
+
+ return 0;
+}
+
+static int
+validate_indexed_prim_list(VALIDATE_ARGS)
+{
+ struct drm_gem_cma_object *ib;
+ uint32_t length = *(uint32_t *)(untrusted + 1);
+ uint32_t offset = *(uint32_t *)(untrusted + 5);
+ uint32_t max_index = *(uint32_t *)(untrusted + 9);
+ uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ?
2 : 1; + struct vc4_shader_state *shader_state; + + /* Check overflow condition */ + if (exec->shader_state_count == 0) { + DRM_ERROR("shader state must precede primitives\n"); + return -EINVAL; + } + shader_state = &exec->shader_state[exec->shader_state_count - 1]; + + if (max_index > shader_state->max_index) + shader_state->max_index = max_index; + + ib = vc4_use_handle(exec, 0); + if (!ib) + return -EINVAL; + + if (offset > ib->base.size || + (ib->base.size - offset) / index_size < length) { + DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n", + offset, length, index_size, ib->base.size); + return -EINVAL; + } + + *(uint32_t *)(validated + 5) = ib->paddr + offset; + + return 0; +} + +static int +validate_gl_array_primitive(VALIDATE_ARGS) +{ + uint32_t length = *(uint32_t *)(untrusted + 1); + uint32_t base_index = *(uint32_t *)(untrusted + 5); + uint32_t max_index; + struct vc4_shader_state *shader_state; + + /* Check overflow condition */ + if (exec->shader_state_count == 0) { + DRM_ERROR("shader state must precede primitives\n"); + return -EINVAL; + } + shader_state = &exec->shader_state[exec->shader_state_count - 1]; + + if (length + base_index < length) { + DRM_ERROR("primitive vertex count overflow\n"); + return -EINVAL; + } + max_index = length + base_index - 1; + + if (max_index > shader_state->max_index) + shader_state->max_index = max_index; + + return 0; +} + +static int +validate_gl_shader_state(VALIDATE_ARGS) +{ + uint32_t i = exec->shader_state_count++; + + if (i >= exec->shader_state_size) { + DRM_ERROR("More requests for shader states than declared\n"); + return -EINVAL; + } + + exec->shader_state[i].addr = *(uint32_t *)untrusted; + exec->shader_state[i].max_index = 0; + + if (exec->shader_state[i].addr & ~0xf) { + DRM_ERROR("high bits set in GL shader rec reference\n"); + return -EINVAL; + } + + *(uint32_t *)validated = (exec->shader_rec_p + + exec->shader_state[i].addr); + + exec->shader_rec_p += + roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16); + + return 0; +} + +static int +validate_tile_binning_config(VALIDATE_ARGS) +{ + struct drm_device *dev = exec->exec_bo->base.dev; + struct vc4_bo *tile_bo; + uint8_t flags; + uint32_t tile_state_size, tile_alloc_size; + uint32_t tile_count; + + if (exec->found_tile_binning_mode_config_packet) { + DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n"); + return -EINVAL; + } + exec->found_tile_binning_mode_config_packet = true; + + exec->bin_tiles_x = *(uint8_t *)(untrusted + 12); + exec->bin_tiles_y = *(uint8_t *)(untrusted + 13); + tile_count = exec->bin_tiles_x * exec->bin_tiles_y; + flags = *(uint8_t *)(untrusted + 14); + + if (exec->bin_tiles_x == 0 || + exec->bin_tiles_y == 0) { + DRM_ERROR("Tile binning config of %dx%d too small\n", + exec->bin_tiles_x, exec->bin_tiles_y); + return -EINVAL; + } + + if (flags & (VC4_BIN_CONFIG_DB_NON_MS | + VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) { + DRM_ERROR("unsupported binning config flags 0x%02x\n", flags); + return -EINVAL; + } + + /* The tile state data array is 48 bytes per tile, and we put it at + * the start of a BO containing both it and the tile alloc. + */ + tile_state_size = 48 * tile_count; + + /* Since the tile alloc array will follow us, align. 
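+ * For example, a 30x20 tile grid needs 48 * 600 = 28800 bytes
+ * of tile state, so the tile alloc array starts at offset 32768.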
*/ + exec->tile_alloc_offset = roundup(tile_state_size, 4096); + + *(uint8_t *)(validated + 14) = + ((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK | + VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) | + VC4_BIN_CONFIG_AUTO_INIT_TSDA | + VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32, + VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) | + VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128, + VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE)); + + /* Initial block size. */ + tile_alloc_size = 32 * tile_count; + + /* + * The initial allocation gets rounded to the next 256 bytes before + * the hardware starts fulfilling further allocations. + */ + tile_alloc_size = roundup(tile_alloc_size, 256); + + /* Add space for the extra allocations. This is what gets used first, + * before overflow memory. It must have at least 4096 bytes, but we + * want to avoid overflow memory usage if possible. + */ + tile_alloc_size += 1024 * 1024; + + tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, + true); + exec->tile_bo = &tile_bo->base; + if (!exec->tile_bo) + return -ENOMEM; + list_add_tail(&tile_bo->unref_head, &exec->unref_list); + + /* tile alloc address. */ + *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr + + exec->tile_alloc_offset); + /* tile alloc size. */ + *(uint32_t *)(validated + 4) = tile_alloc_size; + /* tile state address. */ + *(uint32_t *)(validated + 8) = exec->tile_bo->paddr; + + return 0; +} + +static int +validate_gem_handles(VALIDATE_ARGS) +{ + memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index)); + return 0; +} + +#define VC4_DEFINE_PACKET(packet, func) \ + [packet] = { packet ## _SIZE, #packet, func } + +static const struct cmd_info { + uint16_t len; + const char *name; + int (*func)(struct vc4_exec_info *exec, void *validated, + void *untrusted); +} cmd_info[] = { + VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush), + VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, + validate_start_tile_binning), + VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, + validate_increment_semaphore), + + VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, + validate_indexed_prim_list), + VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, + validate_gl_array_primitive), + + VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL), + + VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state), + + VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL), + VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL), + /* Note: The docs say this was also 105, but it was 106 in the + * initial userland code drop. 
+ */ + VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL), + + VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, + validate_tile_binning_config), + + VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles), +}; + +int +vc4_validate_bin_cl(struct drm_device *dev, + void *validated, + void *unvalidated, + struct vc4_exec_info *exec) +{ + uint32_t len = exec->args->bin_cl_size; + uint32_t dst_offset = 0; + uint32_t src_offset = 0; + + while (src_offset < len) { + void *dst_pkt = validated + dst_offset; + void *src_pkt = unvalidated + src_offset; + u8 cmd = *(uint8_t *)src_pkt; + const struct cmd_info *info; + + if (cmd >= ARRAY_SIZE(cmd_info)) { + DRM_ERROR("0x%08x: packet %d out of bounds\n", + src_offset, cmd); + return -EINVAL; + } + + info = &cmd_info[cmd]; + if (!info->name) { + DRM_ERROR("0x%08x: packet %d invalid\n", + src_offset, cmd); + return -EINVAL; + } + + if (src_offset + info->len > len) { + DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x " + "exceeds bounds (0x%08x)\n", + src_offset, cmd, info->name, info->len, + src_offset + len); + return -EINVAL; + } + + if (cmd != VC4_PACKET_GEM_HANDLES) + memcpy(dst_pkt, src_pkt, info->len); + + if (info->func && info->func(exec, + dst_pkt + 1, + src_pkt + 1)) { + DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n", + src_offset, cmd, info->name); + return -EINVAL; + } + + src_offset += info->len; + /* GEM handle loading doesn't produce HW packets. */ + if (cmd != VC4_PACKET_GEM_HANDLES) + dst_offset += info->len; + + /* When the CL hits halt, it'll stop reading anything else. */ + if (cmd == VC4_PACKET_HALT) + break; + } + + exec->ct0ea = exec->ct0ca + dst_offset; + + if (!exec->found_start_tile_binning_packet) { + DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n"); + return -EINVAL; + } + + /* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH. The + * semaphore is used to trigger the render CL to start up, and the + * FLUSH is what caps the bin lists with + * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main + * render CL when they get called to) and actually triggers the queued + * semaphore increment. + */ + if (!exec->found_increment_semaphore_packet || !exec->found_flush) { + DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + " + "VC4_PACKET_FLUSH\n"); + return -EINVAL; + } + + return 0; +} + +static bool +reloc_tex(struct vc4_exec_info *exec, + void *uniform_data_u, + struct vc4_texture_sample_info *sample, + uint32_t texture_handle_index) + +{ + struct drm_gem_cma_object *tex; + uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]); + uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]); + uint32_t p2 = (sample->p_offset[2] != ~0 ? + *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0); + uint32_t p3 = (sample->p_offset[3] != ~0 ? 
+ *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0); + uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0]; + uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK; + uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS); + uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH); + uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT); + uint32_t cpp, tiling_format, utile_w, utile_h; + uint32_t i; + uint32_t cube_map_stride = 0; + enum vc4_texture_data_type type; + + tex = vc4_use_bo(exec, texture_handle_index); + if (!tex) + return false; + + if (sample->is_direct) { + uint32_t remaining_size = tex->base.size - p0; + + if (p0 > tex->base.size - 4) { + DRM_ERROR("UBO offset greater than UBO size\n"); + goto fail; + } + if (p1 > remaining_size - 4) { + DRM_ERROR("UBO clamp would allow reads " + "outside of UBO\n"); + goto fail; + } + *validated_p0 = tex->paddr + p0; + return true; + } + + if (width == 0) + width = 2048; + if (height == 0) + height = 2048; + + if (p0 & VC4_TEX_P0_CMMODE_MASK) { + if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) == + VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) + cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK; + if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) == + VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) { + if (cube_map_stride) { + DRM_ERROR("Cube map stride set twice\n"); + goto fail; + } + + cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK; + } + if (!cube_map_stride) { + DRM_ERROR("Cube map stride not set\n"); + goto fail; + } + } + + type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) | + (VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4)); + + switch (type) { + case VC4_TEXTURE_TYPE_RGBA8888: + case VC4_TEXTURE_TYPE_RGBX8888: + case VC4_TEXTURE_TYPE_RGBA32R: + cpp = 4; + break; + case VC4_TEXTURE_TYPE_RGBA4444: + case VC4_TEXTURE_TYPE_RGBA5551: + case VC4_TEXTURE_TYPE_RGB565: + case VC4_TEXTURE_TYPE_LUMALPHA: + case VC4_TEXTURE_TYPE_S16F: + case VC4_TEXTURE_TYPE_S16: + cpp = 2; + break; + case VC4_TEXTURE_TYPE_LUMINANCE: + case VC4_TEXTURE_TYPE_ALPHA: + case VC4_TEXTURE_TYPE_S8: + cpp = 1; + break; + case VC4_TEXTURE_TYPE_ETC1: + case VC4_TEXTURE_TYPE_BW1: + case VC4_TEXTURE_TYPE_A4: + case VC4_TEXTURE_TYPE_A1: + case VC4_TEXTURE_TYPE_RGBA64: + case VC4_TEXTURE_TYPE_YUV422R: + default: + DRM_ERROR("Texture format %d unsupported\n", type); + goto fail; + } + utile_w = utile_width(cpp); + utile_h = utile_height(cpp); + + if (type == VC4_TEXTURE_TYPE_RGBA32R) { + tiling_format = VC4_TILING_FORMAT_LINEAR; + } else { + if (size_is_lt(width, height, cpp)) + tiling_format = VC4_TILING_FORMAT_LT; + else + tiling_format = VC4_TILING_FORMAT_T; + } + + if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5, + tiling_format, width, height, cpp)) { + goto fail; + } + + /* The mipmap levels are stored before the base of the texture. Make + * sure there is actually space in the BO. + */ + for (i = 1; i <= miplevels; i++) { + uint32_t level_width = max(width >> i, 1u); + uint32_t level_height = max(height >> i, 1u); + uint32_t aligned_width, aligned_height; + uint32_t level_size; + + /* Once the levels get small enough, they drop from T to LT. 
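+ * For example, a 256x256 32bpp texture (4x4-pixel utiles) stays
+ * T-format down to its 32x32 level; 16x16 and below satisfy
+ * size_is_lt() and switch to LT.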
*/ + if (tiling_format == VC4_TILING_FORMAT_T && + size_is_lt(level_width, level_height, cpp)) { + tiling_format = VC4_TILING_FORMAT_LT; + } + + switch (tiling_format) { + case VC4_TILING_FORMAT_T: + aligned_width = round_up(level_width, utile_w * 8); + aligned_height = round_up(level_height, utile_h * 8); + break; + case VC4_TILING_FORMAT_LT: + aligned_width = round_up(level_width, utile_w); + aligned_height = round_up(level_height, utile_h); + break; + default: + aligned_width = round_up(level_width, utile_w); + aligned_height = level_height; + break; + } + + level_size = aligned_width * cpp * aligned_height; + + if (offset < level_size) { + DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db " + "overflowed buffer bounds (offset %d)\n", + i, level_width, level_height, + aligned_width, aligned_height, + level_size, offset); + goto fail; + } + + offset -= level_size; + } + + *validated_p0 = tex->paddr + p0; + + return true; + fail: + DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0); + DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1); + DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2); + DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3); + return false; +} + +static int +validate_gl_shader_rec(struct drm_device *dev, + struct vc4_exec_info *exec, + struct vc4_shader_state *state) +{ + uint32_t *src_handles; + void *pkt_u, *pkt_v; + static const uint32_t shader_reloc_offsets[] = { + 4, /* fs */ + 16, /* vs */ + 28, /* cs */ + }; + uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets); + struct drm_gem_cma_object *bo[shader_reloc_count + 8]; + uint32_t nr_attributes, nr_relocs, packet_size; + int i; + + nr_attributes = state->addr & 0x7; + if (nr_attributes == 0) + nr_attributes = 8; + packet_size = gl_shader_rec_size(state->addr); + + nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes; + if (nr_relocs * 4 > exec->shader_rec_size) { + DRM_ERROR("overflowed shader recs reading %d handles " + "from %d bytes left\n", + nr_relocs, exec->shader_rec_size); + return -EINVAL; + } + src_handles = exec->shader_rec_u; + exec->shader_rec_u += nr_relocs * 4; + exec->shader_rec_size -= nr_relocs * 4; + + if (packet_size > exec->shader_rec_size) { + DRM_ERROR("overflowed shader recs copying %db packet " + "from %d bytes left\n", + packet_size, exec->shader_rec_size); + return -EINVAL; + } + pkt_u = exec->shader_rec_u; + pkt_v = exec->shader_rec_v; + memcpy(pkt_v, pkt_u, packet_size); + exec->shader_rec_u += packet_size; + /* Shader recs have to be aligned to 16 bytes (due to the attribute + * flags being in the low bytes), so round the next validated shader + * rec address up. This should be safe, since we've got so many + * relocations in a shader rec packet. 
+ */ + BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4); + exec->shader_rec_v += roundup(packet_size, 16); + exec->shader_rec_size -= packet_size; + + if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) { + DRM_ERROR("Multi-threaded fragment shaders not supported.\n"); + return -EINVAL; + } + + for (i = 0; i < shader_reloc_count; i++) { + if (src_handles[i] > exec->bo_count) { + DRM_ERROR("Shader handle %d too big\n", src_handles[i]); + return -EINVAL; + } + + bo[i] = exec->bo[src_handles[i]]; + if (!bo[i]) + return -EINVAL; + } + for (i = shader_reloc_count; i < nr_relocs; i++) { + bo[i] = vc4_use_bo(exec, src_handles[i]); + if (!bo[i]) + return -EINVAL; + } + + for (i = 0; i < shader_reloc_count; i++) { + struct vc4_validated_shader_info *validated_shader; + uint32_t o = shader_reloc_offsets[i]; + uint32_t src_offset = *(uint32_t *)(pkt_u + o); + uint32_t *texture_handles_u; + void *uniform_data_u; + uint32_t tex; + + *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; + + if (src_offset != 0) { + DRM_ERROR("Shaders must be at offset 0 of " + "the BO.\n"); + return -EINVAL; + } + + validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader; + if (!validated_shader) + return -EINVAL; + + if (validated_shader->uniforms_src_size > + exec->uniforms_size) { + DRM_ERROR("Uniforms src buffer overflow\n"); + return -EINVAL; + } + + texture_handles_u = exec->uniforms_u; + uniform_data_u = (texture_handles_u + + validated_shader->num_texture_samples); + + memcpy(exec->uniforms_v, uniform_data_u, + validated_shader->uniforms_size); + + for (tex = 0; + tex < validated_shader->num_texture_samples; + tex++) { + if (!reloc_tex(exec, + uniform_data_u, + &validated_shader->texture_samples[tex], + texture_handles_u[tex])) { + return -EINVAL; + } + } + + *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p; + + exec->uniforms_u += validated_shader->uniforms_src_size; + exec->uniforms_v += validated_shader->uniforms_size; + exec->uniforms_p += validated_shader->uniforms_size; + } + + for (i = 0; i < nr_attributes; i++) { + struct drm_gem_cma_object *vbo = + bo[ARRAY_SIZE(shader_reloc_offsets) + i]; + uint32_t o = 36 + i * 8; + uint32_t offset = *(uint32_t *)(pkt_u + o + 0); + uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1; + uint32_t stride = *(uint8_t *)(pkt_u + o + 5); + uint32_t max_index; + + if (state->addr & 0x8) + stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff; + + if (vbo->base.size < offset || + vbo->base.size - offset < attr_size) { + DRM_ERROR("BO offset overflow (%d + %d > %d)\n", + offset, attr_size, vbo->base.size); + return -EINVAL; + } + + if (stride != 0) { + max_index = ((vbo->base.size - offset - attr_size) / + stride); + if (state->max_index > max_index) { + DRM_ERROR("primitives use index %d out of " + "supplied %d\n", + state->max_index, max_index); + return -EINVAL; + } + } + + *(uint32_t *)(pkt_v + o) = vbo->paddr + offset; + } + + return 0; +} + +int +vc4_validate_shader_recs(struct drm_device *dev, + struct vc4_exec_info *exec) +{ + uint32_t i; + int ret = 0; + + for (i = 0; i < exec->shader_state_count; i++) { + ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]); + if (ret) + return ret; + } + + return ret; +} diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index 74de18416be9..fe4161bc93ae 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -26,14 +26,155 @@ #include "drm.h" +#define DRM_VC4_SUBMIT_CL 0x00 +#define DRM_VC4_WAIT_SEQNO 0x01 +#define DRM_VC4_WAIT_BO 0x02 #define 
DRM_VC4_CREATE_BO 0x03 #define DRM_VC4_MMAP_BO 0x04 #define DRM_VC4_CREATE_SHADER_BO 0x05 +#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) +#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) +#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo) #define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) #define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) #define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) +struct drm_vc4_submit_rcl_surface { + __u32 hindex; /* Handle index, or ~0 if not present. */ + __u32 offset; /* Offset to start of buffer. */ + /* + * Bits for either render config (color_write) or load/store packet. + * Bits should all be 0 for MSAA load/stores. + */ + __u16 bits; + +#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0) + __u16 flags; +}; + +/** + * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D + * engine. + * + * Drivers typically use GPU BOs to store batchbuffers / command lists and + * their associated state. However, because the VC4 lacks an MMU, we have to + * do validation of memory accesses by the GPU commands. If we were to store + * our commands in BOs, we'd need to do uncached readback from them to do the + * validation process, which is too expensive. Instead, userspace accumulates + * commands and associated state in plain memory, then the kernel copies the + * data to its own address space, and then validates and stores it in a GPU + * BO. + */ +struct drm_vc4_submit_cl { + /* Pointer to the binner command list. + * + * This is the first set of commands executed, which runs the + * coordinate shader to determine where primitives land on the screen, + * then writes out the state updates and draw calls necessary per tile + * to the tile allocation BO. + */ + __u64 bin_cl; + + /* Pointer to the shader records. + * + * Shader records are the structures read by the hardware that contain + * pointers to uniforms, shaders, and vertex attributes. The + * reference to the shader record has enough information to determine + * how many pointers are necessary (fixed number for shaders/uniforms, + * and an attribute count), so those BO indices into bo_handles are + * just stored as __u32s before each shader record passed in. + */ + __u64 shader_rec; + + /* Pointer to uniform data and texture handles for the textures + * referenced by the shader. + * + * For each shader state record, there is a set of uniform data in the + * order referenced by the record (FS, VS, then CS). Each set of + * uniform data has a __u32 index into bo_handles per texture + * sample operation, in the order the QPU_W_TMUn_S writes appear in + * the program. Following the texture BO handle indices is the actual + * uniform data. + * + * The individual uniform state blocks don't have sizes passed in, + * because the kernel has to determine the sizes anyway during shader + * code validation. + */ + __u64 uniforms; + __u64 bo_handles; + + /* Size in bytes of the binner command list. */ + __u32 bin_cl_size; + /* Size in bytes of the set of shader records. */ + __u32 shader_rec_size; + /* Number of shader records. 
+ * + * This could just be computed from the contents of shader_records and + * the address bits of references to them from the bin CL, but it + * keeps the kernel from having to resize some allocations it makes. + */ + __u32 shader_rec_count; + /* Size in bytes of the uniform state. */ + __u32 uniforms_size; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + /* RCL setup: */ + __u16 width; + __u16 height; + __u8 min_x_tile; + __u8 min_y_tile; + __u8 max_x_tile; + __u8 max_y_tile; + struct drm_vc4_submit_rcl_surface color_read; + struct drm_vc4_submit_rcl_surface color_write; + struct drm_vc4_submit_rcl_surface zs_read; + struct drm_vc4_submit_rcl_surface zs_write; + struct drm_vc4_submit_rcl_surface msaa_color_write; + struct drm_vc4_submit_rcl_surface msaa_zs_write; + __u32 clear_color[2]; + __u32 clear_z; + __u8 clear_s; + + __u32 pad:24; + +#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) + __u32 flags; + + /* Returned value of the seqno of this render job (for the + * wait ioctl). + */ + __u64 seqno; +}; + +/** + * struct drm_vc4_wait_seqno - ioctl argument for waiting for + * DRM_VC4_SUBMIT_CL completion using its returned seqno. + * + * timeout_ns is the timeout in nanoseconds, where "0" means "don't + * block, just return the status." + */ +struct drm_vc4_wait_seqno { + __u64 seqno; + __u64 timeout_ns; +}; + +/** + * struct drm_vc4_wait_bo - ioctl argument for waiting for + * completion of the last DRM_VC4_SUBMIT_CL on a BO. + * + * This is useful for cases where multiple processes might be + * rendering to a BO and you want to wait for all rendering to be + * completed. + */ +struct drm_vc4_wait_bo { + __u32 handle; + __u32 pad; + __u64 timeout_ns; +}; + /** * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs. * -- cgit v1.2.3 From 214613656b5179f0daab6e0a080814b5100d45f0 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 30 Oct 2015 10:09:02 -0700 Subject: drm/vc4: Add an interface for capturing the GPU state after a hang. This can be parsed with vc4-gpu-tools tools for trying to figure out what was going on. v2: Use __u32-style types. Signed-off-by: Eric Anholt --- drivers/gpu/drm/vc4/vc4_drv.c | 2 + drivers/gpu/drm/vc4/vc4_drv.h | 4 + drivers/gpu/drm/vc4/vc4_gem.c | 185 ++++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/vc4_drm.h | 45 ++++++++++ 4 files changed, 236 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 2cfee5959455..97226b677bf4 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -80,6 +80,8 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, + DRM_ROOT_ONLY), }; static struct drm_driver vc4_drm_driver = { diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index f9927d87369f..080865ec2bae 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -19,6 +19,8 @@ struct vc4_dev { struct drm_fbdev_cma *fbdev; + struct vc4_hang_state *hang_state; + /* The kernel-space BO cache. Tracks buffers that have been * unreferenced by all other users (refcounts of 0!) but not * yet freed, so we can do cheap allocations. 
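The new ioctl follows the common two-call convention for variable-length arrays: call once with bo_count set to zero to learn how many BOs were captured, then call again with a large enough array. A minimal userspace sketch of that pattern (the dump_hang_state() helper is hypothetical, error handling is trimmed, and it assumes libdrm's drmIoctl() plus the uapi structs and DRM_IOCTL_VC4_GET_HANG_STATE macro added to vc4_drm.h further down in this patch):

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "vc4_drm.h"

/* Hypothetical example: fetch the hang state saved by the kernel after a
 * GPU reset. The ioctl is registered DRM_ROOT_ONLY above, so this needs a
 * root-owned DRM fd.
 */
static int dump_hang_state(int drm_fd)
{
	struct drm_vc4_get_hang_state get = { 0 };
	struct drm_vc4_get_hang_state_bo *bos;

	/* First call: bo_count == 0, so the kernel only reports the size. */
	if (drmIoctl(drm_fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get))
		return -1;	/* errno == ENOENT: no hang recorded yet */

	bos = calloc(get.bo_count, sizeof(*bos));
	if (!bos)
		return -1;
	get.bo = (uintptr_t)bos;

	/* Second call: fills the registers and the handle/paddr/size array. */
	if (drmIoctl(drm_fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get)) {
		free(bos);
		return -1;
	}

	/* ... hand the registers in 'get' plus 'bos' to vc4-gpu-tools ... */
	free(bos);
	return 0;
}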
@@ -361,6 +363,8 @@ int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); int vc4_mmap(struct file *filp, struct vm_area_struct *vma); int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); void *vc4_prime_vmap(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 5fb0556e001e..39f29e759334 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -40,6 +40,186 @@ vc4_queue_hangcheck(struct drm_device *dev) round_jiffies_up(jiffies + msecs_to_jiffies(100))); } +struct vc4_hang_state { + struct drm_vc4_get_hang_state user_state; + + u32 bo_count; + struct drm_gem_object **bo; +}; + +static void +vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) +{ + unsigned int i; + + mutex_lock(&dev->struct_mutex); + for (i = 0; i < state->user_state.bo_count; i++) + drm_gem_object_unreference(state->bo[i]); + mutex_unlock(&dev->struct_mutex); + + kfree(state); +} + +int +vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vc4_get_hang_state *get_state = data; + struct drm_vc4_get_hang_state_bo *bo_state; + struct vc4_hang_state *kernel_state; + struct drm_vc4_get_hang_state *state; + struct vc4_dev *vc4 = to_vc4_dev(dev); + unsigned long irqflags; + u32 i; + int ret; + + spin_lock_irqsave(&vc4->job_lock, irqflags); + kernel_state = vc4->hang_state; + if (!kernel_state) { + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + return -ENOENT; + } + state = &kernel_state->user_state; + + /* If the user's array isn't big enough, just return the + * required array size. + */ + if (get_state->bo_count < state->bo_count) { + get_state->bo_count = state->bo_count; + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + return 0; + } + + vc4->hang_state = NULL; + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + + /* Save the user's BO pointer, so we don't stomp it with the memcpy. 
*/ + state->bo = get_state->bo; + memcpy(get_state, state, sizeof(*state)); + + bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL); + if (!bo_state) { + ret = -ENOMEM; + goto err_free; + } + + for (i = 0; i < state->bo_count; i++) { + struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]); + u32 handle; + + ret = drm_gem_handle_create(file_priv, kernel_state->bo[i], + &handle); + + if (ret) { + state->bo_count = i - 1; + goto err; + } + bo_state[i].handle = handle; + bo_state[i].paddr = vc4_bo->base.paddr; + bo_state[i].size = vc4_bo->base.base.size; + } + + ret = copy_to_user((void __user *)(uintptr_t)get_state->bo, + bo_state, + state->bo_count * sizeof(*bo_state)); + kfree(bo_state); + +err_free: + + vc4_free_hang_state(dev, kernel_state); + +err: + return ret; +} + +static void +vc4_save_hang_state(struct drm_device *dev) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_vc4_get_hang_state *state; + struct vc4_hang_state *kernel_state; + struct vc4_exec_info *exec; + struct vc4_bo *bo; + unsigned long irqflags; + unsigned int i, unref_list_count; + + kernel_state = kcalloc(1, sizeof(*state), GFP_KERNEL); + if (!kernel_state) + return; + + state = &kernel_state->user_state; + + spin_lock_irqsave(&vc4->job_lock, irqflags); + exec = vc4_first_job(vc4); + if (!exec) { + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + return; + } + + unref_list_count = 0; + list_for_each_entry(bo, &exec->unref_list, unref_head) + unref_list_count++; + + state->bo_count = exec->bo_count + unref_list_count; + kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo), + GFP_ATOMIC); + if (!kernel_state->bo) { + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + return; + } + + for (i = 0; i < exec->bo_count; i++) { + drm_gem_object_reference(&exec->bo[i]->base); + kernel_state->bo[i] = &exec->bo[i]->base; + } + + list_for_each_entry(bo, &exec->unref_list, unref_head) { + drm_gem_object_reference(&bo->base.base); + kernel_state->bo[i] = &bo->base.base; + i++; + } + + state->start_bin = exec->ct0ca; + state->start_render = exec->ct1ca; + + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + + state->ct0ca = V3D_READ(V3D_CTNCA(0)); + state->ct0ea = V3D_READ(V3D_CTNEA(0)); + + state->ct1ca = V3D_READ(V3D_CTNCA(1)); + state->ct1ea = V3D_READ(V3D_CTNEA(1)); + + state->ct0cs = V3D_READ(V3D_CTNCS(0)); + state->ct1cs = V3D_READ(V3D_CTNCS(1)); + + state->ct0ra0 = V3D_READ(V3D_CT00RA0); + state->ct1ra0 = V3D_READ(V3D_CT01RA0); + + state->bpca = V3D_READ(V3D_BPCA); + state->bpcs = V3D_READ(V3D_BPCS); + state->bpoa = V3D_READ(V3D_BPOA); + state->bpos = V3D_READ(V3D_BPOS); + + state->vpmbase = V3D_READ(V3D_VPMBASE); + + state->dbge = V3D_READ(V3D_DBGE); + state->fdbgo = V3D_READ(V3D_FDBGO); + state->fdbgb = V3D_READ(V3D_FDBGB); + state->fdbgr = V3D_READ(V3D_FDBGR); + state->fdbgs = V3D_READ(V3D_FDBGS); + state->errstat = V3D_READ(V3D_ERRSTAT); + + spin_lock_irqsave(&vc4->job_lock, irqflags); + if (vc4->hang_state) { + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + vc4_free_hang_state(dev, kernel_state); + } else { + vc4->hang_state = kernel_state; + spin_unlock_irqrestore(&vc4->job_lock, irqflags); + } +} + static void vc4_reset(struct drm_device *dev) { @@ -64,6 +244,8 @@ vc4_reset_work(struct work_struct *work) struct vc4_dev *vc4 = container_of(work, struct vc4_dev, hangcheck.reset_work); + vc4_save_hang_state(vc4->dev); + vc4_reset(vc4->dev); } @@ -679,4 +861,7 @@ vc4_gem_destroy(struct drm_device *dev) } vc4_bo_cache_destroy(dev); + + if (vc4->hang_state) + 
vc4_free_hang_state(dev, vc4->hang_state); } diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index fe4161bc93ae..eeb37e394f13 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -32,6 +32,7 @@ #define DRM_VC4_CREATE_BO 0x03 #define DRM_VC4_MMAP_BO 0x04 #define DRM_VC4_CREATE_SHADER_BO 0x05 +#define DRM_VC4_GET_HANG_STATE 0x06 #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) @@ -39,6 +40,7 @@ #define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) #define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) #define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) +#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state) struct drm_vc4_submit_rcl_surface { __u32 hindex; /* Handle index, or ~0 if not present. */ @@ -231,4 +233,47 @@ struct drm_vc4_create_shader_bo { __u32 pad; }; +struct drm_vc4_get_hang_state_bo { + __u32 handle; + __u32 paddr; + __u32 size; + __u32 pad; +}; + +/** + * struct drm_vc4_hang_state - ioctl argument for collecting state + * from a GPU hang for analysis. +*/ +struct drm_vc4_get_hang_state { + /** Pointer to array of struct drm_vc4_get_hang_state_bo. */ + __u64 bo; + /** + * On input, the size of the bo array. Output is the number + * of bos to be returned. + */ + __u32 bo_count; + + __u32 start_bin, start_render; + + __u32 ct0ca, ct0ea; + __u32 ct1ca, ct1ea; + __u32 ct0cs, ct1cs; + __u32 ct0ra0, ct1ra0; + + __u32 bpca, bpcs; + __u32 bpoa, bpos; + + __u32 vpmbase; + + __u32 dbge; + __u32 fdbgo; + __u32 fdbgb; + __u32 fdbgr; + __u32 fdbgs; + __u32 errstat; + + /* Pad that we may save more registers into in the future. */ + __u32 pad[16]; +}; + #endif /* _UAPI_VC4_DRM_H_ */ -- cgit v1.2.3 From 264d6970e16c68f851086d6f7fbf9aba18910e62 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:42 +0100 Subject: drm: Polish fbdev helper struct docs Mostly this is just adding extensive docs for the callbacks, but also a few other additions. v2: Use FIXME comments to annotate helper hooks that should be replaced. v3: Small nits (Thierry). Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-2-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_fb_helper.h | 96 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 81 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 87b090c4b730..d2d4ad1918ac 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -74,25 +74,76 @@ struct drm_fb_helper_surface_size { /** * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library - * @gamma_set: Set the given gamma lut register on the given crtc. - * @gamma_get: Read the given gamma lut register on the given crtc, used to - * save the current lut when force-restoring the fbdev for e.g. - * kdbg. - * @fb_probe: Driver callback to allocate and initialize the fbdev info - * structure. Furthermore it also needs to allocate the drm - * framebuffer used to back the fbdev. 
- * @initial_config: Setup an initial fbdev display configuration * * Driver callbacks used by the fbdev emulation helper library. */ struct drm_fb_helper_funcs { + /** + * @gamma_set: + * + * Set the given gamma LUT register on the given CRTC. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); + /** + * @gamma_get: + * + * Read the given gamma LUT register on the given CRTC, used to save the + * current LUT when force-restoring the fbdev for e.g. kgdb. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); + /** + * @fb_probe: + * + * Driver callback to allocate and initialize the fbdev info structure. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory. + * + * RETURNS: + * + * The driver should return 0 on success and a negative error code on + * failure. + */ int (*fb_probe)(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes); + + /** + * @initial_config: + * + * Driver callback to setup an initial fbdev display configuration. + * Drivers can use this callback to tell the fbdev emulation what the + * preferred initial configuration is. This is useful to implement + * smooth booting where the fbdev (and subsequently all userspace) never + * changes the mode, but always inherits the existing configuration. + * + * This callback is optional. + * + * RETURNS: + * + * The driver should return true if a suitable initial configuration has + * been filled out and false when the fbdev helper should fall back to + * the default probing logic. + */ bool (*initial_config)(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **crtcs, struct drm_display_mode **modes, @@ -105,18 +156,22 @@ struct drm_fb_helper_connector { }; /** - * struct drm_fb_helper - helper to emulate fbdev on top of kms - * @fb: Scanout framebuffer object - * @dev: DRM device + * struct drm_fb_helper - main structure to emulate fbdev on top of KMS + * @fb: Scanout framebuffer object + * @dev: DRM device * @crtc_count: number of possible CRTCs * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) * @connector_count: number of connected connectors * @connector_info_alloc_count: size of connector_info + * @connector_info: array of per-connector information * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors - * @kernel_fb_list: list_head in kernel_fb_helper_list - * @delayed_hotplug: was there a hotplug while kms master active? + * + * This is the main structure used by the fbdev helpers. Drivers supporting + * fbdev emulation should embed this into their overall driver structure. + * Drivers must also fill out a struct &drm_fb_helper_funcs with a few + * operations.
*/ struct drm_fb_helper { struct drm_framebuffer *fb; @@ -129,10 +184,21 @@ struct drm_fb_helper { const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; + + /** + * @kernel_fb_list: + * + * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. + */ struct list_head kernel_fb_list; - /* we got a hotplug but fbdev wasn't running the console - delay until next set_par */ + /** + * @delayed_hotplug: + * + * A hotplug was received while fbdev wasn't in control of the DRM + * device, i.e. another KMS master was active. The output configuration + * needs to be reprobed when fbdev is in control again. + */ bool delayed_hotplug; /** -- cgit v1.2.3 From b516a9efb7af38733ba2308fc7c0f49f034f53bd Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:43 +0100 Subject: drm: Move LEAVE/ENTER_ATOMIC_MODESET to fbdev helpers This is only used for kgdb (and previously panic) handlers in the fbdev emulation, so belongs there. Note that this means we'll leave behind a forward declaration, but once all the helper vtables are consolidated (in the next patch) that will make more sense. v2: fixup radeon/amdgpu. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-3-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding (v2) --- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1 + drivers/gpu/drm/nouveau/nv50_display.c | 1 + drivers/gpu/drm/radeon/atombios_crtc.c | 1 + drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1 + include/drm/drm_crtc_helper.h | 5 +---- include/drm/drm_fb_helper.h | 5 +++++ 6 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index a53d756672fe..fdc1be8550da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -35,6 +35,7 @@ #include #include #include +#include <drm/drm_fb_helper.h> #include #include #include diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index c053c50b346a..a240939beca4 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -28,6 +28,7 @@ #include #include #include +#include <drm/drm_fb_helper.h> #include diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dac78ad24b31..801dd60ac192 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -25,6 +25,7 @@ */ #include #include +#include <drm/drm_fb_helper.h> #include #include #include "radeon.h" diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 678b4386540d..32b338ff436b 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -25,6 +25,7 @@ */ #include #include +#include <drm/drm_fb_helper.h> #include #include #include "radeon.h" diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index e22ab29d2d00..26cc978f79e7 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -41,10 +41,7 @@ #include -enum mode_set_atomic { - LEAVE_ATOMIC_MODE_SET, - ENTER_ATOMIC_MODE_SET, -}; +enum mode_set_atomic; /** * struct drm_crtc_helper_funcs - helper operations for CRTCs diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index d2d4ad1918ac..d8a40dff0d1d 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -34,6 +34,11 @@ struct drm_fb_helper; #include +enum mode_set_atomic
{ + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + struct drm_fb_offset { int x, y; }; -- cgit v1.2.3 From 092d01dae09aa6779ed41c3ac637e1e3c835424b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:44 +0100 Subject: drm: Reorganize helper vtables and their docs Currently we have 4 helper libraries (probe, crtc, plane & atomic) that all use the same helper vtables. And that's by necessity since we don't want to litter the core structs with one ops pointer per helper library. Also often the reuse the same hooks (like atomic does, to facilite conversion from existing drivers using crtc and plane helpers). Given all that it doesn't make sense to put the docs for these next to specific helpers. Instead extract them into a new header file and section in the docbook, and add references to them everywhere. Unfortunately kernel-doc complains when an include directive doesn't find anything (and it does by dumping crap into the output file). We have to remove the now empty includes to avoid that, instead of leaving them in for future proofing. v2: More OCD in ordering functions. v3: Spelling plus collate copyright headers properly. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-4-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- Documentation/DocBook/gpu.tmpl | 6 +- drivers/gpu/drm/drm_atomic_helper.c | 6 + drivers/gpu/drm/drm_crtc_helper.c | 5 + drivers/gpu/drm/drm_plane_helper.c | 4 + drivers/gpu/drm/drm_probe_helper.c | 3 + include/drm/drm_crtc_helper.h | 158 +------------------ include/drm/drm_modeset_helper_vtables.h | 257 +++++++++++++++++++++++++++++++ include/drm/drm_plane_helper.h | 38 +---- 8 files changed, 282 insertions(+), 195 deletions(-) create mode 100644 include/drm/drm_modeset_helper_vtables.h (limited to 'include') diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 03f01e76add7..bd608d0c5703 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -2301,10 +2301,14 @@ void intel_crt_init(struct drm_device *dev) !Iinclude/drm/drm_atomic_helper.h !Edrivers/gpu/drm/drm_atomic_helper.c + + + Modeset Helper Reference for Common Vtables +!Iinclude/drm/drm_modeset_helper_vtables.h +!Pinclude/drm/drm_modeset_helper_vtables.h overview Modeset Helper Functions Reference -!Iinclude/drm/drm_crtc_helper.h !Edrivers/gpu/drm/drm_crtc_helper.c !Pdrivers/gpu/drm/drm_crtc_helper.c overview diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 74a5fc4deef6..ab275499d2a3 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -52,6 +52,12 @@ * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the * various functions to implement set_property callbacks. New drivers must not * implement these functions themselves but must use the provided helpers. + * + * The atomic helper uses the same function table structures as all other + * modesetting helpers. See the documentation for struct &drm_crtc_helper_funcs, + * struct &drm_encoder_helper_funcs and struct &drm_connector_helper_funcs. It + * also shares the struct &drm_plane_helper_funcs function table with the plane + * helpers. 
*/ static void drm_atomic_helper_plane_changed(struct drm_atomic_state *state, diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 10d0989db273..5dc92b66e07e 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -62,6 +62,11 @@ * converting to the plane helpers). New drivers must not use these functions * but need to implement the atomic interface instead, potentially using the * atomic helpers for that. + * + * These legacy modeset helpers use the same function table structures as + * all other modesetting helpers. See the documentation for struct + * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct + * &drm_connector_helper_funcs. */ MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_DESCRIPTION("DRM KMS helper"); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index a6983d41920d..8455e996dd9c 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -57,6 +57,10 @@ * by the atomic helpers. * * Again drivers are strongly urged to switch to the new interfaces. + * + * The plane helpers share the function table structures with other helpers, + * specifically also the atomic helpers. See struct &drm_plane_helper_funcs for + * the details. */ /* diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index eee3b6f38cfb..b59acad83644 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -53,6 +53,9 @@ * This helper library can be used independently of the modeset helper library. * Drivers can also overwrite different parts e.g. use their own hotplug * handling code to avoid probing unrelated outputs. + * + * The probe helpers share the function table structures with other display + * helper libraries. See struct &drm_connector_helper_funcs for the details. */ static bool drm_kms_helper_poll = true; diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 26cc978f79e7..f94ff54ae25e 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -40,145 +40,7 @@ #include #include - -enum mode_set_atomic; - -/** - * struct drm_crtc_helper_funcs - helper operations for CRTCs - * @dpms: set power state - * @prepare: prepare the CRTC, called before @mode_set - * @commit: commit changes to CRTC, called after @mode_set - * @mode_fixup: try to fixup proposed mode for this CRTC - * @mode_set: set this mode - * @mode_set_nofb: set mode only (no scanout buffer attached) - * @mode_set_base: update the scanout buffer - * @mode_set_base_atomic: non-blocking mode set (used for kgdb support) - * @load_lut: load color palette - * @disable: disable CRTC when no longer in use - * @enable: enable CRTC - * @atomic_check: check for validity of an atomic state - * @atomic_begin: begin atomic update - * @atomic_flush: flush atomic update - * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). - */ -struct drm_crtc_helper_funcs { - /* - * Control power levels on the CRTC. If the mode passed in is - * unsupported, the provider must use the next lowest power level. 
- */ - void (*dpms)(struct drm_crtc *crtc, int mode); - void (*prepare)(struct drm_crtc *crtc); - void (*commit)(struct drm_crtc *crtc); - - /* Provider can fixup or change mode timings before modeset occurs */ - bool (*mode_fixup)(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - /* Actually set the mode */ - int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); - /* Actually set the mode for atomic helpers, optional */ - void (*mode_set_nofb)(struct drm_crtc *crtc); - - /* Move the crtc on the current fb to the given position *optional* */ - int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb); - int (*mode_set_base_atomic)(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y, - enum mode_set_atomic); - - /* reload the current crtc LUT */ - void (*load_lut)(struct drm_crtc *crtc); - - void (*disable)(struct drm_crtc *crtc); - void (*enable)(struct drm_crtc *crtc); - - /* atomic helpers */ - int (*atomic_check)(struct drm_crtc *crtc, - struct drm_crtc_state *state); - void (*atomic_begin)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); - void (*atomic_flush)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); -}; - -/** - * struct drm_encoder_helper_funcs - helper operations for encoders - * @dpms: set power state - * @save: save connector state - * @restore: restore connector state - * @mode_fixup: try to fixup proposed mode for this connector - * @prepare: part of the disable sequence, called before the CRTC modeset - * @commit: called after the CRTC modeset - * @mode_set: set this mode, optional for atomic helpers - * @get_crtc: return CRTC that the encoder is currently attached to - * @detect: connection status detection - * @disable: disable encoder when not in use (overrides DPMS off) - * @enable: enable encoder - * @atomic_check: check for validity of an atomic update - * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). 
- */ -struct drm_encoder_helper_funcs { - void (*dpms)(struct drm_encoder *encoder, int mode); - void (*save)(struct drm_encoder *encoder); - void (*restore)(struct drm_encoder *encoder); - - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - void (*prepare)(struct drm_encoder *encoder); - void (*commit)(struct drm_encoder *encoder); - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); - /* detect for DAC style encoders */ - enum drm_connector_status (*detect)(struct drm_encoder *encoder, - struct drm_connector *connector); - void (*disable)(struct drm_encoder *encoder); - - void (*enable)(struct drm_encoder *encoder); - - /* atomic helpers */ - int (*atomic_check)(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state); -}; - -/** - * struct drm_connector_helper_funcs - helper operations for connectors - * @get_modes: get mode list for this connector - * @mode_valid: is this mode valid on the given connector? (optional) - * @best_encoder: return the preferred encoder for this connector - * @atomic_best_encoder: atomic version of @best_encoder - * - * The helper operations are called by the mid-layer CRTC helper. - */ -struct drm_connector_helper_funcs { - int (*get_modes)(struct drm_connector *connector); - enum drm_mode_status (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); - struct drm_encoder *(*best_encoder)(struct drm_connector *connector); - struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, - struct drm_connector_state *connector_state); -}; +#include extern void drm_helper_disable_unused_functions(struct drm_device *dev); extern int drm_crtc_helper_set_config(struct drm_mode_set *set); @@ -196,24 +58,6 @@ extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd); -static inline void drm_crtc_helper_add(struct drm_crtc *crtc, - const struct drm_crtc_helper_funcs *funcs) -{ - crtc->helper_private = funcs; -} - -static inline void drm_encoder_helper_add(struct drm_encoder *encoder, - const struct drm_encoder_helper_funcs *funcs) -{ - encoder->helper_private = funcs; -} - -static inline void drm_connector_helper_add(struct drm_connector *connector, - const struct drm_connector_helper_funcs *funcs) -{ - connector->helper_private = funcs; -} - extern void drm_helper_resume_force_mode(struct drm_device *dev); int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h new file mode 100644 index 000000000000..875809158fc4 --- /dev/null +++ b/include/drm/drm_modeset_helper_vtables.h @@ -0,0 +1,257 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * Copyright © 2011-2013 Intel Corporation + * Copyright © 2015 Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, 
distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_MODESET_HELPER_VTABLES_H__ +#define __DRM_MODESET_HELPER_VTABLES_H__ + +#include + +/** + * DOC: overview + * + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. Drivers are not forced to use this code in their + * implementations but it would be useful if the code they do use at least + * provides a consistent interface and operation to userspace. Therefore it is + * highly recommended to use the provided helpers as much as possible. + * + * Because there is only one pointer per modeset object to hold a vfunc table + * for helper libraries they are by necessity shared among the different + * helpers. + * + * To make this clear all the helper vtables are pulled together in this location here. + */ + +enum mode_set_atomic; + +/** + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * @dpms: set power state + * @prepare: prepare the CRTC, called before @mode_set + * @commit: commit changes to CRTC, called after @mode_set + * @mode_fixup: try to fixup proposed mode for this CRTC + * @mode_set: set this mode + * @mode_set_nofb: set mode only (no scanout buffer attached) + * @mode_set_base: update the scanout buffer + * @mode_set_base_atomic: non-blocking mode set (used for kgdb support) + * @load_lut: load color palette + * @disable: disable CRTC when no longer in use + * @enable: enable CRTC + * @atomic_check: check for validity of an atomic state + * @atomic_begin: begin atomic update + * @atomic_flush: flush atomic update + * + * The helper operations are called by the mid-layer CRTC helper. + * + * Note that with atomic helpers @dpms, @prepare and @commit hooks are + * deprecated. Used @enable and @disable instead exclusively. + * + * With legacy crtc helpers there's a big semantic difference between @disable + * and the other hooks: @disable also needs to release any resources acquired in + * @mode_set (like shared PLLs). + */ +struct drm_crtc_helper_funcs { + /* + * Control power levels on the CRTC. If the mode passed in is + * unsupported, the provider must use the next lowest power level. 
+ */ + void (*dpms)(struct drm_crtc *crtc, int mode); + void (*prepare)(struct drm_crtc *crtc); + void (*commit)(struct drm_crtc *crtc); + + /* Provider can fixup or change mode timings before modeset occurs */ + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + /* Actually set the mode */ + int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); + /* Actually set the mode for atomic helpers, optional */ + void (*mode_set_nofb)(struct drm_crtc *crtc); + + /* Move the crtc on the current fb to the given position *optional* */ + int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + int (*mode_set_base_atomic)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic); + + /* reload the current crtc LUT */ + void (*load_lut)(struct drm_crtc *crtc); + + void (*disable)(struct drm_crtc *crtc); + void (*enable)(struct drm_crtc *crtc); + + /* atomic helpers */ + int (*atomic_check)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + void (*atomic_begin)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); +}; + +/** + * drm_crtc_helper_add - sets the helper vtable for a crtc + * @crtc: DRM CRTC + * @funcs: helper vtable to set for @crtc + */ +static inline void drm_crtc_helper_add(struct drm_crtc *crtc, + const struct drm_crtc_helper_funcs *funcs) +{ + crtc->helper_private = funcs; +} + +/** + * struct drm_encoder_helper_funcs - helper operations for encoders + * @dpms: set power state + * @save: save connector state + * @restore: restore connector state + * @mode_fixup: try to fixup proposed mode for this connector + * @prepare: part of the disable sequence, called before the CRTC modeset + * @commit: called after the CRTC modeset + * @mode_set: set this mode, optional for atomic helpers + * @get_crtc: return CRTC that the encoder is currently attached to + * @detect: connection status detection + * @disable: disable encoder when not in use (overrides DPMS off) + * @enable: enable encoder + * @atomic_check: check for validity of an atomic update + * + * The helper operations are called by the mid-layer CRTC helper. + * + * Note that with atomic helpers @dpms, @prepare and @commit hooks are + * deprecated. Used @enable and @disable instead exclusively. + * + * With legacy crtc helpers there's a big semantic difference between @disable + * and the other hooks: @disable also needs to release any resources acquired in + * @mode_set (like shared PLLs). 
+ */ +struct drm_encoder_helper_funcs { + void (*dpms)(struct drm_encoder *encoder, int mode); + void (*save)(struct drm_encoder *encoder); + void (*restore)(struct drm_encoder *encoder); + + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + void (*prepare)(struct drm_encoder *encoder); + void (*commit)(struct drm_encoder *encoder); + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); + /* detect for DAC style encoders */ + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + void (*disable)(struct drm_encoder *encoder); + + void (*enable)(struct drm_encoder *encoder); + + /* atomic helpers */ + int (*atomic_check)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +}; + +/** + * drm_encoder_helper_add - sets the helper vtable for a encoder + * @encoder: DRM encoder + * @funcs: helper vtable to set for @encoder + */ +static inline void drm_encoder_helper_add(struct drm_encoder *encoder, + const struct drm_encoder_helper_funcs *funcs) +{ + encoder->helper_private = funcs; +} + +/** + * struct drm_connector_helper_funcs - helper operations for connectors + * @get_modes: get mode list for this connector + * @mode_valid: is this mode valid on the given connector? (optional) + * @best_encoder: return the preferred encoder for this connector + * @atomic_best_encoder: atomic version of @best_encoder + * + * The helper operations are called by the mid-layer CRTC helper. + */ +struct drm_connector_helper_funcs { + int (*get_modes)(struct drm_connector *connector); + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + struct drm_display_mode *mode); + struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); +}; + +/** + * drm_connector_helper_add - sets the helper vtable for a connector + * @connector: DRM connector + * @funcs: helper vtable to set for @connector + */ +static inline void drm_connector_helper_add(struct drm_connector *connector, + const struct drm_connector_helper_funcs *funcs) +{ + connector->helper_private = funcs; +} + +/** + * struct drm_plane_helper_funcs - helper operations for CRTCs + * @prepare_fb: prepare a framebuffer for use by the plane + * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane + * @atomic_check: check that a given atomic state is valid and can be applied + * @atomic_update: apply an atomic state to the plane (mandatory) + * @atomic_disable: disable the plane + * + * The helper operations are called by the mid-layer CRTC helper. 
+ */ +struct drm_plane_helper_funcs { + int (*prepare_fb)(struct drm_plane *plane, + const struct drm_plane_state *new_state); + void (*cleanup_fb)(struct drm_plane *plane, + const struct drm_plane_state *old_state); + + int (*atomic_check)(struct drm_plane *plane, + struct drm_plane_state *state); + void (*atomic_update)(struct drm_plane *plane, + struct drm_plane_state *old_state); + void (*atomic_disable)(struct drm_plane *plane, + struct drm_plane_state *old_state); +}; + +/** + * drm_plane_helper_add - sets the helper vtable for a plane + * @plane: DRM plane + * @funcs: helper vtable to set for @plane + */ +static inline void drm_plane_helper_add(struct drm_plane *plane, + const struct drm_plane_helper_funcs *funcs) +{ + plane->helper_private = funcs; +} + +#endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 5a7f9d4efb1d..4421f3f4ca8d 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -26,6 +26,7 @@ #include #include +#include /* * Drivers that don't allow primary plane scaling may pass this macro in place @@ -36,46 +37,9 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -/** - * DOC: plane helpers - * - * Helper functions to assist with creation and handling of CRTC primary - * planes. - */ - int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); -/** - * drm_plane_helper_funcs - helper operations for CRTCs - * @prepare_fb: prepare a framebuffer for use by the plane - * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane - * @atomic_check: check that a given atomic state is valid and can be applied - * @atomic_update: apply an atomic state to the plane (mandatory) - * @atomic_disable: disable the plane - * - * The helper operations are called by the mid-layer CRTC helper. - */ -struct drm_plane_helper_funcs { - int (*prepare_fb)(struct drm_plane *plane, - const struct drm_plane_state *new_state); - void (*cleanup_fb)(struct drm_plane *plane, - const struct drm_plane_state *old_state); - - int (*atomic_check)(struct drm_plane *plane, - struct drm_plane_state *state); - void (*atomic_update)(struct drm_plane *plane, - struct drm_plane_state *old_state); - void (*atomic_disable)(struct drm_plane *plane, - struct drm_plane_state *old_state); -}; - -static inline void drm_plane_helper_add(struct drm_plane *plane, - const struct drm_plane_helper_funcs *funcs) -{ - plane->helper_private = funcs; -} - int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, -- cgit v1.2.3 From 4490d4c7111e1ebf748ea2c3e1e64d103d73bae7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:45 +0100 Subject: drm: Make helper vtable pointers type-safe Originally the idea behind void* was to allow different sets of helpers. But now we have that (with probe, plane, crtc and atomic helpers) and we still just use the same set of vtables. That's the only way to make the individual helpers modular and allow drivers to pick&choose and transition between them. So this flexibility isn't really needed. Also we have lots of non-vtable data meanwhile in core structures too, this is not the first one at all. Given that the void * is only trouble since gcc can't warn you if you mix them up. Let's fix that and make them typesafe. 
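As a rough illustration of what the typed pointers buy (hypothetical driver code, the foo_* names are invented for this example and are not part of the patch): assigning a mismatched vtable used to compile silently through the void *, and now draws a warning from gcc.

#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Hypothetical stub implementations, just to make the tables complete. */
static void foo_crtc_mode_set_nofb(struct drm_crtc *crtc) { }
static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state) { }

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_set_nofb = foo_crtc_mode_set_nofb,
};

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.atomic_update = foo_plane_atomic_update,
};

static void foo_crtc_init(struct drm_crtc *crtc)
{
	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);	/* fine */

	/*
	 * With helper_private declared as const void * this mix-up was
	 * accepted without complaint; with the typed pointer gcc flags
	 * the incompatible pointer assignment:
	 *
	 *	crtc->helper_private = &foo_plane_helper_funcs;
	 */
}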
Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-5-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_crtc.h | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 4765df331002..e8f8e4e9a6a1 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -254,6 +254,11 @@ struct drm_plane; struct drm_bridge; struct drm_atomic_state; +struct drm_crtc_helper_funcs; +struct drm_encoder_helper_funcs; +struct drm_connector_helper_funcs; +struct drm_plane_helper_funcs; + /** * struct drm_crtc_state - mutable CRTC state * @crtc: backpointer to the CRTC @@ -467,7 +472,7 @@ struct drm_crtc { uint16_t *gamma_store; /* if you are using the helper */ - const void *helper_private; + const struct drm_crtc_helper_funcs *helper_private; struct drm_object_properties properties; @@ -597,7 +602,7 @@ struct drm_encoder { struct drm_crtc *crtc; struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; - const void *helper_private; + const struct drm_encoder_helper_funcs *helper_private; }; /* should we poll this connector for connects and disconnects */ @@ -702,7 +707,7 @@ struct drm_connector { /* requested DPMS state */ int dpms; - const void *helper_private; + const struct drm_connector_helper_funcs *helper_private; /* forced on connector */ struct drm_cmdline_mode cmdline_mode; @@ -870,7 +875,7 @@ struct drm_plane { enum drm_plane_type type; - const void *helper_private; + const struct drm_plane_helper_funcs *helper_private; struct drm_plane_state *state; }; -- cgit v1.2.3 From da024fe58680a46346cfbeba5db705d33f39b313 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:47 +0100 Subject: drm/bridge: Improve kerneldoc Especially document the assumptions and semantics of the callbacks carefully. Just a warm-up excercise really. v2: Spelling fixes (Eric). v3: Consolidate more with existing docs: - Remove the overview section explaining the bridge funcs, that's now all in the drm_bridge_funcs kerneldoc in much more detail. - Use & to reference structs so that kerneldoc automatically inserts hyperlinks. v4: Review from Thierry. Cc: Eric Anholt Cc: Archit Taneja Signed-off-by: Daniel Vetter Reviewed-by: Archit Taneja (v3) Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-7-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- drivers/gpu/drm/drm_bridge.c | 69 +++++++++++------------------ include/drm/drm_crtc.h | 102 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 122 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 6b8f7211e543..bd93453afa61 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -31,14 +31,14 @@ /** * DOC: overview * - * drm_bridge represents a device that hangs on to an encoder. These are handy - * when a regular drm_encoder entity isn't enough to represent the entire + * struct &drm_bridge represents a device that hangs on to an encoder. These are + * handy when a regular &drm_encoder entity isn't enough to represent the entire * encoder chain. 
* - * A bridge is always associated to a single drm_encoder at a time, but can be + * A bridge is always attached to a single &drm_encoder at a time, but can be * either connected to it directly, or through an intermediate bridge: * - * encoder ---> bridge B ---> bridge A + * encoder ---> bridge B ---> bridge A * * Here, the output of the encoder feeds to bridge B, and that furthers feeds to * bridge A. @@ -46,11 +46,16 @@ * The driver using the bridge is responsible to make the associations between * the encoder and bridges. Once these links are made, the bridges will * participate along with encoder functions to perform mode_set/enable/disable - * through the ops provided in drm_bridge_funcs. + * through the ops provided in &drm_bridge_funcs. * * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes, - * crtcs, encoders or connectors. They just provide additional hooks to get the - * desired output at the end of the encoder chain. + * CRTCs, encoders or connectors and hence are not visible to userspace. They + * just provide additional hooks to get the desired output at the end of the + * encoder chain. + * + * Bridges can also be chained up using the next pointer in struct &drm_bridge. + * + * Both legacy CRTC helpers and the new atomic modeset helpers support bridges. */ static DEFINE_MUTEX(bridge_lock); @@ -122,34 +127,12 @@ EXPORT_SYMBOL(drm_bridge_attach); /** * DOC: bridge callbacks * - * The drm_bridge_funcs ops are populated by the bridge driver. The drm - * internals(atomic and crtc helpers) use the helpers defined in drm_bridge.c - * These helpers call a specific drm_bridge_funcs op for all the bridges + * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM + * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c + * These helpers call a specific &drm_bridge_funcs op for all the bridges * during encoder configuration. * - * When creating a bridge driver, one can implement drm_bridge_funcs op with - * the help of these rough rules: - * - * pre_enable: this contains things needed to be done for the bridge before - * its clock and timings are enabled by its source. For a bridge, its source - * is generally the encoder or bridge just before it in the encoder chain. - * - * enable: this contains things needed to be done for the bridge once its - * source is enabled. In other words, enable is called once the source is - * ready with clock and timing needed by the bridge. - * - * disable: this contains things needed to be done for the bridge assuming - * that its source is still enabled, i.e. clock and timings are still on. - * - * post_disable: this contains things needed to be done for the bridge once - * its source is disabled, i.e. once clocks and timings are off. - * - * mode_fixup: this should fixup the given mode for the bridge. It is called - * after the encoder's mode fixup. mode_fixup can also reject a mode completely - * if it's unsuitable for the hardware. - * - * mode_set: this sets up the mode for the bridge. It assumes that its source - * (an encoder or a bridge) has set the mode too. + * For detailed specification of the bridge callbacks see &drm_bridge_funcs. 
*/ /** @@ -159,7 +142,7 @@ EXPORT_SYMBOL(drm_bridge_attach); * @mode: desired mode to be set for the bridge * @adjusted_mode: updated mode that works for this bridge * - * Calls 'mode_fixup' drm_bridge_funcs op for all the bridges in the + * Calls ->mode_fixup() &drm_bridge_funcs op for all the bridges in the * encoder chain, starting from the first bridge to the last. * * Note: the bridge passed should be the one closest to the encoder @@ -186,11 +169,11 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge, EXPORT_SYMBOL(drm_bridge_mode_fixup); /** - * drm_bridge_disable - calls 'disable' drm_bridge_funcs op for all + * drm_bridge_disable - calls ->disable() &drm_bridge_funcs op for all * bridges in the encoder chain. * @bridge: bridge control structure * - * Calls 'disable' drm_bridge_funcs op for all the bridges in the encoder + * Calls ->disable() &drm_bridge_funcs op for all the bridges in the encoder * chain, starting from the last bridge to the first. These are called before * calling the encoder's prepare op. * @@ -208,11 +191,11 @@ void drm_bridge_disable(struct drm_bridge *bridge) EXPORT_SYMBOL(drm_bridge_disable); /** - * drm_bridge_post_disable - calls 'post_disable' drm_bridge_funcs op for + * drm_bridge_post_disable - calls ->post_disable() &drm_bridge_funcs op for * all bridges in the encoder chain. * @bridge: bridge control structure * - * Calls 'post_disable' drm_bridge_funcs op for all the bridges in the + * Calls ->post_disable() &drm_bridge_funcs op for all the bridges in the * encoder chain, starting from the first bridge to the last. These are called * after completing the encoder's prepare op. * @@ -236,7 +219,7 @@ EXPORT_SYMBOL(drm_bridge_post_disable); * @mode: desired mode to be set for the bridge * @adjusted_mode: updated mode that works for this bridge * - * Calls 'mode_set' drm_bridge_funcs op for all the bridges in the + * Calls ->mode_set() &drm_bridge_funcs op for all the bridges in the * encoder chain, starting from the first bridge to the last. * * Note: the bridge passed should be the one closest to the encoder @@ -256,11 +239,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge, EXPORT_SYMBOL(drm_bridge_mode_set); /** - * drm_bridge_pre_enable - calls 'pre_enable' drm_bridge_funcs op for all + * drm_bridge_pre_enable - calls ->pre_enable() &drm_bridge_funcs op for all * bridges in the encoder chain. * @bridge: bridge control structure * - * Calls 'pre_enable' drm_bridge_funcs op for all the bridges in the encoder + * Calls ->pre_enable() &drm_bridge_funcs op for all the bridges in the encoder * chain, starting from the last bridge to the first. These are called * before calling the encoder's commit op. * @@ -278,11 +261,11 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge) EXPORT_SYMBOL(drm_bridge_pre_enable); /** - * drm_bridge_enable - calls 'enable' drm_bridge_funcs op for all bridges + * drm_bridge_enable - calls ->enable() &drm_bridge_funcs op for all bridges * in the encoder chain. * @bridge: bridge control structure * - * Calls 'enable' drm_bridge_funcs op for all the bridges in the encoder + * Calls ->enable() &drm_bridge_funcs op for all the bridges in the encoder * chain, starting from the first bridge to the last. These are called * after completing the encoder's commit op. 
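To make the ordering concrete, here is a hedged sketch of a bridge driver filling in &drm_bridge_funcs; the foo_* names are hypothetical and the comments merely restate the guarantees documented for each hook above:

	/* Hypothetical bridge driver, for illustration only. */
	static void foo_bridge_pre_enable(struct drm_bridge *bridge)
	{
		/* source is not running yet: power up, release reset */
	}

	static void foo_bridge_enable(struct drm_bridge *bridge)
	{
		/* source now feeds clock and timings: start the output link */
	}

	static void foo_bridge_disable(struct drm_bridge *bridge)
	{
		/* source still running: stop the output link */
	}

	static void foo_bridge_post_disable(struct drm_bridge *bridge)
	{
		/* source is off: power down */
	}

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		.pre_enable = foo_bridge_pre_enable,
		.enable = foo_bridge_enable,
		.disable = foo_bridge_disable,
		.post_disable = foo_bridge_post_disable,
	};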
* diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index e8f8e4e9a6a1..7f80fae1a45f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -883,24 +883,114 @@ struct drm_plane { /** * struct drm_bridge_funcs - drm_bridge control functions * @attach: Called during drm_bridge_attach - * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge - * @disable: Called right before encoder prepare, disables the bridge - * @post_disable: Called right after encoder prepare, for lockstepped disable - * @mode_set: Set this mode to the bridge - * @pre_enable: Called right before encoder commit, for lockstepped commit - * @enable: Called right after encoder commit, enables the bridge */ struct drm_bridge_funcs { int (*attach)(struct drm_bridge *bridge); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or the next + * &drm_bridge. The parameter adjusted_mode is the input mode the bridge + * requires. It can be modified by this callback and does not need to + * match mode. + * + * This is the only hook that allows a bridge to reject a modeset. If + * this function passes, all other callbacks must succeed for this + * configuration. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Drivers MUST + * NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ bool (*mode_fixup)(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's ->disable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + */ void (*disable)(struct drm_bridge *bridge); + + /** + * @post_disable: + * + * This callback should disable the bridge. It is called right after + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->post_disable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. + */ void (*post_disable)(struct drm_bridge *bridge); + + /** + * @mode_set: + * + * This callback should set the given mode on the bridge. It is called + * after the ->mode_set() callback for the preceding element in the + * display pipeline has been called already. The display pipe (i.e.
+ * clocks and timing signals) is off when this function is called. + */ void (*mode_set)(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's ->pre_enable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + */ void (*pre_enable)(struct drm_bridge *bridge); + + /** + * @enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->enable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + */ void (*enable)(struct drm_bridge *bridge); }; -- cgit v1.2.3 From 88548636406c08ef4b3d32d12d1860a216389177 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:48 +0100 Subject: drm: Update drm_plane_funcs kerneldoc - Merge the docbook into the kerneldoc comments. - Spec in detail the precise semantics of the callbacks. - For consistency in wording and easier review roll out kerneldoc also for crtc, encoder and connector for the standard hooks they share with planes. v2: Suggestions from Thierry. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-8-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- Documentation/DocBook/gpu.tmpl | 104 --------- include/drm/drm_crtc.h | 496 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 459 insertions(+), 141 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 4b5de720b8a8..79cdc12c1772 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -1278,15 +1278,6 @@ int max_width, max_height; Miscellaneous - - void (*set_property)(struct drm_crtc *crtc, - struct drm_property *property, uint64_t value); - - Set the value of the given CRTC property to - value. See - for more information about properties. - - void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size); @@ -1294,13 +1285,6 @@ int max_width, max_height; Apply a gamma table to the device. The operation is optional. - - void (*destroy)(struct drm_crtc *crtc); - - Destroy the CRTC when not needed anymore. See - . - - @@ -1357,52 +1341,6 @@ int max_width, max_height; primary plane with standard capabilities. 
- - Plane Operations - - - int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); - - Enable and configure the plane to use the given CRTC and frame buffer. - - - The source rectangle in frame buffer memory coordinates is given by - the src_x, src_y, - src_w and src_h - parameters (as 16.16 fixed point values). Devices that don't support - subpixel plane coordinates can ignore the fractional part. - - - The destination rectangle in CRTC coordinates is given by the - crtc_x, crtc_y, - crtc_w and crtc_h - parameters (as integer values). Devices scale the source rectangle to - the destination rectangle. If scaling is not supported, and the source - rectangle size doesn't match the destination rectangle size, the - driver must return a -EINVAL error. - - - - int (*disable_plane)(struct drm_plane *plane); - - Disable the plane. The DRM core calls this method in response to a - DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0. - Disabled planes must not be processed by the CRTC. - - - - void (*destroy)(struct drm_plane *plane); - - Destroy the plane when not needed anymore. See - . - - - - Encoders (struct <structname>drm_encoder</structname>) @@ -1459,27 +1397,6 @@ int max_width, max_height; encoders they want to use to a CRTC. - - Encoder Operations - - - void (*destroy)(struct drm_encoder *encoder); - - Called to destroy the encoder when not needed anymore. See - . - - - - void (*set_property)(struct drm_plane *plane, - struct drm_property *property, uint64_t value); - - Set the value of the given plane property to - value. See - for more information about properties. - - - - Connectors (struct <structname>drm_connector</structname>) @@ -1683,27 +1600,6 @@ int max_width, max_height; connector_status_unknown. - - Miscellaneous - - - void (*set_property)(struct drm_connector *connector, - struct drm_property *property, uint64_t value); - - Set the value of the given connector property to - value. See - for more information about properties. - - - - void (*destroy)(struct drm_connector *connector); - - Destroy the connector when not needed anymore. See - . - - - - diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 7f80fae1a45f..1bcfa094af16 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -322,21 +322,12 @@ struct drm_crtc_state { * struct drm_crtc_funcs - control CRTCs for a given device * @save: save CRTC state * @restore: restore CRTC state - * @reset: reset CRTC after state has been invalidated (e.g. resume) * @cursor_set: setup the cursor * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set * @cursor_move: move the cursor * @gamma_set: specify color ramp for CRTC - * @destroy: deinit and free object - * @set_property: called when a property is changed * @set_config: apply a new CRTC configuration * @page_flip: initiate a page flip - * @atomic_duplicate_state: duplicate the atomic state for this CRTC - * @atomic_destroy_state: destroy an atomic state for this CRTC - * @atomic_set_property: set a property on an atomic state for this CRTC - * (do not call directly, use drm_atomic_crtc_set_property()) - * @atomic_get_property: get a property on an atomic state for this CRTC - * (do not call directly, use drm_atomic_crtc_get_property()) * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. 
Each CRTC controls one or more connectors (note that the name @@ -352,7 +343,17 @@ struct drm_crtc_funcs { void (*save)(struct drm_crtc *crtc); /* suspend? */ /* Restore CRTC state */ void (*restore)(struct drm_crtc *crtc); /* resume? */ - /* Reset CRTC state */ + + /** + * @reset: + * + * Reset CRTC hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_crtc *crtc); /* cursor controls */ @@ -366,7 +367,14 @@ struct drm_crtc_funcs { /* Set gamma on the CRTC */ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size); - /* Object destroy routine */ + + /** + * @destroy: + * + * Clean up CRTC resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged + * in DRM. + */ void (*destroy)(struct drm_crtc *crtc); int (*set_config)(struct drm_mode_set *set); @@ -385,17 +393,129 @@ struct drm_crtc_funcs { struct drm_pending_vblank_event *event, uint32_t flags); + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_crtc_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this CRTC and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_crtc_state should use + * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before crtc->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_crtc *crtc, struct drm_crtc_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure.
Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_crtc_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which should never happen, the core only + * asks for properties attached to this CRTC). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val); + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCRTC ioctl. + * + * Do not call this function directly, use + * drm_atomic_crtc_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this CRTC). + */ int (*atomic_get_property)(struct drm_crtc *crtc, const struct drm_crtc_state *state, struct drm_property *property, @@ -507,18 +627,9 @@ struct drm_connector_state { * @dpms: set power state * @save: save connector state * @restore: restore connector state - * @reset: reset connector after state has been invalidated (e.g. resume) * @detect: is this connector active? * @fill_modes: fill mode list for this connector - * @set_property: property for this connector may need an update - * @destroy: make object go away * @force: notify the driver that the connector is forced on - * @atomic_duplicate_state: duplicate the atomic state for this connector - * @atomic_destroy_state: destroy an atomic state for this connector - * @atomic_set_property: set a property on an atomic state for this connector - * (do not call directly, use drm_atomic_connector_set_property()) - * @atomic_get_property: get a property on an atomic state for this connector - * (do not call directly, use drm_atomic_connector_get_property()) * * Each CRTC may have one or more connectors attached to it. 
The functions * below allow the core DRM code to control connectors, enumerate available modes, * etc. */ struct drm_connector_funcs { int (*dpms)(struct drm_connector *connector, int mode); void (*save)(struct drm_connector *connector); void (*restore)(struct drm_connector *connector); + + /** + * @reset: + * + * Reset connector hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_connector_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_connector *connector); /* Check to see if anything is attached to the connector. @@ -539,19 +661,142 @@ struct drm_connector_funcs { enum drm_connector_status (*detect)(struct drm_connector *connector, bool force); int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * connector. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_connector *connector, struct drm_property *property, uint64_t val); + + /** + * @destroy: + * + * Clean up connector resources. This is called at driver unload time + * through drm_mode_config_cleanup(). It can also be called at runtime + * when a connector is being hot-unplugged for drivers that support + * connector hotplugging (e.g. DisplayPort MST). + */ void (*destroy)(struct drm_connector *connector); void (*force)(struct drm_connector *connector); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this connector and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_connector_state should use + * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before connector->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed.
+ */ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_connector *connector, struct drm_connector_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_connector_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this connector). No other validation + * is allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCONNECTOR ioctl. + * + * Do not call this function directly, use + * drm_atomic_connector_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which shouldn't ever happen, the core only asks for + * properties attached to this connector). + */ int (*atomic_get_property)(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, @@ -560,13 +805,26 @@ struct drm_connector_funcs { /** * struct drm_encoder_funcs - encoder controls - * @reset: reset state (e.g. at init or resume time) - * @destroy: cleanup and free associated data * * Encoders sit between CRTCs and connectors. */ struct drm_encoder_funcs { + /** + * @reset: + * + * Reset encoder hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). 
+ * It's not a helper hook only for historical reasons. + */ void (*reset)(struct drm_encoder *encoder); + + /** + * @destroy: + * + * Clean up encoder resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since an encoder cannot be + * hotplugged in DRM. + */ void (*destroy)(struct drm_encoder *encoder); }; @@ -787,40 +1045,203 @@ struct drm_plane_state { /** * struct drm_plane_funcs - driver plane control functions - * @update_plane: update the plane configuration - * @disable_plane: shut down the plane - * @destroy: clean up plane resources - * @reset: reset plane after state has been invalidated (e.g. resume) - * @set_property: called when a property is changed - * @atomic_duplicate_state: duplicate the atomic state for this plane - * @atomic_destroy_state: destroy an atomic state for this plane - * @atomic_set_property: set a property on an atomic state for this plane - * (do not call directly, use drm_atomic_plane_set_property()) - * @atomic_get_property: get a property on an atomic state for this plane - * (do not call directly, use drm_atomic_plane_get_property()) */ struct drm_plane_funcs { + /** + * @update_plane: + * + * This is the legacy entry point to enable and configure the plane for + * the given CRTC and framebuffer. It is never called to disable the + * plane, i.e. the passed-in crtc and fb parameters are never NULL. + * + * The source rectangle in frame buffer memory coordinates is given by + * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point + * values). Devices that don't support subpixel plane coordinates can + * ignore the fractional part. + * + * The destination rectangle in CRTC coordinates is given by the + * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). + * Devices scale the source rectangle to the destination rectangle. If + * scaling is not supported, and the source rectangle size doesn't match + * the destination rectangle size, the driver must return a + * -EINVAL error. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_update_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); + + /** + * @disable_plane: + * + * This is the legacy entry point to disable the plane. The DRM core + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE ioctl call + * with the frame buffer ID set to 0. Disabled planes must not be + * processed by the CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_disable_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*disable_plane)(struct drm_plane *plane); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a plane cannot be hotplugged + * in DRM. + */ void (*destroy)(struct drm_plane *plane); + + /** + * @reset: + * + * Reset plane hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_plane_reset() to reset + * atomic state using this hook.
+ */ void (*reset)(struct drm_plane *plane); + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * plane. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_plane_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_plane *plane, struct drm_property *property, uint64_t val); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this plane and return it. + * The core and helpers guarantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_plane_state should use + * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before plane->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_plane *plane, struct drm_plane_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_plane_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent).
Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this plane). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETPLANE ioctl. + * + * Do not call this function directly, use + * drm_atomic_plane_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this plane). + */ int (*atomic_get_property)(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, @@ -833,6 +1254,7 @@ enum drm_plane_type { DRM_PLANE_TYPE_CURSOR, }; + /** * struct drm_plane - central DRM plane control structure * @dev: DRM device this plane belongs to @@ -907,7 +1329,7 @@ struct drm_bridge_funcs { * can be aborted for any reason (including on userspace's request to * just check whether a configuration would be possible). Drivers MUST * NOT touch any persistent state (hardware or software) or data - * structures except the passed in adjusted_mode parameter. + * structures except the passed in @state parameter. * * RETURNS: * -- cgit v1.2.3 From 813b0f3e00d402279718403aeb621c965f97180e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:55 +0100 Subject: drm: Remove crtc/connector->save/restore hooks They're not how system suspend/resume should be done with atomic (there's new helpers for that developed by Thierry Reding), and for legacy drivers this really should be a helper hook and not a core one. But there's not even helper code to use them, and only 2 drivers (which now have their own private hooks) set them. Ditch them. Saves me typing some kerneldoc, too ;-) Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-15-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_crtc.h | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 1bcfa094af16..e75b06b61143 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -320,8 +320,6 @@ struct drm_crtc_state { /** * struct drm_crtc_funcs - control CRTCs for a given device - * @save: save CRTC state - * @restore: restore CRTC state * @cursor_set: setup the cursor * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set * @cursor_move: move the cursor @@ -339,11 +337,6 @@ struct drm_crtc_state { * bus accessors. */ struct drm_crtc_funcs { - /* Save CRTC state */ - void (*save)(struct drm_crtc *crtc); /* suspend? */ - /* Restore CRTC state */ - void (*restore)(struct drm_crtc *crtc); /* resume? 
*/ - @@ -625,8 +618,6 @@ struct drm_connector_state { /** * struct drm_connector_funcs - control connectors on a given device * @dpms: set power state - * @save: save connector state - * @restore: restore connector state * @detect: is this connector active? * @fill_modes: fill mode list for this connector * @force: notify the driver that the connector is forced on @@ -637,8 +628,6 @@ struct drm_connector_state { */ struct drm_connector_funcs { int (*dpms)(struct drm_connector *connector, int mode); - void (*save)(struct drm_connector *connector); - void (*restore)(struct drm_connector *connector); /** * @reset: -- cgit v1.2.3 From 129b782008a537a0d222d9e23b39d78a323ed595 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 17:14:07 +0100 Subject: drm: Move encoder->save/restore into nouveau Nouveau is the only user, and atomic drivers should do state save/restoring differently. So move it into nouveau. Saves me typing some kerneldoc, too ;-) v2: Move misplaced hunk into earlier nouveau patch. Cc: Ilia Mirkin Cc: Ben Skeggs Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449245647-1315-1-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- drivers/gpu/drm/nouveau/dispnv04/dac.c | 7 +++---- drivers/gpu/drm/nouveau/dispnv04/dfp.c | 7 +++---- drivers/gpu/drm/nouveau/dispnv04/disp.c | 32 ++++++++++++------------------- drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 5 +++-- drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 5 +++-- drivers/gpu/drm/nouveau/nouveau_encoder.h | 3 +++ include/drm/drm_modeset_helper_vtables.h | 4 ---- 7 files changed, 27 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index 78cb033bc015..6c442def403d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -504,8 +504,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = { .dpms = nv04_dac_dpms, - .save = nv04_dac_save, - .restore = nv04_dac_restore, .mode_fixup = nv04_dac_mode_fixup, .prepare = nv04_dac_prepare, .commit = nv04_dac_commit, @@ -515,8 +513,6 @@ static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = { static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = { .dpms = nv04_dac_dpms, - .save = nv04_dac_save, - .restore = nv04_dac_restore, .mode_fixup = nv04_dac_mode_fixup, .prepare = nv04_dac_prepare, .commit = nv04_dac_commit, @@ -545,6 +541,9 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry) nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; + nv_encoder->enc_save = nv04_dac_save; + nv_encoder->enc_restore = nv04_dac_restore; + if (nv_gf4_disp_arch(dev)) helper = &nv17_dac_helper_funcs; else diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 429ab5e3025a..4c5fb89d74db 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -652,8 +652,6 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { .dpms = nv04_lvds_dpms, - .save = nv04_dfp_save, - .restore = nv04_dfp_restore, .mode_fixup = nv04_dfp_mode_fixup, .prepare = nv04_dfp_prepare, .commit = nv04_dfp_commit, @@ -663,8 +661,6 @@ static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { static const struct
drm_encoder_helper_funcs nv04_tmds_helper_funcs = { .dpms = nv04_tmds_dpms, - .save = nv04_dfp_save, - .restore = nv04_dfp_restore, .mode_fixup = nv04_dfp_mode_fixup, .prepare = nv04_dfp_prepare, .commit = nv04_dfp_commit, @@ -701,6 +697,9 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry) if (!nv_encoder) return -ENOMEM; + nv_encoder->enc_save = nv04_dfp_save; + nv_encoder->enc_restore = nv04_dfp_restore; + encoder = to_drm_encoder(nv_encoder); nv_encoder->dcb = entry; diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 59242ff767ea..b4a6bc433ef5 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -39,6 +39,7 @@ nv04_display_create(struct drm_device *dev) struct dcb_table *dcb = &drm->vbios.dcb; struct drm_connector *connector, *ct; struct drm_encoder *encoder; + struct nouveau_encoder *nv_encoder; struct nouveau_crtc *crtc; struct nv04_display *disp; int i, ret; @@ -110,11 +111,8 @@ nv04_display_create(struct drm_device *dev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) crtc->save(&crtc->base); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - const struct drm_encoder_helper_funcs *func = encoder->helper_private; - - func->save(encoder); - } + list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list, base.base.head) + nv_encoder->enc_save(&nv_encoder->base.base); nouveau_overlay_init(dev); @@ -126,7 +124,7 @@ nv04_display_destroy(struct drm_device *dev) { struct nv04_display *disp = nv04_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct drm_encoder *encoder; + struct nouveau_encoder *encoder; struct drm_crtc *crtc; struct nouveau_crtc *nv_crtc; @@ -140,11 +138,8 @@ nv04_display_destroy(struct drm_device *dev) } /* Restore state */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - const struct drm_encoder_helper_funcs *func = encoder->helper_private; - - func->restore(encoder); - } + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head) + encoder->enc_restore(&encoder->base.base); list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head) nv_crtc->restore(&nv_crtc->base); @@ -160,8 +155,8 @@ nv04_display_destroy(struct drm_device *dev) int nv04_display_init(struct drm_device *dev) { - struct drm_encoder *encoder; - struct drm_crtc *crtc; + struct nouveau_encoder *encoder; + struct nouveau_crtc *crtc; /* meh.. modeset apparently doesn't setup all the regs and depends * on pre-existing state, for now load the state of the card *before* @@ -171,14 +166,11 @@ nv04_display_init(struct drm_device *dev) * save/restore "pre-load" state, but more general so we can save * on suspend too. 
*/ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - const struct drm_encoder_helper_funcs *func = encoder->helper_private; - - func->restore(encoder); - } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) + crtc->save(&crtc->base); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - crtc->funcs->restore(crtc); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head) + encoder->enc_save(&encoder->base.base); return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 5345eb5378a8..91d689400d2e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -192,8 +192,6 @@ static const struct drm_encoder_funcs nv04_tv_funcs = { static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = { .dpms = nv04_tv_dpms, - .save = drm_i2c_encoder_save, - .restore = drm_i2c_encoder_restore, .mode_fixup = drm_i2c_encoder_mode_fixup, .prepare = nv04_tv_prepare, .commit = nv04_tv_commit, @@ -228,6 +226,9 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs); + nv_encoder->enc_save = drm_i2c_encoder_save; + nv_encoder->enc_restore = drm_i2c_encoder_restore; + encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; nv_encoder->dcb = entry; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index b734195d80a0..ff8c55866b18 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -771,8 +771,6 @@ static void nv17_tv_destroy(struct drm_encoder *encoder) static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { .dpms = nv17_tv_dpms, - .save = nv17_tv_save, - .restore = nv17_tv_restore, .mode_fixup = nv17_tv_mode_fixup, .prepare = nv17_tv_prepare, .commit = nv17_tv_commit, @@ -820,6 +818,9 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry) drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; + tv_enc->base.enc_save = nv17_tv_save; + tv_enc->base.enc_restore = nv17_tv_restore; + encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index b37da95105b0..c38a86408363 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -63,6 +63,9 @@ struct nouveau_encoder { u32 datarate; } dp; }; + + void (*enc_save)(struct drm_encoder *encoder); + void (*enc_restore)(struct drm_encoder *encoder); }; struct nouveau_encoder * diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 875809158fc4..56dadfe7a181 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -131,8 +131,6 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc, /** * struct drm_encoder_helper_funcs - helper operations for encoders * @dpms: set power state - * @save: save connector state - * @restore: restore connector state * @mode_fixup: try to fixup proposed mode for this connector * @prepare: part of the disable sequence, called before the CRTC modeset * @commit: called after the CRTC modeset @@ -154,8 +152,6 @@ static inline void drm_crtc_helper_add(struct 
drm_crtc *crtc, */ struct drm_encoder_helper_funcs { void (*dpms)(struct drm_encoder *encoder, int mode); - void (*save)(struct drm_encoder *encoder); - void (*restore)(struct drm_encoder *encoder); bool (*mode_fixup)(struct drm_encoder *encoder, const struct drm_display_mode *mode, -- cgit v1.2.3 From 6fe14acd496e7ca27d6dfbeaa71b4f16656d10c2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:45:58 +0100 Subject: drm: Document drm_connector_funcs The special case here is that both ->detect and ->force are actually functions only called by the probe helpers and hence really shouldn't be here. But since they're used by pretty much every driver I figured it's better to just document this for now instead of holding this doc patch hostage until that's all fixed. For that reason, also group force right next to detect. v2: Use FIXME comments to annotate where we should move a hook to helpers. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-18-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_crtc.h | 84 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 74 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index e75b06b61143..819eb45821cd 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -617,16 +617,28 @@ struct drm_connector_state { /** * struct drm_connector_funcs - control connectors on a given device - * @dpms: set power state - * @detect: is this connector active? - * @fill_modes: fill mode list for this connector - * @force: notify the driver that the connector is forced on * * Each CRTC may have one or more connectors attached to it. The functions * below allow the core DRM code to control connectors, enumerate available modes, * etc. */ struct drm_connector_funcs { + /** + * @dpms: + * + * Legacy entry point to set the per-connector DPMS state. Legacy DPMS + * is exposed as a standard property on the connector, but diverted to + * this callback in the drm core. Note that atomic drivers don't + * implement the 4 level DPMS support on the connector any more, but + * instead only have an on/off "ACTIVE" property on the CRTC object. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_dpms() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*dpms)(struct drm_connector *connector, int mode); /** @@ -641,14 +653,67 @@ struct drm_connector_funcs { */ void (*reset)(struct drm_connector *connector); - /* Check to see if anything is attached to the connector. - * @force is set to false whilst polling, true when checking the - * connector due to user request. @force can be used by the driver - * to avoid expensive, destructive operations during automated - * probing. + /** + * @detect: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + * + * RETURNS: + * + * drm_connector_status indicating the connector's status.
*/ enum drm_connector_status (*detect)(struct drm_connector *connector, bool force); + + /** + * @force: + * + * This function is called to update internal encoder state when the + * connector is forced to a certain state by userspace, either through + * the sysfs interfaces or on the kernel cmdline. In that case the + * @detect callback isn't called. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + */ + void (*force)(struct drm_connector *connector); + + /** + * @fill_modes: + * + * Entry point for output detection and basic mode validation. The + * driver should reprobe the output if needed (e.g. when hotplug + * handling is unreliable), add all detected modes to connector->modes + * and filter out any the device can't support in any configuration. It + * also needs to filter out any modes wider or higher than the + * parameters max_width and max_height indicate. + * + * The drivers must also prune any modes no longer valid from + * connector->modes. Furthermore it must update connector->status and + * connector->edid. If no EDID has been received for this output, + * connector->edid must be NULL. + * + * Drivers using the probe helpers should use + * drm_helper_probe_single_connector_modes() or + * drm_helper_probe_single_connector_modes_nomerge() to implement this + * function. + * + * RETURNS: + * + * The number of modes detected and filled into connector->modes. + */ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); /** @@ -679,7 +744,6 @@ struct drm_connector_funcs { * connector hotplugging (e.g. DisplayPort MST). */ void (*destroy)(struct drm_connector *connector); - void (*force)(struct drm_connector *connector); /** * @atomic_duplicate_state: -- cgit v1.2.3 From f6da8c6e5ee378c82f20ad07f84556befc7b58e9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:46:00 +0100 Subject: drm: document drm_crtc_funcs And merge any docbook we have into the kerneldoc comments. Since it's a legacy entry point with only two implementations (one each in atomic and legacy crtc helpers) I've made the documentation for set_config fairly sparse - no one should ever need to look at this again, all the ABI we have is baked into code. For ->page_flip otoh I kept all the extensive docs from the docbook and even extended it where it was lacking: Currently we have a pile of legacy page_flip implementations, and even for atomic drivers there's not yet a standard implementation in the helpers. Which means every driver needs to implement this itself, and precise specs are really valuable. Otherwise there's just cursor, which really just boils down to "use at least universal planes". And gamma tables (where we have a bit of a mess with the fbdev helper gamma hooks). v2: Spelling fixes (Eric). v3: Suggestions from Thierry.
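To make the drm_connector_funcs documentation above concrete, here is a rough sketch of how an atomic driver might populate the vtable with the helpers named in those comments; foo_connector_detect is hypothetical, the drm_* helpers are the ones referenced above:

	/* Hypothetical connector setup, for illustration only. */
	static enum drm_connector_status
	foo_connector_detect(struct drm_connector *connector, bool force)
	{
		/*
		 * Keep this cheap and non-destructive while polling; expensive
		 * probing should only happen when force is true (user request).
		 */
		return connector_status_connected;
	}

	static const struct drm_connector_funcs foo_connector_funcs = {
		.dpms = drm_atomic_helper_connector_dpms,
		.detect = foo_connector_detect,
		.fill_modes = drm_helper_probe_single_connector_modes,
		.destroy = drm_connector_cleanup,
		.reset = drm_atomic_helper_connector_reset,
		.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	};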
Cc: Eric Anholt Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-20-git-send-email-daniel.vetter@ffwll.ch --- Documentation/DocBook/gpu.tmpl | 117 +---------------------------- include/drm/drm_crtc.h | 164 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 149 insertions(+), 132 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index be06c9b8a0a7..0ca010da4bdf 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -1174,121 +1174,6 @@ int max_width, max_height; pointer to CRTC functions. - - CRTC Operations - - Set Configuration - int (*set_config)(struct drm_mode_set *set); - - Apply a new CRTC configuration to the device. The configuration - specifies a CRTC, a frame buffer to scan out from, a (x,y) position in - the frame buffer, a display mode and an array of connectors to drive - with the CRTC if possible. - - - If the frame buffer specified in the configuration is NULL, the driver - must detach all encoders connected to the CRTC and all connectors - attached to those encoders and disable them. - - - This operation is called with the mode config lock held. - - - Note that the drm core has no notion of restoring the mode setting - state after resume, since all resume handling is in the full - responsibility of the driver. The common mode setting helper library - though provides a helper which can be used for this: - drm_helper_resume_force_mode. - - - - Page Flipping - int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event); - - Schedule a page flip to the given frame buffer for the CRTC. This - operation is called with the mode config mutex held. - - - Page flipping is a synchronization mechanism that replaces the frame - buffer being scanned out by the CRTC with a new frame buffer during - vertical blanking, avoiding tearing. When an application requests a page - flip the DRM core verifies that the new frame buffer is large enough to - be scanned out by the CRTC in the currently configured mode and then - calls the CRTC page_flip operation with a - pointer to the new frame buffer. - - - The page_flip operation schedules a page flip. - Once any pending rendering targeting the new frame buffer has - completed, the CRTC will be reprogrammed to display that frame buffer - after the next vertical refresh. The operation must return immediately - without waiting for rendering or page flip to complete and must block - any new rendering to the frame buffer until the page flip completes. - - - If a page flip can be successfully scheduled the driver must set the - drm_crtc->fb field to the new framebuffer pointed to - by fb. This is important so that the reference counting - on framebuffers stays balanced. - - - If a page flip is already pending, the - page_flip operation must return - -EBUSY. - - - To synchronize page flip to vertical blanking the driver will likely - need to enable vertical blanking interrupts. It should call - drm_vblank_get for that purpose, and call - drm_vblank_put after the page flip completes. - - - If the application has requested to be notified when page flip completes - the page_flip operation will be called with a - non-NULL event argument pointing to a - drm_pending_vblank_event instance. Upon page - flip completion the driver must call drm_send_vblank_event - to fill in the event and send to wake up any waiting processes. 
- This can be performed with - event_lock, flags); - ... - drm_send_vblank_event(dev, pipe, event); - spin_unlock_irqrestore(&dev->event_lock, flags); - ]]> - - - FIXME: Could drivers that don't need to wait for rendering to complete - just add the event to dev->vblank_event_list and - let the DRM core handle everything, as for "normal" vertical blanking - events? - - - While waiting for the page flip to complete, the - event->base.link list head can be used freely by - the driver to store the pending event in a driver-specific list. - - - If the file handle is closed before the event is signaled, drivers must - take care to destroy the event in their - preclose operation (and, if needed, call - drm_vblank_put). - - - - Miscellaneous - - - void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); - - Apply a gamma table to the device. The operation is optional. - - - - - Planes (struct <structname>drm_plane</structname>) @@ -1305,7 +1190,7 @@ int max_width, max_height; DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary planes are the planes operated upon by CRTC modesetting and flipping - operations described in . + operations described in the page_flip hook in drm_crtc_funcs. DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 819eb45821cd..44a98082d409 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -320,12 +320,6 @@ struct drm_crtc_state { /** * struct drm_crtc_funcs - control CRTCs for a given device - * @cursor_set: setup the cursor - * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set - * @cursor_move: move the cursor - * @gamma_set: specify color ramp for CRTC - * @set_config: apply a new CRTC configuration - * @page_flip: initiate a page flip * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. Each CRTC controls one or more connectors (note that the name @@ -349,15 +343,86 @@ struct drm_crtc_funcs { */ void (*reset)(struct drm_crtc *crtc); - /* cursor controls */ + /** + * @cursor_set: + * + * Update the cursor image. The cursor position is relative to the CRTC + * and can be partially or fully outside of the visible area. + * + * Note that contrary to all other KMS functions the legacy cursor entry + * points don't take a framebuffer object, but instead take directly a + * raw buffer object id from the driver's buffer manager (which is + * either GEM or TTM for current drivers). + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height); + + /** + * @cursor_set2: + * + * Update the cursor image, including hotspot information. The hotspot + * must not affect the cursor position in CRTC coordinates, but is only + * meant as a hint for virtualized display hardware to coordinate the + * guests and hosts cursor position. The cursor hotspot is relative to + * the cursor image. Otherwise this works exactly like @cursor_set. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). 
+ * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); + + /** + * @cursor_move: + * + * Update the cursor position. The cursor does not need to be visible + * when this hook is called. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_move)(struct drm_crtc *crtc, int x, int y); - /* Set gamma on the CRTC */ + /** + * @gamma_set: + * + * Set gamma on the CRTC. + * + * This callback is optional. + * + * NOTE: + * + * Drivers that support gamma tables and also fbdev emulation through + * the provided helper library need to take care to fill out the gamma + * hooks for both. Currently there's a bit an unfortunate duplication + * going on, which should eventually be unified to just one set of + * hooks. + */ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size); @@ -370,16 +435,83 @@ struct drm_crtc_funcs { */ void (*destroy)(struct drm_crtc *crtc); + /** + * @set_config: + * + * This is the main legacy entry point to change the modeset state on a + * CRTC. All the details of the desired configuration are passed in a + * struct &drm_mode_set - see there for details. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_set_config() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_config)(struct drm_mode_set *set); - /* - * Flip to the given framebuffer. This implements the page - * flip ioctl described in drm_mode.h, specifically, the - * implementation must return immediately and block all - * rendering to the current fb until the flip has completed. - * If userspace set the event flag in the ioctl, the event - * argument will point to an event to send back when the flip - * completes, otherwise it will be NULL. + /** + * @page_flip: + * + * Legacy entry point to schedule a flip to the given framebuffer. + * + * Page flipping is a synchronization mechanism that replaces the frame + * buffer being scanned out by the CRTC with a new frame buffer during + * vertical blanking, avoiding tearing (except when requested otherwise + * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application + * requests a page flip the DRM core verifies that the new frame buffer + * is large enough to be scanned out by the CRTC in the currently + * configured mode and then calls the CRTC ->page_flip() operation with a + * pointer to the new frame buffer. + * + * The driver must wait for any pending rendering to the new framebuffer + * to complete before executing the flip. It should also wait for any + * pending rendering from other drivers if the underlying buffer is a + * shared dma-buf. + * + * An application can request to be notified when the page flip has + * completed. The drm core will supply a struct &drm_event in the event + * parameter in this case. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the flip. 
Note that if + the driver supports vblank signalling and timestamping the vblank + counters and timestamps must agree with the ones returned from page + flip events. With the current vblank helper infrastructure this can + be achieved by holding a vblank reference while the page flip is + pending, acquired through drm_crtc_vblank_get() and released with + drm_crtc_vblank_put(). Drivers are free to implement their own vblank + counter and timestamp tracking though, e.g. if they have accurate + timestamp registers in hardware. + * + * FIXME: + * + * Up to that point drivers need to manage events themselves and can use + * event->base.list freely for that. Specifically they need to ensure + * that they don't send out page flip (or vblank) events for which the + * corresponding drm file has been closed already. The drm core + * unfortunately does not (yet) take care of that. Therefore drivers + * currently must clean up and release pending events in their + * ->preclose driver function. + * + * This callback is optional. + * + * NOTE: + * + * Very early versions of the KMS ABI mandated that the driver must + * block (but not reject) any rendering to the old framebuffer until the + * flip operation has completed and the old framebuffer is no longer + * visible. This requirement has been lifted, and userspace is instead + * expected to request delivery of an event and wait with recycling old + * buffers until such has been received. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. Note that if a + * ->page_flip() operation is already pending the callback should return + * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode + * or just runtime disabled through DPMS respectively the new atomic + * "ACTIVE" state) should result in an -EINVAL error code. */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, -- cgit v1.2.3 From c6b0ca3ea8366d1954c7825d0f67bc833b8e10df Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:46:01 +0100 Subject: drm: Add kerneldoc for drm_framebuffer_funcs While typing these I noticed that ->dirty is a bit of a can of worms and even supports blt/fill semantics ... shocked me a bit. Oh well it's defined in a way that nothing bad (just a bit of inefficiency) will happen for drivers which support this. So I didn't bother copying the detailed spec into the new kerneldoc. v2: Suggestions from Thierry. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-21-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_crtc.h | 68 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 44a98082d409..f09391f9cac0 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -162,23 +162,55 @@ struct drm_tile_group { u8 group_data[8]; }; +/** + * struct drm_framebuffer_funcs - framebuffer hooks + */ struct drm_framebuffer_funcs { - /* note: use drm_framebuffer_remove() */ + /** + * @destroy: + * + * Clean up framebuffer resources, specifically also unreference the + * backing storage. The core guarantees to call this function for every + * framebuffer successfully created by ->fb_create() in + * &drm_mode_config_funcs.
+ */ void (*destroy)(struct drm_framebuffer *framebuffer); + + /** + * @create_handle: + * + * Create a buffer handle in the driver-specific buffer manager (either + * GEM or TTM) valid for the passed-in struct &drm_file. This is used by + * the core to implement the GETFB IOCTL, which returns (for a + * sufficiently privileged user) also a native buffer handle. This can + * be used for seamless transitions between modesetting clients by + * copying the current screen contents to a private buffer and blending + * between that and the new contents. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); - /* - * Optional callback for the dirty fb ioctl. + /** + * @dirty: * - * Userspace can notify the driver via this callback - * that a area of the framebuffer has changed and should - * be flushed to the display hardware. + * Optional callback for the dirty fb IOCTL. * - * See documentation in drm_mode.h for the struct - * drm_mode_fb_dirty_cmd for more information as all - * the semantics and arguments have a one to one mapping - * on this function. + * Userspace can notify the driver via this callback that an area of the + * framebuffer has changed and should be flushed to the display + * hardware. This can also be used internally, e.g. by the fbdev + * emulation, though that's not the case currently. + * + * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd + * for more information as all the semantics and arguments have a one to + * one mapping on this function. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. */ int (*dirty)(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, @@ -627,7 +659,7 @@ struct drm_crtc_funcs { * @atomic_get_property: * * Reads out the decoded driver-private property. This is used to - * implement the GETCRTC ioctl. + * implement the GETCRTC IOCTL. * * Do not call this function directly, use * drm_atomic_crtc_get_property() instead. @@ -670,7 +702,7 @@ struct drm_crtc_funcs { * @properties: property tracking for this CRTC * @state: current atomic state for this CRTC * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for - * legacy ioctls + * legacy IOCTLs * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. @@ -724,7 +756,7 @@ struct drm_crtc { struct drm_crtc_state *state; /* - * For legacy crtc ioctls so that atomic drivers can get at the locking + * For legacy crtc IOCTLs so that atomic drivers can get at the locking * acquire context. */ struct drm_modeset_acquire_ctx *acquire_ctx; @@ -968,7 +1000,7 @@ struct drm_connector_funcs { * @atomic_get_property: * * Reads out the decoded driver-private property. This is used to - * implement the GETCONNECTOR ioctl. + * implement the GETCONNECTOR IOCTL. * * Do not call this function directly, use * drm_atomic_connector_get_property() instead. @@ -1269,7 +1301,7 @@ struct drm_plane_funcs { * @disable_plane: * * This is the legacy entry point to disable the plane. The DRM core - * calls this method in response to a DRM_IOCTL_MODE_SETPLANE ioctl call + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call * with the frame buffer ID set to 0. Disabled planes must not be * processed by the CRTC. * @@ -1413,7 +1445,7 @@ struct drm_plane_funcs { * @atomic_get_property: * * Reads out the decoded driver-private property.
This is used to - * implement the GETPLANE ioctl. + * implement the GETPLANE IOCTL. * * Do not call this function directly, use * drm_atomic_plane_get_property() instead. @@ -1628,7 +1660,7 @@ struct drm_bridge { * struct drm_atomic_state - the global state object for atomic updates * @dev: parent DRM device * @allow_modeset: allow full modeset - * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics + * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics * @planes: pointer to array of plane pointers * @plane_states: pointer to array of plane states pointers * @crtcs: pointer to array of CRTC pointers @@ -1716,7 +1748,7 @@ struct drm_mode_config_funcs { * @mutex: mutex protecting KMS related lists and structures * @connection_mutex: ww mutex protecting connector state and routing * @acquire_ctx: global implicit acquire context used by atomic drivers for - * legacy ioctls + * legacy IOCTLs * @idr_mutex: mutex for KMS ID allocation and management * @crtc_idr: main KMS ID tracking object * @fb_lock: mutex to protect fb state and lists -- cgit v1.2.3 From 9953f41799bdad34c367196541a7a9a3b6e13a6c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:46:02 +0100 Subject: drm: Kerneldoc for drm_mode_config_funcs The meat here is definitely the detailed specs for what atomic_check and atomic_commit are supposed to do. And another candidate for a core vfunc that should be in a helper really (output_poll_changed this time around). v2: Feedback from Eric on irc: - spelling fixes. - spec what async should do - copy the event related paragraphs from page_flip and adjust - make it clear that a successful async commit is not allowed to leave the pipe dead or disabled. v3: Use FIXME comments to annotate functions that we should move to some helpers. v4: Suggestions from Thierry. Cc: Eric Anholt Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-22-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_crtc.h | 243 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 233 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f09391f9cac0..4f587a5bc88f 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1715,31 +1715,254 @@ struct drm_mode_set { /** * struct drm_mode_config_funcs - basic driver provided mode setting functions - * @fb_create: create a new framebuffer object - * @output_poll_changed: function to handle output configuration changes - * @atomic_check: check whether a given atomic state update is possible - * @atomic_commit: commit an atomic state update previously verified with - * atomic_check() - * @atomic_state_alloc: allocate a new atomic state - * @atomic_state_clear: clear the atomic state - * @atomic_state_free: free the atomic state * * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that * involve drivers. */ struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * struct &drm_mode_fb_cmd2 for details. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). 
+ */ struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation with the helpers can call + * drm_fb_helper_hotplug_event() from this hook to inform the fbdev + * helper of output changes. + * + * FIXME: + * + * Except that there's no vtable for device-level helper callbacks, + * there's no reason this is a core function. + */ void (*output_poll_changed)(struct drm_device *dev); + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). + * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. For various reasons it can make sense to expose + * more planes, crtcs or encoders than are physically there. One + * example is dual-pipe operations (which generally should be hidden + * from userspace when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * share a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). + * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. + * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the + * ->atomic_check() stage. + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. + * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock().
+ * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + */ int (*atomic_check)(struct drm_device *dev, - struct drm_atomic_state *a); + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Asynchronous commits (as indicated with the async parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. Asynchronous commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. + * + * The drm core will supply a struct &drm_event in the event + * member of each CRTC's &drm_crtc_state structure. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the atomic commit. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if an asynchronous update is requested and there is + * an earlier update pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that.
+ * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested; they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). + */ int (*atomic_commit)(struct drm_device *dev, - struct drm_atomic_state *a, + struct drm_atomic_state *state, bool async); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. + */ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_lock_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. + */ void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs to free driver-private resources and the &drm_atomic_state + * itself. Note that the core first calls drm_atomic_state_clear() to + * avoid code duplication between the clear and free hooks. + * + * Drivers that implement this must call drm_atomic_state_default_release() + * to release common resources. + */ void (*atomic_state_free)(struct drm_atomic_state *state); }; -- cgit v1.2.3 From 11a0ba972d55dd21d88b62e918d6cf072c9c67af Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:46:04 +0100 Subject: drm: Document drm_plane_helper_funcs Plus related hooks used to do atomic plane updates since they only really make sense as a package. v2: Suggestions from Thierry.
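As an aside to the @atomic_state_alloc/@atomic_state_clear/@atomic_state_free contract documented in the previous patch, the subclassing pattern it describes looks roughly like this in a driver (a hypothetical sketch modeled on how drivers such as i915 wire these hooks up; the foo_ names and the shared_pll_mask field are invented for illustration):

	#include <linux/slab.h>
	#include <drm/drm_atomic.h>

	struct foo_atomic_state {
		struct drm_atomic_state base;
		unsigned int shared_pll_mask;	/* invented driver-private state */
	};

	static struct drm_atomic_state *foo_atomic_state_alloc(struct drm_device *dev)
	{
		struct foo_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
			kfree(state);
			return NULL;
		}
		return &state->base;
	}

	static void foo_atomic_state_clear(struct drm_atomic_state *s)
	{
		struct foo_atomic_state *state =
			container_of(s, struct foo_atomic_state, base);

		/* mandatory, clears the duplicated common state */
		drm_atomic_state_default_clear(s);
		state->shared_pll_mask = 0;
	}

	static void foo_atomic_state_free(struct drm_atomic_state *s)
	{
		/* release common resources before freeing the subclass */
		drm_atomic_state_default_release(s);
		kfree(container_of(s, struct foo_atomic_state, base));
	}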
Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-24-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_modeset_helper_vtables.h | 210 +++++++++++++++++++++++++++++-- 1 file changed, 199 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 56dadfe7a181..c77b7dcf343b 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -62,9 +62,6 @@ enum mode_set_atomic; * @load_lut: load color palette * @disable: disable CRTC when no longer in use * @enable: enable CRTC - * @atomic_check: check for validity of an atomic state - * @atomic_begin: begin atomic update - * @atomic_flush: flush atomic update * * The helper operations are called by the mid-layer CRTC helper. * @@ -108,11 +105,96 @@ struct drm_crtc_helper_funcs { void (*disable)(struct drm_crtc *crtc); void (*enable)(struct drm_crtc *crtc); - /* atomic helpers */ + /** + * @atomic_check: + * + * Drivers should check plane-update related CRTC constraints in this + * hook. They can also check mode related limitations but need to be + * aware of the calling order, since this hook is used by + * drm_atomic_helper_check_planes() whereas the preparations needed to + * check output routing and the display mode is done in + * drm_atomic_helper_check_modeset(). Therefore drivers that want to + * check output routing and display mode constraints in this callback + * must ensure that drm_atomic_helper_check_modeset() has been called + * beforehand. This is calling order used by the default helper + * implementation in drm_atomic_helper_check(). + * + * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check() + * hooks are called after the ones for planes, which allows drivers to + * assign shared resources requested by planes in the CRTC callback + * here. For more complicated dependencies the driver can call the provided + * check helpers multiple times until the computed state has a final + * configuration and everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check&compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ int (*atomic_check)(struct drm_crtc *crtc, struct drm_crtc_state *state); + + /** + * @atomic_begin: + * + * Drivers should prepare for an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might be vblank + * evasion, blocking updates by setting bits or doing preparatory work + * for e.g. 
manual update display. + * + * This hook is called before any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ void (*atomic_begin)(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state); + /** + * @atomic_flush: + * + * Drivers should finalize an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might include + * checking that vblank evasion was successful, unblocking updates by + * setting bits or setting the GO bit to flush out all updates. + * + * Simple hardware or hardware with special requirements can commit and + * flush out all updates for all planes from this hook and forgo all the + * other commit hooks for plane updates. + * + * This hook is called after any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ void (*atomic_flush)(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state); }; @@ -216,25 +298,131 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, } /** - * struct drm_plane_helper_funcs - helper operations for CRTCs - * @prepare_fb: prepare a framebuffer for use by the plane - * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane - * @atomic_check: check that a given atomic state is valid and can be applied - * @atomic_update: apply an atomic state to the plane (mandatory) - * @atomic_disable: disable the plane + * struct drm_plane_helper_funcs - helper operations for planes * - * The helper operations are called by the mid-layer CRTC helper. + * These functions are used by the atomic helpers and by the transitional plane + * helpers. */ struct drm_plane_helper_funcs { + /** + * @prepare_fb: + * + * This hook is to prepare a framebuffer for scanout by e.g. pinning + * its backing storage or relocating it into a contiguous block of + * VRAM. Other possible preparatory work includes flushing caches. + * + * This function must not block for outstanding rendering, since it is + * called in the context of the atomic IOCTL even for async commits to + * be able to return any errors to userspace. Instead the recommended + * way is to fill out the fence member of the passed-in + * &drm_plane_state. If the driver doesn't support native fences then + * equivalent functionality should be implemented through private + * members in the plane structure. + * + * The helpers will call @cleanup_fb with matching arguments for every + * successful call to this hook. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * RETURNS: + * + * 0 on success or one of the following negative error codes allowed by + * the atomic_commit hook in &drm_mode_config_funcs.
When using helpers + * this callback is the only one which can fail an atomic commit, + * everything else must complete successfully. + */ int (*prepare_fb)(struct drm_plane *plane, const struct drm_plane_state *new_state); + /** + * @cleanup_fb: + * + * This hook is called to clean up any resources allocated for the given + * framebuffer and plane configuration in @prepare_fb. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ void (*cleanup_fb)(struct drm_plane *plane, const struct drm_plane_state *old_state); + /** + * @atomic_check: + * + * Drivers should check plane specific constraints in this hook. + * + * When using drm_atomic_helper_check_planes() plane's ->atomic_check() + * hooks are called before the ones for CRTCs, which allows drivers to + * request shared resources that the CRTC controls here. For more + * complicated dependencies the driver can call the provided check helpers + * multiple times until the computed state has a final configuration and + * everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check&compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ int (*atomic_check)(struct drm_plane *plane, struct drm_plane_state *state); + + /** + * @atomic_update: + * + * Drivers should use this function to update the plane state. This + * hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ void (*atomic_update)(struct drm_plane *plane, struct drm_plane_state *old_state); + /** + * @atomic_disable: + * + * Drivers should use this function to unconditionally disable a plane. + * This hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to + * @atomic_update, which will be called for disabling planes, too, if + * the @atomic_disable hook isn't implemented. + * + * This hook is also useful to disable planes in preparation of a modeset, + * by calling drm_atomic_helper_disable_planes_on_crtc() from the + * ->disable() hook in &drm_crtc_helper_funcs. 
+ * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ void (*atomic_disable)(struct drm_plane *plane, struct drm_plane_state *old_state); }; -- cgit v1.2.3 From 4ee6034c80c50b6e788f0f4291c3ad64debde390 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:46:05 +0100 Subject: drm: Document drm_connector_helper_funcs Nothing special, except the somewhat awkward split in probe helper callbacks between here and drm_crtc_funcs. v2: Suggestions from Thierry. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-25-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_modeset_helper_vtables.h | 106 +++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index c77b7dcf343b..4b3869bd49b5 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -270,18 +270,114 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder, /** * struct drm_connector_helper_funcs - helper operations for connectors - * @get_modes: get mode list for this connector - * @mode_valid: is this mode valid on the given connector? (optional) - * @best_encoder: return the preferred encoder for this connector - * @atomic_best_encoder: atomic version of @best_encoder * - * The helper operations are called by the mid-layer CRTC helper. + * These functions are used by the atomic and legacy modeset helpers and by the + * probe helpers. */ struct drm_connector_helper_funcs { + /** + * @get_modes: + * + * This function should fill in all modes currently valid for the sink + * into the connector->probed_modes list. It should also update the + * EDID property by calling drm_mode_connector_update_edid_property(). + * + * The usual way to implement this is to cache the EDID retrieved in the + * probe callback somewhere in the driver-private connector structure. + * In this function drivers then parse the modes in the EDID and add + * them by calling drm_add_edid_modes(). But connectors that drive a + * fixed panel can also manually add specific modes using + * drm_mode_probed_add(). Finally drivers that support audio probably + * want to update the ELD data, too, using drm_edid_to_eld(). + * + * This function is only called after the ->detect() hook has indicated + * that a sink is connected and when the EDID isn't overridden through + * sysfs or the kernel commandline. + * + * This callback is used by the probe helpers in e.g. + * drm_helper_probe_single_connector_modes(). + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ int (*get_modes)(struct drm_connector *connector); + + /** + * @mode_valid: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes().
+ * + * NOTE: + * + * This only filters the mode list supplied to userspace in the + * GETCONNECTOR IOCTL. Userspace is free to create modes of its own and + * ask the kernel to use them. In this case the atomic helpers or legacy + * CRTC helpers will not call this function. Drivers therefore must + * still fully validate any mode passed in in a modeset request. + * + * RETURNS: + * + * Either MODE_OK or one of the failure reasons in enum + * &drm_mode_status. + */ enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); + /** + * @best_encoder: + * + * This function should select the best encoder for the given connector. + * + * This function is used by both the atomic helpers (in the + * drm_atomic_helper_check_modeset() function) and in the legacy CRTC + * helpers. + * + * NOTE: + * + * In atomic drivers this function is called in the check phase of an + * atomic update. The driver is not allowed to change or inspect + * anything outside of arguments passed-in. Atomic drivers which need to + * inspect dynamic configuration state should instead use + * @atomic_best_encoder. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + + /** + * @atomic_best_encoder: + * + * This is the atomic version of @best_encoder for atomic drivers which + * need to select the best encoder depending upon the desired + * configuration and can't select it statically. + * + * This function is used by drm_atomic_helper_check_modeset() and either + * this or @best_encoder is required. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice, drivers should not check + * for this. + */ struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, struct drm_connector_state *connector_state); }; -- cgit v1.2.3 From 533708867dd6388f643f12c87465b59e732d729d Mon Sep 17 00:00:00 2001 From: Hal Rosenstock Date: Fri, 13 Nov 2015 15:22:22 -0500 Subject: IB/mad: Require CM send method for everything except ClassPortInfo Receipt of a CM MAD with other than the Send method for an attribute other than the ClassPortInfo attribute is invalid. CM attributes other than ClassPortInfo only use the send method. The SRP initiator does not maintain a timeout policy for CM connect requests and relies on the CM layer to do that. The result was that the SRP initiator hung as the connect request never completed. A new SRP target has been observed to respond to Send CM REQ with GetResp of CM REQ with bad status. This is non-conformant with the IBA spec but exposes a vulnerability in the current MAD/CM code, which will respond to the incoming GetResp of CM REQ as if it was a valid incoming Send of CM REQ rather than tossing this on the floor. It also causes the MAD layer not to retransmit the original REQ even though it has not received a REP.
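Stated as a predicate over the MAD header, the rule the hunk below enforces is simply (a restatement for clarity, not an additional kernel API; the IB_MGMT_* constants are the existing ib_mad.h definitions plus the attribute ID this patch adds):

	/* CM MADs must use the Send method, except for ClassPortInfo. */
	static bool cm_mad_method_ok(const struct ib_mad_hdr *mad_hdr)
	{
		return mad_hdr->mgmt_class != IB_MGMT_CLASS_CM ||
		       mad_hdr->attr_id == IB_MGMT_CLASSPORTINFO_ATTR_ID ||
		       mad_hdr->method == IB_MGMT_METHOD_SEND;
	}

Anything failing this check is now dropped by validate_mad() instead of being treated as a valid incoming Send.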
Reviewed-by: Sagi Grimberg Signed-off-by: Hal Rosenstock Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 5 +++++ include/rdma/ib_mad.h | 2 ++ 2 files changed, 7 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 8d8af7a41a30..2281de122038 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr, if (qp_num == 0) valid = 1; } else { + /* CM attributes other than ClassPortInfo only use Send method */ + if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && + (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && + (mad_hdr->method != IB_MGMT_METHOD_SEND)) + goto out; /* Filter GSI packets sent to QP0 */ if (qp_num != 0) valid = 1; diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 188df91d5851..ec9b44dd3d80 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -237,6 +237,8 @@ struct ib_vendor_mad { u8 data[IB_MGMT_VENDOR_DATA]; }; +#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001) + struct ib_class_port_info { u8 base_version; u8 class_version; -- cgit v1.2.3 From a5e14ba334e202c58e45ef47414ec94c585c1a8c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 28 Oct 2015 13:28:15 +0200 Subject: mlx4: Expose correct max_sge_rd limit mlx4 devices (ConnectX-2, ConnectX-3) have a limitation where rdma read work queue entries cannot exceed 512 bytes. An rdma_read wqe needs to fit in 512 bytes: - wqe control segment (16 bytes) - rdma segment (16 bytes) - scatter elements (16 bytes each) So max_sge_rd should be: (512 - 16 - 16) / 16 = 30. Signed-off-by: Sagi Grimberg Reviewed-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx4/main.c | 2 +- include/linux/mlx4/device.h | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index f567160a4a56..97d6878f9938 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; props->max_sge = min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); - props->max_sge_rd = props->max_sge; + props->max_sge_rd = MLX4_MAX_SGE_RD; props->max_cq = dev->dev->quotas.cq; props->max_cqe = dev->dev->caps.max_cqes; props->max_mr = dev->dev->quotas.mpt; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7501626ab529..d3133be12d92 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -426,6 +426,17 @@ enum { MLX4_MAX_FAST_REG_PAGES = 511, }; +enum { + /* + * Max wqe size for rdma read is 512 bytes, so this + * limits our max_sge_rd as the wqe needs to fit: + * - ctrl segment (16 bytes) + * - rdma segment (16 bytes) + * - scatter elements (16 bytes each) + */ + MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16 +}; + enum { MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, -- cgit v1.2.3 From 30ecad77fe849b60c9a1f8df24dca50e3f083d41 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 9 Dec 2015 09:29:36 +0100 Subject: drm: Move drm_display_mode and related docs into kerneldoc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was in the documentation for modeset helper hooks, where it is a bit misplaced.
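As a quick aid while reading the timing fields documented below: the nominal refresh rate follows directly from the pixel clock and the totals. This is essentially what drm_mode_vrefresh() computes, modulo rounding and the interlace/doublescan/vscan corrections:

	/* e.g. CEA 1080p60: 148500 kHz * 1000 / (2200 * 1125) ~= 60 Hz */
	int vrefresh = mode->clock * 1000 / (mode->htotal * mode->vtotal);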
v2: Reindent the drm_mode_status enum, inspired by Ville. v3: Suggestions from Ville and Thierry. v4: Small fixup that 0day spotted. v5: Slight change to avoid accidental headings in kerneldoc output. Cc: ville.syrjala@linux.intel.com Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-27-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Ville Syrjälä (v3) Reviewed-by: Thierry Reding (v3) --- Documentation/DocBook/gpu.tmpl | 192 ---------------------- drivers/gpu/drm/drm_modes.c | 3 +- drivers/gpu/drm/i915/i915_gem.c | 4 + include/drm/drm_modes.h | 343 ++++++++++++++++++++++++++++++++++------ 4 files changed, 302 insertions(+), 240 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 0ca010da4bdf..749b8e2f2113 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -1757,198 +1757,6 @@ void intel_crt_init(struct drm_device *dev) connector_status_connected. There is no need to call drm_add_edid_modes manually in that case. - - When adding modes manually the driver creates each mode with a call to - drm_mode_create and must fill the following fields. - - - __u32 type; - - Mode type bitmask, a combination of - - - DRM_MODE_TYPE_BUILTIN - not used? - - - DRM_MODE_TYPE_CLOCK_C - not used? - - - DRM_MODE_TYPE_CRTC_C - not used? - - - - DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector - - - not used? - - - - DRM_MODE_TYPE_DEFAULT - not used? - - - DRM_MODE_TYPE_USERDEF - not used? - - - DRM_MODE_TYPE_DRIVER - - - The mode has been created by the driver (as opposed to - to user-created modes). - - - - - Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they - create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred - mode. - - - - __u32 clock; - Pixel clock frequency in kHz unit - - - __u16 hdisplay, hsync_start, hsync_end, htotal; - __u16 vdisplay, vsync_start, vsync_end, vtotal; - Horizontal and vertical timing information - <----------------><-------------><--------------> - - //////////////////////| - ////////////////////// | - ////////////////////// |.................. ................ - _______________ - - <----- [hv]display -----> - <------------- [hv]sync_start ------------> - <--------------------- [hv]sync_end ---------------------> - <-------------------------------- [hv]total -----------------------------> -]]> - - - __u16 hskew; - __u16 vscan; - Unknown - - - __u32 flags; - - Mode flags, a combination of - - - DRM_MODE_FLAG_PHSYNC - - Horizontal sync is active high - - - - DRM_MODE_FLAG_NHSYNC - - Horizontal sync is active low - - - - DRM_MODE_FLAG_PVSYNC - - Vertical sync is active high - - - - DRM_MODE_FLAG_NVSYNC - - Vertical sync is active low - - - - DRM_MODE_FLAG_INTERLACE - - Mode is interlaced - - - - DRM_MODE_FLAG_DBLSCAN - - Mode uses doublescan - - - - DRM_MODE_FLAG_CSYNC - - Mode uses composite sync - - - - DRM_MODE_FLAG_PCSYNC - - Composite sync is active high - - - - DRM_MODE_FLAG_NCSYNC - - Composite sync is active low - - - - DRM_MODE_FLAG_HSKEW - - hskew provided (not used?) - - - - DRM_MODE_FLAG_BCAST - - not used? - - - - DRM_MODE_FLAG_PIXMUX - - not used? - - - - DRM_MODE_FLAG_DBLCLK - - not used? - - - - DRM_MODE_FLAG_CLKDIV2 - - ? - - - - - - Note that modes marked with the INTERLACE or DBLSCAN flags will be - filtered out by - drm_helper_probe_single_connector_modes if - the connector's interlace_allowed or - doublescan_allowed field is set to 0. 
- - - - char name[DRM_DISPLAY_MODE_LEN]; - - Mode name. The driver must call - drm_mode_set_name to fill the mode name from - hdisplay, - vdisplay and interlace flag after - filling the corresponding fields. - - - - The vrefresh value is computed by drm_helper_probe_single_connector_modes. diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ef6bd3656548..1892e615528a 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -708,7 +708,8 @@ void drm_mode_set_name(struct drm_display_mode *mode) } EXPORT_SYMBOL(drm_mode_set_name); -/** drm_mode_hsync - get the hsync of a mode +/** + * drm_mode_hsync - get the hsync of a mode * @mode: mode * * Returns: diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a6997a8a3eaa..f98738925ed9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2949,6 +2949,10 @@ i915_gem_idle_work_handler(struct work_struct *work) if (!list_empty(&ring->request_list)) return; + /* we probably should sync with hangcheck here, using cancel_work_sync. + * Also locking seems to be fubar here, ring->request_list is protected + * by dev->struct_mutex. */ + intel_mark_idle(dev); if (mutex_trylock(&dev->struct_mutex)) { diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index f9115aee43f4..9e6d7a1cd19b 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -35,46 +35,91 @@ * structures). */ +/** + * enum drm_mode_status - hardware support status of a mode + * @MODE_OK: Mode OK + * @MODE_HSYNC: hsync out of range + * @MODE_VSYNC: vsync out of range + * @MODE_H_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal vertical timings + * @MODE_BAD_WIDTH: requires an unsupported linepitch + * @MODE_NOMODE: no mode with a matching name + * @MODE_NO_INTERLACE: interlaced mode not supported + * @MODE_NO_DBLESCAN: doublescan mode not supported + * @MODE_NO_VSCAN: multiscan mode not supported + * @MODE_MEM: insufficient video memory + * @MODE_VIRTUAL_X: mode width too large for specified virtual size + * @MODE_VIRTUAL_Y: mode height too large for specified virtual size + * @MODE_MEM_VIRT: insufficient video memory given virtual size + * @MODE_NOCLOCK: no fixed clock available + * @MODE_CLOCK_HIGH: clock required is too high + * @MODE_CLOCK_LOW: clock required is too low + * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange + * @MODE_BAD_HVALUE: horizontal timing was out of range + * @MODE_BAD_VVALUE: vertical timing was out of range + * @MODE_BAD_VSCAN: VScan value out of range + * @MODE_HSYNC_NARROW: horizontal sync too narrow + * @MODE_HSYNC_WIDE: horizontal sync too wide + * @MODE_HBLANK_NARROW: horizontal blanking too narrow + * @MODE_HBLANK_WIDE: horizontal blanking too wide + * @MODE_VSYNC_NARROW: vertical sync too narrow + * @MODE_VSYNC_WIDE: vertical sync too wide + * @MODE_VBLANK_NARROW: vertical blanking too narrow + * @MODE_VBLANK_WIDE: vertical blanking too wide + * @MODE_PANEL: exceeds panel dimensions + * @MODE_INTERLACE_WIDTH: width too large for interlaced mode + * @MODE_ONE_WIDTH: only one width is supported + * @MODE_ONE_HEIGHT: only one height is supported + * @MODE_ONE_SIZE: only one resolution is supported + * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking + * @MODE_NO_STEREO: stereo modes not supported + * @MODE_UNVERIFIED: mode needs to be reverified + * @MODE_BAD: unspecified reason + * @MODE_ERROR: error condition + * + * This enum is used to filter out modes not supported by the
driver/hardware + * combination. + */ enum drm_mode_status { - MODE_OK = 0, /* Mode OK */ - MODE_HSYNC, /* hsync out of range */ - MODE_VSYNC, /* vsync out of range */ - MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ - MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ - MODE_BAD_WIDTH, /* requires an unsupported linepitch */ - MODE_NOMODE, /* no mode with a matching name */ - MODE_NO_INTERLACE, /* interlaced mode not supported */ - MODE_NO_DBLESCAN, /* doublescan mode not supported */ - MODE_NO_VSCAN, /* multiscan mode not supported */ - MODE_MEM, /* insufficient video memory */ - MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ - MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ - MODE_MEM_VIRT, /* insufficient video memory given virtual size */ - MODE_NOCLOCK, /* no fixed clock available */ - MODE_CLOCK_HIGH, /* clock required is too high */ - MODE_CLOCK_LOW, /* clock required is too low */ - MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ - MODE_BAD_HVALUE, /* horizontal timing was out of range */ - MODE_BAD_VVALUE, /* vertical timing was out of range */ - MODE_BAD_VSCAN, /* VScan value out of range */ - MODE_HSYNC_NARROW, /* horizontal sync too narrow */ - MODE_HSYNC_WIDE, /* horizontal sync too wide */ - MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ - MODE_HBLANK_WIDE, /* horizontal blanking too wide */ - MODE_VSYNC_NARROW, /* vertical sync too narrow */ - MODE_VSYNC_WIDE, /* vertical sync too wide */ - MODE_VBLANK_NARROW, /* vertical blanking too narrow */ - MODE_VBLANK_WIDE, /* vertical blanking too wide */ - MODE_PANEL, /* exceeds panel dimensions */ - MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ - MODE_ONE_WIDTH, /* only one width is supported */ - MODE_ONE_HEIGHT, /* only one height is supported */ - MODE_ONE_SIZE, /* only one resolution is supported */ - MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ - MODE_NO_STEREO, /* stereo modes not supported */ - MODE_UNVERIFIED = -3, /* mode needs to reverified */ - MODE_BAD = -2, /* unspecified reason */ - MODE_ERROR = -1 /* error condition */ + MODE_OK = 0, + MODE_HSYNC, + MODE_VSYNC, + MODE_H_ILLEGAL, + MODE_V_ILLEGAL, + MODE_BAD_WIDTH, + MODE_NOMODE, + MODE_NO_INTERLACE, + MODE_NO_DBLESCAN, + MODE_NO_VSCAN, + MODE_MEM, + MODE_VIRTUAL_X, + MODE_VIRTUAL_Y, + MODE_MEM_VIRT, + MODE_NOCLOCK, + MODE_CLOCK_HIGH, + MODE_CLOCK_LOW, + MODE_CLOCK_RANGE, + MODE_BAD_HVALUE, + MODE_BAD_VVALUE, + MODE_BAD_VSCAN, + MODE_HSYNC_NARROW, + MODE_HSYNC_WIDE, + MODE_HBLANK_NARROW, + MODE_HBLANK_WIDE, + MODE_VSYNC_NARROW, + MODE_VSYNC_WIDE, + MODE_VBLANK_NARROW, + MODE_VBLANK_WIDE, + MODE_PANEL, + MODE_INTERLACE_WIDTH, + MODE_ONE_WIDTH, + MODE_ONE_HEIGHT, + MODE_ONE_SIZE, + MODE_NO_REDUCED, + MODE_NO_STEREO, + MODE_UNVERIFIED = -3, + MODE_BAD = -2, + MODE_ERROR = -1 }; #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ @@ -96,17 +141,125 @@ enum drm_mode_status { #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF +/** + * struct drm_display_mode - DRM kernel-internal display mode structure + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew?! + * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan?! 
+ * @crtc_hdisplay: hardware mode horizontal display size + * @crtc_hblank_start: hardware mode horizontal blank start + * @crtc_hblank_end: hardware mode horizontal blank end + * @crtc_hsync_start: hardware mode horizontal sync start + * @crtc_hsync_end: hardware mode horizontal sync end + * @crtc_htotal: hardware mode horizontal total size + * @crtc_hskew: hardware mode horizontal skew?! + * @crtc_vdisplay: hardware mode vertical display size + * @crtc_vblank_start: hardware mode vertical blank start + * @crtc_vblank_end: hardware mode vertical blank end + * @crtc_vsync_start: hardware mode vertical sync start + * @crtc_vsync_end: hardware mode vertical sync end + * @crtc_vtotal: hardware mode vertical total size + * + * The horizontal and vertical timings are defined per the following diagram. + * + * + * Active Front Sync Back + * Region Porch Porch + * <-----------------------><----------------><-------------><--------------> + * //////////////////////| + * ////////////////////// | + * ////////////////////// |.................. ................ + * _______________ + * <----- [hv]display -----> + * <------------- [hv]sync_start ------------> + * <--------------------- [hv]sync_end ---------------------> + * <-------------------------------- [hv]total ----------------------------->* + * + * This structure contains two copies of timings. First are the plain timings, + * which specify the logical mode, as it would be for a progressive 1:1 scanout + * at the refresh rate userspace can observe through vblank timestamps. Then + * there's the hardware timings, which are corrected for interlacing, + * double-clocking and similar things. They are provided as a convenience, and + * can be appropriately computed using drm_mode_set_crtcinfo(). + */ struct drm_display_mode { - /* Header */ + /** + * @head: + * + * struct list_head for mode lists. + */ struct list_head head; + + /** + * @base: + * + * A display mode is a normal modeset object, possibly including public + * userspace id. + * + * FIXME: + * + * This can probably be removed since the entire concept of userspace + * managing modes explicitly has never landed in upstream kernel mode + * setting support. + */ struct drm_mode_object base; + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ char name[DRM_DISPLAY_MODE_LEN]; + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ enum drm_mode_status status; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively + * unused. + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI: + * + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. + * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. 
+ * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige + * from older kms designs where userspace had to first add a custom + * mode to the kernel's mode list before it could use it. Don't use. + */ unsigned int type; - /* Proposed mode values */ + /** + * @clock: + * + * Pixel clock in kHz. + */ int clock; /* in kHz */ int hdisplay; int hsync_start; @@ -118,14 +271,74 @@ struct drm_display_mode { int vsync_end; int vtotal; int vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). + * - DRM_MODE_FLAG_BCAST: not used? + * - DRM_MODE_FLAG_PIXMUX: not used? + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? + * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ unsigned int flags; - /* Addressable image size (may be 0 for projectors, etc.) */ + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ int width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ int height_mm; - /* Actual mode we give to hw */ - int crtc_clock; /* in KHz */ + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. + * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; int crtc_hdisplay; int crtc_hblank_start; int crtc_hblank_end; @@ -140,12 +353,48 @@ struct drm_display_mode { int crtc_vsync_end; int crtc_vtotal; - /* Driver private mode info */ + /** + * @private: + * + * Pointer for driver private data. This can only be used for mode + * objects passed to drivers in modeset operations. It shouldn't be used + * by atomic drivers since they can store any additional data by + * subclassing state structures. + */ int *private; + + /** + * @private_flags: + * + * Similar to @private, but just an integer. 
+ */ int private_flags; - int vrefresh; /* in Hz */ - int hsync; /* in kHz */ + /** + * @vrefresh: + * + * Vertical refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in Hz. + */ + int vrefresh; + + /** + * @hsync: + * + * Horizontal refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in kHz. + */ + int hsync; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. + */ enum hdmi_picture_aspect picture_aspect_ratio; }; -- cgit v1.2.3 From 36b66080dc66ab08ff6c36237147638e2d060874 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 4 Dec 2015 09:46:08 +0100 Subject: drm: Document drm_encoder/crtc_helper_funcs Mostly this is about all the callbacks used for modesets by both legacy CRTC helpers and atomic helpers and I figured it doesn't make all that much sense to split this up. v2: Suggestions from Thierry. Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449218769-16577-28-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding --- include/drm/drm_modeset_helper_vtables.h | 449 +++++++++++++++++++++++++++---- 1 file changed, 401 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 4b3869bd49b5..29e0dc50031d 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -51,58 +51,236 @@ enum mode_set_atomic; /** * struct drm_crtc_helper_funcs - helper operations for CRTCs - * @dpms: set power state - * @prepare: prepare the CRTC, called before @mode_set - * @commit: commit changes to CRTC, called after @mode_set - * @mode_fixup: try to fixup proposed mode for this CRTC - * @mode_set: set this mode - * @mode_set_nofb: set mode only (no scanout buffer attached) - * @mode_set_base: update the scanout buffer - * @mode_set_base_atomic: non-blocking mode set (used for kgdb support) - * @load_lut: load color palette - * @disable: disable CRTC when no longer in use - * @enable: enable CRTC * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. */ struct drm_crtc_helper_funcs { - /* - * Control power levels on the CRTC. If the mode passed in is - * unsupported, the provider must use the next lowest power level. + /** + * @dpms: + * + * Callback to control power levels on the CRTC. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy CRTC helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable a CRTC by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling a CRTC to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. 
*/ void (*dpms)(struct drm_crtc *crtc, int mode); + + /** + * @prepare: + * + * This callback should prepare the CRTC for a subsequent modeset, which + * in practice means the driver should disable the CRTC if it is + * running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. + */ void (*prepare)(struct drm_crtc *crtc); + + /** + * @commit: + * + * This callback should commit the new mode on the CRTC after a modeset, + * which in practice means the driver should enable the CRTC. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ void (*commit)(struct drm_crtc *crtc); - /* Provider can fixup or change mode timings before modeset occurs */ + /** + * @mode_fixup: + * + * This callback is used to validate a mode. The parameter mode is the + * display mode that userspace requested, adjusted_mode is the mode the + * encoders need to be fed with. Note that this is the inverse semantics + * of the meaning for the &drm_encoder and &drm_bridge + * ->mode_fixup() functions. If the CRTC cannot support the requested + * conversion from mode to adjusted_mode it should reject the modeset. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ bool (*mode_fixup)(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); - /* Actually set the mode */ + + /** + * @mode_set: + * + * This callback is used by the legacy CRTC helpers to set a new mode, + * position and framebuffer. Since it ties the primary plane to every + * mode change it is incompatible with universal plane support. And + * since it can't update other planes it's incompatible with atomic + * modeset support. + * + * This callback is only used by CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); - /* Actually set the mode for atomic helpers, optional */ + + /** + * @mode_set_nofb: + * + * This callback is used to update the display mode of a CRTC without + * changing anything of the primary plane configuration. This fits the + * requirement of atomic and hence is used by the atomic helpers. 
It is + * also used by the transitional plane helpers to implement a + * @mode_set hook in drm_helper_crtc_mode_set(). + * + * Note that the display pipe is completely off when this function is + * called. Atomic drivers which need hardware to be running before they + * program the new display mode (e.g. because they implement runtime PM) + * should not use this hook. This is because the helper library calls + * this hook only once per mode change and not every time the display + * pipeline is suspended using either DPMS or the new "ACTIVE" property. + * Which means register values set in this callback might get reset when + * the CRTC is suspended, but not restored. Such drivers should instead + * move all their CRTC setup into the @enable callback. + * + * This callback is optional. + */ void (*mode_set_nofb)(struct drm_crtc *crtc); - /* Move the crtc on the current fb to the given position *optional* */ + /** + * @mode_set_base: + * + * This callback is used by the legacy CRTC helpers to set a new + * framebuffer and scanout position. It is optional and used as an + * optimized fast-path instead of a full mode set operation with all the + * resulting flickering. Since it can't update other planes it's + * incompatible with atomic modeset support. + * + * This callback is only used by the CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); + + /** + * @mode_set_base_atomic: + * + * This callback is used by the fbdev helpers to set a new framebuffer + * and scanout without sleeping, i.e. from an atomic calling context. It + * is only used to implement kgdb support. + * + * This callback is optional and only needed for kgdb support in the fbdev + * helpers. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*mode_set_base_atomic)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic); - /* reload the current crtc LUT */ + /** + * @load_lut: + * + * Load a LUT prepared with the @gamma_set functions from + * &drm_fb_helper_funcs. + * + * This callback is optional and is only used by the fbdev emulation + * helpers. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ void (*load_lut)(struct drm_crtc *crtc); + /** + * @disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own ->disable hook. If that + * sequence is too simple drivers can just add their own hooks and call + * it from this CRTC callback here by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the CRTC level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @enable for atomic drivers. 
+ * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * CRTC: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ void (*disable)(struct drm_crtc *crtc); + + /** + * @enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own ->enable hook. If that sequence is + * too simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the CRTC level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. + */ void (*enable)(struct drm_crtc *crtc); /** @@ -212,53 +390,228 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc, /** * struct drm_encoder_helper_funcs - helper operations for encoders - * @dpms: set power state - * @mode_fixup: try to fixup proposed mode for this connector - * @prepare: part of the disable sequence, called before the CRTC modeset - * @commit: called after the CRTC modeset - * @mode_set: set this mode, optional for atomic helpers - * @get_crtc: return CRTC that the encoder is currently attached to - * @detect: connection status detection - * @disable: disable encoder when not in use (overrides DPMS off) - * @enable: enable encoder - * @atomic_check: check for validity of an atomic update - * - * The helper operations are called by the mid-layer CRTC helper. * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. */ struct drm_encoder_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the encoder. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy encoder helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable an encoder by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling an encoder to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ void (*dpms)(struct drm_encoder *encoder, int mode); + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. 
The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or a &drm_bridge. + * The parameter adjusted_mode is the input mode the encoder requires. It + * can be modified by this callback and does not need to match mode. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ bool (*mode_fixup)(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + + /** + * @prepare: + * + * This callback should prepare the encoder for a subsequent modeset, + * which in practice means the driver should disable the encoder if it + * is running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. + */ void (*prepare)(struct drm_encoder *encoder); + + /** + * @commit: + * + * This callback should commit the new mode on the encoder after a modeset, + * which in practice means the driver should enable the encoder. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ void (*commit)(struct drm_encoder *encoder); + + /** + * @mode_set: + * + * This callback is used to update the display mode of an encoder. + * + * Note that the display pipe is completely off when this function is + * called. Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspended using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the ->enable() callback. + * + * This callback is used both by the legacy CRTC helpers and the atomic + * modeset helpers. It is optional in the atomic helpers. + */ void (*mode_set)(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + + /** + * @get_crtc: + * + * This callback is used by the legacy CRTC helpers to work around + * deficiencies in its own book-keeping. + * + * Do not use, use atomic helpers instead, which get the book keeping + * right. + * + * FIXME: + * + * Currently only nouveau is using this, and as soon as nouveau is + * atomic we can ditch this hook.
+ */ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); - /* detect for DAC style encoders */ + + /** + * @detect: + * + * This callback can be used by drivers who want to do detection on the + * encoder object instead of in connector functions. + * + * It is not used by any helper and therefore has purely driver-specific + * semantics. New drivers shouldn't use this and instead just implement + * their own private callbacks. + * + * FIXME: + * + * This should just be converted into a pile of driver vfuncs. + * Currently radeon, amdgpu and nouveau are using it. + */ enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); + + /** + * @disable: + * + * This callback should be used to disable the encoder. With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using the CRTC's own ->disable hook. If that sequence is too simple + * drivers can just add their own driver private encoder hooks and call + * them from CRTC's callback by looping over all encoders connected to + * it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the encoder level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @enable for atomic drivers. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down an + * encoder: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ void (*disable)(struct drm_encoder *encoder); + /** + * @enable: + * + * This callback should be used to enable the encoder. With the atomic + * drivers it is called after this encoder's CRTC has been enabled using + * the CRTC's own ->enable hook. If that sequence is too simple drivers + * can just add their own driver private encoder hooks and call them + * from CRTC's callback by looping over all encoders connected to it + * using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the encoder level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. + */ void (*enable)(struct drm_encoder *encoder); - /* atomic helpers */ + /** + * @atomic_check: + * + * This callback is used to validate encoder state for atomic drivers. + * Since the encoder is the object connecting the CRTC and connector it + * gets passed both states, to be able to validate interactions and + * update the CRTC to match what the encoder needs for the requested + * connector. + * + * This function is used by the atomic helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update.
The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ int (*atomic_check)(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state); }; /** - * drm_encoder_helper_add - sets the helper vtable for a encoder + * drm_encoder_helper_add - sets the helper vtable for an encoder * @encoder: DRM encoder * @funcs: helper vtable to set for @encoder */ -- cgit v1.2.3 From 4c3141e09cfa6460bfcd5e90f73e498db654c917 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Tue, 1 Dec 2015 17:24:17 +0100 Subject: of/irq: Export of_irq_find_parent again of_irq_find_parent was made static since it had no users outside of of_irq.c. Export it again since we are going to use it again. Signed-off-by: Carlo Caione [robh: move of_irq_find_parent to correct ifdef section] Signed-off-by: Rob Herring --- drivers/of/irq.c | 3 ++- include/linux/of_irq.h | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 902b89be7217..4fa916dffc91 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); * Returns a pointer to the interrupt parent node, or NULL if the interrupt * parent could not be determined. */ -static struct device_node *of_irq_find_parent(struct device_node *child) +struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; const __be32 *parp; @@ -77,6 +77,7 @@ static struct device_node *of_irq_find_parent(struct device_node *child) return p; } +EXPORT_SYMBOL_GPL(of_irq_find_parent); /** * of_irq_parse_raw - Low level interrupt tree parsing diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 039f2eec49ce..f648acf27ed7 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -46,6 +46,7 @@ extern int of_irq_get(struct device_node *dev, int index); extern int of_irq_get_byname(struct device_node *dev, const char *name); extern int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs); +extern struct device_node *of_irq_find_parent(struct device_node *child); extern struct irq_domain *of_msi_get_domain(struct device *dev, struct device_node *np, enum irq_domain_bus_token token); @@ -70,6 +71,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev, { return 0; } +static inline void *of_irq_find_parent(struct device_node *child) +{ + return NULL; +} + static inline struct irq_domain *of_msi_get_domain(struct device *dev, struct device_node *np, enum irq_domain_bus_token token) -- cgit v1.2.3 From eaddb5725357e9f05ffe5d271630f8197d089da4 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 9 Dec 2015 09:11:10 -0600 Subject: of/irq: move of_msi_map_rid declaration to the correct ifdef section In checking fixes for of_irq_find_parent declaration location, I found that of_msi_map_rid is also wrong. of_msi_map_rid is not implemented for Sparc, so it should not be in the Sparc specific section of the header. Move it to just depend on OF_IRQ. 
Cc: Frank Rowand Signed-off-by: Rob Herring --- include/linux/of_irq.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index f648acf27ed7..1e0deb8e8494 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -53,6 +53,7 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev, extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid); extern void of_msi_configure(struct device *dev, struct device_node *np); +u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); #else static inline int of_irq_count(struct device_node *dev) { @@ -90,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev static inline void of_msi_configure(struct device *dev, struct device_node *np) { } +static inline u32 of_msi_map_rid(struct device *dev, + struct device_node *msi_np, u32 rid_in) +{ + return rid_in; +} #endif #if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) @@ -99,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np) * so declare it here regardless of the CONFIG_OF_IRQ setting. */ extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); -u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); #else /* !CONFIG_OF && !CONFIG_SPARC */ static inline unsigned int irq_of_parse_and_map(struct device_node *dev, { return 0; } - -static inline u32 of_msi_map_rid(struct device *dev, - struct device_node *msi_np, u32 rid_in) -{ - return rid_in; -} #endif /* !CONFIG_OF */ #endif /* __OF_IRQ_H */ -- cgit v1.2.3 From d7e35dfa2531b53618b9e6edcd8752ce988ac555 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 3 Dec 2015 22:04:01 -0500 Subject: bitops.h: correctly handle rol32 with 0 byte shift ROL on a 32 bit integer with a shift of 32 or more is undefined and the result is arch-dependent. Avoid this by handling the trivial case of rotating by 0 correctly. The trivial solution of checking if shift is 0 breaks gcc's detection of this code as a ROL instruction, which is unacceptable. This bug was reported and fixed in GCC (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57157): The standard rotate idiom, (x << n) | (x >> (32 - n)) is recognized by gcc (for concreteness, I discuss only the case that x is an uint32_t here). However, this is portable C only for n in the range 0 < n < 32. For n == 0, we get x >> 32 which gives undefined behaviour according to the C standard (6.5.7, Bitwise shift operators). To portably support n == 0, one has to write the rotate as something like (x << n) | (x >> ((-n) & 31)) And this is apparently not recognized by gcc. Note that recognition of this idiom is broken on older GCCs and will result in a slower ROL.
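To illustrate why the masked idiom is well defined (a standalone sketch, not part of the patch): for a 32-bit word, ((-n) & 31) equals 32 - n for 0 < n < 32 and equals 0 for n == 0, so the right shift never reaches the type width:

#include <assert.h>
#include <stdint.h>

/* Userspace mirror of the fixed rol32(): well defined for shift == 0
 * because ((-0) & 31) == 0, so no "x >> 32" is ever evaluated. */
static inline uint32_t rol32_demo(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	assert(rol32_demo(0x80000001u, 0) == 0x80000001u); /* identity, no UB */
	assert(rol32_demo(0x80000001u, 1) == 0x00000003u); /* MSB wraps around */
	return 0;
}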
Acked-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Linus Torvalds --- include/linux/bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 2b8ed123ad36..defeaac0745f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift) */ static inline __u32 rol32(__u32 word, unsigned int shift) { - return (word << shift) | (word >> (32 - shift)); + return (word << shift) | (word >> ((-shift) & 31)); } /** -- cgit v1.2.3 From ecb7deceff2a51d3be50518969bc06411f485a62 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 9 Dec 2015 10:18:10 +0200 Subject: dmaengine: edma: DT: Change memcpy channel array from 16bit to 32bit type This change makes the DT file easier to read since the memcpy channels array does not need the '/bits/ 16' to be specified, which might confuse some people. Signed-off-by: Peter Ujfalusi Acked-by: Arnd Bergmann Acked-by: Rob Herring Acked-by: Tony Lindgren Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/ti-edma.txt | 5 ++--- drivers/dma/edma.c | 22 ++++++++++------------ include/linux/platform_data/edma.h | 2 +- 3 files changed, 13 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt index d3d0a4fb1c73..ae8b8f1d6e69 100644 --- a/Documentation/devicetree/bindings/dma/ti-edma.txt +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt @@ -22,8 +22,7 @@ Required properties: Optional properties: - ti,hwmods: Name of the hwmods associated to the eDMA CC - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow - these channels will be SW triggered channels. The list must - contain 16 bits numbers, see example. + these channels will be SW triggered channels. See example. - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by the driver, they are allocated to be used by for example the DSP. See example.
@@ -56,7 +55,7 @@ edma: edma@49000000 { ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; /* Channel 20 and 21 is allocated for memcpy */ - ti,edma-memcpy-channels = /bits/ 16 <20 21>; + ti,edma-memcpy-channels = <20 21>; /* The following PaRAM slots are reserved: 35-45 and 100-110 */ ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, /bits/ 16 <100 10>; diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 6b03e4e84e6b..3da20291db56 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, return ret; } -static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) +static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) { - s16 *memcpy_ch = memcpy_channels; - if (!memcpy_channels) return false; - while (*memcpy_ch != -1) { - if (*memcpy_ch == ch_num) + while (*memcpy_channels != -1) { + if (*memcpy_channels == ch_num) return true; - memcpy_ch++; + memcpy_channels++; } return false; } @@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) { struct dma_device *s_ddev = &ecc->dma_slave; struct dma_device *m_ddev = NULL; - s16 *memcpy_channels = ecc->info->memcpy_channels; + s32 *memcpy_channels = ecc->info->memcpy_channels; int i, j; dma_cap_zero(s_ddev->cap_mask); @@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); if (prop) { const char pname[] = "ti,edma-memcpy-channels"; - size_t nelm = sz / sizeof(s16); - s16 *memcpy_ch; + size_t nelm = sz / sizeof(s32); + s32 *memcpy_ch; - memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), + memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), GFP_KERNEL); if (!memcpy_ch) return ERR_PTR(-ENOMEM); - ret = of_property_read_u16_array(dev->of_node, pname, - (u16 *)memcpy_ch, nelm); + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)memcpy_ch, nelm); if (ret) return ERR_PTR(ret); diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index e2878baeb90e..4299f4ba03bd 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -72,7 +72,7 @@ struct edma_soc_info { struct edma_rsv_info *rsv; /* List of channels allocated for memcpy, terminated with -1 */ - s16 *memcpy_channels; + s32 *memcpy_channels; s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; -- cgit v1.2.3 From 1a2a42c8bfa25ef0f7e14c5973c6a0bebe625a96 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 31 Aug 2014 12:58:46 +0200 Subject: drm.h: use __kernel_size_t instead of size_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fall back to size_t for non Linux platforms. 
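Concretely, the non-Linux compatibility block at the top of include/uapi/drm/drm.h ends up looking roughly like this sketch (trimmed to the typedefs relevant here; on Linux, linux/types.h provides __kernel_size_t instead):

#include <stdint.h>
#include <stddef.h>

typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef size_t __kernel_size_t;	/* the fallback added by this patch */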
Fixes userspace compilation error: drm/drm.h:132:2: error: unknown type name ‘size_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/drm.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 3801584a0c53..b4e92eb12044 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -54,6 +54,7 @@ typedef int32_t __s32; typedef uint32_t __u32; typedef int64_t __s64; typedef uint64_t __u64; +typedef size_t __kernel_size_t; typedef unsigned long drm_handle_t; #endif @@ -129,11 +130,11 @@ struct drm_version { int version_major; /**< Major version */ int version_minor; /**< Minor version */ int version_patchlevel; /**< Patch level */ - size_t name_len; /**< Length of name buffer */ + __kernel_size_t name_len; /**< Length of name buffer */ char __user *name; /**< Name of driver */ - size_t date_len; /**< Length of date buffer */ + __kernel_size_t date_len; /**< Length of date buffer */ char __user *date; /**< User-space buffer to hold date */ - size_t desc_len; /**< Length of desc buffer */ + __kernel_size_t desc_len; /**< Length of desc buffer */ char __user *desc; /**< User-space buffer to hold desc */ }; @@ -143,7 +144,7 @@ struct drm_version { * \sa drmGetBusid() and drmSetBusId(). */ struct drm_unique { - size_t unique_len; /**< Length of unique */ + __kernel_size_t unique_len; /**< Length of unique */ char __user *unique; /**< Unique name for driver instantiation */ }; -- cgit v1.2.3 From 05623b7fcaf20886721307de10547d3f91235186 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 31 Aug 2014 13:01:09 +0200 Subject: drm_mode.h: use __u32 and __u64 from linux/types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation error: drm/drm_mode.h:472:2: error: unknown type name ‘uint32_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/drm_mode.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 6c11ca401de8..44ad794960ee 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -526,14 +526,14 @@ struct drm_mode_crtc_page_flip { /* create a dumb scanout buffer */ struct drm_mode_create_dumb { - uint32_t height; - uint32_t width; - uint32_t bpp; - uint32_t flags; + __u32 height; + __u32 width; + __u32 bpp; + __u32 flags; /* handle, pitch, size will be returned */ - uint32_t handle; - uint32_t pitch; - uint64_t size; + __u32 handle; + __u32 pitch; + __u64 size; }; /* set up for mmap of a dumb scanout buffer */ @@ -550,7 +550,7 @@ struct drm_mode_map_dumb { }; struct drm_mode_destroy_dumb { - uint32_t handle; + __u32 handle; }; /* page-flip flags are valid, plus: */ -- cgit v1.2.3 From 6615b20f1e4c35a62f7d12d38b7f56dc63dbdece Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 31 Aug 2014 13:07:31 +0200 Subject: exynos_drm.h: use __u64 from linux/types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation error: drm/exynos_drm.h:30:2: error: unknown type name ‘uint64_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/exynos_drm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index 5575ed1598bd..cf431b76c320 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -27,7 +27,7 @@ * - this 
handle will be set by gem module of kernel side. */ struct drm_exynos_gem_create { - uint64_t size; + __u64 size; unsigned int flags; unsigned int handle; }; @@ -44,7 +44,7 @@ struct drm_exynos_gem_create { struct drm_exynos_gem_info { unsigned int handle; unsigned int flags; - uint64_t size; + __u64 size; }; /** @@ -58,7 +58,7 @@ struct drm_exynos_gem_info { struct drm_exynos_vidi_connection { unsigned int connection; unsigned int extensions; - uint64_t edid; + __u64 edid; }; /* memory type definitions. */ -- cgit v1.2.3 From 8860487ef39bef5765354ec7dd195983b49f9349 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 31 Aug 2014 13:09:51 +0200 Subject: nouveau_drm.h: use __u32 and __u64 from linux/types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation errors like: drm/nouveau_drm.h:41:2: error: unknown type name ‘uint32_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/nouveau_drm.h | 86 +++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index fd594cc73cc0..500d82aecbe4 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h @@ -27,6 +27,8 @@ #define DRM_NOUVEAU_EVENT_NVIF 0x80000000 +#include + #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) @@ -41,34 +43,34 @@ #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 struct drm_nouveau_gem_info { - uint32_t handle; - uint32_t domain; - uint64_t size; - uint64_t offset; - uint64_t map_handle; - uint32_t tile_mode; - uint32_t tile_flags; + __u32 handle; + __u32 domain; + __u64 size; + __u64 offset; + __u64 map_handle; + __u32 tile_mode; + __u32 tile_flags; }; struct drm_nouveau_gem_new { struct drm_nouveau_gem_info info; - uint32_t channel_hint; - uint32_t align; + __u32 channel_hint; + __u32 align; }; #define NOUVEAU_GEM_MAX_BUFFERS 1024 struct drm_nouveau_gem_pushbuf_bo_presumed { - uint32_t valid; - uint32_t domain; - uint64_t offset; + __u32 valid; + __u32 domain; + __u64 offset; }; struct drm_nouveau_gem_pushbuf_bo { - uint64_t user_priv; - uint32_t handle; - uint32_t read_domains; - uint32_t write_domains; - uint32_t valid_domains; + __u64 user_priv; + __u32 handle; + __u32 read_domains; + __u32 write_domains; + __u32 valid_domains; struct drm_nouveau_gem_pushbuf_bo_presumed presumed; }; @@ -77,46 +79,46 @@ struct drm_nouveau_gem_pushbuf_bo { #define NOUVEAU_GEM_RELOC_OR (1 << 2) #define NOUVEAU_GEM_MAX_RELOCS 1024 struct drm_nouveau_gem_pushbuf_reloc { - uint32_t reloc_bo_index; - uint32_t reloc_bo_offset; - uint32_t bo_index; - uint32_t flags; - uint32_t data; - uint32_t vor; - uint32_t tor; + __u32 reloc_bo_index; + __u32 reloc_bo_offset; + __u32 bo_index; + __u32 flags; + __u32 data; + __u32 vor; + __u32 tor; }; #define NOUVEAU_GEM_MAX_PUSH 512 struct drm_nouveau_gem_pushbuf_push { - uint32_t bo_index; - uint32_t pad; - uint64_t offset; - uint64_t length; + __u32 bo_index; + __u32 pad; + __u64 offset; + __u64 length; }; struct drm_nouveau_gem_pushbuf { - uint32_t channel; - uint32_t nr_buffers; - uint64_t buffers; - uint32_t nr_relocs; - uint32_t nr_push; - uint64_t relocs; - uint64_t push; - uint32_t suffix0; - uint32_t suffix1; - uint64_t vram_available; - uint64_t gart_available; + __u32 channel; + __u32 nr_buffers; + __u64 buffers; + __u32 nr_relocs; + __u32 nr_push; + __u64 relocs; + __u64 push; + __u32 suffix0; + __u32 
suffix1; + __u64 vram_available; + __u64 gart_available; }; #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 struct drm_nouveau_gem_cpu_prep { - uint32_t handle; - uint32_t flags; + __u32 handle; + __u32 flags; }; struct drm_nouveau_gem_cpu_fini { - uint32_t handle; + __u32 handle; }; #define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */ -- cgit v1.2.3 From 31b4dfe24e903e995a32f17e9a9cafbbecabc77a Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 31 Aug 2014 13:11:54 +0200 Subject: radeon_drm.h: use __u32 and __u64 from linux/types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compiler error: drm/radeon_drm.h:794:2: error: unknown type name ‘uint64_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/radeon_drm.h | 128 +++++++++++++++++++++--------------------- 1 file changed, 64 insertions(+), 64 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 01aa2a8e3f8d..ccb9bcd82685 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -793,9 +793,9 @@ typedef struct drm_radeon_surface_free { #define RADEON_GEM_DOMAIN_VRAM 0x4 struct drm_radeon_gem_info { - uint64_t gart_size; - uint64_t vram_size; - uint64_t vram_visible; + __u64 gart_size; + __u64 vram_size; + __u64 vram_visible; }; #define RADEON_GEM_NO_BACKING_STORE (1 << 0) @@ -807,11 +807,11 @@ struct drm_radeon_gem_info { #define RADEON_GEM_NO_CPU_ACCESS (1 << 4) struct drm_radeon_gem_create { - uint64_t size; - uint64_t alignment; - uint32_t handle; - uint32_t initial_domain; - uint32_t flags; + __u64 size; + __u64 alignment; + __u32 handle; + __u32 initial_domain; + __u32 flags; }; /* @@ -825,10 +825,10 @@ struct drm_radeon_gem_create { #define RADEON_GEM_USERPTR_REGISTER (1 << 3) struct drm_radeon_gem_userptr { - uint64_t addr; - uint64_t size; - uint32_t flags; - uint32_t handle; + __u64 addr; + __u64 size; + __u32 flags; + __u32 handle; }; #define RADEON_TILING_MACRO 0x1 @@ -850,72 +850,72 @@ struct drm_radeon_gem_userptr { #define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf struct drm_radeon_gem_set_tiling { - uint32_t handle; - uint32_t tiling_flags; - uint32_t pitch; + __u32 handle; + __u32 tiling_flags; + __u32 pitch; }; struct drm_radeon_gem_get_tiling { - uint32_t handle; - uint32_t tiling_flags; - uint32_t pitch; + __u32 handle; + __u32 tiling_flags; + __u32 pitch; }; struct drm_radeon_gem_mmap { - uint32_t handle; - uint32_t pad; - uint64_t offset; - uint64_t size; - uint64_t addr_ptr; + __u32 handle; + __u32 pad; + __u64 offset; + __u64 size; + __u64 addr_ptr; }; struct drm_radeon_gem_set_domain { - uint32_t handle; - uint32_t read_domains; - uint32_t write_domain; + __u32 handle; + __u32 read_domains; + __u32 write_domain; }; struct drm_radeon_gem_wait_idle { - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; }; struct drm_radeon_gem_busy { - uint32_t handle; - uint32_t domain; + __u32 handle; + __u32 domain; }; struct drm_radeon_gem_pread { /** Handle for the object being read. */ - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; /** Offset into the object to read from */ - uint64_t offset; + __u64 offset; /** Length of data to read */ - uint64_t size; + __u64 size; /** Pointer to write the data into. */ /* void *, but pointers are not 32/64 compatible */ - uint64_t data_ptr; + __u64 data_ptr; }; struct drm_radeon_gem_pwrite { /** Handle for the object being written to. 
*/ - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; /** Offset into the object to write to */ - uint64_t offset; + __u64 offset; /** Length of data to write */ - uint64_t size; + __u64 size; /** Pointer to read the data from. */ /* void *, but pointers are not 32/64 compatible */ - uint64_t data_ptr; + __u64 data_ptr; }; /* Sets or returns a value associated with a buffer. */ struct drm_radeon_gem_op { - uint32_t handle; /* buffer */ - uint32_t op; /* RADEON_GEM_OP_* */ - uint64_t value; /* input or return value */ + __u32 handle; /* buffer */ + __u32 op; /* RADEON_GEM_OP_* */ + __u64 value; /* input or return value */ }; #define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 @@ -935,11 +935,11 @@ struct drm_radeon_gem_op { #define RADEON_VM_PAGE_SNOOPED (1 << 4) struct drm_radeon_gem_va { - uint32_t handle; - uint32_t operation; - uint32_t vm_id; - uint32_t flags; - uint64_t offset; + __u32 handle; + __u32 operation; + __u32 vm_id; + __u32 flags; + __u64 offset; }; #define RADEON_CHUNK_ID_RELOCS 0x01 @@ -961,29 +961,29 @@ struct drm_radeon_gem_va { /* 0 = normal, + = higher priority, - = lower priority */ struct drm_radeon_cs_chunk { - uint32_t chunk_id; - uint32_t length_dw; - uint64_t chunk_data; + __u32 chunk_id; + __u32 length_dw; + __u64 chunk_data; }; /* drm_radeon_cs_reloc.flags */ #define RADEON_RELOC_PRIO_MASK (0xf << 0) struct drm_radeon_cs_reloc { - uint32_t handle; - uint32_t read_domains; - uint32_t write_domain; - uint32_t flags; + __u32 handle; + __u32 read_domains; + __u32 write_domain; + __u32 flags; }; struct drm_radeon_cs { - uint32_t num_chunks; - uint32_t cs_id; - /* this points to uint64_t * which point to cs chunks */ - uint64_t chunks; + __u32 num_chunks; + __u32 cs_id; + /* this points to __u64 * which point to cs chunks */ + __u64 chunks; /* updates to the limits after this CS ioctl */ - uint64_t gart_limit; - uint64_t vram_limit; + __u64 gart_limit; + __u64 vram_limit; }; #define RADEON_INFO_DEVICE_ID 0x00 @@ -1042,9 +1042,9 @@ struct drm_radeon_cs { #define RADEON_INFO_GPU_RESET_COUNTER 0x26 struct drm_radeon_info { - uint32_t request; - uint32_t pad; - uint64_t value; + __u32 request; + __u32 pad; + __u64 value; }; /* Those correspond to the tile index to use, this is to explicitly state -- cgit v1.2.3 From 5b8573f7a0e97bbb444c51d129fe10edfde08b40 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 31 Aug 2014 16:20:20 +0200 Subject: via_drm.h: don't include non-existing via_drmclient.h Fixes compiler error: drm/via_drm.h:36:27: fatal error: via_drmclient.h: No such file or directory Signed-off-by: Mikko Rapeli --- include/uapi/drm/via_drm.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h index 45bc80c3714b..c8c053a12e93 100644 --- a/include/uapi/drm/via_drm.h +++ b/include/uapi/drm/via_drm.h @@ -33,9 +33,6 @@ #ifndef _VIA_DEFINES_ #define _VIA_DEFINES_ -#ifndef __KERNEL__ -#include "via_drmclient.h" -#endif #define VIA_NR_SAREA_CLIPRECTS 8 #define VIA_NR_XVMC_PORTS 10 -- cgit v1.2.3 From 21de2fda3cb31ed3ef1807310ac982a1b8fe7089 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Wed, 11 Mar 2015 03:07:51 +0100 Subject: include/uapi/drm/vmwgfx_drm.h: use __s32, __u32 and __u64 from linux/types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation errors like: error: unknown type name ‘uint32_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/vmwgfx_drm.h | 264 +++++++++++++++++++++--------------------- 1 file 
changed, 132 insertions(+), 132 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 05b204954d16..ef31f448fecf 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -111,9 +111,9 @@ enum drm_vmw_handle_type { */ struct drm_vmw_getparam_arg { - uint64_t value; - uint32_t param; - uint32_t pad64; + __u64 value; + __u32 param; + __u32 pad64; }; /*************************************************************************/ @@ -134,8 +134,8 @@ struct drm_vmw_getparam_arg { */ struct drm_vmw_context_arg { - int32_t cid; - uint32_t pad64; + __s32 cid; + __u32 pad64; }; /*************************************************************************/ @@ -165,7 +165,7 @@ struct drm_vmw_context_arg { * @mip_levels: Number of mip levels for each face. * An unused face should have 0 encoded. * @size_addr: Address of a user-space array of sruct drm_vmw_size - * cast to an uint64_t for 32-64 bit compatibility. + * cast to an __u64 for 32-64 bit compatibility. * The size of the array should equal the total number of mipmap levels. * @shareable: Boolean whether other clients (as identified by file descriptors) * may reference this surface. @@ -177,12 +177,12 @@ struct drm_vmw_context_arg { */ struct drm_vmw_surface_create_req { - uint32_t flags; - uint32_t format; - uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; - uint64_t size_addr; - int32_t shareable; - int32_t scanout; + __u32 flags; + __u32 format; + __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES]; + __u64 size_addr; + __s32 shareable; + __s32 scanout; }; /** @@ -197,7 +197,7 @@ struct drm_vmw_surface_create_req { */ struct drm_vmw_surface_arg { - int32_t sid; + __s32 sid; enum drm_vmw_handle_type handle_type; }; @@ -213,10 +213,10 @@ struct drm_vmw_surface_arg { */ struct drm_vmw_size { - uint32_t width; - uint32_t height; - uint32_t depth; - uint32_t pad64; + __u32 width; + __u32 height; + __u32 depth; + __u32 pad64; }; /** @@ -284,13 +284,13 @@ union drm_vmw_surface_reference_arg { /** * struct drm_vmw_execbuf_arg * - * @commands: User-space address of a command buffer cast to an uint64_t. + * @commands: User-space address of a command buffer cast to an __u64. * @command-size: Size in bytes of the command buffer. * @throttle-us: Sleep until software is less than @throttle_us * microseconds ahead of hardware. The driver may round this value * to the nearest kernel tick. * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an - * uint64_t. + * __u64. * @version: Allows expanding the execbuf ioctl parameters without breaking * backwards compatibility, since user-space will always tell the kernel * which version it uses. 
@@ -302,14 +302,14 @@ union drm_vmw_surface_reference_arg { #define DRM_VMW_EXECBUF_VERSION 2 struct drm_vmw_execbuf_arg { - uint64_t commands; - uint32_t command_size; - uint32_t throttle_us; - uint64_t fence_rep; - uint32_t version; - uint32_t flags; - uint32_t context_handle; - uint32_t pad64; + __u64 commands; + __u32 command_size; + __u32 throttle_us; + __u64 fence_rep; + __u32 version; + __u32 flags; + __u32 context_handle; + __u32 pad64; }; /** @@ -338,12 +338,12 @@ struct drm_vmw_execbuf_arg { */ struct drm_vmw_fence_rep { - uint32_t handle; - uint32_t mask; - uint32_t seqno; - uint32_t passed_seqno; - uint32_t pad64; - int32_t error; + __u32 handle; + __u32 mask; + __u32 seqno; + __u32 passed_seqno; + __u32 pad64; + __s32 error; }; /*************************************************************************/ @@ -373,8 +373,8 @@ struct drm_vmw_fence_rep { */ struct drm_vmw_alloc_dmabuf_req { - uint32_t size; - uint32_t pad64; + __u32 size; + __u32 pad64; }; /** @@ -391,11 +391,11 @@ struct drm_vmw_alloc_dmabuf_req { */ struct drm_vmw_dmabuf_rep { - uint64_t map_handle; - uint32_t handle; - uint32_t cur_gmr_id; - uint32_t cur_gmr_offset; - uint32_t pad64; + __u64 map_handle; + __u32 handle; + __u32 cur_gmr_id; + __u32 cur_gmr_offset; + __u32 pad64; }; /** @@ -428,8 +428,8 @@ union drm_vmw_alloc_dmabuf_arg { */ struct drm_vmw_unref_dmabuf_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ @@ -452,10 +452,10 @@ struct drm_vmw_unref_dmabuf_arg { */ struct drm_vmw_rect { - int32_t x; - int32_t y; - uint32_t w; - uint32_t h; + __s32 x; + __s32 y; + __u32 w; + __u32 h; }; /** @@ -477,21 +477,21 @@ struct drm_vmw_rect { */ struct drm_vmw_control_stream_arg { - uint32_t stream_id; - uint32_t enabled; + __u32 stream_id; + __u32 enabled; - uint32_t flags; - uint32_t color_key; + __u32 flags; + __u32 color_key; - uint32_t handle; - uint32_t offset; - int32_t format; - uint32_t size; - uint32_t width; - uint32_t height; - uint32_t pitch[3]; + __u32 handle; + __u32 offset; + __s32 format; + __u32 size; + __u32 width; + __u32 height; + __u32 pitch[3]; - uint32_t pad64; + __u32 pad64; struct drm_vmw_rect src; struct drm_vmw_rect dst; }; @@ -519,12 +519,12 @@ struct drm_vmw_control_stream_arg { */ struct drm_vmw_cursor_bypass_arg { - uint32_t flags; - uint32_t crtc_id; - int32_t xpos; - int32_t ypos; - int32_t xhot; - int32_t yhot; + __u32 flags; + __u32 crtc_id; + __s32 xpos; + __s32 ypos; + __s32 xhot; + __s32 yhot; }; /*************************************************************************/ @@ -542,8 +542,8 @@ struct drm_vmw_cursor_bypass_arg { */ struct drm_vmw_stream_arg { - uint32_t stream_id; - uint32_t pad64; + __u32 stream_id; + __u32 pad64; }; /*************************************************************************/ @@ -565,7 +565,7 @@ struct drm_vmw_stream_arg { /** * struct drm_vmw_get_3d_cap_arg * - * @buffer: Pointer to a buffer for capability data, cast to an uint64_t + * @buffer: Pointer to a buffer for capability data, cast to an __u64 * @size: Max size to copy * * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL @@ -573,9 +573,9 @@ struct drm_vmw_stream_arg { */ struct drm_vmw_get_3d_cap_arg { - uint64_t buffer; - uint32_t max_size; - uint32_t pad64; + __u64 buffer; + __u32 max_size; + __u32 pad64; }; /*************************************************************************/ @@ -624,14 +624,14 @@ struct drm_vmw_get_3d_cap_arg { */ struct drm_vmw_fence_wait_arg { - uint32_t 
handle; - int32_t cookie_valid; - uint64_t kernel_cookie; - uint64_t timeout_us; - int32_t lazy; - int32_t flags; - int32_t wait_options; - int32_t pad64; + __u32 handle; + __s32 cookie_valid; + __u64 kernel_cookie; + __u64 timeout_us; + __s32 lazy; + __s32 flags; + __s32 wait_options; + __s32 pad64; }; /*************************************************************************/ @@ -655,12 +655,12 @@ struct drm_vmw_fence_wait_arg { */ struct drm_vmw_fence_signaled_arg { - uint32_t handle; - uint32_t flags; - int32_t signaled; - uint32_t passed_seqno; - uint32_t signaled_flags; - uint32_t pad64; + __u32 handle; + __u32 flags; + __s32 signaled; + __u32 passed_seqno; + __u32 signaled_flags; + __u32 pad64; }; /*************************************************************************/ @@ -681,8 +681,8 @@ struct drm_vmw_fence_signaled_arg { */ struct drm_vmw_fence_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; @@ -703,9 +703,9 @@ struct drm_vmw_fence_arg { struct drm_vmw_event_fence { struct drm_event base; - uint64_t user_data; - uint32_t tv_sec; - uint32_t tv_usec; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; }; /* @@ -717,17 +717,17 @@ struct drm_vmw_event_fence { /** * struct drm_vmw_fence_event_arg * - * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if + * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if * the fence is not supposed to be referenced by user-space. * @user_info: Info to be delivered with the event. * @handle: Attach the event to this fence only. * @flags: A set of flags as defined above. */ struct drm_vmw_fence_event_arg { - uint64_t fence_rep; - uint64_t user_data; - uint32_t handle; - uint32_t flags; + __u64 fence_rep; + __u64 user_data; + __u32 handle; + __u32 flags; }; @@ -747,7 +747,7 @@ struct drm_vmw_fence_event_arg { * @sid: Surface id to present from. * @dest_x: X placement coordinate for surface. * @dest_y: Y placement coordinate for surface. - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. * @num_clips: Number of cliprects given relative to the framebuffer origin, * in the same coordinate space as the frame buffer. * @pad64: Unused 64-bit padding. @@ -756,13 +756,13 @@ struct drm_vmw_fence_event_arg { */ struct drm_vmw_present_arg { - uint32_t fb_id; - uint32_t sid; - int32_t dest_x; - int32_t dest_y; - uint64_t clips_ptr; - uint32_t num_clips; - uint32_t pad64; + __u32 fb_id; + __u32 sid; + __s32 dest_x; + __s32 dest_y; + __u64 clips_ptr; + __u32 num_clips; + __u32 pad64; }; @@ -780,16 +780,16 @@ struct drm_vmw_present_arg { * struct drm_vmw_present_arg * @fb_id: fb_id to present / read back from. * @num_clips: Number of cliprects. - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. - * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. + * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64. * If this member is NULL, then the ioctl should not return a fence. 
*/ struct drm_vmw_present_readback_arg { - uint32_t fb_id; - uint32_t num_clips; - uint64_t clips_ptr; - uint64_t fence_rep; + __u32 fb_id; + __u32 num_clips; + __u64 clips_ptr; + __u64 fence_rep; }; /*************************************************************************/ @@ -805,14 +805,14 @@ struct drm_vmw_present_readback_arg { * struct drm_vmw_update_layout_arg * * @num_outputs: number of active connectors - * @rects: pointer to array of drm_vmw_rect cast to an uint64_t + * @rects: pointer to array of drm_vmw_rect cast to an __u64 * * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. */ struct drm_vmw_update_layout_arg { - uint32_t num_outputs; - uint32_t pad64; - uint64_t rects; + __u32 num_outputs; + __u32 pad64; + __u64 rects; }; @@ -849,10 +849,10 @@ enum drm_vmw_shader_type { */ struct drm_vmw_shader_create_arg { enum drm_vmw_shader_type shader_type; - uint32_t size; - uint32_t buffer_handle; - uint32_t shader_handle; - uint64_t offset; + __u32 size; + __u32 buffer_handle; + __u32 shader_handle; + __u64 offset; }; /*************************************************************************/ @@ -871,8 +871,8 @@ struct drm_vmw_shader_create_arg { * Input argument to the DRM_VMW_UNREF_SHADER ioctl. */ struct drm_vmw_shader_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ @@ -918,14 +918,14 @@ enum drm_vmw_surface_flags { * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. */ struct drm_vmw_gb_surface_create_req { - uint32_t svga3d_flags; - uint32_t format; - uint32_t mip_levels; + __u32 svga3d_flags; + __u32 format; + __u32 mip_levels; enum drm_vmw_surface_flags drm_surface_flags; - uint32_t multisample_count; - uint32_t autogen_filter; - uint32_t buffer_handle; - uint32_t array_size; + __u32 multisample_count; + __u32 autogen_filter; + __u32 buffer_handle; + __u32 array_size; struct drm_vmw_size base_size; }; @@ -944,11 +944,11 @@ struct drm_vmw_gb_surface_create_req { * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. */ struct drm_vmw_gb_surface_create_rep { - uint32_t handle; - uint32_t backup_size; - uint32_t buffer_handle; - uint32_t buffer_size; - uint64_t buffer_map_handle; + __u32 handle; + __u32 backup_size; + __u32 buffer_handle; + __u32 buffer_size; + __u64 buffer_map_handle; }; /** @@ -1061,8 +1061,8 @@ enum drm_vmw_synccpu_op { struct drm_vmw_synccpu_arg { enum drm_vmw_synccpu_op op; enum drm_vmw_synccpu_flags flags; - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ -- cgit v1.2.3 From 8e51012c4f2c6b4f2a318c71e1b43f552e6d2a2c Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Wed, 11 Mar 2015 03:12:23 +0100 Subject: include/uapi/drm/qxl_drm.h: use __s32, __u32 and __u64 from linux/types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation errors like: error: unknown type name ‘uint32_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/qxl_drm.h | 74 +++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h index ebebd36c4117..dface362d729 100644 --- a/include/uapi/drm/qxl_drm.h +++ b/include/uapi/drm/qxl_drm.h @@ -30,7 +30,7 @@ /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. 
* - * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel * compatibility Keep fields aligned to their size */ @@ -48,14 +48,14 @@ #define DRM_QXL_ALLOC_SURF 0x06 struct drm_qxl_alloc { - uint32_t size; - uint32_t handle; /* 0 is an invalid handle */ + __u32 size; + __u32 handle; /* 0 is an invalid handle */ }; struct drm_qxl_map { - uint64_t offset; /* use for mmap system call */ - uint32_t handle; - uint32_t pad; + __u64 offset; /* use for mmap system call */ + __u32 handle; + __u32 pad; }; /* @@ -68,59 +68,59 @@ struct drm_qxl_map { #define QXL_RELOC_TYPE_SURF 2 struct drm_qxl_reloc { - uint64_t src_offset; /* offset into src_handle or src buffer */ - uint64_t dst_offset; /* offset in dest handle */ - uint32_t src_handle; /* dest handle to compute address from */ - uint32_t dst_handle; /* 0 if to command buffer */ - uint32_t reloc_type; - uint32_t pad; + __u64 src_offset; /* offset into src_handle or src buffer */ + __u64 dst_offset; /* offset in dest handle */ + __u32 src_handle; /* dest handle to compute address from */ + __u32 dst_handle; /* 0 if to command buffer */ + __u32 reloc_type; + __u32 pad; }; struct drm_qxl_command { - uint64_t __user command; /* void* */ - uint64_t __user relocs; /* struct drm_qxl_reloc* */ - uint32_t type; - uint32_t command_size; - uint32_t relocs_num; - uint32_t pad; + __u64 __user command; /* void* */ + __u64 __user relocs; /* struct drm_qxl_reloc* */ + __u32 type; + __u32 command_size; + __u32 relocs_num; + __u32 pad; }; /* XXX: call it drm_qxl_commands? */ struct drm_qxl_execbuffer { - uint32_t flags; /* for future use */ - uint32_t commands_num; - uint64_t __user commands; /* struct drm_qxl_command* */ + __u32 flags; /* for future use */ + __u32 commands_num; + __u64 __user commands; /* struct drm_qxl_command* */ }; struct drm_qxl_update_area { - uint32_t handle; - uint32_t top; - uint32_t left; - uint32_t bottom; - uint32_t right; - uint32_t pad; + __u32 handle; + __u32 top; + __u32 left; + __u32 bottom; + __u32 right; + __u32 pad; }; #define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ #define QXL_PARAM_MAX_RELOCS 2 struct drm_qxl_getparam { - uint64_t param; - uint64_t value; + __u64 param; + __u64 value; }; /* these are one bit values */ struct drm_qxl_clientcap { - uint32_t index; - uint32_t pad; + __u32 index; + __u32 pad; }; struct drm_qxl_alloc_surf { - uint32_t format; - uint32_t width; - uint32_t height; - int32_t stride; - uint32_t handle; - uint32_t pad; + __u32 format; + __u32 width; + __u32 height; + __s32 stride; + __u32 handle; + __u32 pad; }; #define DRM_IOCTL_QXL_ALLOC \ -- cgit v1.2.3 From 89545d6d5cb0fab53211d6a36a8037c8020e1b7e Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Wed, 2 Dec 2015 23:17:47 +0100 Subject: include/uapi/linux/virtio_gpu.h: use __u8 from <linux/types.h> MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kernel headers exported to userspace are supposed to use these types.
Fixes userspace compilation error: error: unknown type name ‘uint8_t’ Signed-off-by: Mikko Rapeli --- include/uapi/linux/virtio_gpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index 7a63faa9065c..4b04ead26cd9 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -287,7 +287,7 @@ struct virtio_gpu_get_capset { /* VIRTIO_GPU_RESP_OK_CAPSET */ struct virtio_gpu_resp_capset { struct virtio_gpu_ctrl_hdr hdr; - uint8_t capset_data[]; + __u8 capset_data[]; }; #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) -- cgit v1.2.3 From 35d0f1d54ecd813973091fdb17baae43ea2b60b9 Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sun, 1 Feb 2015 15:10:41 +0100 Subject: include/uapi/linux/agpgart.h: include stdlib.h in userspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compiler error: error: unknown type name ‘size_t’ Signed-off-by: Mikko Rapeli --- include/uapi/linux/agpgart.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/agpgart.h b/include/uapi/linux/agpgart.h index 4e828cf487bc..f5251045181a 100644 --- a/include/uapi/linux/agpgart.h +++ b/include/uapi/linux/agpgart.h @@ -52,6 +52,7 @@ #ifndef __KERNEL__ #include <linux/types.h> +#include <stdlib.h> struct agp_version { __u16 major; -- cgit v1.2.3 From 29e08b0518fcddda2ae7b5014dfe8ac4eaa6ece7 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:39 +0100 Subject: drm: use __u{32,64} instead of uint{32,64}_t in virtgpu_drm.h Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/virtgpu_drm.h | 98 +++++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 49 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index fc9e2d6e5e2f..4bcfbe6be8d4 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -30,7 +30,7 @@ /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. * - * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel * compatibility Keep fields aligned to their size */ @@ -45,88 +45,88 @@ #define DRM_VIRTGPU_GET_CAPS 0x09 struct drm_virtgpu_map { - uint64_t offset; /* use for mmap system call */ - uint32_t handle; - uint32_t pad; + __u64 offset; /* use for mmap system call */ + __u32 handle; + __u32 pad; }; struct drm_virtgpu_execbuffer { - uint32_t flags; /* for future use */ - uint32_t size; - uint64_t command; /* void* */ - uint64_t bo_handles; - uint32_t num_bo_handles; - uint32_t pad; + __u32 flags; /* for future use */ + __u32 size; + __u64 command; /* void* */ + __u64 bo_handles; + __u32 num_bo_handles; + __u32 pad; }; #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ struct drm_virtgpu_getparam { - uint64_t param; - uint64_t value; + __u64 param; + __u64 value; }; /* NO_BO flags? NO resource flag? */ /* resource flag for y_0_top */ struct drm_virtgpu_resource_create { - uint32_t target; - uint32_t format; - uint32_t bind; - uint32_t width; - uint32_t height; - uint32_t depth; - uint32_t array_size; - uint32_t last_level; - uint32_t nr_samples; - uint32_t flags; - uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ?
*/ - uint32_t res_handle; /* returned by kernel */ - uint32_t size; /* validate transfer in the host */ - uint32_t stride; /* validate transfer in the host */ + __u32 target; + __u32 format; + __u32 bind; + __u32 width; + __u32 height; + __u32 depth; + __u32 array_size; + __u32 last_level; + __u32 nr_samples; + __u32 flags; + __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ + __u32 res_handle; /* returned by kernel */ + __u32 size; /* validate transfer in the host */ + __u32 stride; /* validate transfer in the host */ }; struct drm_virtgpu_resource_info { - uint32_t bo_handle; - uint32_t res_handle; - uint32_t size; - uint32_t stride; + __u32 bo_handle; + __u32 res_handle; + __u32 size; + __u32 stride; }; struct drm_virtgpu_3d_box { - uint32_t x; - uint32_t y; - uint32_t z; - uint32_t w; - uint32_t h; - uint32_t d; + __u32 x; + __u32 y; + __u32 z; + __u32 w; + __u32 h; + __u32 d; }; struct drm_virtgpu_3d_transfer_to_host { - uint32_t bo_handle; + __u32 bo_handle; struct drm_virtgpu_3d_box box; - uint32_t level; - uint32_t offset; + __u32 level; + __u32 offset; }; struct drm_virtgpu_3d_transfer_from_host { - uint32_t bo_handle; + __u32 bo_handle; struct drm_virtgpu_3d_box box; - uint32_t level; - uint32_t offset; + __u32 level; + __u32 offset; }; #define VIRTGPU_WAIT_NOWAIT 1 /* like it */ struct drm_virtgpu_3d_wait { - uint32_t handle; /* 0 is an invalid handle */ - uint32_t flags; + __u32 handle; /* 0 is an invalid handle */ + __u32 flags; }; struct drm_virtgpu_get_caps { - uint32_t cap_set_id; - uint32_t cap_set_ver; - uint64_t addr; - uint32_t size; - uint32_t pad; + __u32 cap_set_id; + __u32 cap_set_ver; + __u64 addr; + __u32 size; + __u32 pad; }; #define DRM_IOCTL_VIRTGPU_MAP \ -- cgit v1.2.3 From f95d3aa438e15caa3a7c2d82b45c9df0ae8bc3a3 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:40 +0100 Subject: drm: Kbuild: add amdgpu_drm.h to the installed headers Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov Acked-by: Alex Deucher CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index 38d437096c35..159551f49313 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild @@ -3,6 +3,7 @@ header-y += drm.h header-y += drm_fourcc.h header-y += drm_mode.h header-y += drm_sarea.h +header-y += amdgpu_drm.h header-y += exynos_drm.h header-y += i810_drm.h header-y += i915_drm.h -- cgit v1.2.3 From 2ce9dde0d47f2f94ab25c73a30596a7328bcdf1f Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Wed, 2 Dec 2015 23:44:33 +0100 Subject: include/uapi/drm/amdgpu_drm.h: use __u32 and __u64 from <linux/types.h> MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kernel headers exported to userspace are supposed to use these.
Fixes compilation errors in userspace: error: unknown type name ‘uint64_t’ error: unknown type name ‘uint32_t’ Signed-off-by: Mikko Rapeli --- include/uapi/drm/amdgpu_drm.h | 290 +++++++++++++++++++++--------------------- 1 file changed, 145 insertions(+), 145 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index e52933a73580..453a76af123c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -76,19 +76,19 @@ struct drm_amdgpu_gem_create_in { /** the requested memory size */ - uint64_t bo_size; + __u64 bo_size; /** physical start_addr alignment in bytes for some HW requirements */ - uint64_t alignment; + __u64 alignment; /** the requested memory domains */ - uint64_t domains; + __u64 domains; /** allocation flags */ - uint64_t domain_flags; + __u64 domain_flags; }; struct drm_amdgpu_gem_create_out { /** returned GEM object handle */ - uint32_t handle; - uint32_t _pad; + __u32 handle; + __u32 _pad; }; union drm_amdgpu_gem_create { @@ -105,28 +105,28 @@ union drm_amdgpu_gem_create { struct drm_amdgpu_bo_list_in { /** Type of operation */ - uint32_t operation; + __u32 operation; /** Handle of list or 0 if we want to create one */ - uint32_t list_handle; + __u32 list_handle; /** Number of BOs in list */ - uint32_t bo_number; + __u32 bo_number; /** Size of each element describing BO */ - uint32_t bo_info_size; + __u32 bo_info_size; /** Pointer to array describing BOs */ - uint64_t bo_info_ptr; + __u64 bo_info_ptr; }; struct drm_amdgpu_bo_list_entry { /** Handle of BO */ - uint32_t bo_handle; + __u32 bo_handle; /** New (if specified) BO priority to be used during migration */ - uint32_t bo_priority; + __u32 bo_priority; }; struct drm_amdgpu_bo_list_out { /** Handle of resource list */ - uint32_t list_handle; - uint32_t _pad; + __u32 list_handle; + __u32 _pad; }; union drm_amdgpu_bo_list { @@ -150,26 +150,26 @@ union drm_amdgpu_bo_list { struct drm_amdgpu_ctx_in { /** AMDGPU_CTX_OP_* */ - uint32_t op; + __u32 op; /** For future use, no flags defined so far */ - uint32_t flags; - uint32_t ctx_id; - uint32_t _pad; + __u32 flags; + __u32 ctx_id; + __u32 _pad; }; union drm_amdgpu_ctx_out { struct { - uint32_t ctx_id; - uint32_t _pad; + __u32 ctx_id; + __u32 _pad; } alloc; struct { /** For future use, no flags defined so far */ - uint64_t flags; + __u64 flags; /** Number of resets caused by this context so far. */ - uint32_t hangs; + __u32 hangs; /** Reset status since the last call of the ioctl. 
*/ - uint32_t reset_status; + __u32 reset_status; } state; }; @@ -189,12 +189,12 @@ union drm_amdgpu_ctx { #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) struct drm_amdgpu_gem_userptr { - uint64_t addr; - uint64_t size; + __u64 addr; + __u64 size; /* AMDGPU_GEM_USERPTR_* */ - uint32_t flags; + __u32 flags; /* Resulting GEM handle */ - uint32_t handle; + __u32 handle; }; /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ @@ -226,28 +226,28 @@ struct drm_amdgpu_gem_userptr { /** The same structure is shared for input/output */ struct drm_amdgpu_gem_metadata { /** GEM Object handle */ - uint32_t handle; + __u32 handle; /** Do we want get or set metadata */ - uint32_t op; + __u32 op; struct { /** For future use, no flags defined so far */ - uint64_t flags; + __u64 flags; /** family specific tiling info */ - uint64_t tiling_info; - uint32_t data_size_bytes; - uint32_t data[64]; + __u64 tiling_info; + __u32 data_size_bytes; + __u32 data[64]; } data; }; struct drm_amdgpu_gem_mmap_in { /** the GEM object handle */ - uint32_t handle; - uint32_t _pad; + __u32 handle; + __u32 _pad; }; struct drm_amdgpu_gem_mmap_out { /** mmap offset from the vma offset manager */ - uint64_t addr_ptr; + __u64 addr_ptr; }; union drm_amdgpu_gem_mmap { @@ -257,18 +257,18 @@ union drm_amdgpu_gem_mmap { struct drm_amdgpu_gem_wait_idle_in { /** GEM object handle */ - uint32_t handle; + __u32 handle; /** For future use, no flags defined so far */ - uint32_t flags; + __u32 flags; /** Absolute timeout to wait */ - uint64_t timeout; + __u64 timeout; }; struct drm_amdgpu_gem_wait_idle_out { /** BO status: 0 - BO is idle, 1 - BO is busy */ - uint32_t status; + __u32 status; /** Returned current memory domain */ - uint32_t domain; + __u32 domain; }; union drm_amdgpu_gem_wait_idle { @@ -278,18 +278,18 @@ union drm_amdgpu_gem_wait_idle { struct drm_amdgpu_wait_cs_in { /** Command submission handle */ - uint64_t handle; + __u64 handle; /** Absolute timeout to wait */ - uint64_t timeout; - uint32_t ip_type; - uint32_t ip_instance; - uint32_t ring; - uint32_t ctx_id; + __u64 timeout; + __u32 ip_type; + __u32 ip_instance; + __u32 ring; + __u32 ctx_id; }; struct drm_amdgpu_wait_cs_out { /** CS status: 0 - CS completed, 1 - CS still busy */ - uint64_t status; + __u64 status; }; union drm_amdgpu_wait_cs { @@ -303,11 +303,11 @@ union drm_amdgpu_wait_cs { /* Sets or returns a value associated with a buffer. */ struct drm_amdgpu_gem_op { /** GEM object handle */ - uint32_t handle; + __u32 handle; /** AMDGPU_GEM_OP_* */ - uint32_t op; + __u32 op; /** Input or return value */ - uint64_t value; + __u64 value; }; #define AMDGPU_VA_OP_MAP 1 @@ -326,18 +326,18 @@ struct drm_amdgpu_gem_op { struct drm_amdgpu_gem_va { /** GEM object handle */ - uint32_t handle; - uint32_t _pad; + __u32 handle; + __u32 _pad; /** AMDGPU_VA_OP_* */ - uint32_t operation; + __u32 operation; /** AMDGPU_VM_PAGE_* */ - uint32_t flags; + __u32 flags; /** va address to assign . Must be correctly aligned.*/ - uint64_t va_address; + __u64 va_address; /** Specify offset inside of BO to assign. Must be correctly aligned.*/ - uint64_t offset_in_bo; + __u64 offset_in_bo; /** Specify mapping size. Must be correctly aligned. 
*/ - uint64_t map_size; + __u64 map_size; }; #define AMDGPU_HW_IP_GFX 0 @@ -354,24 +354,24 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 struct drm_amdgpu_cs_chunk { - uint32_t chunk_id; - uint32_t length_dw; - uint64_t chunk_data; + __u32 chunk_id; + __u32 length_dw; + __u64 chunk_data; }; struct drm_amdgpu_cs_in { /** Rendering context id */ - uint32_t ctx_id; + __u32 ctx_id; /** Handle of resource list associated with CS */ - uint32_t bo_list_handle; - uint32_t num_chunks; - uint32_t _pad; - /** this points to uint64_t * which point to cs chunks */ - uint64_t chunks; + __u32 bo_list_handle; + __u32 num_chunks; + __u32 _pad; + /** this points to __u64 * which point to cs chunks */ + __u64 chunks; }; struct drm_amdgpu_cs_out { - uint64_t handle; + __u64 handle; }; union drm_amdgpu_cs { @@ -388,32 +388,32 @@ union drm_amdgpu_cs { #define AMDGPU_IB_FLAG_PREAMBLE (1<<1) struct drm_amdgpu_cs_chunk_ib { - uint32_t _pad; + __u32 _pad; /** AMDGPU_IB_FLAG_* */ - uint32_t flags; + __u32 flags; /** Virtual address to begin IB execution */ - uint64_t va_start; + __u64 va_start; /** Size of submission */ - uint32_t ib_bytes; + __u32 ib_bytes; /** HW IP to submit to */ - uint32_t ip_type; + __u32 ip_type; /** HW IP index of the same type to submit to */ - uint32_t ip_instance; + __u32 ip_instance; /** Ring index to submit to */ - uint32_t ring; + __u32 ring; }; struct drm_amdgpu_cs_chunk_dep { - uint32_t ip_type; - uint32_t ip_instance; - uint32_t ring; - uint32_t ctx_id; - uint64_t handle; + __u32 ip_type; + __u32 ip_instance; + __u32 ring; + __u32 ctx_id; + __u64 handle; }; struct drm_amdgpu_cs_chunk_fence { - uint32_t handle; - uint32_t offset; + __u32 handle; + __u32 offset; }; struct drm_amdgpu_cs_chunk_data { @@ -486,83 +486,83 @@ struct drm_amdgpu_cs_chunk_data { /* Input structure for the INFO ioctl */ struct drm_amdgpu_info { /* Where the return value will be stored */ - uint64_t return_pointer; + __u64 return_pointer; /* The size of the return value. Just like "size" in "snprintf", * it limits how many bytes the kernel can write. */ - uint32_t return_size; + __u32 return_size; /* The query request id. */ - uint32_t query; + __u32 query; union { struct { - uint32_t id; - uint32_t _pad; + __u32 id; + __u32 _pad; } mode_crtc; struct { /** AMDGPU_HW_IP_* */ - uint32_t type; + __u32 type; /** * Index of the IP if there are more IPs of the same * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. */ - uint32_t ip_instance; + __u32 ip_instance; } query_hw_ip; struct { - uint32_t dword_offset; + __u32 dword_offset; /** number of registers to read */ - uint32_t count; - uint32_t instance; + __u32 count; + __u32 instance; /** For future use, no flags defined so far */ - uint32_t flags; + __u32 flags; } read_mmr_reg; struct { /** AMDGPU_INFO_FW_* */ - uint32_t fw_type; + __u32 fw_type; /** * Index of the IP if there are more IPs of * the same type. */ - uint32_t ip_instance; + __u32 ip_instance; /** * Index of the engine. Whether this is used depends * on the firmware type. (e.g. 
MEC, SDMA) */ - uint32_t index; - uint32_t _pad; + __u32 index; + __u32 _pad; } query_fw; }; }; struct drm_amdgpu_info_gds { /** GDS GFX partition size */ - uint32_t gds_gfx_partition_size; + __u32 gds_gfx_partition_size; /** GDS compute partition size */ - uint32_t compute_partition_size; + __u32 compute_partition_size; /** total GDS memory size */ - uint32_t gds_total_size; + __u32 gds_total_size; /** GWS size per GFX partition */ - uint32_t gws_per_gfx_partition; + __u32 gws_per_gfx_partition; /** GSW size per compute partition */ - uint32_t gws_per_compute_partition; + __u32 gws_per_compute_partition; /** OA size per GFX partition */ - uint32_t oa_per_gfx_partition; + __u32 oa_per_gfx_partition; /** OA size per compute partition */ - uint32_t oa_per_compute_partition; - uint32_t _pad; + __u32 oa_per_compute_partition; + __u32 _pad; }; struct drm_amdgpu_info_vram_gtt { - uint64_t vram_size; - uint64_t vram_cpu_accessible_size; - uint64_t gtt_size; + __u64 vram_size; + __u64 vram_cpu_accessible_size; + __u64 gtt_size; }; struct drm_amdgpu_info_firmware { - uint32_t ver; - uint32_t feature; + __u32 ver; + __u32 feature; }; #define AMDGPU_VRAM_TYPE_UNKNOWN 0 @@ -576,61 +576,61 @@ struct drm_amdgpu_info_firmware { struct drm_amdgpu_info_device { /** PCI Device ID */ - uint32_t device_id; + __u32 device_id; /** Internal chip revision: A0, A1, etc.) */ - uint32_t chip_rev; - uint32_t external_rev; + __u32 chip_rev; + __u32 external_rev; /** Revision id in PCI Config space */ - uint32_t pci_rev; - uint32_t family; - uint32_t num_shader_engines; - uint32_t num_shader_arrays_per_engine; + __u32 pci_rev; + __u32 family; + __u32 num_shader_engines; + __u32 num_shader_arrays_per_engine; /* in KHz */ - uint32_t gpu_counter_freq; - uint64_t max_engine_clock; - uint64_t max_memory_clock; + __u32 gpu_counter_freq; + __u64 max_engine_clock; + __u64 max_memory_clock; /* cu information */ - uint32_t cu_active_number; - uint32_t cu_ao_mask; - uint32_t cu_bitmap[4][4]; + __u32 cu_active_number; + __u32 cu_ao_mask; + __u32 cu_bitmap[4][4]; /** Render backend pipe mask. One render backend is CB+DB. */ - uint32_t enabled_rb_pipes_mask; - uint32_t num_rb_pipes; - uint32_t num_hw_gfx_contexts; - uint32_t _pad; - uint64_t ids_flags; + __u32 enabled_rb_pipes_mask; + __u32 num_rb_pipes; + __u32 num_hw_gfx_contexts; + __u32 _pad; + __u64 ids_flags; /** Starting virtual address for UMDs. */ - uint64_t virtual_address_offset; + __u64 virtual_address_offset; /** The maximum virtual address */ - uint64_t virtual_address_max; + __u64 virtual_address_max; /** Required alignment of virtual addresses. 
*/ - uint32_t virtual_address_alignment; + __u32 virtual_address_alignment; /** Page table entry - fragment size */ - uint32_t pte_fragment_size; - uint32_t gart_page_size; + __u32 pte_fragment_size; + __u32 gart_page_size; /** constant engine ram size*/ - uint32_t ce_ram_size; + __u32 ce_ram_size; /** video memory type info*/ - uint32_t vram_type; + __u32 vram_type; /** video memory bit width*/ - uint32_t vram_bit_width; + __u32 vram_bit_width; /* vce harvesting instance */ - uint32_t vce_harvest_config; + __u32 vce_harvest_config; }; struct drm_amdgpu_info_hw_ip { /** Version of h/w IP */ - uint32_t hw_ip_version_major; - uint32_t hw_ip_version_minor; + __u32 hw_ip_version_major; + __u32 hw_ip_version_minor; /** Capabilities */ - uint64_t capabilities_flags; + __u64 capabilities_flags; /** command buffer address start alignment*/ - uint32_t ib_start_alignment; + __u32 ib_start_alignment; /** command buffer size alignment*/ - uint32_t ib_size_alignment; + __u32 ib_size_alignment; /** Bitmask of available rings. Bit 0 means ring 0, etc. */ - uint32_t available_rings; - uint32_t _pad; + __u32 available_rings; + __u32 _pad; }; /* -- cgit v1.2.3 From 6a14d01b91b07f354f11a3774755ed0afca2225b Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:41 +0100 Subject: drm: include drm.h in armada_drm.h Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/armada_drm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h index 8dec3fdc99c7..6de7f0196ca0 100644 --- a/include/uapi/drm/armada_drm.h +++ b/include/uapi/drm/armada_drm.h @@ -9,6 +9,8 @@ #ifndef DRM_ARMADA_IOCTL_H #define DRM_ARMADA_IOCTL_H +#include "drm.h" + #define DRM_ARMADA_GEM_CREATE 0x00 #define DRM_ARMADA_GEM_MMAP 0x02 #define DRM_ARMADA_GEM_PWRITE 0x03 -- cgit v1.2.3 From 47f06c2f937f765042e8d41d7ee09f475a22e9d0 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:42 +0100 Subject: drm: drm_fourcc.h fix includes Instead of using linux/types.h, drm headers should use drm.h, in order to handle the portability issues in only one place. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/drm_fourcc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 0b69a7753558..998bd253faad 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -24,7 +24,7 @@ #ifndef DRM_FOURCC_H #define DRM_FOURCC_H -#include <linux/types.h> +#include "drm.h" #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ ((__u32)(c) << 16) | ((__u32)(d) << 24)) -- cgit v1.2.3 From abd5422db285f286a0d57747fb0edf192f880349 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:43 +0100 Subject: drm: drm_mode.h fix includes Instead of using linux/types.h, drm headers should use drm.h, in order to handle the portability issues in only one place.
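To make the failure mode these header fixes remove concrete, here is a hypothetical userspace consumer of one of the converted headers (illustrative only, not part of any patch in this series). With the old uint32_t/uint64_t members the build fails with "unknown type name" unless the program happens to include <stdint.h> first; with __u32/__u64 the types come from <linux/types.h>, which the header now pulls in through "drm.h":

/* test_uapi.c - hypothetical example, not taken from the series above */
#include <drm/virtgpu_drm.h>	/* note: no <stdint.h> anywhere in sight */

int main(void)
{
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_3D_FEATURES,	/* a __u64 after the conversion */
		.value = 0,
	};
	return (int)gp.value;
}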
Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/drm_mode.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 44ad794960ee..50adb46204c2 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -27,7 +27,7 @@ #ifndef _DRM_MODE_H #define _DRM_MODE_H -#include <linux/types.h> +#include "drm.h" #define DRM_DISPLAY_INFO_LEN 32 #define DRM_CONNECTOR_NAME_LEN 32 -- cgit v1.2.3 From c6da66dff79e3e99903d4435292643665c30b0c6 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:44 +0100 Subject: drm: fix inclusion of drm.h in drm_sarea.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/drm_sarea.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/drm_sarea.h b/include/uapi/drm/drm_sarea.h index 413a5642d49f..1d1a858a203d 100644 --- a/include/uapi/drm/drm_sarea.h +++ b/include/uapi/drm/drm_sarea.h @@ -32,7 +32,7 @@ #ifndef _DRM_SAREA_H_ #define _DRM_SAREA_H_ -#include <drm/drm.h> +#include "drm.h" /* SAREA area needs to be at least a page */ #if defined(__alpha__) -- cgit v1.2.3 From 19b1e97aefde813f92a40699200fe1cf73f07da2 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:45 +0100 Subject: drm: fix inclusion of drm.h in exynos_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/exynos_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index cf431b76c320..312c67d744ae 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -15,7 +15,7 @@ #ifndef _UAPI_EXYNOS_DRM_H_ #define _UAPI_EXYNOS_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /** * User-desired buffer creation information structure. -- cgit v1.2.3 From 13635d6120b9d6607fbf2622066bebfd7416e545 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:46 +0100 Subject: drm: fix inclusion of drm.h in i810_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/i810_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/i810_drm.h b/include/uapi/drm/i810_drm.h index 34736efd5824..bdb028723ded 100644 --- a/include/uapi/drm/i810_drm.h +++ b/include/uapi/drm/i810_drm.h @@ -1,7 +1,7 @@ #ifndef _I810_DRM_H_ #define _I810_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver.
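As background for this series (a general property of the C preprocessor, not something introduced by these patches): a quoted include is searched for in the directory of the including file before the -I and system paths, whereas an angle-bracket include only consults the configured include directories. A header tree copied wholesale into libdrm therefore keeps resolving its sibling drm.h:

/* Sketch of the two lookups, as seen from inside i810_drm.h or its libdrm copy */
#include "drm.h"	/* searched first next to this header, wherever the tree lives */
/* #include <drm/drm.h> */	/* would only be found via a drm/ subdirectory on the include path */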
-- cgit v1.2.3 From 1049102ff72b294bbeb1a254fe4de4a558387b46 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:47 +0100 Subject: drm: fix inclusion of drm.h in i915_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/i915_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 484a9fb20479..07dcba2b002b 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -27,7 +27,7 @@ #ifndef _UAPI_I915_DRM_H_ #define _UAPI_I915_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. -- cgit v1.2.3 From 559003d067db195721656395cc765f0a16a3abf6 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:48 +0100 Subject: drm: fix inclusion of drm.h in mga_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/mga_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/mga_drm.h b/include/uapi/drm/mga_drm.h index 2375bfd6e5e9..fca817009e13 100644 --- a/include/uapi/drm/mga_drm.h +++ b/include/uapi/drm/mga_drm.h @@ -35,7 +35,7 @@ #ifndef __MGA_DRM_H__ #define __MGA_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (mga_sarea.h) -- cgit v1.2.3 From 06577d042d38b27ed975c49358ad7adf873d09ae Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:49 +0100 Subject: drm: fix inclusion of drm.h in msm_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/msm_drm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 75a232b9a970..e995ffbcf86a 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -18,8 +18,7 @@ #ifndef __MSM_DRM_H__ #define __MSM_DRM_H__ -#include <stdlib.h> -#include <drm/drm.h> +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints: -- cgit v1.2.3 From 7ef500e431ac2f3bf14f6c39704e5db01ab5a341 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:50 +0100 Subject: drm: fix inclusion of drm.h in omap_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports.
Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/omap_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 1d0b1172664e..0750c01bb480 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h @@ -20,7 +20,7 @@ #ifndef __OMAP_DRM_H__ #define __OMAP_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. -- cgit v1.2.3 From 8ca32846322254df34c33aeeb222f77edecfa113 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:51 +0100 Subject: drm: fix inclusion of drm.h in qxl_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/qxl_drm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h index dface362d729..4d1e32640463 100644 --- a/include/uapi/drm/qxl_drm.h +++ b/include/uapi/drm/qxl_drm.h @@ -24,8 +24,7 @@ #ifndef QXL_DRM_H #define QXL_DRM_H -#include <stddef.h> -#include "drm/drm.h" +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. -- cgit v1.2.3 From 355f47803af9de70eab4fa20c8527ae047d240b7 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:52 +0100 Subject: drm: fix inclusion of drm.h in r128_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/r128_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/r128_drm.h b/include/uapi/drm/r128_drm.h index 76b0aa3e8210..7a44c6500a7e 100644 --- a/include/uapi/drm/r128_drm.h +++ b/include/uapi/drm/r128_drm.h @@ -33,7 +33,7 @@ #ifndef __R128_DRM_H__ #define __R128_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the X server file (r128_sarea.h) -- cgit v1.2.3 From 3e2d2cdfa0fc277ec267a1fd4af41fdbb2f70255 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:53 +0100 Subject: drm: fix inclusion of drm.h in savage_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports.
Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/savage_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/savage_drm.h b/include/uapi/drm/savage_drm.h index 9dc9dc1a7753..574147489c60 100644 --- a/include/uapi/drm/savage_drm.h +++ b/include/uapi/drm/savage_drm.h @@ -26,7 +26,7 @@ #ifndef __SAVAGE_DRM_H__ #define __SAVAGE_DRM_H__ -#include <drm/drm.h> +#include "drm.h" #ifndef __SAVAGE_SAREA_DEFINES__ #define __SAVAGE_SAREA_DEFINES__ -- cgit v1.2.3 From 678205a6c403a9da70dcd12ca61ace8d91ed0351 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:54 +0100 Subject: drm: fix inclusion of drm.h in tegra_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/tegra_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h index 5391780c2b05..27d0b054aed0 100644 --- a/include/uapi/drm/tegra_drm.h +++ b/include/uapi/drm/tegra_drm.h @@ -23,7 +23,7 @@ #ifndef _UAPI_TEGRA_DRM_H_ #define _UAPI_TEGRA_DRM_H_ -#include <drm/drm.h> +#include "drm.h" #define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) -- cgit v1.2.3 From 6e82e9c85774a417af309497d0be5f125474d163 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:55 +0100 Subject: drm: fix inclusion of drm.h in virtgpu_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/virtgpu_drm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index 4bcfbe6be8d4..c74f1f90cb37 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -24,8 +24,7 @@ #ifndef VIRTGPU_DRM_H #define VIRTGPU_DRM_H -#include <stddef.h> -#include "drm/drm.h" +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. -- cgit v1.2.3 From 3a4197afc4e49083988e077d09dba88013f0e517 Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:56 +0100 Subject: drm: fix inclusion of drm.h in vmwgfx_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports.
Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/vmwgfx_drm.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index ef31f448fecf..5b68b4d10884 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -28,9 +28,7 @@ #ifndef __VMWGFX_DRM_H__ #define __VMWGFX_DRM_H__ -#ifndef __KERNEL__ -#include <drm/drm.h> -#endif +#include "drm.h" #define DRM_VMW_MAX_SURFACE_FACES 6 #define DRM_VMW_MAX_MIP_LEVELS 24 -- cgit v1.2.3 From d7e12cd7b8c6d7426a401ec03ddcb88382180ced Mon Sep 17 00:00:00 2001 From: Gabriel Laskar Date: Mon, 30 Nov 2015 15:10:57 +0100 Subject: drm: fix inclusion of drm.h in via_drm.h Using `#include "drm.h"` instead of `#include <drm/drm.h>` allows drm headers to be moved to another directory without changes, as with the libdrm imports. Signed-off-by: Gabriel Laskar Reviewed-by: Emil Velikov CC: Emil Velikov CC: Mikko Rapeli --- include/uapi/drm/via_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/via_drm.h b/include/uapi/drm/via_drm.h index c8c053a12e93..fa21ed185520 100644 --- a/include/uapi/drm/via_drm.h +++ b/include/uapi/drm/via_drm.h @@ -24,7 +24,7 @@ #ifndef _VIA_DRM_H_ #define _VIA_DRM_H_ -#include <drm/drm.h> +#include "drm.h" /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. -- cgit v1.2.3 From 633c9a840d0bf1cce690f3165bdacd8ab412949e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 9 Dec 2015 12:08:26 +0100 Subject: netfilter: nfnetlink: avoid recurrent netns lookups in call_batch Pass the net pointer to the call_batch callback functions so we can skip recurrent lookups.
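A minimal sketch of what a batch callback looks like under the new signature; the function name and the empty body are illustrative rather than taken from this patch:

static int nf_tables_example_batch(struct net *net, struct sock *nlsk,
				   struct sk_buff *skb,
				   const struct nlmsghdr *nlh,
				   const struct nlattr * const nla[])
{
	struct nft_ctx ctx;

	/* 'net' is handed down from nfnetlink's batch loop, so the
	 * callback no longer needs to re-derive it via sock_net(skb->sk). */
	nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla);
	return 0;
}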
Signed-off-by: Pablo Neira Ayuso Tested-by: Arturo Borrero Gonzalez --- include/linux/netfilter/nfnetlink.h | 2 +- net/netfilter/nf_tables_api.c | 96 +++++++++++++++++-------------------- net/netfilter/nfnetlink.c | 2 +- 3 files changed, 47 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 249d1bb01e03..5646b24bfc64 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -14,7 +14,7 @@ struct nfnl_callback { int (*call_rcu)(struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); - int (*call_batch)(struct sock *nl, struct sk_buff *skb, + int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); const struct nla_policy *policy; /* netlink attribute policy */ diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 93cc4737018f..f1002dcfa1c9 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -89,6 +89,7 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload) } static void nft_ctx_init(struct nft_ctx *ctx, + struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, struct nft_af_info *afi, @@ -96,7 +97,7 @@ static void nft_ctx_init(struct nft_ctx *ctx, struct nft_chain *chain, const struct nlattr * const *nla) { - ctx->net = sock_net(skb->sk); + ctx->net = net; ctx->afi = afi; ctx->table = table; ctx->chain = chain; @@ -672,15 +673,14 @@ err: return ret; } -static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_newtable(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); const struct nlattr *name; struct nft_af_info *afi; struct nft_table *table; - struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; u32 flags = 0; struct nft_ctx ctx; @@ -706,7 +706,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); return nf_tables_updtable(&ctx); } @@ -730,7 +730,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, INIT_LIST_HEAD(&table->sets); table->flags = flags; - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); if (err < 0) goto err3; @@ -810,18 +810,17 @@ out: return err; } -static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_deltable(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; struct nft_table *table; - struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; struct nft_ctx ctx; - nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); + nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla); if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) return nft_flush(&ctx, family); @@ -1221,8 +1220,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) } } -static int nf_tables_newchain(struct 
sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_newchain(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); @@ -1232,7 +1231,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, struct nft_chain *chain; struct nft_base_chain *basechain = NULL; struct nlattr *ha[NFTA_HOOK_MAX + 1]; - struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; struct net_device *dev = NULL; u8 policy = NF_ACCEPT; @@ -1313,7 +1311,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, return PTR_ERR(stats); } - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); if (trans == NULL) { @@ -1461,7 +1459,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, if (err < 0) goto err1; - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); if (err < 0) goto err2; @@ -1476,15 +1474,14 @@ err1: return err; } -static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_delchain(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain; - struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; struct nft_ctx ctx; @@ -1506,7 +1503,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, if (chain->use > 0) return -EBUSY; - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); return nft_delchain(&ctx); } @@ -2010,13 +2007,12 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, static struct nft_expr_info *info; -static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_newrule(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; - struct net *net = sock_net(skb->sk); struct nft_table *table; struct nft_chain *chain; struct nft_rule *rule, *old_rule = NULL; @@ -2075,7 +2071,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, return PTR_ERR(old_rule); } - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); n = 0; size = 0; @@ -2176,13 +2172,12 @@ err1: return err; } -static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_delrule(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; - struct net *net = sock_net(skb->sk); struct nft_table *table; struct nft_chain *chain = NULL; struct nft_rule *rule; @@ -2205,7 +2200,7 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, return PTR_ERR(chain); } - nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); + nft_ctx_init(&ctx, net, skb, nlh, 
afi, table, chain, nla); if (chain) { if (nla[NFTA_RULE_HANDLE]) { @@ -2344,12 +2339,11 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = { [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, }; -static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, +static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { - struct net *net = sock_net(skb->sk); const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi = NULL; struct nft_table *table = NULL; @@ -2371,7 +2365,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, return -ENOENT; } - nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); + nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); return 0; } @@ -2623,6 +2617,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + struct net *net = sock_net(skb->sk); const struct nft_set *set; struct nft_ctx ctx; struct sk_buff *skb2; @@ -2630,7 +2625,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, int err; /* Verify existence before starting dump */ - err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); if (err < 0) return err; @@ -2693,14 +2688,13 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, return 0; } -static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_newset(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); const struct nft_set_ops *ops; struct nft_af_info *afi; - struct net *net = sock_net(skb->sk); struct nft_table *table; struct nft_set *set; struct nft_ctx ctx; @@ -2798,7 +2792,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, if (IS_ERR(table)) return PTR_ERR(table); - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); if (IS_ERR(set)) { @@ -2882,8 +2876,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set nft_set_destroy(set); } -static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_delset(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); @@ -2896,7 +2890,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, if (nla[NFTA_SET_TABLE] == NULL) return -EINVAL; - err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); if (err < 0) return err; @@ -3024,7 +3018,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, }; -static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, +static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[], @@ -3033,7 +3027,6 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; struct nft_table *table; - struct net *net = 
sock_net(skb->sk); afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); if (IS_ERR(afi)) @@ -3045,7 +3038,7 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, if (!trans && (table->flags & NFT_TABLE_INACTIVE)) return -ENOENT; - nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); + nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); return 0; } @@ -3135,6 +3128,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) { + struct net *net = sock_net(skb->sk); const struct nft_set *set; struct nft_set_dump_args args; struct nft_ctx ctx; @@ -3150,8 +3144,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) if (err < 0) return err; - err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla, - false); + err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, + (void *)nla, false); if (err < 0) return err; @@ -3212,11 +3206,12 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + struct net *net = sock_net(skb->sk); const struct nft_set *set; struct nft_ctx ctx; int err; - err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); if (err < 0) return err; @@ -3528,11 +3523,10 @@ err1: return err; } -static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { - struct net *net = sock_net(skb->sk); const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; @@ -3541,7 +3535,7 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL; - err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true); if (err < 0) return err; @@ -3623,8 +3617,8 @@ err1: return err; } -static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, - const struct nlmsghdr *nlh, +static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, + struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nlattr *attr; @@ -3635,7 +3629,7 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL; - err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); if (err < 0) return err; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 46453ab318db..445590f2c673 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -381,7 +381,7 @@ replay: goto ack; if (nc->call_batch) { - err = nc->call_batch(net->nfnl, skb, nlh, + err = nc->call_batch(net, net->nfnl, skb, nlh, (const struct nlattr **)cda); } -- cgit v1.2.3 From 059393c5bdd1420bdf1bed2972f33196dff263ae Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 7 Dec 2015 10:11:11 +0000 Subject: irqchip/gic-v3: Add missing struct device_node declaration When the GICv3 header file is used in a C file that doesn't include any of the OF stuff, we end up with a bunch of ugly warnings. Let's keep GCC quiet by adding a forward declaration. 
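To illustrate why this works (illustrative sketch by the editor, not part of the patch): a forward declaration introduces 'struct device_node' as an incomplete type, which is all the compiler needs as long as the header only handles pointers to it, as the its_init() prototype below does: /* sketch: incomplete type is enough for pointer-only use */ struct device_node; /* forward declaration; no definition required */ int its_init(struct device_node *node, struct rdists *rdists, struct irq_domain *domain); Without the forward declaration, a C file that includes this header but none of the OF headers gets GCC's "'struct device_node' declared inside parameter list" warning, because the struct tag would otherwise be scoped to the prototype alone.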
Signed-off-by: Marc Zyngier Cc: Cc: Jason Cooper Link: http://lkml.kernel.org/r/1449483072-17694-2-git-send-email-marc.zyngier@arm.com Signed-off-by: Thomas Gleixner --- include/linux/irqchip/arm-gic-v3.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index c9ae0c6ec050..d5d798b35c1f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -330,6 +330,7 @@ struct rdists { }; struct irq_domain; +struct device_node; int its_cpu_init(void); int its_init(struct device_node *node, struct rdists *rdists, struct irq_domain *domain); -- cgit v1.2.3 From f98828769c8838f526703ef180b3088a714af2f9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 9 Dec 2015 16:19:31 +0200 Subject: drm: Pass 'name' to drm_crtc_init_with_planes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Done with coccinelle for the most part. However, it thinks '...' is part of the semantic patch, so I put an 'int DOTDOTDOT' placeholder in its place and got rid of it with sed afterwards. I didn't convert drm_crtc_init() since passing the varargs through would mean either cpp macros or va_list, and I figured we don't care about these legacy functions enough to warrant the extra pain. @@ identifier dev, crtc, primary, cursor, funcs; @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs + ,const char *name, int DOTDOTDOT ) { ... } @@ identifier dev, crtc, primary, cursor, funcs; @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs + ,const char *name, int DOTDOTDOT ); @@ expression E1, E2, E3, E4, E5; @@ drm_crtc_init_with_planes(E1, E2, E3, E4, E5 + ,NULL ) v2: Split crtc and plane changes apart Pass NULL for no-name instead of "" Leave drm_crtc_init() alone v3: Add ', or NULL...' 
to @name kernel doc (Jani) Annotate the function with __printf() attribute (Jani) Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449670771-2751-1-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/armada/armada_crtc.c | 2 +- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2 +- drivers/gpu/drm/drm_crtc.c | 4 +++- drivers/gpu/drm/drm_plane_helper.c | 3 ++- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 2 +- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/imx/imx-drm-core.c | 2 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 3 ++- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 3 ++- drivers/gpu/drm/omapdrm/omap_crtc.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2 +- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 2 +- drivers/gpu/drm/sti/sti_crtc.c | 2 +- drivers/gpu/drm/tegra/dc.c | 2 +- drivers/gpu/drm/vc4/vc4_crtc.c | 2 +- drivers/gpu/drm/virtio/virtgpu_display.c | 2 +- include/drm/drm_crtc.h | 12 +++++++----- 18 files changed, 29 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index cebcab560626..c3f3a7031bb1 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1223,7 +1223,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, } ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, - &armada_crtc_funcs); + &armada_crtc_funcs, NULL); if (ret) goto err_crtc_init; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 9f6e234e7029..468a14f266a7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -344,7 +344,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) ret = drm_crtc_init_with_planes(dev, &crtc->base, &planes->primary->base, planes->cursor ? &planes->cursor->base : NULL, - &atmel_hlcdc_crtc_funcs); + &atmel_hlcdc_crtc_funcs, NULL); if (ret < 0) goto fail; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 99e7efebedda..27922bbcde35 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -657,6 +657,7 @@ DEFINE_WW_CLASS(crtc_ww_class); * @primary: Primary plane for CRTC * @cursor: Cursor plane for CRTC * @funcs: callbacks for the new CRTC + * @name: printf style format string for the CRTC name, or NULL for default name * * Inits a new object created as base part of a driver crtc object. * @@ -666,7 +667,8 @@ DEFINE_WW_CLASS(crtc_ww_class); int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs) + const struct drm_crtc_funcs *funcs, + const char *name, ...) 
{ struct drm_mode_config *config = &dev->mode_config; int ret; diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 8455e996dd9c..f5a4273e71b5 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -402,7 +402,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary; primary = create_primary_plane(dev); - return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); + return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, + NULL); } EXPORT_SYMBOL(drm_crtc_init); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index b3ba27fd9a6b..9d30a0fa3248 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -150,7 +150,7 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, private->crtc[pipe] = crtc; ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL, - &exynos_crtc_funcs); + &exynos_crtc_funcs, NULL); if (ret < 0) goto err_crtc; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 82a3d311e164..d8ab8f0af10c 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -175,7 +175,7 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL, - &fsl_dcu_drm_crtc_funcs); + &fsl_dcu_drm_crtc_funcs, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 696f7543d264..5a3370798ba3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14120,7 +14120,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) goto fail; ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, - cursor, &intel_crtc_funcs); + cursor, &intel_crtc_funcs, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 882cf3d4b7a8..09e20ea69419 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -373,7 +373,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, - imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); + imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL); return 0; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 6ac9aa165768..28df397c3b04 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -678,7 +678,8 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, drm_flip_work_init(&mdp4_crtc->unref_cursor_work, "unref cursor", unref_cursor_worker); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs); + drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs, + NULL); drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); plane->crtc = crtc; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 7f9f4ac88029..20cee5ce4071 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -797,7 +797,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 
snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", pipe2name(mdp5_plane_pipe(plane)), id); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); + drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, + NULL); drm_flip_work_init(&mdp5_crtc->unref_cursor_work, "unref cursor", unref_cursor_worker); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index ad09590e8a46..2ed0754ed19e 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -524,7 +524,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, omap_crtc->mgr = omap_dss_get_overlay_manager(channel); ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, - &omap_crtc_funcs); + &omap_crtc_funcs, NULL); if (ret < 0) { kfree(omap_crtc); return NULL; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 48cb19949ca3..88a4b706be16 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -613,7 +613,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, &rgrp->planes[index % 2].plane, - NULL, &crtc_funcs); + NULL, &crtc_funcs, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 03c47eeadc81..8e89e80ec906 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1492,7 +1492,7 @@ static int vop_create_crtc(struct vop *vop) } ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, - &vop_crtc_funcs); + &vop_crtc_funcs, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 3ae09dcd4fd8..de11c7cfb02f 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -367,7 +367,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, int res; res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, - &sti_crtc_funcs); + &sti_crtc_funcs, NULL); if (res) { DRM_ERROR("Can't initialze CRTC\n"); return -EINVAL; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index e9f24a85a103..be894103fa49 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1732,7 +1732,7 @@ static int tegra_dc_init(struct host1x_client *client) } err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor, - &tegra_crtc_funcs); + &tegra_crtc_funcs, NULL); if (err < 0) goto cleanup; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 265064c62d49..2168a99d59aa 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -606,7 +606,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) } drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane, - &vc4_crtc_funcs); + &vc4_crtc_funcs, NULL); drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs); primary_plane->crtc = crtc; cursor_plane->crtc = crtc; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 623c98cf024d..ef4cef0c8ece 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -431,7 +431,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) if (IS_ERR(plane)) return PTR_ERR(plane); drm_crtc_init_with_planes(dev, crtc, plane, NULL, - 
&virtio_gpu_crtc_funcs); + &virtio_gpu_crtc_funcs, NULL); drm_mode_crtc_set_gamma_size(crtc, 256); drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); plane->crtc = crtc; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 4f587a5bc88f..f0127e7b0ee2 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -2144,11 +2144,13 @@ struct drm_prop_enum_list { char *name; }; -extern int drm_crtc_init_with_planes(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_plane *primary, - struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs); +extern __printf(6, 7) +int drm_crtc_init_with_planes(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...); extern void drm_crtc_cleanup(struct drm_crtc *crtc); extern unsigned int drm_crtc_index(struct drm_crtc *crtc); -- cgit v1.2.3 From b0b3b7951114315d65398c27648705ca1c322faa Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 9 Dec 2015 16:19:55 +0200 Subject: drm: Pass 'name' to drm_universal_plane_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Done with coccinelle for the most part. It choked on msm/mdp/mdp5/mdp5_plane.c like so: "BAD:!!!!! enum drm_plane_type type;" No idea how to deal with that, so I just fixed that up by hand. Also it thinks '...' is part of the semantic patch, so I put an 'int DOTDOTDOT' placeholder in its place and got rid of it with sed afterwards. I didn't convert drm_plane_init() since passing the varargs through would mean either cpp macros or va_list, and I figured we don't care about these legacy functions enough to warrant the extra pain. @@ typedef uint32_t; identifier dev, plane, possible_crtcs, funcs, formats, format_count, type; @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, enum drm_plane_type type + ,const char *name, int DOTDOTDOT ) { ... } @@ identifier dev, plane, possible_crtcs, funcs, formats, format_count, type; @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, enum drm_plane_type type + ,const char *name, int DOTDOTDOT ); @@ expression E1, E2, E3, E4, E5, E6, E7; @@ drm_universal_plane_init(E1, E2, E3, E4, E5, E6, E7 + ,NULL ) v2: Split crtc and plane changes apart Pass NULL for no-name instead of "" Leave drm_plane_init() alone v3: Add ', or NULL...'
to @name kernel doc (Jani) Annotate the function with __printf() attribute (Jani) Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449670795-2853-1-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/armada/armada_crtc.c | 2 +- drivers/gpu/drm/armada/armada_overlay.c | 2 +- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2 +- drivers/gpu/drm/drm_crtc.c | 6 ++++-- drivers/gpu/drm/drm_plane_helper.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_plane.c | 2 +- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 4 ++-- drivers/gpu/drm/i915/intel_sprite.c | 2 +- drivers/gpu/drm/imx/ipuv3-plane.c | 3 ++- drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 3 ++- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 2 +- drivers/gpu/drm/omapdrm/omap_plane.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 3 ++- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4 ++-- drivers/gpu/drm/sti/sti_cursor.c | 2 +- drivers/gpu/drm/sti/sti_gdp.c | 2 +- drivers/gpu/drm/sti/sti_hqvdp.c | 2 +- drivers/gpu/drm/tegra/dc.c | 9 ++++++--- drivers/gpu/drm/vc4/vc4_plane.c | 2 +- drivers/gpu/drm/virtio/virtgpu_plane.c | 2 +- include/drm/drm_crtc.h | 16 +++++++++------- 22 files changed, 43 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index c3f3a7031bb1..9bdc28cf927e 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1216,7 +1216,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, &armada_primary_plane_funcs, armada_primary_formats, ARRAY_SIZE(armada_primary_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); return ret; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 5c22b380f8f3..148e8a42b2c6 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -460,7 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) &armada_ovl_plane_funcs, armada_ovl_formats, ARRAY_SIZE(armada_ovl_formats), - DRM_PLANE_TYPE_OVERLAY); + DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { kfree(dplane); return ret; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index d0299aed517e..1ffe9c329c46 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -941,7 +941,7 @@ atmel_hlcdc_plane_create(struct drm_device *dev, ret = drm_universal_plane_init(dev, &plane->base, 0, &layer_plane_funcs, desc->formats->formats, - desc->formats->nformats, type); + desc->formats->nformats, type, NULL); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 27922bbcde35..20d67a06efce 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1152,6 +1152,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup); * @formats: array of supported formats (%DRM_FORMAT_*) * @format_count: number of elements in @formats * @type: type of plane (overlay, primary, cursor) + * @name: printf style format string for the plane name, or NULL for default name * * Initializes a plane object of type @type. 
* @@ -1162,7 +1163,8 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, - enum drm_plane_type type) + enum drm_plane_type type, + const char *name, ...) { struct drm_mode_config *config = &dev->mode_config; int ret; @@ -1242,7 +1244,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; return drm_universal_plane_init(dev, plane, possible_crtcs, funcs, - formats, format_count, type); + formats, format_count, type, NULL); } EXPORT_SYMBOL(drm_plane_init); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index f5a4273e71b5..369d2898ff9e 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -375,7 +375,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev) &drm_primary_helper_funcs, safe_modeset_formats, ARRAY_SIZE(safe_modeset_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); primary = NULL; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 179311760bb7..383ee1edb965 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -228,7 +228,7 @@ int exynos_plane_init(struct drm_device *dev, err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs, &exynos_plane_funcs, formats, fcount, - type); + type, NULL); if (err) { DRM_ERROR("failed to initialize plane\n"); return err; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 51daaea40b4d..4b13cf919575 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -249,7 +249,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) &fsl_dcu_drm_plane_funcs, fsl_dcu_drm_plane_formats, ARRAY_SIZE(fsl_dcu_drm_plane_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); primary = NULL; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5a3370798ba3..fc0d53a4eab3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13904,7 +13904,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, drm_universal_plane_init(dev, &primary->base, 0, &intel_plane_funcs, intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if (INTEL_INFO(dev)->gen >= 4) intel_create_rotation_property(dev, primary); @@ -14043,7 +14043,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, &intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR); + DRM_PLANE_TYPE_CURSOR, NULL); if (INTEL_INFO(dev)->gen >= 4) { if (!dev->mode_config.rotation_property) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 2b96f336589e..dbf421351b5c 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1123,7 +1123,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, &intel_plane_funcs, plane_formats, num_plane_formats, - DRM_PLANE_TYPE_OVERLAY); + 
DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { kfree(intel_plane); goto out; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index e2ff410bab74..591ba2f1ae03 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -401,7 +401,8 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, &ipu_plane_funcs, ipu_plane_formats, - ARRAY_SIZE(ipu_plane_formats), type); + ARRAY_SIZE(ipu_plane_formats), type, + NULL); if (ret) { DRM_ERROR("failed to initialize plane\n"); kfree(ipu_plane); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 30d57e74c42f..9f96dfe67769 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -397,7 +397,8 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev, type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, - mdp4_plane->formats, mdp4_plane->nformats, type); + mdp4_plane->formats, mdp4_plane->nformats, + type, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 81cd49045ffc..432c09836b0e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -904,7 +904,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, mdp5_plane->formats, mdp5_plane->nformats, - type); + type, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 3054bda72688..d5ecabd6c14c 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -366,7 +366,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, ret = drm_universal_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs, omap_plane->formats, - omap_plane->nformats, type); + omap_plane->nformats, type, NULL); if (ret < 0) goto error; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index ffa583712cd9..c3ed9522c0e1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -410,7 +410,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, &rcar_du_plane_funcs, formats, - ARRAY_SIZE(formats), type); + ARRAY_SIZE(formats), type, + NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 8e89e80ec906..dd8e0860ad4e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1478,7 +1478,7 @@ static int vop_create_crtc(struct vop *vop) 0, &vop_plane_funcs, win_data->phy->data_formats, win_data->phy->nformats, - win_data->type); + win_data->type, NULL); if (ret) { DRM_ERROR("failed to initialize plane\n"); goto err_cleanup_planes; @@ -1515,7 +1515,7 @@ static int vop_create_crtc(struct vop *vop) &vop_plane_funcs, win_data->phy->data_formats, win_data->phy->nformats, - win_data->type); + win_data->type, NULL); if (ret) { DRM_ERROR("failed to initialize overlay plane\n"); goto err_cleanup_crtc; diff --git 
a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index dd1032195051..807863106b8d 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -272,7 +272,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, &sti_plane_helpers_funcs, cursor_supported_formats, ARRAY_SIZE(cursor_supported_formats), - DRM_PLANE_TYPE_CURSOR); + DRM_PLANE_TYPE_CURSOR, NULL); if (res) { DRM_ERROR("Failed to initialize universal plane\n"); goto err_plane; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index c85dc7d6b005..f9a1d92c9d95 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -630,7 +630,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, &sti_plane_helpers_funcs, gdp_supported_formats, ARRAY_SIZE(gdp_supported_formats), - type); + type, NULL); if (res) { DRM_ERROR("Failed to initialize universal plane\n"); goto err; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index ea0690bc77d5..43861b52261d 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -973,7 +973,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, &sti_plane_helpers_funcs, hqvdp_supported_formats, ARRAY_SIZE(hqvdp_supported_formats), - DRM_PLANE_TYPE_OVERLAY); + DRM_PLANE_TYPE_OVERLAY, NULL); if (res) { DRM_ERROR("Failed to initialize universal plane\n"); return NULL; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index be894103fa49..1f5cb68357c7 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -660,7 +660,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, &tegra_primary_plane_funcs, formats, - num_formats, DRM_PLANE_TYPE_PRIMARY); + num_formats, DRM_PLANE_TYPE_PRIMARY, + NULL); if (err < 0) { kfree(plane); return ERR_PTR(err); @@ -827,7 +828,8 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, &tegra_cursor_plane_funcs, formats, - num_formats, DRM_PLANE_TYPE_CURSOR); + num_formats, DRM_PLANE_TYPE_CURSOR, + NULL); if (err < 0) { kfree(plane); return ERR_PTR(err); @@ -890,7 +892,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, &tegra_overlay_plane_funcs, formats, - num_formats, DRM_PLANE_TYPE_OVERLAY); + num_formats, DRM_PLANE_TYPE_OVERLAY, + NULL); if (err < 0) { kfree(plane); return ERR_PTR(err); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 887f3caad0be..f34c422733dc 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -317,7 +317,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, ret = drm_universal_plane_init(dev, plane, 0xff, &vc4_plane_funcs, formats, ARRAY_SIZE(formats), - type); + type, NULL); drm_plane_helper_add(plane, &vc4_plane_helper_funcs); diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 4a74129c5708..572fb351feab 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -107,7 +107,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, &virtio_gpu_plane_funcs, virtio_gpu_formats, ARRAY_SIZE(virtio_gpu_formats), - DRM_PLANE_TYPE_PRIMARY); + DRM_PLANE_TYPE_PRIMARY, NULL); if 
(ret) goto err_plane_init; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f0127e7b0ee2..a6f0e25cbd51 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -2214,13 +2214,15 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); } -extern int drm_universal_plane_init(struct drm_device *dev, - struct drm_plane *plane, - unsigned long possible_crtcs, - const struct drm_plane_funcs *funcs, - const uint32_t *formats, - unsigned int format_count, - enum drm_plane_type type); +extern __printf(8, 9) +int drm_universal_plane_init(struct drm_device *dev, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + enum drm_plane_type type, + const char *name, ...); extern int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, -- cgit v1.2.3 From 13a3d91f17a5f7ed2acd275d18b6acfdb131fb15 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 9 Dec 2015 16:20:18 +0200 Subject: drm: Pass 'name' to drm_encoder_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Done with coccinelle for the most part. However, it thinks '...' is part of the semantic patch, so I put an 'int DOTDOTDOT' placeholder in its place and got rid of it with sed afterwards. @@ identifier dev, encoder, funcs; @@ int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type + ,const char *name, int DOTDOTDOT ) { ... } @@ identifier dev, encoder, funcs; @@ int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type + ,const char *name, int DOTDOTDOT ); @@ expression E1, E2, E3, E4; @@ drm_encoder_init(E1, E2, E3, E4 + ,NULL ) v2: Add ', or NULL...' 
to @name kernel doc (Jani) Annotate the function with __printf() attribute (Jani) Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449670818-2966-1-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 14 +++++------ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 14 +++++------ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 14 +++++------ drivers/gpu/drm/ast/ast_mode.c | 2 +- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 2 +- drivers/gpu/drm/bochs/bochs_kms.c | 2 +- drivers/gpu/drm/cirrus/cirrus_mode.c | 2 +- drivers/gpu/drm/drm_crtc.c | 3 ++- drivers/gpu/drm/exynos/exynos_dp_core.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2 +- drivers/gpu/drm/exynos/exynos_hdmi.c | 2 +- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_crt.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_dp.c | 3 ++- drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 2 +- drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2 +- drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 2 +- drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2 +- drivers/gpu/drm/gma500/oaktrail_lvds.c | 2 +- drivers/gpu/drm/gma500/psb_intel_lvds.c | 2 +- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 3 ++- drivers/gpu/drm/i2c/tda998x_drv.c | 2 +- drivers/gpu/drm/i915/intel_crt.c | 2 +- drivers/gpu/drm/i915/intel_ddi.c | 2 +- drivers/gpu/drm/i915/intel_dp.c | 2 +- drivers/gpu/drm/i915/intel_dp_mst.c | 2 +- drivers/gpu/drm/i915/intel_dsi.c | 3 ++- drivers/gpu/drm/i915/intel_dvo.c | 2 +- drivers/gpu/drm/i915/intel_hdmi.c | 2 +- drivers/gpu/drm/i915/intel_lvds.c | 2 +- drivers/gpu/drm/i915/intel_sdvo.c | 3 ++- drivers/gpu/drm/i915/intel_tv.c | 2 +- drivers/gpu/drm/imx/dw_hdmi-imx.c | 2 +- drivers/gpu/drm/imx/imx-ldb.c | 2 +- drivers/gpu/drm/imx/imx-tve.c | 2 +- drivers/gpu/drm/imx/parallel-display.c | 2 +- drivers/gpu/drm/mgag200/mgag200_mode.c | 2 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 2 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 2 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 2 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 2 +- drivers/gpu/drm/nouveau/dispnv04/dac.c | 3 ++- drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2 +- drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 3 ++- drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 3 ++- drivers/gpu/drm/nouveau/nv50_display.c | 6 ++--- drivers/gpu/drm/omapdrm/omap_encoder.c | 2 +- drivers/gpu/drm/qxl/qxl_display.c | 2 +- drivers/gpu/drm/radeon/atombios_encoders.c | 30 ++++++++++++++++-------- drivers/gpu/drm/radeon/radeon_dp_mst.c | 2 +- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 15 ++++++++---- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 2 +- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2 +- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2 +- drivers/gpu/drm/sti/sti_tvout.c | 7 +++--- drivers/gpu/drm/tegra/dsi.c | 2 +- drivers/gpu/drm/tegra/hdmi.c | 2 +- drivers/gpu/drm/tegra/rgb.c | 2 +- drivers/gpu/drm/tegra/sor.c | 2 +- drivers/gpu/drm/tilcdc/tilcdc_panel.c | 2 +- drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 2 +- drivers/gpu/drm/udl/udl_encoder.c | 3 ++- drivers/gpu/drm/vc4/vc4_hdmi.c | 2 +- drivers/gpu/drm/virtio/virtgpu_display.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 2 +- include/drm/drm_crtc.h | 9 +++---- 71 files changed, 135 insertions(+), 109 deletions(-) (limited 
to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4dcc8fba5792..093599aba64b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3729,7 +3729,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: @@ -3740,15 +3740,15 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } else { drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs); @@ -3766,13 +3766,13 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, amdgpu_encoder->is_ext_encoder = true; if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); else drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 8f1e51128b33..8701661a8868 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3722,7 +3722,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: @@ -3733,15 +3733,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } else { drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - 
DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); @@ -3759,13 +3759,13 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, amdgpu_encoder->is_ext_encoder = true; if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); else drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 42d954dc436d..d0e128c24813 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3659,7 +3659,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: @@ -3670,15 +3670,15 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { amdgpu_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } else { drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); } drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs); @@ -3696,13 +3696,13 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, amdgpu_encoder->is_ext_encoder = true; if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); else drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs); break; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 69d19f3304a5..0123458cbd83 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -751,7 +751,7 @@ static int ast_encoder_init(struct drm_device *dev) return -ENOMEM; drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs); ast_encoder->base.possible_crtcs = 1; diff --git 
a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 067e4c144bd6..d1129000c5cf 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -256,7 +256,7 @@ static int atmel_hlcdc_create_panel_output(struct drm_device *dev, &atmel_hlcdc_panel_encoder_helper_funcs); ret = drm_encoder_init(dev, &panel->base.encoder, &atmel_hlcdc_panel_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 26bcd03a8cb6..a88be6dd34a4 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -196,7 +196,7 @@ static void bochs_encoder_init(struct drm_device *dev) encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs); } diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 61385f2298bf..276719e52153 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -489,7 +489,7 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev) encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs); return encoder; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 20d67a06efce..b1111ea2b29d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1077,6 +1077,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all); * @encoder: the encoder to init * @funcs: callbacks for this encoder * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name * * Initialises a preallocated encoder. Encoder should be * subclassed as part of driver encoder objects. @@ -1087,7 +1088,7 @@ EXPORT_SYMBOL(drm_connector_unplug_all); int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, - int encoder_type) + int encoder_type, const char *name, ...) 
{ int ret; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 124fb9a56f02..cf17713907bd 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1313,7 +1313,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index c748b8790de3..1dbf8dca2d6b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -309,7 +309,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 12b03b364703..0a99160afaaa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1831,7 +1831,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 669362c53f49..c34d49a8fd84 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -473,7 +473,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 57b675563e94..ba3543e1af6e 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1793,7 +1793,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index fe8ab5da04fb..8780deba5e8a 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -57,7 +57,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, encoder->possible_crtcs = 1; ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 248c33a35ebf..d0717a85c7ec 100644 --- 
a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -273,7 +273,7 @@ void cdv_intel_crt_init(struct drm_device *dev, encoder = &gma_encoder->base; drm_encoder_init(dev, encoder, - &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); + &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 17cea400ae32..7bb1f1aff932 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -2020,7 +2020,8 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev encoder = &gma_encoder->base; drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); - drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index e7b1e99fe2cc..ddf2d7700759 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -313,7 +313,7 @@ void cdv_hdmi_init(struct drm_device *dev, DRM_MODE_CONNECTOR_DVID); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_HDMI; diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 9e648bcb81a7..813ef23a8054 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -652,7 +652,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &cdv_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index d4813e03f5ee..1a1acd3cb049 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -994,7 +994,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, p_funcs->encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 2310d879cdc2..2d18499d6060 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -654,7 +654,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &oaktrail_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 83bbc271bcfb..f7038f12ac76 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -323,7 +323,7 @@ void oaktrail_lvds_init(struct drm_device *dev, DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git 
a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 0d6143702b86..b1b93317d054 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -723,7 +723,7 @@ void psb_intel_lvds_init(struct drm_device *dev, drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); gma_connector_attach_encoder(gma_connector, gma_encoder); gma_encoder->type = INTEL_OUTPUT_LVDS; diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 86f28ac1e673..e787d376ba67 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -2526,7 +2526,8 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg) /* encoder type will be decided later */ gma_encoder = &psb_intel_sdvo->base; gma_encoder->type = INTEL_OUTPUT_SDVO; - drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0); + drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, + 0, NULL); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 79cb9208530e..a46248f0c9c3 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -1423,7 +1423,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data) drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs); ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); if (ret) goto err_encoder; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 27b3e610e8f0..912c0ac95f2a 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -798,7 +798,7 @@ void intel_crt_init(struct drm_device *dev) &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); intel_connector_attach_encoder(intel_connector, &crt->base); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 76ce7c2960b6..37efcd16ec81 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -3284,7 +3284,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) encoder = &intel_encoder->base; drm_encoder_init(dev, encoder, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index e1ceff7ab265..9b10526cc6dd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5977,7 +5977,7 @@ intel_dp_init(struct drm_device *dev, encoder = &intel_encoder->base; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->disable = intel_disable_dp; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 8c4e7dfe304c..e8d369d0a713 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -536,7 +536,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_mst->primary 
= intel_dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, - DRM_MODE_ENCODER_DPMST); + DRM_MODE_ENCODER_DPMST, NULL); intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->crtc_mask = 0x7; diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index efb5a27dd49c..add2cf541218 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -1152,7 +1152,8 @@ void intel_dsi_init(struct drm_device *dev) connector = &intel_connector->base; - drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); + drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, + NULL); /* XXX: very likely not all of these are needed */ intel_encoder->compute_config = intel_dsi_compute_config; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 7161deb2aed8..286baec979c8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -429,7 +429,7 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder = &intel_dvo->base; drm_encoder_init(dev, &intel_encoder->base, - &intel_dvo_enc_funcs, encoder_type); + &intel_dvo_enc_funcs, encoder_type, NULL); intel_encoder->disable = intel_disable_dvo; intel_encoder->enable = intel_enable_dvo; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index bdd462e7c690..6dd1e09a894b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2165,7 +2165,7 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 61f1145f6579..0da0240caf81 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -1025,7 +1025,7 @@ void intel_lvds_init(struct drm_device *dev) DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); intel_encoder->enable = intel_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 06679f164b3e..2e1da060b0e1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2978,7 +2978,8 @@ bool intel_sdvo_init(struct drm_device *dev, /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; - drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); + drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, + NULL); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 6bea78944cd6..948cbff6c62e 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1645,7 +1645,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, - DRM_MODE_ENCODER_TVDAC); + DRM_MODE_ENCODER_TVDAC, NULL); intel_encoder->compute_config = intel_tv_compute_config; intel_encoder->get_config = intel_tv_get_config; diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c 
b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 98605ea2ad9d..35fcf6b84537 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -251,7 +251,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); } diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index abacc8f67469..c79a61b67ded 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -422,7 +422,7 @@ static int imx_ldb_register(struct drm_device *drm, drm_encoder_helper_add(&imx_ldb_ch->encoder, &imx_ldb_encoder_helper_funcs); drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); drm_connector_helper_add(&imx_ldb_ch->connector, &imx_ldb_connector_helper_funcs); diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index f9597146dc67..e61a8fca77cd 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -508,7 +508,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, - encoder_type); + encoder_type, NULL); drm_connector_helper_add(&tve->connector, &imx_tve_connector_helper_funcs); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 2e9b9f1b5cd2..fcbe4d2eeabf 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -192,7 +192,7 @@ static int imx_pd_register(struct drm_device *drm, drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs, - DRM_MODE_ENCODER_NONE); + DRM_MODE_ENCODER_NONE, NULL); drm_connector_helper_add(&imxpd->connector, &imx_pd_connector_helper_funcs); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index c99d3fe12881..31802128dfbb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1538,7 +1538,7 @@ static struct drm_encoder *mga_encoder_init(struct drm_device *dev) encoder->possible_crtcs = 0x1; drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs, - DRM_MODE_ENCODER_DAC); + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs); return encoder; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index 89614c6a6c1b..a21df54cb50f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c @@ -262,7 +262,7 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) encoder = &mdp4_dtv_encoder->base; drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk"); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c index 4cd6e721aa0a..f824c643a7d1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ 
b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c @@ -460,7 +460,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, encoder = &mdp4_lcdc_encoder->base; drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); /* TODO: do we need different pll in other cases? */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index 8e6c9b598a57..1aa21dba663d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -326,7 +326,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, mdp5_cmd_enc->ctl = ctl; drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, - DRM_MODE_ENCODER_DSI); + DRM_MODE_ENCODER_DSI, NULL); drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index c9e32b08a7a0..278e307c36fd 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -354,7 +354,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, spin_lock_init(&mdp5_encoder->intf_lock); - drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type); + drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL); drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index 6c442def403d..b48eec395f07 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -549,7 +549,8 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry) else helper = &nv04_dac_helper_funcs; - drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC, + NULL); drm_encoder_helper_add(encoder, helper); encoder->possible_crtcs = entry->heads; diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 4c5fb89d74db..05bfd151d1d8 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -705,7 +705,7 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry) nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; - drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); + drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type, NULL); drm_encoder_helper_add(encoder, helper); encoder->possible_crtcs = entry->heads; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 91d689400d2e..54e9fb9eb5c0 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -223,7 +223,8 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) /* Initialize the common members */ encoder = to_drm_encoder(nv_encoder); - drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC, + NULL); drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs); nv_encoder->enc_save = drm_i2c_encoder_save; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index ff8c55866b18..d9644c0c5a83 100644 --- 
a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -814,7 +814,8 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry) tv_enc->base.dcb = entry; tv_enc->base.or = ffs(entry->or) - 1; - drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC, + NULL); drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index a240939beca4..44e1952582aa 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1718,7 +1718,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) encoder = to_drm_encoder(nv_encoder); encoder->possible_crtcs = dcbe->heads; encoder->possible_clones = 0; - drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type); + drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type, NULL); drm_encoder_helper_add(encoder, &nv50_dac_hfunc); drm_mode_connector_attach_encoder(connector, encoder); @@ -2126,7 +2126,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) encoder = to_drm_encoder(nv_encoder); encoder->possible_crtcs = dcbe->heads; encoder->possible_clones = 0; - drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type); + drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type, NULL); drm_encoder_helper_add(encoder, &nv50_sor_hfunc); drm_mode_connector_attach_encoder(connector, encoder); @@ -2306,7 +2306,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) encoder = to_drm_encoder(nv_encoder); encoder->possible_crtcs = dcbe->heads; encoder->possible_clones = 0; - drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type); + drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type, NULL); drm_encoder_helper_add(encoder, &nv50_pior_hfunc); drm_mode_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 7d9b32a0eb43..0c104ad7ef66 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -178,7 +178,7 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev, encoder = &omap_encoder->base; drm_encoder_init(dev, encoder, &omap_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs); return encoder; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index ebf7f3946a65..9be1af41e8d7 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -968,7 +968,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output) &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs, - DRM_MODE_ENCODER_VIRTUAL); + DRM_MODE_ENCODER_VIRTUAL, NULL); /* we get HPD via client monitors config */ connector->polled = DRM_CONNECTOR_POLL_HPD; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index bb292143997e..01b20e14a247 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -2767,23 +2767,27 @@ radeon_add_atom_encoder(struct drm_device *dev, case ENCODER_OBJECT_ID_INTERNAL_LVTM1: if (radeon_encoder->devices & 
(ATOM_DEVICE_LCD_SUPPORT)) { radeon_encoder->rmx_type = RMX_FULL; - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_LVDS, NULL); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); } else { - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_TVDAC, NULL); radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); break; @@ -2797,13 +2801,16 @@ radeon_add_atom_encoder(struct drm_device *dev, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_encoder->rmx_type = RMX_FULL; - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_LVDS, NULL); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } else { - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); @@ -2820,11 +2827,14 @@ radeon_add_atom_encoder(struct drm_device *dev, /* these are handled by the primary encoders */ radeon_encoder->is_ext_encoder = true; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_LVDS, NULL); else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); else - drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); break; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 744f5c49c664..94323f51ffcf 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ 
b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -641,7 +641,7 @@ radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector) } drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs, - DRM_MODE_ENCODER_DPMST); + DRM_MODE_ENCODER_DPMST, NULL); drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs); mst_enc = radeon_encoder->enc_priv; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 30de43366eae..88dc973fb209 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -1772,7 +1772,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_ switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: encoder->possible_crtcs = 0x1; - drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); + drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, + DRM_MODE_ENCODER_LVDS, NULL); drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); @@ -1781,12 +1782,14 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_ radeon_encoder->rmx_type = RMX_FULL; break; case ENCODER_OBJECT_ID_INTERNAL_TMDS1: - drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: - drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder); @@ -1794,7 +1797,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_ radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: - drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, + DRM_MODE_ENCODER_TVDAC, NULL); drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs); if (rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder); @@ -1802,7 +1806,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_ radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: - drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); if (!rdev->is_atom_bios) radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index d0ae1e8009c6..c08700757feb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -173,7 +173,7 @@ int 
rcar_du_encoder_init(struct rcar_du_device *rcdu, goto done; } else { ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, - encoder_type); + encoder_type, NULL); if (ret < 0) goto done; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 81da8419282b..11267de26a51 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -151,7 +151,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, goto error; ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); if (ret < 0) goto error; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 80d6fc8a5cee..525b5a81e96e 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -295,7 +295,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index e9272b0a8592..b80802f55143 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -613,7 +613,7 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev) encoder->possible_crtcs = 1; ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index c8a4c5dae2b6..f2afcf5438b8 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -512,7 +512,8 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev, drm_encoder->possible_clones = 1 << 0; drm_encoder_init(dev, drm_encoder, - &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS); + &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS, + NULL); drm_encoder_helper_add(drm_encoder, &sti_dvo_encoder_helper_funcs); @@ -564,7 +565,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev, drm_encoder->possible_clones = 1 << 0; drm_encoder_init(dev, drm_encoder, - &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC); + &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs); @@ -613,7 +614,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev, drm_encoder->possible_clones = 1 << 1; drm_encoder_init(dev, drm_encoder, - &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS); + &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index f0a138ef68ce..50d46ae3786b 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1023,7 +1023,7 @@ static int tegra_dsi_init(struct host1x_client *client) drm_encoder_init(drm, &dsi->output.encoder, &tegra_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI); + DRM_MODE_ENCODER_DSI, NULL); drm_encoder_helper_add(&dsi->output.encoder, &tegra_dsi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 52b32cbd9de6..b7ef4929e347 100644 
--- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1320,7 +1320,7 @@ static int tegra_hdmi_init(struct host1x_client *client) hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(&hdmi->output.encoder, &tegra_hdmi_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index bc9735b4ad60..e246334e0252 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -287,7 +287,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) output->connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); drm_encoder_helper_add(&output->encoder, &tegra_rgb_encoder_helper_funcs); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 3eff7cf75d25..3e012ee25242 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2178,7 +2178,7 @@ static int tegra_sor_init(struct host1x_client *client) sor->output.connector.dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, - encoder); + encoder, NULL); drm_encoder_helper_add(&sor->output.encoder, helpers); drm_mode_connector_attach_encoder(&sor->output.connector, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 0af8bed7ce1e..4dda6e2f464b 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -128,7 +128,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev, encoder->possible_crtcs = 1; ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs, - DRM_MODE_ENCODER_LVDS); + DRM_MODE_ENCODER_LVDS, NULL); if (ret < 0) goto fail; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 354c47ca6374..5052a8af7ecb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -138,7 +138,7 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev, encoder->possible_crtcs = 1; ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); if (ret < 0) goto fail; diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c index 4052c4656498..a181a647fcf9 100644 --- a/drivers/gpu/drm/udl/udl_encoder.c +++ b/drivers/gpu/drm/udl/udl_encoder.c @@ -73,7 +73,8 @@ struct drm_encoder *udl_encoder_init(struct drm_device *dev) if (!encoder) return NULL; - drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS, + NULL); drm_encoder_helper_add(encoder, &udl_helper_funcs); encoder->possible_crtcs = 1; return encoder; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index da9a36d6e1d1..c69c0460196b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -519,7 +519,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0); drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS); + DRM_MODE_ENCODER_TMDS, NULL); drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs); hdmi->connector = vc4_hdmi_connector_init(drm, 
hdmi->encoder); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index ef4cef0c8ece..588a7aec60bf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -441,7 +441,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs); drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs, - DRM_MODE_ENCODER_VIRTUAL); + DRM_MODE_ENCODER_VIRTUAL, NULL); drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); encoder->possible_crtcs = 1 << index; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index c1c09b338cc1..2aff5e51d926 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -363,7 +363,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) connector->status = vmw_du_connector_detect(connector, true); drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL); + DRM_MODE_ENCODER_VIRTUAL, NULL); drm_mode_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 5379dc4bbcac..6bb7af37934a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -597,7 +597,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) connector->status = vmw_du_connector_detect(connector, true); drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL); + DRM_MODE_ENCODER_VIRTUAL, NULL); drm_mode_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 05375a8cc129..45e72c2f15cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1145,7 +1145,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) connector->status = vmw_du_connector_detect(connector, false); drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL); + DRM_MODE_ENCODER_VIRTUAL, NULL); drm_mode_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index a6f0e25cbd51..5b5e6b650c11 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -2196,10 +2196,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge, void drm_bridge_pre_enable(struct drm_bridge *bridge); void drm_bridge_enable(struct drm_bridge *bridge); -extern int drm_encoder_init(struct drm_device *dev, - struct drm_encoder *encoder, - const struct drm_encoder_funcs *funcs, - int encoder_type); +extern __printf(5, 6) +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...); /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? -- cgit v1.2.3 From fa3ab4c2113c74a9eae9b6a718b167f7c8833e78 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 8 Dec 2015 18:41:53 +0200 Subject: drm: Add crtc->name and use it in debug messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Show a sensible name for the crtc in debug messages. The driver may supply its own name, otherwise the core generates the name ("crtc-0", "crtc-1" etc.). v2: kstrdup() the name passed by the caller (Jani) v3: Generate a default name if the driver doesn't supply one Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449592922-5545-6-git-send-email-ville.syrjala@linux.intel.com
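To make the naming described above concrete, here is a minimal, hypothetical sketch (not part of this patch); my_crtc, my_crtc_funcs, primary, cursor and pipe stand in for state a real driver would already have:

	/* Sketch only: pass a driver-chosen, printf-formatted name.
	 * Passing NULL here instead would make the core generate the
	 * default "crtc-0", "crtc-1", ... names described above. */
	ret = drm_crtc_init_with_planes(dev, &my_crtc->base, primary, cursor,
					&my_crtc_funcs, "pipe-%d", pipe);
	if (ret)
		return ret;

The chosen (or generated) string ends up in crtc->name and is what the [CRTC:%d:%s] debug messages below print.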
--- drivers/gpu/drm/drm_atomic.c | 41 ++++++++++++++------------- drivers/gpu/drm/drm_atomic_helper.c | 56 +++++++++++++++++++------------------ drivers/gpu/drm/drm_crtc.c | 34 ++++++++++++++++++++-- drivers/gpu/drm/drm_crtc_helper.c | 24 ++++++++-------- include/drm/drm_crtc.h | 2 ++ 5 files changed, 97 insertions(+), 60 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 07ab75e22b2b..feb66895e48c 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -288,8 +288,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, state->crtcs[index] = crtc; crtc_state->state = state; - DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n", - crtc->base.id, crtc_state, state); + DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", + crtc->base.id, crtc->name, crtc_state, state); return crtc_state; } @@ -486,8 +486,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, */ if (state->active && !state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -496,15 +496,15 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, * be able to trigger. */ if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(state->enable && !state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] enabled without mode blob\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(!state->enable && state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] disabled with mode blob\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -1004,8 +1004,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, } if (crtc) - DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n", - plane_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n", + plane_state, crtc->base.id, crtc->name); else DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", plane_state); @@ -1072,8 +1072,8 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, conn_state->crtc = crtc; if (crtc) - DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n", - conn_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n", + conn_state, crtc->base.id, crtc->name); else DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", conn_state); @@ -1112,8 +1112,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, if (ret) return ret; - DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n", - crtc->base.id, state); + DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n", + crtc->base.id, crtc->name, state); /* * Changed connectors are already in @state, so only need to look at the @@ -1193,8 +1193,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, num_connected_connectors++; } -
DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n", - state, num_connected_connectors, crtc->base.id); + DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d:%s]\n", + state, num_connected_connectors, + crtc->base.id, crtc->name); return num_connected_connectors; } @@ -1256,8 +1257,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { ret = drm_atomic_crtc_check(crtc, crtc_state); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -1268,8 +1269,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (!state->allow_modeset) { for_each_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", + crtc->base.id, crtc->name); return -EINVAL; } } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 110f3db8dd05..6ba3fe5639e4 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -143,9 +143,9 @@ steal_encoder(struct drm_atomic_state *state, */ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", encoder->base.id, encoder->name, - encoder_crtc->base.id); + encoder_crtc->base.id, encoder_crtc->name); crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); if (IS_ERR(crtc_state)) @@ -246,12 +246,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } if (new_encoder == connector_state->best_encoder) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", connector->base.id, connector->name, new_encoder->base.id, new_encoder->name, - connector_state->crtc->base.id); + connector_state->crtc->base.id, + connector_state->crtc->name); return 0; } @@ -285,12 +286,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) crtc_state = state->crtc_states[idx]; crtc_state->connectors_changed = true; - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", connector->base.id, connector->name, new_encoder->base.id, new_encoder->name, - connector_state->crtc->base.id); + connector_state->crtc->base.id, + connector_state->crtc->name); return 0; } @@ -374,8 +376,8 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->mode_fixup(crtc, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n", + crtc->base.id, crtc->name); return -EINVAL; } } @@ -422,14 +424,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, for_each_crtc_in_state(state, crtc, crtc_state, i) { if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { - DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", + crtc->base.id, crtc->name); crtc_state->mode_changed = true; } if (crtc->state->enable != crtc_state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n", - 
crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n", + crtc->base.id, crtc->name); /* * For clarity this assignment is done here, but @@ -470,18 +472,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, * a full modeset because update_connector_routing force that. */ if (crtc->state->active != crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n", + crtc->base.id, crtc->name); crtc_state->active_changed = true; } if (!drm_atomic_crtc_needs_modeset(crtc_state)) continue; - DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", - crtc->base.id, + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, crtc->name, crtc_state->enable ? 'y' : 'n', - crtc_state->active ? 'y' : 'n'); + crtc_state->active ? 'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) @@ -495,8 +497,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, crtc); if (crtc_state->enable != !!num_connectors) { - DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -559,8 +561,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(crtc, state->crtc_states[i]); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -673,8 +675,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); /* Right function depends upon target state. */ @@ -785,8 +787,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; if (crtc->state->enable && funcs->mode_set_nofb) { - DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); funcs->mode_set_nofb(crtc); } @@ -885,8 +887,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, funcs = crtc->helper_private; if (crtc->state->enable) { - DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); if (funcs->enable) funcs->enable(crtc); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index aade4640fa6d..efa57e8a99b5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -649,6 +649,18 @@ EXPORT_SYMBOL(drm_framebuffer_remove); DEFINE_WW_CLASS(crtc_ww_class); +static unsigned int drm_num_crtcs(struct drm_device *dev) +{ + unsigned int num = 0; + struct drm_crtc *tmp; + + drm_for_each_crtc(tmp, dev) { + num++; + } + + return num; +} + /** * drm_crtc_init_with_planes - Initialise a new CRTC object with * specified primary and cursor planes. 
@@ -684,6 +696,21 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, if (ret) return ret; + if (name) { + va_list ap; + + va_start(ap, name); + crtc->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", + drm_num_crtcs(dev)); + } + if (!crtc->name) { + drm_mode_object_put(dev, &crtc->base); + return -ENOMEM; + } + crtc->base.properties = &crtc->properties; list_add_tail(&crtc->head, &config->crtc_list); @@ -730,6 +757,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) if (crtc->state && crtc->funcs->atomic_destroy_state) crtc->funcs->atomic_destroy_state(crtc, crtc->state); + kfree(crtc->name); + memset(crtc, 0, sizeof(*crtc)); } EXPORT_SYMBOL(drm_crtc_cleanup); @@ -1814,7 +1843,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data, copied = 0; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; drm_for_each_crtc(crtc, dev) { - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", + crtc->base.id, crtc->name); if (put_user(crtc->base.id, crtc_id + copied)) { ret = -EFAULT; goto out; @@ -2659,7 +2689,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, ret = -ENOENT; goto out; } - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. */ diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index fadbdb33bdcc..a02a7f9a6a9d 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -339,7 +339,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, DRM_DEBUG_KMS("CRTC fixup failed\n"); goto done; } - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); crtc->hwmode = *adjusted_mode; @@ -519,11 +519,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) set->fb = NULL; if (set->fb) { - DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", - set->crtc->base.id, set->fb->base.id, - (int)set->num_connectors, set->x, set->y); + DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->crtc->name, + set->fb->base.id, + (int)set->num_connectors, set->x, set->y); } else { - DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n", + set->crtc->base.id, set->crtc->name); drm_crtc_helper_disable(set->crtc); return 0; } @@ -663,12 +665,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) connector->encoder->crtc = new_crtc; } if (new_crtc) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", - connector->base.id, connector->name, - new_crtc->base.id); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", + connector->base.id, connector->name, + new_crtc->base.id, new_crtc->name); } else { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", - connector->base.id, connector->name); + connector->base.id, connector->name); } } @@ -685,8 +687,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, save_set.fb)) { - DRM_ERROR("failed to set mode on [CRTC:%d]\n", - set->crtc->base.id); + DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n", + set->crtc->base.id, set->crtc->name); set->crtc->primary->fb = save_set.fb; ret = -EINVAL; goto fail; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 5b5e6b650c11..4bea0a1151bc 100644 --- 
a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -712,6 +712,8 @@ struct drm_crtc { struct device_node *port; struct list_head head; + char *name; + /* * crtc mutex * -- cgit v1.2.3 From 9f4c97a236d35b9fe45168a979adb5e1522c0968 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 8 Dec 2015 18:41:54 +0200 Subject: drm: Add plane->name and use it in debug prints MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Show a sensible name for the plane in debug messages. The driver may supply its own name, otherwise the core generates the name ("plane-0", "plane-1" etc.). v2: kstrdup() the name passed by the caller (Jani) v3: Generate a default name if the driver doesn't supply one Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449592922-5545-7-git-send-email-ville.syrjala@linux.intel.com
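As with the CRTC patch above, a brief hypothetical sketch (not part of this patch) of the plane-side call; my_plane, my_plane_funcs, formats, nformats and idx are placeholder driver state:

	/* Sketch only: name the plane explicitly; passing NULL instead
	 * yields the generated "plane-0", "plane-1", ... default. */
	ret = drm_universal_plane_init(dev, &my_plane->base, possible_crtcs,
				       &my_plane_funcs, formats, nformats,
				       DRM_PLANE_TYPE_OVERLAY,
				       "overlay-%d", idx);
	if (ret)
		return ret;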
--- drivers/gpu/drm/drm_atomic.c | 12 ++++++------ drivers/gpu/drm/drm_atomic_helper.c | 4 ++-- drivers/gpu/drm/drm_crtc.c | 30 ++++++++++++++++++++++++++++++ include/drm/drm_crtc.h | 2 ++ 4 files changed, 40 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index feb66895e48c..6a21e5c378c1 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -549,8 +549,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, state->planes[index] = plane; plane_state->state = state; - DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n", - plane->base.id, plane_state, state); + DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", + plane->base.id, plane->name, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; @@ -770,8 +770,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane, } if (plane_switching_crtc(state->state, plane, state)) { - DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", + plane->base.id, plane->name); return -EINVAL; } @@ -1248,8 +1248,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_plane_in_state(state, plane, plane_state, i) { ret = drm_atomic_plane_check(plane, plane_state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", + plane->base.id, plane->name); return ret; } } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 6ba3fe5639e4..63f925b75357 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -545,8 +545,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(plane, plane_state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", + plane->base.id, plane->name); return ret; } } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index efa57e8a99b5..62fa95fa5471 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1181,6 +1181,18 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) } EXPORT_SYMBOL(drm_encoder_cleanup); +static unsigned int drm_num_planes(struct drm_device *dev) +{ + unsigned int num = 0; + struct drm_plane *tmp; + + drm_for_each_plane(tmp, dev) { + num++; + } + + return num; +} + /** * drm_universal_plane_init - Initialize a new universal plane object * @dev: DRM device @@ -1224,6 +1236,22 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, return -ENOMEM; } + if (name) { + va_list ap; + + va_start(ap, name); + plane->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + plane->name = kasprintf(GFP_KERNEL, "plane-%d", + drm_num_planes(dev)); + } + if (!plane->name) { + kfree(plane->format_types); + drm_mode_object_put(dev, &plane->base); + return -ENOMEM; + } + memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); plane->format_count = format_count; plane->possible_crtcs = possible_crtcs; @@ -1314,6 +1342,8 @@ void drm_plane_cleanup(struct drm_plane *plane) if (plane->state && plane->funcs->atomic_destroy_state) plane->funcs->atomic_destroy_state(plane, plane->state); + kfree(plane->name); + memset(plane, 0, sizeof(*plane)); } EXPORT_SYMBOL(drm_plane_cleanup); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 4bea0a1151bc..c2f98ba2bb98 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1496,6 +1496,8 @@ struct drm_plane { struct drm_device *dev; struct list_head head; + char *name; + struct drm_modeset_lock mutex; struct drm_mode_object base; -- cgit v1.2.3 From 5ba894064d98547c82a1efd50eba40a92df777a1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 10 Dec 2015 22:39:08 +0200 Subject: drm: Rename MODE_UNVERIFIED to MODE_STALE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MODE_UNVERIFIED actually means that the mode came from a previous probe, and if the new probe doesn't produce a matching mode it will get pruned from the list. Rename the flag to MODE_STALE to better convey the meaning. v2: Rebased due to conflicts with Daniel's doc stuff Cc: Adam Jackson Signed-off-by: Ville Syrjälä Reviewed-by: Daniel Vetter Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449779948-10906-1-git-send-email-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_modes.c | 2 +- drivers/gpu/drm/drm_probe_helper.c | 4 ++-- include/drm/drm_modes.h | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 1892e615528a..6cd582fdcc4b 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1074,7 +1074,7 @@ static const char * const drm_mode_status_names[] = { MODE_STATUS(ONE_SIZE), MODE_STATUS(NO_REDUCED), MODE_STATUS(NO_STEREO), - MODE_STATUS(UNVERIFIED), + MODE_STATUS(STALE), MODE_STATUS(BAD), MODE_STATUS(ERROR), }; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index b691253aa752..3bdc8684fe58 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -146,9 +146,9 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - /* set all modes to the unverified state */ + /* set all old modes to the stale state */ list_for_each_entry(mode, &connector->modes, head) - mode->status = MODE_UNVERIFIED; + mode->status = MODE_STALE; old_status = connector->status; diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 9e6d7a1cd19b..d7445ccd958d 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -73,7 +73,7 @@ * @MODE_ONE_SIZE: only one resolution is supported * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking * @MODE_NO_STEREO: stereo modes not supported - * 
@MODE_UNVERIFIED: mode needs to reverified + * @MODE_STALE: mode has become stale * @MODE_BAD: unspecified reason * @MODE_ERROR: error condition * @@ -117,7 +117,7 @@ enum drm_mode_status { MODE_ONE_SIZE, MODE_NO_REDUCED, MODE_NO_STEREO, - MODE_UNVERIFIED = -3, + MODE_STALE = -3, MODE_BAD = -2, MODE_ERROR = -1 }; -- cgit v1.2.3 From 6af3e6561243f167dabc03f732d27ff5365cd4a4 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 3 Dec 2015 23:14:14 +0200 Subject: drm: Drop drm_helper_probe_single_connector_modes_nomerge() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that the mode type bit merge logic is fixed to only merge between new probed modes, hopefully we can eliminate the special case for qxl and virtio. That is, merge the mode type bits from all matching new probed modes, just like every other driver. qxl and virtio got excluded from the merging in commit 3fbd6439e463 ("drm: copy mode type in drm_mode_connector_list_update()") commit abce1ec9b08a ("Revert "drm: copy mode type in drm_mode_connector_list_update()"") commit b87577b7c768 ("drm: try harder to avoid regression when merging mode bits") Cc: Marc-André Lureau Cc: Dave Airlie Cc: Daniel Vetter Cc: Adam Jackson Signed-off-by: Ville Syrjälä [danvet: Resolve conflicts with doc updates.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_modes.c | 12 +--- drivers/gpu/drm/drm_probe_helper.c | 94 ++++++++++++-------------- drivers/gpu/drm/qxl/qxl_display.c | 2 +- drivers/gpu/drm/virtio/virtgpu_display.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 +- include/drm/drm_crtc_helper.h | 4 -- include/drm/drm_modes.h | 2 +- 7 files changed, 41 insertions(+), 77 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 1888e3cbdeaf..a15e26281a41 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1172,7 +1172,6 @@ EXPORT_SYMBOL(drm_mode_sort); /** * drm_mode_connector_list_update - update the mode list for the connector * @connector: the connector to update - * @merge_type_bits: whether to merge or overwrite type bits * * This moves the modes from the @connector probed_modes list * to the actual mode list. It compares the probed mode against the current @@ -1181,8 +1180,7 @@ EXPORT_SYMBOL(drm_mode_sort); * This is just a helper functions doesn't validate any modes itself and also * doesn't prune any invalid modes. Callers need to do that themselves.
*/ -void drm_mode_connector_list_update(struct drm_connector *connector, - bool merge_type_bits) +void drm_mode_connector_list_update(struct drm_connector *connector) { struct drm_display_mode *pmode, *pt; @@ -1215,14 +1213,10 @@ void drm_mode_connector_list_update(struct drm_connector *connector, drm_mode_copy(mode, pmode); } else if ((mode->type & DRM_MODE_TYPE_PREFERRED) == 0 && (pmode->type & DRM_MODE_TYPE_PREFERRED) != 0) { - if (merge_type_bits) - pmode->type |= mode->type; + pmode->type |= mode->type; drm_mode_copy(mode, pmode); } else { - if (merge_type_bits) - mode->type |= pmode->type; - else - mode->type = pmode->type; + mode->type |= pmode->type; } list_del(&pmode->head); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 3bdc8684fe58..483010f680d5 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -129,9 +129,39 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) } EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); - -static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY, bool merge_type_bits) +/** + * drm_helper_probe_single_connector_modes - get complete set of display modes + * @connector: connector to probe + * @maxX: max width for modes + * @maxY: max height for modes + * + * Based on the helper callbacks implemented by @connector in struct + * &drm_connector_helper_funcs try to detect all valid modes. Modes will first + * be added to the connector's probed_modes list, then culled (based on validity + * and the @maxX, @maxY parameters) and put into the normal modes list. + * + * Intended to be used as a generic implementation of the ->fill_modes() + * @connector vfunc for drivers that use the CRTC helpers for output mode + * filtering and detection. + * + * If the helper operation returns no mode, and if the connector status is + * connector_status_connected, standard VESA DMT modes up to 1024x768 are + * automatically added to the modes list by a call to + * drm_add_modes_noedid(). + * + * The function then filters out modes larger than @maxX and maxY if specified. + * It finally calls the optional connector ->mode_valid() helper operation for each + * mode in the probed list to check whether the mode is valid for the connector. + * + * Compared to drm_helper_probe_single_connector_modes_nomerge() this function + * merged the mode bits for the preferred mode, as a hack to work around some + * quirky issues on funky hardware. + * + * Returns: + * The number of modes found on @connector. + */ +int drm_helper_probe_single_connector_modes(struct drm_connector *connector, + uint32_t maxX, uint32_t maxY) { struct drm_device *dev = connector->dev; struct drm_display_mode *mode; @@ -223,7 +253,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect if (count == 0) goto prune; - drm_mode_connector_list_update(connector, merge_type_bits); + drm_mode_connector_list_update(connector); if (connector->interlace_allowed) mode_flags |= DRM_MODE_FLAG_INTERLACE; @@ -267,64 +297,8 @@ prune: return count; } - -/** - * drm_helper_probe_single_connector_modes - get complete set of display modes - * @connector: connector to probe - * @maxX: max width for modes - * @maxY: max height for modes - * - * Based on the helper callbacks implemented by @connector in struct - * &drm_connector_helper_funcs try to detect all valid modes. 
Modes will first - * be added to the connector's probed_modes list, then culled (based on validity - * and the @maxX, @maxY parameters) and put into the normal modes list. - * - * Intended to be used as a generic implementation of the ->fill_modes() - * @connector vfunc for drivers that use the CRTC helpers for output mode - * filtering and detection. - * - * If the helper operation returns no mode, and if the connector status is - * connector_status_connected, standard VESA DMT modes up to 1024x768 are - * automatically added to the modes list by a call to - * drm_add_modes_noedid(). - * - * The function then filters out modes larger than @maxX and maxY if specified. - * It finally calls the optional connector ->mode_valid() helper operation for each - * mode in the probed list to check whether the mode is valid for the connector. - * - * Compared to drm_helper_probe_single_connector_modes_nomerge() this function - * merged the mode bits for the preferred mode, as a hack to work around some - * quirky issues on funky hardware. - * - * Returns: - * The number of modes found on @connector. - */ -int drm_helper_probe_single_connector_modes(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY) -{ - return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, true); -} EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); -/** - * drm_helper_probe_single_connector_modes_nomerge - get complete set of display modes - * @connector: connector to probe - * @maxX: max width for modes - * @maxY: max height for modes - * - * This operates like drm_hehlper_probe_single_connector_modes() except it - * replaces the mode bits instead of merging them for preferred modes. - * - * Returns: - * The number of modes found on @connector. - */ -int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY) -{ - return drm_helper_probe_single_connector_modes_merge_bits(connector, maxX, maxY, false); -} -EXPORT_SYMBOL(drm_helper_probe_single_connector_modes_nomerge); - /** * drm_kms_helper_hotplug_event - fire off KMS hotplug events * @dev: drm_device whose connector state changed diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 9be1af41e8d7..86276519b2ef 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -923,7 +923,7 @@ static void qxl_conn_destroy(struct drm_connector *connector) static const struct drm_connector_funcs qxl_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = qxl_conn_detect, - .fill_modes = drm_helper_probe_single_connector_modes_nomerge, + .fill_modes = drm_helper_probe_single_connector_modes, .set_property = qxl_conn_set_property, .destroy = qxl_conn_destroy, }; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 588a7aec60bf..a165f03eaa79 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -400,7 +400,7 @@ static void virtio_gpu_conn_destroy(struct drm_connector *connector) static const struct drm_connector_funcs virtio_gpu_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = virtio_gpu_conn_detect, - .fill_modes = drm_helper_probe_single_connector_modes_nomerge, + .fill_modes = drm_helper_probe_single_connector_modes, .destroy = virtio_gpu_conn_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index d50596153679..9394c3535e85 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1538,7 +1538,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, drm_mode_probed_add(connector, mode); } - drm_mode_connector_list_update(connector, true); + drm_mode_connector_list_update(connector); /* Move the prefered mode first, help apps pick the right mode. */ drm_mode_sort(&connector->modes); diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index f94ff54ae25e..4b37afa2b73b 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -70,10 +70,6 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); -extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector - *connector, - uint32_t maxX, - uint32_t maxY); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern bool drm_helper_hpd_irq_event(struct drm_device *dev); diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index d7445ccd958d..625966a906f2 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -483,7 +483,7 @@ enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); -void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits); +void drm_mode_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool -- cgit v1.2.3 From 5e1033561da1152c57b97ee84371dba2b3d64c25 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Fri, 11 Dec 2015 09:16:38 -0800 Subject: ses: fix additional element traversal bug KASAN found that our additional element processing scripts drop off the end of the VPD page into unallocated space. The reason is that not every element has additional information but our traversal routines think they do, leading to them expecting far more additional information than is present. 
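As a rough illustration (simplified, with hypothetical names rather than the actual ses.c code), the pre-patch traversal advanced the additional-descriptor pointer once per element, unconditionally:

	/*
	 * Sketch of the broken loop: addl_desc_ptr moves forward for every
	 * element, including element types that never contribute an entry
	 * to the Additional Element Status page, so it eventually walks off
	 * the end of the VPD page into unallocated memory.
	 */
	for (i = 0; i < num_elements; i++) {
		if (addl_desc_ptr)
			addl_desc_ptr += addl_desc_ptr[1] + 2;
	}
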
Fix this by adding a gate to the traversal routine so that it only processes elements that are expected to have additional information (list is in SES-2 section 6.1.13.1: Additional Element Status diagnostic page overview) Reported-by: Pavel Tikhomirov Tested-by: Pavel Tikhomirov Cc: stable@vger.kernel.org Signed-off-by: James Bottomley --- drivers/scsi/ses.c | 10 +++++++++- include/linux/enclosure.h | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7d9cec50b77d..044d06410d4c 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -559,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, if (desc_ptr) desc_ptr += len; - if (addl_desc_ptr) + if (addl_desc_ptr && + /* only find additional descriptions for specific devices */ + (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || + type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || + /* these elements are optional */ + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || + type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || + type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) addl_desc_ptr += addl_desc_ptr[1] + 2; } diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 7be22da321f3..a4cf57cd0f75 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -29,7 +29,11 @@ /* A few generic types ... taken from ses-2 */ enum enclosure_component_type { ENCLOSURE_COMPONENT_DEVICE = 0x01, + ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, + ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, + ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, + ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, }; /* ses-2 common element status */ -- cgit v1.2.3 From ad87e03213b552a5c33d5e1e7a19a73768397010 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 10 Dec 2015 15:27:21 -0500 Subject: USB: add quirk for devices with broken LPM Some USB device / host controller combinations seem to have problems with Link Power Management. For example, Steinar found that his xHCI controller wouldn't handle bandwidth calculations correctly for two video cards simultaneously when LPM was enabled, even though the bus had plenty of bandwidth available. This patch introduces a new quirk flag for devices that should remain disabled for LPM, and creates quirk entries for Steinar's devices. Signed-off-by: Alan Stern Reported-by: Steinar H. Gunderson Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hub.c | 7 ++++++- drivers/usb/core/quirks.c | 6 ++++++ include/linux/usb/quirks.h | 3 +++ 3 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 585c3cb07da6..a5cc032ef77a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) int usb_device_supports_lpm(struct usb_device *udev) { + /* Some devices have trouble with LPM */ + if (udev->quirks & USB_QUIRK_NO_LPM) + return 0; + /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. 
*/ @@ -4512,6 +4516,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, goto fail; } + usb_detect_quirks(udev); + if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(udev); if (!retval) { @@ -4710,7 +4716,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (status < 0) goto loop; - usb_detect_quirks(udev); if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(1000); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index fcd6ac0c667f..6dc810bce295 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -202,6 +202,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1a0a, 0x0200), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Blackmagic Design Intensity Shuttle */ + { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, + + /* Blackmagic Design UltraStudio SDI */ + { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, + { } /* terminating entry must be last */ }; diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 9948c874e3f1..1d0043dc34e4 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -47,4 +47,7 @@ /* device generates spurious wakeup, ignore remote wakeup capability */ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) +/* device can't handle Link Power Management */ +#define USB_QUIRK_NO_LPM BIT(10) + #endif /* __LINUX_USB_QUIRKS_H */ -- cgit v1.2.3 From 56f047305dd4b6b61771ac4f523718e4111052a8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 Dec 2015 07:22:01 -0800 Subject: xfrm: add rcu grace period in xfrm_policy_destroy() We will soon switch sk->sk_policy[] to RCU protection, as SYNACK packets are sent while listener socket is not locked. This patch simply adds RCU grace period before struct xfrm_policy freeing, and the corresponding rcu_head in struct xfrm_policy. Signed-off-by: Eric Dumazet Acked-by: Steffen Klassert Signed-off-by: David S. Miller --- include/net/xfrm.h | 1 + net/xfrm/xfrm_policy.c | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 4a9c21f9b4ea..8bae1ef647cd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -548,6 +548,7 @@ struct xfrm_policy { u16 family; struct xfrm_sec_ctx *security; struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; + struct rcu_head rcu; }; static inline struct net *xp_net(const struct xfrm_policy *xp) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 18276f0cc32b..f57a5712cedd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -303,6 +303,14 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) } EXPORT_SYMBOL(xfrm_policy_alloc); +static void xfrm_policy_destroy_rcu(struct rcu_head *head) +{ + struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu); + + security_xfrm_policy_free(policy->security); + kfree(policy); +} + /* Destroy xfrm_policy: descendant resources must be released to this moment. 
*/ void xfrm_policy_destroy(struct xfrm_policy *policy) @@ -312,8 +320,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) BUG(); - security_xfrm_policy_free(policy->security); - kfree(policy); + call_rcu(&policy->rcu, xfrm_policy_destroy_rcu); } EXPORT_SYMBOL(xfrm_policy_destroy); -- cgit v1.2.3 From d188ba86dd07a72ebebfa22fe9cb0b0572e57740 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 Dec 2015 07:22:02 -0800 Subject: xfrm: add rcu protection to sk->sk_policy[] XFRM can deal with SYNACK messages, sent while listener socket is not locked. We add proper rcu protection to __xfrm_sk_clone_policy() and xfrm_sk_policy_lookup() This might serve as the first step to remove xfrm.xfrm_policy_lock use in fast path. Fixes: fa76ce7328b2 ("inet: get rid of central tcp/dccp listener timer") Signed-off-by: Eric Dumazet Acked-by: Steffen Klassert Signed-off-by: David S. Miller --- include/net/sock.h | 2 +- include/net/xfrm.h | 24 +++++++++++++++--------- net/core/sock.c | 2 +- net/xfrm/xfrm_policy.c | 37 +++++++++++++++++++++++++------------ 4 files changed, 42 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index b1d475b5db68..eaef41433d7a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -388,7 +388,7 @@ struct sock { struct socket_wq *sk_wq_raw; }; #ifdef CONFIG_XFRM - struct xfrm_policy *sk_policy[2]; + struct xfrm_policy __rcu *sk_policy[2]; #endif struct dst_entry *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 8bae1ef647cd..d6f6e5006ee9 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1142,12 +1142,14 @@ static inline int xfrm6_route_forward(struct sk_buff *skb) return xfrm_route_forward(skb, AF_INET6); } -int __xfrm_sk_clone_policy(struct sock *sk); +int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); -static inline int xfrm_sk_clone_policy(struct sock *sk) +static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { - if (unlikely(sk->sk_policy[0] || sk->sk_policy[1])) - return __xfrm_sk_clone_policy(sk); + sk->sk_policy[0] = NULL; + sk->sk_policy[1] = NULL; + if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) + return __xfrm_sk_clone_policy(sk, osk); return 0; } @@ -1155,12 +1157,16 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir); static inline void xfrm_sk_free_policy(struct sock *sk) { - if (unlikely(sk->sk_policy[0] != NULL)) { - xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX); + struct xfrm_policy *pol; + + pol = rcu_dereference_protected(sk->sk_policy[0], 1); + if (unlikely(pol != NULL)) { + xfrm_policy_delete(pol, XFRM_POLICY_MAX); sk->sk_policy[0] = NULL; } - if (unlikely(sk->sk_policy[1] != NULL)) { - xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1); + pol = rcu_dereference_protected(sk->sk_policy[1], 1); + if (unlikely(pol != NULL)) { + xfrm_policy_delete(pol, XFRM_POLICY_MAX+1); sk->sk_policy[1] = NULL; } } @@ -1170,7 +1176,7 @@ void xfrm_garbage_collect(struct net *net); #else static inline void xfrm_sk_free_policy(struct sock *sk) {} -static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; } +static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; } static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } static inline int xfrm6_policy_check(struct sock *sk, 
int dir, struct sk_buff *skb) diff --git a/net/core/sock.c b/net/core/sock.c index d01c8f42dbb2..765be835b06c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1550,7 +1550,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ is_charged = sk_filter_charge(newsk, filter); - if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) { + if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { /* It is still raw copy of parent, so invalidate * destructor and make plain sk_free() */ newsk->sk_destruct = NULL; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f57a5712cedd..948fa5560de5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1221,8 +1221,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, struct xfrm_policy *pol; struct net *net = sock_net(sk); + rcu_read_lock(); read_lock_bh(&net->xfrm.xfrm_policy_lock); - if ((pol = sk->sk_policy[dir]) != NULL) { + pol = rcu_dereference(sk->sk_policy[dir]); + if (pol != NULL) { bool match = xfrm_selector_match(&pol->selector, fl, sk->sk_family); int err = 0; @@ -1246,6 +1248,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, } out: read_unlock_bh(&net->xfrm.xfrm_policy_lock); + rcu_read_unlock(); return pol; } @@ -1314,13 +1317,14 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) #endif write_lock_bh(&net->xfrm.xfrm_policy_lock); - old_pol = sk->sk_policy[dir]; - sk->sk_policy[dir] = pol; + old_pol = rcu_dereference_protected(sk->sk_policy[dir], + lockdep_is_held(&net->xfrm.xfrm_policy_lock)); if (pol) { pol->curlft.add_time = get_seconds(); pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); xfrm_sk_policy_link(pol, dir); } + rcu_assign_pointer(sk->sk_policy[dir], pol); if (old_pol) { if (pol) xfrm_policy_requeue(old_pol, pol); @@ -1368,17 +1372,26 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) return newp; } -int __xfrm_sk_clone_policy(struct sock *sk) +int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { - struct xfrm_policy *p0 = sk->sk_policy[0], - *p1 = sk->sk_policy[1]; + const struct xfrm_policy *p; + struct xfrm_policy *np; + int i, ret = 0; - sk->sk_policy[0] = sk->sk_policy[1] = NULL; - if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) - return -ENOMEM; - if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) - return -ENOMEM; - return 0; + rcu_read_lock(); + for (i = 0; i < 2; i++) { + p = rcu_dereference(osk->sk_policy[i]); + if (p) { + np = clone_policy(p, i); + if (unlikely(!np)) { + ret = -ENOMEM; + break; + } + rcu_assign_pointer(sk->sk_policy[i], np); + } + } + rcu_read_unlock(); + return ret; } static int -- cgit v1.2.3 From f7fc6bc414121954c45c5f18b70e2a8717d0d5b4 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Thu, 10 Dec 2015 09:14:20 -0800 Subject: uapi: export ila.h The file ila.h used for lightweight tunnels is being used by iproute2 but is not exported yet. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/uapi/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 628e6e64c2fb..c2e5d6cb34e3 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -186,6 +186,7 @@ header-y += if_tunnel.h header-y += if_vlan.h header-y += if_x25.h header-y += igmp.h +header-y += ila.h header-y += in6.h header-y += inet_diag.h header-y += in.h -- cgit v1.2.3 From 98e89cf02aed11166698dd53c6f14865613babb3 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Fri, 11 Dec 2015 13:40:43 -0800 Subject: mm: kmemleak: mark kmemleak_init prototype as __init The kmemleak_init() definition in mm/kmemleak.c is marked __init but its prototype in include/linux/kmemleak.h is marked __ref since commit a6186d89c913 ("kmemleak: Mark the early log buffer as __initdata"). This causes a section mismatch which is reported as a warning when building with clang -Wsection, because kmemleak_init() is declared in section .ref.text but defined in .init.text. Fix this by marking kmemleak_init() prototype __init. Signed-off-by: Nicolas Iooss Signed-off-by: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kmemleak.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index d0a1f99e24e3..4894c6888bc6 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -25,7 +25,7 @@ #ifdef CONFIG_DEBUG_KMEMLEAK -extern void kmemleak_init(void) __ref; +extern void kmemleak_init(void) __init; extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) __ref; extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, -- cgit v1.2.3 From 86fffe4a61dd972d5a4e23260d530be6da02f614 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 Dec 2015 13:40:46 -0800 Subject: kernel: remove stop_machine() Kconfig dependency Currently the full stop_machine() routine is only enabled on SMP if module unloading is enabled, or if the CPUs are hotpluggable. This leads to configurations where stop_machine() is broken as it will then only run the callback on the local CPU with irqs disabled, and not stop the other CPUs or run the callback on them. For example, this breaks MTRR setup on x86 in certain configs since ea8596bb2d8d379 ("kprobes/x86: Remove unused text_poke_smp() and text_poke_smp_batch() functions") as the MTRR is only established on the boot CPU. This patch removes the Kconfig option for STOP_MACHINE and uses the SMP and HOTPLUG_CPU config options to compile the correct stop_machine() for the architecture, removing the false dependency on MODULE_UNLOAD in the process. Link: https://lkml.org/lkml/2014/10/8/124 References: https://bugs.freedesktop.org/show_bug.cgi?id=84794 Signed-off-by: Chris Wilson Acked-by: Ingo Molnar Cc: "Paul E. McKenney" Cc: Pranith Kumar Cc: Michal Hocko Cc: Vladimir Davydov Cc: Johannes Weiner Cc: H. 
Peter Anvin Cc: Tejun Heo Cc: Iulia Manda Cc: Andy Lutomirski Cc: Rusty Russell Cc: Peter Zijlstra Cc: Chuck Ebbert Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/stop_machine.h | 6 +++--- init/Kconfig | 7 ------- kernel/stop_machine.c | 4 ++-- 3 files changed, 5 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 0adedca24c5b..0e1b1540597a 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask, * grabbing every spinlock (and more). So the "read" side to such a * lock is anything which disables preemption. */ -#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) +#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) /** * stop_machine: freeze the machine on all CPUs and run this function @@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus); -#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ +#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ static inline int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) @@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, return stop_machine(fn, data, cpus); } -#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ +#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ #endif /* _LINUX_STOP_MACHINE */ diff --git a/init/Kconfig b/init/Kconfig index c24b6f767bf0..235c7a2c0d20 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -2030,13 +2030,6 @@ config INIT_ALL_POSSIBLE it was better to provide this option than to break all the archs and have several arch maintainers pursuing me down dark alleys. -config STOP_MACHINE - bool - default y - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU - help - Need stop_machine() primitive. - source "block/Kconfig" config PREEMPT_NOTIFIERS diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 867bc20e1ef1..a3bbaee77c58 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -531,7 +531,7 @@ static int __init cpu_stop_init(void) } early_initcall(cpu_stop_init); -#ifdef CONFIG_STOP_MACHINE +#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { @@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, return ret ?: done.ret; } -#endif /* CONFIG_STOP_MACHINE */ +#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */ -- cgit v1.2.3 From dfd01f026058a59a513f8a365b439a0681b803af Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 13 Dec 2015 22:11:16 +0100 Subject: sched/wait: Fix the signal handling fix Jan Stancek reported that I wrecked things for him by fixing things for Vladimir :/ His report was due to an UNINTERRUPTIBLE wait getting -EINTR, which should not be possible, however my previous patch made this possible by unconditionally checking signal_pending(). We cannot use current->state as was done previously, because it can already have been changed by the instruction right after the store to that variable. We must instead pass the initial state along and use that.
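Concretely, every wait-bit action now takes the initial sleep state as a second argument and tests it with signal_pending_state(); the bit_wait() hunk from the diff below shows the pattern:

__sched int bit_wait(struct wait_bit_key *word, int mode)
{
	schedule();
	/* mode is the state the waiter went to sleep in, so an
	 * UNINTERRUPTIBLE sleep can no longer return -EINTR here */
	if (signal_pending_state(mode, current))
		return -EINTR;
	return 0;
}
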
Fixes: 68985633bccb ("sched/wait: Fix signal handling in bit wait helpers") Reported-by: Jan Stancek Reported-by: Chris Mason Tested-by: Jan Stancek Tested-by: Vladimir Murzin Tested-by: Chris Mason Reviewed-by: Paul Turner Cc: Ingo Molnar Cc: tglx@linutronix.de Cc: Oleg Nesterov Cc: hpa@zytor.com Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Linus Torvalds --- fs/cifs/inode.c | 6 +++--- fs/nfs/inode.c | 6 +++--- fs/nfs/internal.h | 2 +- fs/nfs/pagelist.c | 2 +- fs/nfs/pnfs.c | 4 ++-- include/linux/wait.h | 10 +++++----- kernel/sched/wait.c | 20 ++++++++++---------- net/sunrpc/sched.c | 6 +++--- 8 files changed, 28 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 6b66dd5d1540..a329f5ba35aa 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1831,11 +1831,11 @@ cifs_invalidate_mapping(struct inode *inode) * @word: long word containing the bit lock */ static int -cifs_wait_bit_killable(struct wait_bit_key *key) +cifs_wait_bit_killable(struct wait_bit_key *key, int mode) { - if (fatal_signal_pending(current)) - return -ERESTARTSYS; freezable_schedule_unsafe(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; return 0; } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 31b0a52223a7..c7e8b87da5b2 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -75,11 +75,11 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr) * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks * @word: long word containing the bit lock */ -int nfs_wait_bit_killable(struct wait_bit_key *key) +int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) { - if (fatal_signal_pending(current)) - return -ERESTARTSYS; freezable_schedule_unsafe(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; return 0; } EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 56cfde26fb9c..9dea85f7f918 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -379,7 +379,7 @@ extern int nfs_drop_inode(struct inode *); extern void nfs_clear_inode(struct inode *); extern void nfs_evict_inode(struct inode *); void nfs_zap_acl_cache(struct inode *inode); -extern int nfs_wait_bit_killable(struct wait_bit_key *key); +extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode); /* super.c */ extern const struct super_operations nfs_sops; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index fe3ddd20ff89..452a011ba0d8 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -129,7 +129,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c) set_bit(NFS_IO_INPROGRESS, &c->flags); if (atomic_read(&c->io_count) == 0) break; - ret = nfs_wait_bit_killable(&q.key); + ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE); } while (atomic_read(&c->io_count) != 0 && !ret); finish_wait(wq, &q.wait); return ret; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 5a8ae2125b50..bec0384499f7 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1466,11 +1466,11 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, } /* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. 
*/ -static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key) +static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode) { if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags)) return 1; - return nfs_wait_bit_killable(key); + return nfs_wait_bit_killable(key, mode); } static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) diff --git a/include/linux/wait.h b/include/linux/wait.h index 1e1bf9f963a9..513b36f04dfd 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -145,7 +145,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) list_del(&old->task_list); } -typedef int wait_bit_action_f(struct wait_bit_key *); +typedef int wait_bit_action_f(struct wait_bit_key *, int mode); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); @@ -960,10 +960,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); } while (0) -extern int bit_wait(struct wait_bit_key *); -extern int bit_wait_io(struct wait_bit_key *); -extern int bit_wait_timeout(struct wait_bit_key *); -extern int bit_wait_io_timeout(struct wait_bit_key *); +extern int bit_wait(struct wait_bit_key *, int); +extern int bit_wait_io(struct wait_bit_key *, int); +extern int bit_wait_timeout(struct wait_bit_key *, int); +extern int bit_wait_io_timeout(struct wait_bit_key *, int); /** * wait_on_bit - wait for a bit to be cleared diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index f10bd873e684..f15d6b6a538a 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, do { prepare_to_wait(wq, &q->wait, mode); if (test_bit(q->key.bit_nr, q->key.flags)) - ret = (*action)(&q->key); + ret = (*action)(&q->key, mode); } while (test_bit(q->key.bit_nr, q->key.flags) && !ret); finish_wait(wq, &q->wait); return ret; @@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, prepare_to_wait_exclusive(wq, &q->wait, mode); if (!test_bit(q->key.bit_nr, q->key.flags)) continue; - ret = action(&q->key); + ret = action(&q->key, mode); if (!ret) continue; abort_exclusive_wait(wq, &q->wait, mode, &q->key); @@ -581,43 +581,43 @@ void wake_up_atomic_t(atomic_t *p) } EXPORT_SYMBOL(wake_up_atomic_t); -__sched int bit_wait(struct wait_bit_key *word) +__sched int bit_wait(struct wait_bit_key *word, int mode) { schedule(); - if (signal_pending(current)) + if (signal_pending_state(mode, current)) return -EINTR; return 0; } EXPORT_SYMBOL(bit_wait); -__sched int bit_wait_io(struct wait_bit_key *word) +__sched int bit_wait_io(struct wait_bit_key *word, int mode) { io_schedule(); - if (signal_pending(current)) + if (signal_pending_state(mode, current)) return -EINTR; return 0; } EXPORT_SYMBOL(bit_wait_io); -__sched int bit_wait_timeout(struct wait_bit_key *word) +__sched int bit_wait_timeout(struct wait_bit_key *word, int mode) { unsigned long now = READ_ONCE(jiffies); if (time_after_eq(now, word->timeout)) return -EAGAIN; schedule_timeout(word->timeout - now); - if (signal_pending(current)) + if (signal_pending_state(mode, current)) return -EINTR; return 0; } EXPORT_SYMBOL_GPL(bit_wait_timeout); -__sched int bit_wait_io_timeout(struct wait_bit_key *word) +__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode) { unsigned long now = READ_ONCE(jiffies); if 
(time_after_eq(now, word->timeout)) return -EAGAIN; io_schedule_timeout(word->timeout - now); - if (signal_pending(current)) + if (signal_pending_state(mode, current)) return -EINTR; return 0; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index f14f24ee9983..73ad57a59989 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -250,11 +250,11 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue) } EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); -static int rpc_wait_bit_killable(struct wait_bit_key *key) +static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) { - if (fatal_signal_pending(current)) - return -ERESTARTSYS; freezable_schedule_unsafe(); + if (signal_pending_state(mode, current)) + return -ERESTARTSYS; return 0; } -- cgit v1.2.3 From 8979a059281b2e25d864edfd2220def7140c09b1 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 14 Dec 2015 09:59:56 -0500 Subject: drm/msm: trivial whitespace fix Signed-off-by: Rob Clark --- include/uapi/drm/msm_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index e995ffbcf86a..81e6e0d1d360 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -121,7 +121,7 @@ struct drm_msm_gem_cpu_fini { struct drm_msm_gem_submit_reloc { __u32 submit_offset; /* in, offset from submit_bo */ __u32 or; /* in, value OR'd with result */ - __s32 shift; /* in, amount of left shift (can be negative) */ + __s32 shift; /* in, amount of left shift (can be negative) */ __u32 reloc_idx; /* in, index of reloc_bo buffer */ __u64 reloc_offset; /* in, offset from start of reloc_bo */ }; -- cgit v1.2.3 From e5f5d74747afa799bff109644be04b00af36043e Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 14 Dec 2015 14:29:58 +0100 Subject: openvswitch: fix trivial comment typo The commit 33db4125ec74 ("openvswitch: Rename LABEL->LABELS") left over an old OVS_CT_ATTR_LABEL instance, fix it. Fixes: 33db4125ec74 ("openvswitch: Rename LABEL->LABELS") Signed-off-by: Paolo Abeni Acked-by: Joe Stringer Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 28ccedd000f5..a27222d5b413 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -628,7 +628,7 @@ struct ovs_action_hash { * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the * mask, the corresponding bit in the value is copied to the connection * tracking mark field in the connection. - * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN + * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN * mask. For each bit set in the mask, the corresponding bit in the value is * copied to the connection tracking label field in the connection. * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. 
-- cgit v1.2.3 From 79462ad02e861803b3840cc782248c7359451cd9 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 14 Dec 2015 22:03:39 +0100 Subject: net: add validation for the socket syscall protocol argument MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 郭永刚 reported that one could simply crash the kernel as root by using a simple program: int socket_fd; struct sockaddr_in addr; addr.sin_port = 0; addr.sin_addr.s_addr = INADDR_ANY; addr.sin_family = 10; socket_fd = socket(10,3,0x40000000); connect(socket_fd , &addr,16); AF_INET, AF_INET6 sockets actually only support 8-bit protocol identifiers. inet_sock's skc_protocol field thus is sized accordingly, thus larger protocol identifiers simply cut off the higher bits and store a zero in the protocol fields. This could lead to e.g. NULL function pointer because as a result of the cut off inet_num is zero and we call down to inet_autobind, which is NULL for raw sockets. kernel: Call Trace: kernel: [] ? inet_autobind+0x2e/0x70 kernel: [] inet_dgram_connect+0x54/0x80 kernel: [] SYSC_connect+0xd9/0x110 kernel: [] ? ptrace_notify+0x5b/0x80 kernel: [] ? syscall_trace_enter_phase2+0x108/0x200 kernel: [] SyS_connect+0xe/0x10 kernel: [] tracesys_phase2+0x84/0x89 I found no particular commit which introduced this problem. CVE: CVE-2015-8543 Cc: Cong Wang Reported-by: 郭永刚 Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/sock.h | 1 + net/ax25/af_ax25.c | 3 +++ net/decnet/af_decnet.c | 3 +++ net/ipv4/af_inet.c | 3 +++ net/ipv6/af_inet6.c | 3 +++ net/irda/af_irda.c | 3 +++ 6 files changed, 16 insertions(+) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index eaef41433d7a..c4205e0a3a2d 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -403,6 +403,7 @@ struct sock { sk_no_check_rx : 1, sk_userlocks : 4, sk_protocol : 8, +#define SK_PROTOCOL_MAX U8_MAX sk_type : 16; kmemcheck_bitfield_end(flags); int sk_wmem_queued; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index ae3a47f9d1d5..fbd0acf80b13 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -805,6 +805,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, struct sock *sk; ax25_cb *ax25; + if (protocol < 0 || protocol > SK_PROTOCOL_MAX) + return -EINVAL; + if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index eebf5ac8ce18..13d6b1a6e0fc 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol, { struct sock *sk; + if (protocol < 0 || protocol > SK_PROTOCOL_MAX) + return -EINVAL; + if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 11c4ca13ec3b..5c5db6636704 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -257,6 +257,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, int try_loading_module = 0; int err; + if (protocol < 0 || protocol >= IPPROTO_MAX) + return -EINVAL; + sock->state = SS_UNCONNECTED; /* Look for the requested type/protocol pair. 
*/ diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 8ec0df75f1c4..9f5137cd604e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, int try_loading_module = 0; int err; + if (protocol < 0 || protocol >= IPPROTO_MAX) + return -EINVAL; + /* Look for the requested type/protocol pair. */ lookup_protocol: err = -ESOCKTNOSUPPORT; diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index e6aa48b5395c..923abd6b3064 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, struct sock *sk; struct irda_sock *self; + if (protocol < 0 || protocol > SK_PROTOCOL_MAX) + return -EINVAL; + if (net != &init_net) return -EAFNOSUPPORT; -- cgit v1.2.3 From eb227c554ee68b6719c9298947f344e713fa712c Mon Sep 17 00:00:00 2001 From: Nicolai Hähnle Date: Sat, 12 Dec 2015 11:42:23 -0500 Subject: drm/ttm: fix documentation of ttm_bo_reserve MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, the comment was inconsistent. EDEADLK is what the ww_mutex mechanism really returns. Signed-off-by: Nicolai Hähnle Signed-off-by: Dave Airlie --- include/drm/ttm/ttm_bo_driver.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 813042cede57..3d4bf08aa21f 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -826,10 +826,10 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, * reserved, the validation sequence is checked against the validation * sequence of the process currently reserving the buffer, * and if the current validation sequence is greater than that of the process - * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps + * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps * waiting for the buffer to become unreserved, after which it retries * reserving. - * The caller should, when receiving an -EAGAIN error + * The caller should, when receiving an -EDEADLK error * release all its buffer reservations, wait for @bo to become unreserved, and * then rerun the validation with the same validation sequence. This procedure * will always guarantee that the process with the lowest validation sequence -- cgit v1.2.3 From 5037e9ef9454917b047f9f3a19b4dd179fbf7cd4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 14 Dec 2015 14:08:53 -0800 Subject: net: fix IP early demux races David Wilder reported crashes caused by dst reuse. I am seeing a crash on a distro V4.2.3 kernel caused by a double release of a dst_entry. In ipv4_dst_destroy() the call to list_empty() finds a poisoned next pointer, indicating the dst_entry has already been removed from the list and freed. The crash occurs 18 to 24 hours into a run of a network stress exerciser. Thanks to his detailed report and analysis, we were able to understand the core issue. IP early demux can associate a dst to skb, after a lookup in TCP/UDP sockets. When socket cache is not properly set, we want to store into sk->sk_dst_cache the dst for future IP early demux lookups, by acquiring a stable refcount on the dst. Problem is this acquisition is simply using an atomic_inc(), which works well, unless the dst was queued for destruction from dst_release() noticing dst refcount went to zero, if DST_NOCACHE was set on dst. 
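Schematically, the race looks like this (an illustration, not code from the tree):

	/*
	 * CPU A (releasing)               CPU B (early demux caching)
	 *
	 * dst_release(dst)
	 *   refcnt 1 -> 0, DST_NOCACHE:
	 *   dst queued for destruction
	 *                                  atomic_inc(&dst->__refcnt)
	 *                                    resurrects a dst that is
	 *                                    already scheduled for freeing
	 *                                  dst_release(dst)
	 *                                    refcnt drops to 0 again,
	 *                                    second free -> crash
	 */
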
We need to make sure current refcount is not zero before incrementing it, or risk double free as David reported. This patch, being a stable candidate, adds two new helpers, and use them only from IP early demux problematic paths. It might be possible to merge in net-next skb_dst_force() and skb_dst_force_safe(), but I prefer having the smallest patch for stable kernels : Maybe some skb_dst_force() callers do not expect skb->dst can suddenly be cleared. Can probably be backported back to linux-3.6 kernels Reported-by: David J. Wilder Tested-by: David J. Wilder Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/dst.h | 33 +++++++++++++++++++++++++++++++++ include/net/sock.h | 2 +- net/ipv4/tcp_ipv4.c | 5 ++--- net/ipv6/tcp_ipv6.c | 3 +-- 4 files changed, 37 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/dst.h b/include/net/dst.h index 1279f9b09791..c7329dcd90cc 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -322,6 +322,39 @@ static inline void skb_dst_force(struct sk_buff *skb) } } +/** + * dst_hold_safe - Take a reference on a dst if possible + * @dst: pointer to dst entry + * + * This helper returns false if it could not safely + * take a reference on a dst. + */ +static inline bool dst_hold_safe(struct dst_entry *dst) +{ + if (dst->flags & DST_NOCACHE) + return atomic_inc_not_zero(&dst->__refcnt); + dst_hold(dst); + return true; +} + +/** + * skb_dst_force_safe - makes sure skb dst is refcounted + * @skb: buffer + * + * If dst is not yet refcounted and not destroyed, grab a ref on it. + */ +static inline void skb_dst_force_safe(struct sk_buff *skb) +{ + if (skb_dst_is_noref(skb)) { + struct dst_entry *dst = skb_dst(skb); + + if (!dst_hold_safe(dst)) + dst = NULL; + + skb->_skb_refdst = (unsigned long)dst; + } +} + /** * __skb_tunnel_rx - prepare skb for rx reinsert diff --git a/include/net/sock.h b/include/net/sock.h index c4205e0a3a2d..28790fe18206 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -817,7 +817,7 @@ void sk_stream_write_space(struct sock *sk); static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) { /* dont let skb dst not refcounted, we are going to leave rcu lock */ - skb_dst_force(skb); + skb_dst_force_safe(skb); if (!sk->sk_backlog.tail) sk->sk_backlog.head = skb; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index db003438aaf5..d8841a2f1569 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1493,7 +1493,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) if (likely(sk->sk_rx_dst)) skb_dst_drop(skb); else - skb_dst_force(skb); + skb_dst_force_safe(skb); __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; @@ -1721,8 +1721,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - if (dst) { - dst_hold(dst); + if (dst && dst_hold_safe(dst)) { sk->sk_rx_dst = dst; inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e7aab561b7b4..6b8a8a9091fa 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - if (dst) { + if (dst && dst_hold_safe(dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; - dst_hold(dst); sk->sk_rx_dst = dst; inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); -- cgit v1.2.3 From 
d55f5320c7811d3858edaef5f2d4a9a7e43b1af8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 8 Dec 2015 09:49:19 +0100 Subject: drm: Move more framebuffer doc from docbook to kerneldoc I missed a few paragraphs in the docbook that need to be pulled into the fbdev vfunc docs. v2: Spelling fixes from Thierry. Cc: Thierry Reding Link: http://patchwork.freedesktop.org/patch/msgid/1449564561-3896-3-git-send-email-daniel.vetter@ffwll.ch Reviewed-by: Thierry Reding Signed-off-by: Daniel Vetter --- Documentation/DocBook/gpu.tmpl | 72 +----------------------------------------- include/drm/drm_crtc.h | 18 ++++++++++- 2 files changed, 18 insertions(+), 72 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index a27bdf9eaab5..6c6e81a9eaf4 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -986,10 +986,7 @@ int max_width, max_height; !Idrivers/gpu/drm/drm_atomic.c - Frame Buffer Creation - struct drm_framebuffer *(*fb_create)(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); + Frame Buffer Abstraction Frame buffers are abstract memory objects that provide a source of pixels to scanout to a CRTC. Applications explicitly request the @@ -1007,73 +1004,6 @@ int max_width, max_height; handles, e.g. vmwgfx directly exposes special TTM handles to userspace and so expects TTM handles in the create ioctl and not GEM handles. - - Drivers must first validate the requested frame buffer parameters passed - through the mode_cmd argument. In particular this is where invalid - sizes, pixel formats or pitches can be caught. - - - If the parameters are deemed valid, drivers then create, initialize and - return an instance of struct drm_framebuffer. - If desired the instance can be embedded in a larger driver-specific - structure. Drivers must fill its width, - height, pitches, - offsets, depth, - bits_per_pixel and - pixel_format fields from the values passed - through the drm_mode_fb_cmd2 argument. They - should call the drm_helper_mode_fill_fb_struct - helper function to do so. - - - - The initialization of the new framebuffer instance is finalized with a - call to drm_framebuffer_init which takes a pointer - to DRM frame buffer operations (struct - drm_framebuffer_funcs). Note that this function - publishes the framebuffer and so from this point on it can be accessed - concurrently from other threads. Hence it must be the last step in the - driver's framebuffer initialization sequence. Frame buffer operations - are - - - int (*create_handle)(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned int *handle); - - Create a handle to the frame buffer underlying memory object. If - the frame buffer uses a multi-plane format, the handle will - reference the memory object associated with the first plane. - - - Drivers call drm_gem_handle_create to create - the handle. - - - - void (*destroy)(struct drm_framebuffer *framebuffer); - - Destroy the frame buffer object and frees all associated - resources. Drivers must call - drm_framebuffer_cleanup to free resources - allocated by the DRM core for the frame buffer object, and must - make sure to unreference all memory objects associated with the - frame buffer. Handles created by the - create_handle operation are released by - the DRM core. 
- - - - int (*dirty)(struct drm_framebuffer *framebuffer, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips); - - This optional operation notifies the driver that a region of the - frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB - ioctl call. - - - - The lifetime of a drm framebuffer is controlled with a reference count, drivers can grab additional references with diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c2f98ba2bb98..3b040b355472 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -172,7 +172,9 @@ struct drm_framebuffer_funcs { * Clean up framebuffer resources, specifically also unreference the * backing storage. The core guarantees to call this function for every * framebuffer successfully created by ->fb_create() in - * &drm_mode_config_funcs. + * &drm_mode_config_funcs. Drivers must also call + * drm_framebuffer_cleanup() to release DRM core resources for this + * framebuffer. */ void (*destroy)(struct drm_framebuffer *framebuffer); @@ -187,6 +189,9 @@ struct drm_framebuffer_funcs { * copying the current screen contents to a private buffer and blending * between that and the new contents. * + * GEM based drivers should call drm_gem_handle_create() to create the + * handle. + * * RETURNS: * * 0 on success or a negative error code on failure. @@ -1731,6 +1736,17 @@ struct drm_mode_config_funcs { * requested metadata, but most of that is left to the driver. See * struct &drm_mode_fb_cmd2 for details. * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). + * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. 
+ * * RETURNS: * * A new framebuffer with an initial reference count of 1 or a negative -- cgit v1.2.3 From 16c3719c17d355b12fe2dac4458bd598f9204d6f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 15 Dec 2015 12:20:58 +0100 Subject: drm: Constify drm_encoder_slave_funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Ville Syrjälä Reviewed-by: Alex Deucher Signed-off-by: Thierry Reding Signed-off-by: Boris Brezillon Link: http://patchwork.freedesktop.org/patch/msgid/1450178476-26284-11-git-send-email-boris.brezillon@free-electrons.com Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_encoder_slave.c | 2 +- drivers/gpu/drm/nouveau/nouveau_encoder.h | 2 +- drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 6 +++--- drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 8 ++++---- include/drm/drm_encoder_slave.h | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index d18b88b755c3..e8629076de32 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -124,7 +124,7 @@ EXPORT_SYMBOL(drm_i2c_encoder_destroy); * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: */ -static inline struct drm_encoder_slave_funcs * +static inline const struct drm_encoder_slave_funcs * get_slave_funcs(struct drm_encoder *enc) { return to_encoder_slave(enc)->slave_funcs; diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index c38a86408363..ee6a6d3fc80f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -83,7 +83,7 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) return &enc->base.base; } -static inline struct drm_encoder_slave_funcs * +static inline const struct drm_encoder_slave_funcs * get_slave_funcs(struct drm_encoder *enc) { return to_encoder_slave(enc)->slave_funcs; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index 96f2eb43713c..a37b6e2fe51a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -28,7 +28,7 @@ static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector) { struct rcar_du_connector *con = to_rcar_connector(connector); struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->get_modes == NULL) return 0; @@ -41,7 +41,7 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector, { struct rcar_du_connector *con = to_rcar_connector(connector); struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->mode_valid == NULL) return MODE_OK; @@ -66,7 +66,7 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) { struct rcar_du_connector *con = to_rcar_connector(connector); struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->detect == NULL) return connector_status_unknown; diff --git 
a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 11267de26a51..2567efcbee36 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -35,7 +35,7 @@ struct rcar_du_hdmienc { static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->dpms) sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF); @@ -50,7 +50,7 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) static void rcar_du_hdmienc_enable(struct drm_encoder *encoder) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (hdmienc->renc->lvds) rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, @@ -67,7 +67,7 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; const struct drm_display_mode *mode = &crtc_state->mode; @@ -89,7 +89,7 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); if (sfuncs->mode_set) sfuncs->mode_set(encoder, mode, adjusted_mode); diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 8b9cc3671858..82cdf611393d 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -95,7 +95,7 @@ struct drm_encoder_slave_funcs { struct drm_encoder_slave { struct drm_encoder base; - struct drm_encoder_slave_funcs *slave_funcs; + const struct drm_encoder_slave_funcs *slave_funcs; void *slave_priv; void *bus_priv; }; -- cgit v1.2.3 From 399368aab39135cd9bc5e20b55b8b4d48658f904 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Fri, 11 Dec 2015 11:20:27 +0100 Subject: drm: make drm_dev_set_unique() not use a format string drm_dev_set_unique() uses a format string to define the unique name of a device. This feature is not used as currently all the calls to this function either use "%s" as a format string or directly use dev_name(). Even though this second kind of call does not introduce security problems, because there cannot be "%" characters in dev_name() results, gcc issues a warning when building with -Wformat-security flag ("warning: format string is not a string literal (potentially insecure)"). This warning is useful to find real bugs like the one fixed by commit 3958b79266b1 ("configfs: fix kernel infoleak through user-controlled format string"). False positives which do not bring an extra value make the work of finding real bugs harder. Therefore remove the format-string feature from drm_dev_set_unique(). 
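For illustration, the calling convention before and after this change (the nouveau and rockchip hunks below show the real conversions):

	/* old variadic API: gcc -Wformat-security warns about the non-literal
	 * format string, even though dev_name() can never contain '%' */
	err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));

	/* new API: the unique name is a plain string, not a format string */
	err = drm_dev_set_unique(drm, dev_name(&pdev->dev));
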
Signed-off-by: Nicolas Iooss Link: http://patchwork.freedesktop.org/patch/msgid/1449829228-4425-1-git-send-email-nicolas.iooss_linux@m4x.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_drv.c | 17 ++++++----------- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2 +- include/drm/drmP.h | 2 +- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7dd6728dd092..eaa4316f3c45 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -797,23 +797,18 @@ EXPORT_SYMBOL(drm_dev_unregister); /** * drm_dev_set_unique - Set the unique name of a DRM device * @dev: device of which to set the unique name - * @fmt: format string for unique name + * @name: unique name * - * Sets the unique name of a DRM device using the specified format string and - * a variable list of arguments. Drivers can use this at driver probe time if - * the unique name of the devices they drive is static. + * Sets the unique name of a DRM device using the specified string. Drivers + * can use this at driver probe time if the unique name of the devices they + * drive is static. * * Return: 0 on success or a negative error code on failure. */ -int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...) +int drm_dev_set_unique(struct drm_device *dev, const char *name) { - va_list ap; - kfree(dev->unique); - - va_start(ap, fmt); - dev->unique = kvasprintf(GFP_KERNEL, fmt, ap); - va_end(ap); + dev->unique = kstrdup(name, GFP_KERNEL); return dev->unique ? 0 : -ENOMEM; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1d3ee5179ab8..2d23f95f17ce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1046,7 +1046,7 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func, goto err_free; } - err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev)); + err = drm_dev_set_unique(drm, dev_name(&pdev->dev)); if (err < 0) goto err_free; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index f22e1e1ee64a..215d6c44af55 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -450,7 +450,7 @@ static int rockchip_drm_bind(struct device *dev) if (!drm) return -ENOMEM; - ret = drm_dev_set_unique(drm, "%s", dev_name(dev)); + ret = drm_dev_set_unique(drm, dev_name(dev)); if (ret) goto err_free; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 5531d7bbe851..04caa8f8a52f 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1068,7 +1068,7 @@ void drm_dev_ref(struct drm_device *dev); void drm_dev_unref(struct drm_device *dev); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); -int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...); +int drm_dev_set_unique(struct drm_device *dev, const char *name); struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); -- cgit v1.2.3 From a8c21a5451d831e67b7a6fb910f9ca8bc7b43554 Mon Sep 17 00:00:00 2001 From: The etnaviv authors Date: Thu, 3 Dec 2015 18:21:29 +0100 Subject: drm/etnaviv: add initial etnaviv DRM driver This adds the etnaviv DRM driver and hooks it up in Makefiles and Kconfig. 
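As a rough sketch of how the new driver is enabled, a hypothetical kernel configuration fragment (the symbols are exactly those introduced by the Kconfig hunk below; DRM_ETNAVIV is only visible on platforms satisfying its depends clause, i.e. ARCH_MXC or ARCH_DOVE):

    CONFIG_DRM=y
    CONFIG_DRM_ETNAVIV=m
    # optional: compile in register read/write logging, switched on at
    # runtime via the etnaviv.reglog=y module parameter
    CONFIG_DRM_ETNAVIV_REGISTER_LOGGING=y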
Signed-off-by: Christian Gmeiner Signed-off-by: Russell King Signed-off-by: Lucas Stach Acked-by: Daniel Vetter --- drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/etnaviv/Kconfig | 20 + drivers/gpu/drm/etnaviv/Makefile | 14 + drivers/gpu/drm/etnaviv/cmdstream.xml.h | 218 ++++ drivers/gpu/drm/etnaviv/common.xml.h | 249 ++++ drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 268 +++++ drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 209 ++++ drivers/gpu/drm/etnaviv/etnaviv_drv.c | 707 +++++++++++ drivers/gpu/drm/etnaviv/etnaviv_drv.h | 161 +++ drivers/gpu/drm/etnaviv/etnaviv_dump.c | 227 ++++ drivers/gpu/drm/etnaviv/etnaviv_dump.h | 54 + drivers/gpu/drm/etnaviv/etnaviv_gem.c | 897 ++++++++++++++ drivers/gpu/drm/etnaviv/etnaviv_gem.h | 117 ++ drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 122 ++ drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 443 +++++++ drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 1644 ++++++++++++++++++++++++++ drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 209 ++++ drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 240 ++++ drivers/gpu/drm/etnaviv/etnaviv_iommu.h | 28 + drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 33 + drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h | 25 + drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 299 +++++ drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 71 ++ drivers/gpu/drm/etnaviv/state.xml.h | 351 ++++++ drivers/gpu/drm/etnaviv/state_hi.xml.h | 407 +++++++ include/uapi/drm/etnaviv_drm.h | 222 ++++ 27 files changed, 7238 insertions(+) create mode 100644 drivers/gpu/drm/etnaviv/Kconfig create mode 100644 drivers/gpu/drm/etnaviv/Makefile create mode 100644 drivers/gpu/drm/etnaviv/cmdstream.xml.h create mode 100644 drivers/gpu/drm/etnaviv/common.xml.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_buffer.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_drv.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_drv.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_dump.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_dump.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gpu.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gpu.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_mmu.c create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_mmu.h create mode 100644 drivers/gpu/drm/etnaviv/state.xml.h create mode 100644 drivers/gpu/drm/etnaviv/state_hi.xml.h create mode 100644 include/uapi/drm/etnaviv_drm.h (limited to 'include') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c4bf9a1cf4a6..b02ac62f5863 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -266,3 +266,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig" source "drivers/gpu/drm/imx/Kconfig" source "drivers/gpu/drm/vc4/Kconfig" + +source "drivers/gpu/drm/etnaviv/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1e9ff4c3e3db..f858aa25fbb2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -75,3 +75,4 @@ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ obj-$(CONFIG_DRM_FSL_DCU) += 
fsl-dcu/ +obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/ diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig new file mode 100644 index 000000000000..2cde7a5442fb --- /dev/null +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -0,0 +1,20 @@ + +config DRM_ETNAVIV + tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" + depends on DRM + depends on ARCH_MXC || ARCH_DOVE + select SHMEM + select TMPFS + select IOMMU_API + select IOMMU_SUPPORT + select WANT_DEV_COREDUMP + help + DRM driver for Vivante GPUs. + +config DRM_ETNAVIV_REGISTER_LOGGING + bool "enable ETNAVIV register logging" + depends on DRM_ETNAVIV + help + Compile in support for logging register reads/writes in a format + that can be parsed by envytools demsm tool. If enabled, register + logging can be switched on via etnaviv.reglog=y module param. diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile new file mode 100644 index 000000000000..1086e9876f91 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/Makefile @@ -0,0 +1,14 @@ +etnaviv-y := \ + etnaviv_buffer.o \ + etnaviv_cmd_parser.o \ + etnaviv_drv.o \ + etnaviv_dump.o \ + etnaviv_gem_prime.o \ + etnaviv_gem_submit.o \ + etnaviv_gem.o \ + etnaviv_gpu.o \ + etnaviv_iommu_v2.o \ + etnaviv_iommu.o \ + etnaviv_mmu.o + +obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h new file mode 100644 index 000000000000..8c44ba9a694e --- /dev/null +++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h @@ -0,0 +1,218 @@ +#ifndef CMDSTREAM_XML +#define CMDSTREAM_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) + +Copyright (C) 2014 +*/ + + +#define FE_OPCODE_LOAD_STATE 0x00000001 +#define FE_OPCODE_END 0x00000002 +#define FE_OPCODE_NOP 0x00000003 +#define FE_OPCODE_DRAW_2D 0x00000004 +#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005 +#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006 +#define FE_OPCODE_WAIT 0x00000007 +#define FE_OPCODE_LINK 0x00000008 +#define FE_OPCODE_STALL 0x00000009 +#define FE_OPCODE_CALL 0x0000000a +#define FE_OPCODE_RETURN 0x0000000b +#define FE_OPCODE_CHIP_SELECT 0x0000000d +#define PRIMITIVE_TYPE_POINTS 0x00000001 +#define PRIMITIVE_TYPE_LINES 0x00000002 +#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003 +#define PRIMITIVE_TYPE_TRIANGLES 0x00000004 +#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005 +#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006 +#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007 +#define PRIMITIVE_TYPE_QUADS 0x00000008 +#define VIV_FE_LOAD_STATE 0x00000000 + +#define VIV_FE_LOAD_STATE_HEADER 0x00000000 +#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27 +#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000 +#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000 +#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000 +#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16 +#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK) +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0 +#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << 
VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK) +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2 + +#define VIV_FE_END 0x00000000 + +#define VIV_FE_END_HEADER 0x00000000 +#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f +#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0 +#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK) +#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100 +#define VIV_FE_END_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_END_HEADER_OP__SHIFT 27 +#define VIV_FE_END_HEADER_OP_END 0x10000000 + +#define VIV_FE_NOP 0x00000000 + +#define VIV_FE_NOP_HEADER 0x00000000 +#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_NOP_HEADER_OP__SHIFT 27 +#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000 + +#define VIV_FE_DRAW_2D 0x00000000 + +#define VIV_FE_DRAW_2D_HEADER 0x00000000 +#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00 +#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8 +#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK) +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000 +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16 +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK) +#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27 +#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000 + +#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008 +#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff +#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0 +#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK) +#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000 +#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16 +#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK) + +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0 +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK) +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000 +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16 +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK) + +#define VIV_FE_DRAW_PRIMITIVES 0x00000000 + +#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000 +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27 +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000 + +#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004 +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0 +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK) + +#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008 + +#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000 + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000 + +#define 
VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0 +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK) + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008 + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c + +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010 + +#define VIV_FE_WAIT 0x00000000 + +#define VIV_FE_WAIT_HEADER 0x00000000 +#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff +#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0 +#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK) +#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_WAIT_HEADER_OP__SHIFT 27 +#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000 + +#define VIV_FE_LINK 0x00000000 + +#define VIV_FE_LINK_HEADER 0x00000000 +#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff +#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0 +#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK) +#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_LINK_HEADER_OP__SHIFT 27 +#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000 + +#define VIV_FE_LINK_ADDRESS 0x00000004 + +#define VIV_FE_STALL 0x00000000 + +#define VIV_FE_STALL_HEADER 0x00000000 +#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_STALL_HEADER_OP__SHIFT 27 +#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000 + +#define VIV_FE_STALL_TOKEN 0x00000004 +#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f +#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0 +#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK) +#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00 +#define VIV_FE_STALL_TOKEN_TO__SHIFT 8 +#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK) + +#define VIV_FE_CALL 0x00000000 + +#define VIV_FE_CALL_HEADER 0x00000000 +#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff +#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0 +#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK) +#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_CALL_HEADER_OP__SHIFT 27 +#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000 + +#define VIV_FE_CALL_ADDRESS 0x00000004 + +#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008 + +#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c + +#define VIV_FE_RETURN 0x00000000 + +#define VIV_FE_RETURN_HEADER 0x00000000 +#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_RETURN_HEADER_OP__SHIFT 27 +#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000 + +#define VIV_FE_CHIP_SELECT 0x00000000 + +#define VIV_FE_CHIP_SELECT_HEADER 0x00000000 +#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000 +#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27 +#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200 +#define 
VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002 +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001 + + +#endif /* CMDSTREAM_XML */ diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h new file mode 100644 index 000000000000..9e585d51fb78 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/common.xml.h @@ -0,0 +1,249 @@ +#ifndef COMMON_XML +#define COMMON_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) + +Copyright (C) 2015 +*/ + + +#define PIPE_ID_PIPE_3D 0x00000000 +#define PIPE_ID_PIPE_2D 0x00000001 +#define SYNC_RECIPIENT_FE 0x00000001 +#define SYNC_RECIPIENT_RA 0x00000005 +#define SYNC_RECIPIENT_PE 0x00000007 +#define SYNC_RECIPIENT_DE 0x0000000b +#define SYNC_RECIPIENT_VG 0x0000000f +#define SYNC_RECIPIENT_TESSELATOR 0x00000010 +#define SYNC_RECIPIENT_VG2 0x00000011 +#define SYNC_RECIPIENT_TESSELATOR2 0x00000012 +#define SYNC_RECIPIENT_VG3 0x00000013 +#define SYNC_RECIPIENT_TESSELATOR3 0x00000014 +#define ENDIAN_MODE_NO_SWAP 0x00000000 +#define ENDIAN_MODE_SWAP_16 0x00000001 +#define ENDIAN_MODE_SWAP_32 0x00000002 +#define chipModel_GC300 0x00000300 +#define chipModel_GC320 0x00000320 +#define chipModel_GC350 0x00000350 +#define chipModel_GC355 0x00000355 +#define chipModel_GC400 0x00000400 +#define chipModel_GC410 0x00000410 +#define chipModel_GC420 0x00000420 +#define chipModel_GC450 0x00000450 +#define chipModel_GC500 0x00000500 +#define chipModel_GC530 0x00000530 +#define chipModel_GC600 0x00000600 +#define chipModel_GC700 0x00000700 +#define chipModel_GC800 0x00000800 +#define chipModel_GC860 0x00000860 +#define chipModel_GC880 0x00000880 +#define chipModel_GC1000 0x00001000 +#define chipModel_GC2000 0x00002000 +#define chipModel_GC2100 0x00002100 +#define chipModel_GC4000 0x00004000 +#define RGBA_BITS_R 0x00000001 +#define RGBA_BITS_G 0x00000002 +#define RGBA_BITS_B 0x00000004 +#define RGBA_BITS_A 0x00000008 +#define chipFeatures_FAST_CLEAR 0x00000001 +#define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002 +#define chipFeatures_PIPE_3D 0x00000004 +#define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008 +#define chipFeatures_DEBUG_MODE 0x00000010 +#define chipFeatures_Z_COMPRESSION 0x00000020 +#define chipFeatures_YUV420_SCALER 0x00000040 +#define chipFeatures_MSAA 0x00000080 +#define chipFeatures_DC 0x00000100 +#define chipFeatures_PIPE_2D 0x00000200 +#define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400 +#define chipFeatures_FAST_SCALER 0x00000800 +#define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000 +#define chipFeatures_YUV420_TILER 0x00002000 +#define chipFeatures_MODULE_CG 0x00004000 +#define chipFeatures_MIN_AREA 0x00008000 +#define chipFeatures_NO_EARLY_Z 0x00010000 +#define chipFeatures_NO_422_TEXTURE 0x00020000 +#define chipFeatures_BUFFER_INTERLEAVING 0x00040000 +#define 
chipFeatures_BYTE_WRITE_2D 0x00080000 +#define chipFeatures_NO_SCALER 0x00100000 +#define chipFeatures_YUY2_AVERAGING 0x00200000 +#define chipFeatures_HALF_PE_CACHE 0x00400000 +#define chipFeatures_HALF_TX_CACHE 0x00800000 +#define chipFeatures_YUY2_RENDER_TARGET 0x01000000 +#define chipFeatures_MEM32 0x02000000 +#define chipFeatures_PIPE_VG 0x04000000 +#define chipFeatures_VGTS 0x08000000 +#define chipFeatures_FE20 0x10000000 +#define chipFeatures_BYTE_WRITE_3D 0x20000000 +#define chipFeatures_RS_YUV_TARGET 0x40000000 +#define chipFeatures_32_BIT_INDICES 0x80000000 +#define chipMinorFeatures0_FLIP_Y 0x00000001 +#define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002 +#define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004 +#define chipMinorFeatures0_TEXTURE_8K 0x00000008 +#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010 +#define chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020 +#define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040 +#define chipMinorFeatures0_2DPE20 0x00000080 +#define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100 +#define chipMinorFeatures0_RENDERTARGET_8K 0x00000200 +#define chipMinorFeatures0_2BITPERTILE 0x00000400 +#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800 +#define chipMinorFeatures0_SUPER_TILED 0x00001000 +#define chipMinorFeatures0_VG_20 0x00002000 +#define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000 +#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000 +#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000 +#define chipMinorFeatures0_VG_FILTER 0x00020000 +#define chipMinorFeatures0_VG_21 0x00040000 +#define chipMinorFeatures0_SHADER_HAS_W 0x00080000 +#define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000 +#define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000 +#define chipMinorFeatures0_MC20 0x00400000 +#define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000 +#define chipMinorFeatures0_BUG_FIXES0 0x01000000 +#define chipMinorFeatures0_VAA 0x02000000 +#define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000 +#define chipMinorFeatures0_HZ 0x08000000 +#define chipMinorFeatures0_NEW_TEXTURE 0x10000000 +#define chipMinorFeatures0_2D_A8_TARGET 0x20000000 +#define chipMinorFeatures0_CORRECT_STENCIL 0x40000000 +#define chipMinorFeatures0_ENHANCE_VR 0x80000000 +#define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001 +#define chipMinorFeatures1_V2_COMPRESSION 0x00000002 +#define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004 +#define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008 +#define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010 +#define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020 +#define chipMinorFeatures1_BUG_FIXES3 0x00000040 +#define chipMinorFeatures1_AUTO_DISABLE 0x00000080 +#define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100 +#define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200 +#define chipMinorFeatures1_L2_WINDOWING 0x00000400 +#define chipMinorFeatures1_HALF_FLOAT 0x00000800 +#define chipMinorFeatures1_PIXEL_DITHER 0x00001000 +#define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000 +#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000 +#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000 +#define chipMinorFeatures1_2D_DITHER 0x00010000 +#define chipMinorFeatures1_BUG_FIXES5 0x00020000 +#define chipMinorFeatures1_NEW_2D 0x00040000 +#define chipMinorFeatures1_NEW_FP 0x00080000 +#define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000 +#define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000 +#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000 +#define 
chipMinorFeatures1_HALTI0 0x00800000 +#define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000 +#define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000 +#define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000 +#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000 +#define chipMinorFeatures1_MMU_VERSION 0x10000000 +#define chipMinorFeatures1_WIDE_LINE 0x20000000 +#define chipMinorFeatures1_BUG_FIXES6 0x40000000 +#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000 +#define chipMinorFeatures2_LINE_LOOP 0x00000001 +#define chipMinorFeatures2_LOGIC_OP 0x00000002 +#define chipMinorFeatures2_UNK2 0x00000004 +#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008 +#define chipMinorFeatures2_UNK4 0x00000010 +#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020 +#define chipMinorFeatures2_COMPOSITION 0x00000040 +#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080 +#define chipMinorFeatures2_UNK8 0x00000100 +#define chipMinorFeatures2_UNK9 0x00000200 +#define chipMinorFeatures2_UNK10 0x00000400 +#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800 +#define chipMinorFeatures2_UNK12 0x00001000 +#define chipMinorFeatures2_UNK13 0x00002000 +#define chipMinorFeatures2_UNK14 0x00004000 +#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000 +#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000 +#define chipMinorFeatures2_2D_TILING 0x00020000 +#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000 +#define chipMinorFeatures2_TILE_FILLER 0x00080000 +#define chipMinorFeatures2_UNK20 0x00100000 +#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000 +#define chipMinorFeatures2_UNK22 0x00400000 +#define chipMinorFeatures2_UNK23 0x00800000 +#define chipMinorFeatures2_UNK24 0x01000000 +#define chipMinorFeatures2_MIXED_STREAMS 0x02000000 +#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000 +#define chipMinorFeatures2_UNK27 0x08000000 +#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000 +#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000 +#define chipMinorFeatures2_UNK30 0x40000000 +#define chipMinorFeatures2_UNK31 0x80000000 +#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001 +#define chipMinorFeatures3_UNK1 0x00000002 +#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004 +#define chipMinorFeatures3_UNK3 0x00000008 +#define chipMinorFeatures3_UNK4 0x00000010 +#define chipMinorFeatures3_UNK5 0x00000020 +#define chipMinorFeatures3_UNK6 0x00000040 +#define chipMinorFeatures3_UNK7 0x00000080 +#define chipMinorFeatures3_UNK8 0x00000100 +#define chipMinorFeatures3_UNK9 0x00000200 +#define chipMinorFeatures3_BUG_FIXES10 0x00000400 +#define chipMinorFeatures3_UNK11 0x00000800 +#define chipMinorFeatures3_BUG_FIXES11 0x00001000 +#define chipMinorFeatures3_UNK13 0x00002000 +#define chipMinorFeatures3_UNK14 0x00004000 +#define chipMinorFeatures3_UNK15 0x00008000 +#define chipMinorFeatures3_UNK16 0x00010000 +#define chipMinorFeatures3_UNK17 0x00020000 +#define chipMinorFeatures3_UNK18 0x00040000 +#define chipMinorFeatures3_UNK19 0x00080000 +#define chipMinorFeatures3_UNK20 0x00100000 +#define chipMinorFeatures3_UNK21 0x00200000 +#define chipMinorFeatures3_UNK22 0x00400000 +#define chipMinorFeatures3_UNK23 0x00800000 +#define chipMinorFeatures3_UNK24 0x01000000 +#define chipMinorFeatures3_UNK25 0x02000000 +#define chipMinorFeatures3_UNK26 0x04000000 +#define chipMinorFeatures3_UNK27 0x08000000 +#define chipMinorFeatures3_UNK28 0x10000000 +#define chipMinorFeatures3_UNK29 0x20000000 +#define chipMinorFeatures3_UNK30 0x40000000 +#define chipMinorFeatures3_UNK31 0x80000000 
+#define chipMinorFeatures4_UNK0 0x00000001 +#define chipMinorFeatures4_UNK1 0x00000002 +#define chipMinorFeatures4_UNK2 0x00000004 +#define chipMinorFeatures4_UNK3 0x00000008 +#define chipMinorFeatures4_UNK4 0x00000010 +#define chipMinorFeatures4_UNK5 0x00000020 +#define chipMinorFeatures4_UNK6 0x00000040 +#define chipMinorFeatures4_UNK7 0x00000080 +#define chipMinorFeatures4_UNK8 0x00000100 +#define chipMinorFeatures4_UNK9 0x00000200 +#define chipMinorFeatures4_UNK10 0x00000400 +#define chipMinorFeatures4_UNK11 0x00000800 +#define chipMinorFeatures4_UNK12 0x00001000 +#define chipMinorFeatures4_UNK13 0x00002000 +#define chipMinorFeatures4_UNK14 0x00004000 +#define chipMinorFeatures4_UNK15 0x00008000 +#define chipMinorFeatures4_UNK16 0x00010000 +#define chipMinorFeatures4_UNK17 0x00020000 +#define chipMinorFeatures4_UNK18 0x00040000 +#define chipMinorFeatures4_UNK19 0x00080000 +#define chipMinorFeatures4_UNK20 0x00100000 +#define chipMinorFeatures4_UNK21 0x00200000 +#define chipMinorFeatures4_UNK22 0x00400000 +#define chipMinorFeatures4_UNK23 0x00800000 +#define chipMinorFeatures4_UNK24 0x01000000 +#define chipMinorFeatures4_UNK25 0x02000000 +#define chipMinorFeatures4_UNK26 0x04000000 +#define chipMinorFeatures4_UNK27 0x08000000 +#define chipMinorFeatures4_UNK28 0x10000000 +#define chipMinorFeatures4_UNK29 0x20000000 +#define chipMinorFeatures4_UNK30 0x40000000 +#define chipMinorFeatures4_UNK31 0x80000000 + +#endif /* COMMON_XML */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c new file mode 100644 index 000000000000..332c55ebba6d --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -0,0 +1,268 @@ +/* + * Copyright (C) 2014 Etnaviv Project + * Author: Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" +#include "etnaviv_mmu.h" + +#include "common.xml.h" +#include "state.xml.h" +#include "cmdstream.xml.h" + +/* + * Command Buffer helper: + */ + + +static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data) +{ + u32 *vaddr = (u32 *)buffer->vaddr; + + BUG_ON(buffer->user_size >= buffer->size); + + vaddr[buffer->user_size / 4] = data; + buffer->user_size += 4; +} + +static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer, + u32 reg, u32 value) +{ + u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR; + + buffer->user_size = ALIGN(buffer->user_size, 8); + + /* write a register via cmd stream */ + OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE | + VIV_FE_LOAD_STATE_HEADER_COUNT(1) | + VIV_FE_LOAD_STATE_HEADER_OFFSET(index)); + OUT(buffer, value); +} + +static inline void CMD_END(struct etnaviv_cmdbuf *buffer) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_END_HEADER_OP_END); +} + +static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200); +} + +static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer, + u16 prefetch, u32 address) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK | + VIV_FE_LINK_HEADER_PREFETCH(prefetch)); + OUT(buffer, address); +} + +static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer, + u32 from, u32 to) +{ + buffer->user_size = ALIGN(buffer->user_size, 8); + + OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL); + OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to)); +} + +static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe) +{ + u32 flush; + u32 stall; + + /* + * This assumes that if we're switching to 2D, we're switching + * away from 3D, and vice versa. Hence, if we're switching to + * the 2D core, we need to flush the 3D depth and color caches, + * otherwise we need to flush the 2D pixel engine cache. 
+ */ + if (pipe == ETNA_PIPE_2D) + flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR; + else + flush = VIVS_GL_FLUSH_CACHE_PE2D; + + stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) | + VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE); + + CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); + CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall); + + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + + CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, + VIVS_GL_PIPE_SELECT_PIPE(pipe)); +} + +static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf) +{ + return buf->paddr - gpu->memory_base; +} + +static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, + struct etnaviv_cmdbuf *buf, u32 off, u32 len) +{ + u32 size = buf->size; + u32 *ptr = buf->vaddr + off; + + dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n", + ptr, gpu_va(gpu, buf) + off, size - len * 4 - off); + + print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, + ptr, len * 4, 0); +} + +u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu) +{ + struct etnaviv_cmdbuf *buffer = gpu->buffer; + + /* initialize buffer */ + buffer->user_size = 0; + + CMD_WAIT(buffer); + CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4); + + return buffer->user_size / 8; +} + +void etnaviv_buffer_end(struct etnaviv_gpu *gpu) +{ + struct etnaviv_cmdbuf *buffer = gpu->buffer; + + /* Replace the last WAIT with an END */ + buffer->user_size -= 16; + + CMD_END(buffer); + mb(); +} + +void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, + struct etnaviv_cmdbuf *cmdbuf) +{ + struct etnaviv_cmdbuf *buffer = gpu->buffer; + u32 *lw = buffer->vaddr + buffer->user_size - 16; + u32 back, link_target, link_size, reserve_size, extra_size = 0; + + if (drm_debug & DRM_UT_DRIVER) + etnaviv_buffer_dump(gpu, buffer, 0, 0x50); + + /* + * If we need to flush the MMU prior to submitting this buffer, we + * will need to append a mmu flush load state, followed by a new + * link to this buffer - a total of four additional words. + */ + if (gpu->mmu->need_flush || gpu->switch_context) { + /* link command */ + extra_size += 2; + /* flush command */ + if (gpu->mmu->need_flush) + extra_size += 2; + /* pipe switch commands */ + if (gpu->switch_context) + extra_size += 8; + } + + reserve_size = (6 + extra_size) * 4; + + /* + * if we are going to completely overflow the buffer, we need to wrap. 
+ */ + if (buffer->user_size + reserve_size > buffer->size) + buffer->user_size = 0; + + /* save offset back into main buffer */ + back = buffer->user_size + reserve_size - 6 * 4; + link_target = gpu_va(gpu, buffer) + buffer->user_size; + link_size = 6; + + /* Skip over any extra instructions */ + link_target += extra_size * sizeof(u32); + + if (drm_debug & DRM_UT_DRIVER) + pr_info("stream link to 0x%08x @ 0x%08x %p\n", + link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr); + + /* jump back from cmd to main buffer */ + CMD_LINK(cmdbuf, link_size, link_target); + + link_target = gpu_va(gpu, cmdbuf); + link_size = cmdbuf->size / 8; + + + + if (drm_debug & DRM_UT_DRIVER) { + print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, + cmdbuf->vaddr, cmdbuf->size, 0); + + pr_info("link op: %p\n", lw); + pr_info("link addr: %p\n", lw + 1); + pr_info("addr: 0x%08x\n", link_target); + pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back); + pr_info("event: %d\n", event); + } + + if (gpu->mmu->need_flush || gpu->switch_context) { + u32 new_target = gpu_va(gpu, buffer) + buffer->user_size; + + if (gpu->mmu->need_flush) { + /* Add the MMU flush */ + CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU, + VIVS_GL_FLUSH_MMU_FLUSH_FEMMU | + VIVS_GL_FLUSH_MMU_FLUSH_UNK1 | + VIVS_GL_FLUSH_MMU_FLUSH_UNK2 | + VIVS_GL_FLUSH_MMU_FLUSH_PEMMU | + VIVS_GL_FLUSH_MMU_FLUSH_UNK4); + + gpu->mmu->need_flush = false; + } + + if (gpu->switch_context) { + etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state); + gpu->switch_context = false; + } + + /* And the link to the first buffer */ + CMD_LINK(buffer, link_size, link_target); + + /* Update the link target to point to above instructions */ + link_target = new_target; + link_size = extra_size; + } + + /* trigger event */ + CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | + VIVS_GL_EVENT_FROM_PE); + + /* append WAIT/LINK to main buffer */ + CMD_WAIT(buffer); + CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4)); + + /* Change WAIT into a LINK command; write the address first. */ + *(lw + 1) = link_target; + mb(); + *(lw) = VIV_FE_LINK_HEADER_OP_LINK | + VIV_FE_LINK_HEADER_PREFETCH(link_size); + mb(); + + if (drm_debug & DRM_UT_DRIVER) + etnaviv_buffer_dump(gpu, buffer, 0, 0x50); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c new file mode 100644 index 000000000000..dcfd565c88d1 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include + +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" + +#include "cmdstream.xml.h" + +#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT) + +struct etna_validation_state { + struct etnaviv_gpu *gpu; + const struct drm_etnaviv_gem_submit_reloc *relocs; + unsigned int num_relocs; + u32 *start; +}; + +static const struct { + u16 offset; + u16 size; +} etnaviv_sensitive_states[] __initconst = { +#define ST(start, num) { (start) >> 2, (num) } + /* 2D */ + ST(0x1200, 1), + ST(0x1228, 1), + ST(0x1238, 1), + ST(0x1284, 1), + ST(0x128c, 1), + ST(0x1304, 1), + ST(0x1310, 1), + ST(0x1318, 1), + ST(0x12800, 4), + ST(0x128a0, 4), + ST(0x128c0, 4), + ST(0x12970, 4), + ST(0x12a00, 8), + ST(0x12b40, 8), + ST(0x12b80, 8), + ST(0x12ce0, 8), + /* 3D */ + ST(0x0644, 1), + ST(0x064c, 1), + ST(0x0680, 8), + ST(0x1410, 1), + ST(0x1430, 1), + ST(0x1458, 1), + ST(0x1460, 8), + ST(0x1480, 8), + ST(0x1500, 8), + ST(0x1520, 8), + ST(0x1608, 1), + ST(0x1610, 1), + ST(0x1658, 1), + ST(0x165c, 1), + ST(0x1664, 1), + ST(0x1668, 1), + ST(0x16a4, 1), + ST(0x16c0, 8), + ST(0x16e0, 8), + ST(0x1740, 8), + ST(0x2400, 14 * 16), + ST(0x10800, 32 * 16), +#undef ST +}; + +#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u) +static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE); + +void __init etnaviv_validate_init(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++) + bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset, + etnaviv_sensitive_states[i].size); +} + +static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state, + unsigned int buf_offset, unsigned int state_addr) +{ + if (state->num_relocs && state->relocs->submit_offset < buf_offset) { + dev_warn_once(state->gpu->dev, + "%s: relocation for non-sensitive state 0x%x at offset %u\n", + __func__, state_addr, + state->relocs->submit_offset); + while (state->num_relocs && + state->relocs->submit_offset < buf_offset) { + state->relocs++; + state->num_relocs--; + } + } +} + +static bool etnaviv_validate_load_state(struct etna_validation_state *state, + u32 *ptr, unsigned int state_offset, unsigned int num) +{ + unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num); + unsigned int st_offset = state_offset, buf_offset; + + for_each_set_bit_from(st_offset, etnaviv_states, size) { + buf_offset = (ptr - state->start + + st_offset - state_offset) * 4; + + etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4); + if (state->num_relocs && + state->relocs->submit_offset == buf_offset) { + state->relocs++; + state->num_relocs--; + continue; + } + + dev_warn_ratelimited(state->gpu->dev, + "%s: load state touches restricted state 0x%x at offset %u\n", + __func__, st_offset * 4, buf_offset); + return false; + } + + if (state->num_relocs) { + buf_offset = (ptr - state->start + num) * 4; + etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 + + state->relocs->submit_offset - + buf_offset); + } + + return true; +} + +static uint8_t cmd_length[32] = { + [FE_OPCODE_DRAW_PRIMITIVES] = 4, + [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6, + [FE_OPCODE_NOP] = 2, + [FE_OPCODE_STALL] = 2, +}; + +bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream, + unsigned int size, + struct drm_etnaviv_gem_submit_reloc *relocs, + unsigned int reloc_size) +{ + struct etna_validation_state state; + u32 *buf = stream; + u32 *end = buf + size; + + state.gpu = gpu; + state.relocs = relocs; + state.num_relocs = reloc_size; + state.start = stream; + + while 
(buf < end) { + u32 cmd = *buf; + unsigned int len, n, off; + unsigned int op = cmd >> 27; + + switch (op) { + case FE_OPCODE_LOAD_STATE: + n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT); + len = ALIGN(1 + n, 2); + if (buf + len > end) + break; + + off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET); + if (!etnaviv_validate_load_state(&state, buf + 1, + off, n)) + return false; + break; + + case FE_OPCODE_DRAW_2D: + n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT); + if (n == 0) + n = 256; + len = 2 + n * 2; + break; + + default: + len = cmd_length[op]; + if (len == 0) { + dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n", + __func__, op, buf - state.start); + return false; + } + break; + } + + buf += len; + } + + if (buf > end) { + dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n", + __func__, buf - state.start, size); + return false; + } + + return true; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c new file mode 100644 index 000000000000..5c89ebb52fd2 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -0,0 +1,707 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/component.h> +#include <linux/of_platform.h> + +#include "etnaviv_drv.h" +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" +#include "etnaviv_mmu.h" + +#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING +static bool reglog; +MODULE_PARM_DESC(reglog, "Enable register read/write logging"); +module_param(reglog, bool, 0600); +#else +#define reglog 0 +#endif + +void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname) +{ + struct resource *res; + void __iomem *ptr; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ptr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ptr)) { + dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name, + PTR_ERR(ptr)); + return ptr; + } + + if (reglog) + dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n", + dbgname, ptr, (size_t)resource_size(res)); + + return ptr; +} + +void etnaviv_writel(u32 data, void __iomem *addr) +{ + if (reglog) + printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); + + writel(data, addr); +} + +u32 etnaviv_readl(const void __iomem *addr) +{ + u32 val = readl(addr); + + if (reglog) + printk(KERN_DEBUG "IO:R %p %08x\n", addr, val); + + return val; +} + +/* + * DRM operations: + */ + + +static void load_gpu(struct drm_device *dev) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + unsigned int i; + + for (i = 0; i < ETNA_MAX_PIPES; i++) { + struct etnaviv_gpu *g = priv->gpu[i]; + + if (g) { + int ret; + + ret = etnaviv_gpu_init(g); + if (ret) { + dev_err(g->dev, "hw init failed: %d\n", ret); + priv->gpu[i] = NULL; + } + } + } +} + +static int etnaviv_open(struct drm_device *dev, struct drm_file *file) +{ + struct etnaviv_file_private *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); +
if (!ctx) + return -ENOMEM; + + file->driver_priv = ctx; + + return 0; +} + +static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct etnaviv_file_private *ctx = file->driver_priv; + unsigned int i; + + for (i = 0; i < ETNA_MAX_PIPES; i++) { + struct etnaviv_gpu *gpu = priv->gpu[i]; + + if (gpu) { + mutex_lock(&gpu->lock); + if (gpu->lastctx == ctx) + gpu->lastctx = NULL; + mutex_unlock(&gpu->lock); + } + } + + kfree(ctx); +} + +/* + * DRM debugfs: + */ + +#ifdef CONFIG_DEBUG_FS +static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + + etnaviv_gem_describe_objects(priv, m); + + return 0; +} + +static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m) +{ + int ret; + + read_lock(&dev->vma_offset_manager->vm_lock); + ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); + read_unlock(&dev->vma_offset_manager->vm_lock); + + return ret; +} + +static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev)); + + mutex_lock(&gpu->mmu->lock); + drm_mm_dump_table(m, &gpu->mmu->mm); + mutex_unlock(&gpu->mmu->lock); + + return 0; +} + +static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + struct etnaviv_cmdbuf *buf = gpu->buffer; + u32 size = buf->size; + u32 *ptr = buf->vaddr; + u32 i; + + seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n", + buf->vaddr, (u64)buf->paddr, size - buf->user_size); + + for (i = 0; i < size / 4; i++) { + if (i && !(i % 4)) + seq_puts(m, "\n"); + if (i % 4 == 0) + seq_printf(m, "\t0x%p: ", ptr + i); + seq_printf(m, "%08x ", *(ptr + i)); + } + seq_puts(m, "\n"); +} + +static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev)); + + mutex_lock(&gpu->lock); + etnaviv_buffer_dump(gpu, m); + mutex_unlock(&gpu->lock); + + return 0; +} + +static int show_unlocked(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int (*show)(struct drm_device *dev, struct seq_file *m) = + node->info_ent->data; + + return show(dev, m); +} + +static int show_each_gpu(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct etnaviv_drm_private *priv = dev->dev_private; + struct etnaviv_gpu *gpu; + int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) = + node->info_ent->data; + unsigned int i; + int ret = 0; + + for (i = 0; i < ETNA_MAX_PIPES; i++) { + gpu = priv->gpu[i]; + if (!gpu) + continue; + + ret = show(gpu, m); + if (ret < 0) + break; + } + + return ret; +} + +static struct drm_info_list etnaviv_debugfs_list[] = { + {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, + {"gem", show_unlocked, 0, etnaviv_gem_show}, + { "mm", show_unlocked, 0, etnaviv_mm_show }, + {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, + {"ring", show_each_gpu, 0, etnaviv_ring_show}, +}; + +static int etnaviv_debugfs_init(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(etnaviv_debugfs_list, + ARRAY_SIZE(etnaviv_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install etnaviv_debugfs_list\n"); + return ret; + } + + return ret; +} + +static void 
etnaviv_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(etnaviv_debugfs_list, + ARRAY_SIZE(etnaviv_debugfs_list), minor); +} +#endif + +/* + * DRM ioctls: + */ + +static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct drm_etnaviv_param *args = data; + struct etnaviv_gpu *gpu; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + return etnaviv_gpu_get_param(gpu, args->param, &args->value); +} + +static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_new *args = data; + + if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED | + ETNA_BO_FORCE_MMU)) + return -EINVAL; + + return etnaviv_gem_new_handle(dev, file, args->size, + args->flags, &args->handle); +} + +#define TS(t) ((struct timespec){ \ + .tv_sec = (t).tv_sec, \ + .tv_nsec = (t).tv_nsec \ +}) + +static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_cpu_prep *args = data; + struct drm_gem_object *obj; + int ret; + + if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC)) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout)); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_cpu_fini *args = data; + struct drm_gem_object *obj; + int ret; + + if (args->flags) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = etnaviv_gem_cpu_fini(obj); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_info *args = data; + struct drm_gem_object *obj; + int ret; + + if (args->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + ret = etnaviv_gem_mmap_offset(obj, &args->offset); + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_wait_fence *args = data; + struct etnaviv_drm_private *priv = dev->dev_private; + struct timespec *timeout = &TS(args->timeout); + struct etnaviv_gpu *gpu; + + if (args->flags & ~(ETNA_WAIT_NONBLOCK)) + return -EINVAL; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + if (args->flags & ETNA_WAIT_NONBLOCK) + timeout = NULL; + + return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence, + timeout); +} + +static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_etnaviv_gem_userptr *args = data; + int access; + + if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) || + args->flags == 0) + return -EINVAL; + + if (offset_in_page(args->user_ptr | args->user_size) || + (uintptr_t)args->user_ptr != args->user_ptr || + (u32)args->user_size != args->user_size || + args->user_ptr & ~PAGE_MASK) + return -EINVAL; + + if (args->flags & ETNA_USERPTR_WRITE) + access = VERIFY_WRITE; + else + access = 
VERIFY_READ; + + if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr, + args->user_size)) + return -EFAULT; + + return etnaviv_gem_new_userptr(dev, file, args->user_ptr, + args->user_size, args->flags, + &args->handle); +} + +static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct drm_etnaviv_gem_wait *args = data; + struct timespec *timeout = &TS(args->timeout); + struct drm_gem_object *obj; + struct etnaviv_gpu *gpu; + int ret; + + if (args->flags & ~(ETNA_WAIT_NONBLOCK)) + return -EINVAL; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) + return -ENOENT; + + if (args->flags & ETNA_WAIT_NONBLOCK) + timeout = NULL; + + ret = etnaviv_gem_wait_bo(gpu, obj, timeout); + + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static const struct drm_ioctl_desc etnaviv_ioctls[] = { +#define ETNA_IOCTL(n, func, flags) \ + DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags) + ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW), + ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW), +}; + +static const struct vm_operations_struct vm_ops = { + .fault = etnaviv_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = etnaviv_gem_mmap, +}; + +static struct drm_driver etnaviv_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_GEM | + DRIVER_PRIME | + DRIVER_RENDER, + .open = etnaviv_open, + .preclose = etnaviv_preclose, + .set_busid = drm_platform_set_busid, + .gem_free_object = etnaviv_gem_free_object, + .gem_vm_ops = &vm_ops, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_pin = etnaviv_gem_prime_pin, + .gem_prime_unpin = etnaviv_gem_prime_unpin, + .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, + .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table, + .gem_prime_vmap = etnaviv_gem_prime_vmap, + .gem_prime_vunmap = etnaviv_gem_prime_vunmap, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = etnaviv_debugfs_init, + .debugfs_cleanup = etnaviv_debugfs_cleanup, +#endif + .ioctls = etnaviv_ioctls, + .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS, + .fops = &fops, + .name = "etnaviv", + .desc = "etnaviv DRM", + .date = "20151214", + .major = 1, + .minor = 0, +}; + +/* + * Platform driver: + */ +static int etnaviv_bind(struct device *dev) +{ + struct etnaviv_drm_private *priv; + struct drm_device *drm; + int ret; + + drm = drm_dev_alloc(&etnaviv_drm_driver, dev); + if (!drm) + return -ENOMEM; + + 
drm->platformdev = to_platform_device(dev); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(dev, "failed to allocate private data\n"); + ret = -ENOMEM; + goto out_unref; + } + drm->dev_private = priv; + + priv->wq = alloc_ordered_workqueue("etnaviv", 0); + if (!priv->wq) { + ret = -ENOMEM; + goto out_wq; + } + + mutex_init(&priv->gem_lock); + INIT_LIST_HEAD(&priv->gem_list); + priv->num_gpus = 0; + + dev_set_drvdata(dev, drm); + + ret = component_bind_all(dev, drm); + if (ret < 0) + goto out_bind; + + load_gpu(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + goto out_register; + + return 0; + +out_register: + component_unbind_all(dev, drm); +out_bind: + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); +out_wq: + kfree(priv); +out_unref: + drm_dev_unref(drm); + + return ret; +} + +static void etnaviv_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct etnaviv_drm_private *priv = drm->dev_private; + + drm_dev_unregister(drm); + + flush_workqueue(priv->wq); + destroy_workqueue(priv->wq); + + component_unbind_all(dev, drm); + + drm->dev_private = NULL; + kfree(priv); + + drm_put_dev(drm); +} + +static const struct component_master_ops etnaviv_master_ops = { + .bind = etnaviv_bind, + .unbind = etnaviv_unbind, +}; + +static int compare_of(struct device *dev, void *data) +{ + struct device_node *np = data; + + return dev->of_node == np; +} + +static int compare_str(struct device *dev, void *data) +{ + return !strcmp(dev_name(dev), data); +} + +static int etnaviv_pdev_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct component_match *match = NULL; + + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + + if (node) { + struct device_node *core_node; + int i; + + for (i = 0; ; i++) { + core_node = of_parse_phandle(node, "cores", i); + if (!core_node) + break; + + component_match_add(&pdev->dev, &match, compare_of, + core_node); + of_node_put(core_node); + } + } else if (dev->platform_data) { + char **names = dev->platform_data; + unsigned i; + + for (i = 0; names[i]; i++) + component_match_add(dev, &match, compare_str, names[i]); + } + + return component_master_add_with_match(dev, &etnaviv_master_ops, match); +} + +static int etnaviv_pdev_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &etnaviv_master_ops); + + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "fsl,imx-gpu-subsystem" }, + { .compatible = "marvell,dove-gpu-subsystem" }, + {} +}; +MODULE_DEVICE_TABLE(of, dt_match); + +static struct platform_driver etnaviv_platform_driver = { + .probe = etnaviv_pdev_probe, + .remove = etnaviv_pdev_remove, + .driver = { + .owner = THIS_MODULE, + .name = "etnaviv", + .of_match_table = dt_match, + }, +}; + +static int __init etnaviv_init(void) +{ + int ret; + + etnaviv_validate_init(); + + ret = platform_driver_register(&etnaviv_gpu_driver); + if (ret != 0) + return ret; + + ret = platform_driver_register(&etnaviv_platform_driver); + if (ret != 0) + platform_driver_unregister(&etnaviv_gpu_driver); + + return ret; +} +module_init(etnaviv_init); + +static void __exit etnaviv_exit(void) +{ + platform_driver_unregister(&etnaviv_gpu_driver); + platform_driver_unregister(&etnaviv_platform_driver); +} +module_exit(etnaviv_exit); + +MODULE_AUTHOR("Christian Gmeiner "); +MODULE_AUTHOR("Russell King "); +MODULE_AUTHOR("Lucas Stach "); +MODULE_DESCRIPTION("etnaviv DRM Driver"); +MODULE_LICENSE("GPL v2"); 
+MODULE_ALIAS("platform:etnaviv"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h new file mode 100644 index 000000000000..d6bd438bd5be --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_DRV_H__ +#define __ETNAVIV_DRV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct etnaviv_cmdbuf; +struct etnaviv_gpu; +struct etnaviv_mmu; +struct etnaviv_gem_object; +struct etnaviv_gem_submit; + +struct etnaviv_file_private { + /* currently we don't do anything useful with this.. but when + * per-context address spaces are supported we'd keep track of + * the context's page-tables here. + */ + int dummy; +}; + +struct etnaviv_drm_private { + int num_gpus; + struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; + + /* list of GEM objects: */ + struct mutex gem_lock; + struct list_head gem_list; + + struct workqueue_struct *wq; +}; + +static inline void etnaviv_queue_work(struct drm_device *dev, + struct work_struct *w) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + + queue_work(priv->wq, w); +} + +int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, + struct drm_file *file); + +int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); +int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); +int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, + struct drm_gem_object *obj, u32 *iova); +void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj); +struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); +void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); +void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sg); +int etnaviv_gem_prime_pin(struct drm_gem_object *obj); +void etnaviv_gem_prime_unpin(struct drm_gem_object *obj); +void *etnaviv_gem_vaddr(struct drm_gem_object *obj); +int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, + struct timespec *timeout); +int etnaviv_gem_cpu_fini(struct drm_gem_object *obj); +void etnaviv_gem_free_object(struct drm_gem_object *obj); +int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file, + u32 size, u32 flags, u32 *handle); +struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev, + u32 size, u32 flags); +struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev, + u32 size, u32 flags); +int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, + uintptr_t ptr, u32 size, u32 flags, u32 *handle); +u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu); +void etnaviv_buffer_end(struct 
etnaviv_gpu *gpu); +void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event, + struct etnaviv_cmdbuf *cmdbuf); +void etnaviv_validate_init(void); +bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, + u32 *stream, unsigned int size, + struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size); + +#ifdef CONFIG_DEBUG_FS +void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, + struct seq_file *m); +#endif + +void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname); +void etnaviv_writel(u32 data, void __iomem *addr); +u32 etnaviv_readl(const void __iomem *addr); + +#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) +#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) + +/* + * Return the storage size of a structure with a variable length array. + * The array is nelem elements of elem_size, where the base structure + * is defined by base. If the size overflows size_t, return zero. + */ +static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base) +{ + if (elem_size && nelem > (SIZE_MAX - base) / elem_size) + return 0; + return base + nelem * elem_size; +} + +/* returns true if fence a comes after fence b */ +static inline bool fence_after(u32 a, u32 b) +{ + return (s32)(a - b) > 0; +} + +static inline bool fence_after_eq(u32 a, u32 b) +{ + return (s32)(a - b) >= 0; +} + +static inline unsigned long etnaviv_timeout_to_jiffies( + const struct timespec *timeout) +{ + unsigned long timeout_jiffies = timespec_to_jiffies(timeout); + unsigned long start_jiffies = jiffies; + unsigned long remaining_jiffies; + + if (time_after(start_jiffies, timeout_jiffies)) + remaining_jiffies = 0; + else + remaining_jiffies = timeout_jiffies - start_jiffies; + + return remaining_jiffies; +} + +#endif /* __ETNAVIV_DRV_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c new file mode 100644 index 000000000000..bf8fa859e8be --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -0,0 +1,227 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include "etnaviv_dump.h" +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" +#include "state.xml.h" +#include "state_hi.xml.h" + +struct core_dump_iterator { + void *start; + struct etnaviv_dump_object_header *hdr; + void *data; +}; + +static const unsigned short etnaviv_dump_registers[] = { + VIVS_HI_AXI_STATUS, + VIVS_HI_CLOCK_CONTROL, + VIVS_HI_IDLE_STATE, + VIVS_HI_AXI_CONFIG, + VIVS_HI_INTR_ENBL, + VIVS_HI_CHIP_IDENTITY, + VIVS_HI_CHIP_FEATURE, + VIVS_HI_CHIP_MODEL, + VIVS_HI_CHIP_REV, + VIVS_HI_CHIP_DATE, + VIVS_HI_CHIP_TIME, + VIVS_HI_CHIP_MINOR_FEATURE_0, + VIVS_HI_CACHE_CONTROL, + VIVS_HI_AXI_CONTROL, + VIVS_PM_POWER_CONTROLS, + VIVS_PM_MODULE_CONTROLS, + VIVS_PM_MODULE_STATUS, + VIVS_PM_PULSE_EATER, + VIVS_MC_MMU_FE_PAGE_TABLE, + VIVS_MC_MMU_TX_PAGE_TABLE, + VIVS_MC_MMU_PE_PAGE_TABLE, + VIVS_MC_MMU_PEZ_PAGE_TABLE, + VIVS_MC_MMU_RA_PAGE_TABLE, + VIVS_MC_DEBUG_MEMORY, + VIVS_MC_MEMORY_BASE_ADDR_RA, + VIVS_MC_MEMORY_BASE_ADDR_FE, + VIVS_MC_MEMORY_BASE_ADDR_TX, + VIVS_MC_MEMORY_BASE_ADDR_PEZ, + VIVS_MC_MEMORY_BASE_ADDR_PE, + VIVS_MC_MEMORY_TIMING_CONTROL, + VIVS_MC_BUS_CONFIG, + VIVS_FE_DMA_STATUS, + VIVS_FE_DMA_DEBUG_STATE, + VIVS_FE_DMA_ADDRESS, + VIVS_FE_DMA_LOW, + VIVS_FE_DMA_HIGH, + VIVS_FE_AUTO_FLUSH, +}; + +static void etnaviv_core_dump_header(struct core_dump_iterator *iter, + u32 type, void *data_end) +{ + struct etnaviv_dump_object_header *hdr = iter->hdr; + + hdr->magic = cpu_to_le32(ETDUMP_MAGIC); + hdr->type = cpu_to_le32(type); + hdr->file_offset = cpu_to_le32(iter->data - iter->start); + hdr->file_size = cpu_to_le32(data_end - iter->data); + + iter->hdr++; + iter->data += hdr->file_size; +} + +static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, + struct etnaviv_gpu *gpu) +{ + struct etnaviv_dump_registers *reg = iter->data; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { + reg->reg = etnaviv_dump_registers[i]; + reg->value = gpu_read(gpu, etnaviv_dump_registers[i]); + } + + etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); +} + +static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter, + struct etnaviv_gpu *gpu, size_t mmu_size) +{ + etnaviv_iommu_dump(gpu->mmu, iter->data); + + etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size); +} + +static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type, + void *ptr, size_t size, u64 iova) +{ + memcpy(iter->data, ptr, size); + + iter->hdr->iova = cpu_to_le64(iova); + + etnaviv_core_dump_header(iter, type, iter->data + size); +} + +void etnaviv_core_dump(struct etnaviv_gpu *gpu) +{ + struct core_dump_iterator iter; + struct etnaviv_vram_mapping *vram; + struct etnaviv_gem_object *obj; + struct etnaviv_cmdbuf *cmd; + unsigned int n_obj, n_bomap_pages; + size_t file_size, mmu_size; + __le64 *bomap, *bomap_start; + + mmu_size = etnaviv_iommu_dump_size(gpu->mmu); + + /* We always dump registers, mmu, ring and end marker */ + n_obj = 4; + n_bomap_pages = 0; + file_size = ARRAY_SIZE(etnaviv_dump_registers) * + sizeof(struct etnaviv_dump_registers) + + mmu_size + gpu->buffer->size; + + /* Add in the active command buffers */ + list_for_each_entry(cmd, &gpu->active_cmd_list, node) { + file_size += cmd->size; + n_obj++; + } + + /* Add in the active buffer objects */ + list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { + if (!vram->use) + continue; + + obj = vram->object; + file_size += obj->base.size; + n_bomap_pages += obj->base.size >> PAGE_SHIFT; + n_obj++; + } + + /* If we 
have any buffer objects, add a bomap object */ + if (n_bomap_pages) { + file_size += n_bomap_pages * sizeof(__le64); + n_obj++; + } + + /* Add the size of the headers */ + file_size += sizeof(*iter.hdr) * n_obj; + + /* Allocate the file in vmalloc memory, it's likely to be big */ + iter.start = vmalloc(file_size); + if (!iter.start) { + dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); + return; + } + + /* Point the data member after the headers */ + iter.hdr = iter.start; + iter.data = &iter.hdr[n_obj]; + + memset(iter.hdr, 0, iter.data - iter.start); + + etnaviv_core_dump_registers(&iter, gpu); + etnaviv_core_dump_mmu(&iter, gpu, mmu_size); + etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr, + gpu->buffer->size, gpu->buffer->paddr); + + list_for_each_entry(cmd, &gpu->active_cmd_list, node) + etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr, + cmd->size, cmd->paddr); + + /* Reserve space for the bomap */ + if (n_bomap_pages) { + bomap_start = bomap = iter.data; + memset(bomap, 0, sizeof(*bomap) * n_bomap_pages); + etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP, + bomap + n_bomap_pages); + } else { + /* Silence warning */ + bomap_start = bomap = NULL; + } + + list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) { + struct page **pages; + void *vaddr; + + if (vram->use == 0) + continue; + + obj = vram->object; + + pages = etnaviv_gem_get_pages(obj); + if (pages) { + int j; + + iter.hdr->data[0] = bomap - bomap_start; + + for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++) + *bomap++ = cpu_to_le64(page_to_phys(*pages++)); + } + + iter.hdr->iova = cpu_to_le64(vram->iova); + + vaddr = etnaviv_gem_vaddr(&obj->base); + if (vaddr && !IS_ERR(vaddr)) + memcpy(iter.data, vaddr, obj->base.size); + + etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + + obj->base.size); + } + + etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); + + dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h new file mode 100644 index 000000000000..97f2f8db9133 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + * Etnaviv devcoredump file definitions + */ +#ifndef ETNAVIV_DUMP_H +#define ETNAVIV_DUMP_H + +#include + +enum { + ETDUMP_MAGIC = 0x414e5445, + ETDUMP_BUF_REG = 0, + ETDUMP_BUF_MMU, + ETDUMP_BUF_RING, + ETDUMP_BUF_CMD, + ETDUMP_BUF_BOMAP, + ETDUMP_BUF_BO, + ETDUMP_BUF_END, +}; + +struct etnaviv_dump_object_header { + __le32 magic; + __le32 type; + __le32 file_offset; + __le32 file_size; + __le64 iova; + __le32 data[2]; +}; + +/* Registers object, an array of these */ +struct etnaviv_dump_registers { + __le32 reg; + __le32 value; +}; + +#ifdef __KERNEL__ +struct etnaviv_gpu; +void etnaviv_core_dump(struct etnaviv_gpu *gpu); +#endif + +#endif diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c new file mode 100644 index 000000000000..8d6f859f8200 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -0,0 +1,897 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include + +#include "etnaviv_drv.h" +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" + +static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj) +{ + struct drm_device *dev = etnaviv_obj->base.dev; + struct sg_table *sgt = etnaviv_obj->sgt; + + /* + * For non-cached buffers, ensure the new pages are clean + * because display controller, GPU, etc. are not coherent. + */ + if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) + dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); +} + +static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj) +{ + struct drm_device *dev = etnaviv_obj->base.dev; + struct sg_table *sgt = etnaviv_obj->sgt; + + /* + * For non-cached buffers, ensure the new pages are clean + * because display controller, GPU, etc. are not coherent: + * + * WARNING: The DMA API does not support concurrent CPU + * and device access to the memory area. With BIDIRECTIONAL, + * we will clean the cache lines which overlap the region, + * and invalidate all cache lines (partially) contained in + * the region. + * + * If you have dirty data in the overlapping cache lines, + * that will corrupt the GPU-written data. If you have + * written into the remainder of the region, this can + * discard those writes. 
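+ *
+ * In practice this means a cached (ETNA_BO_CACHED) buffer must only
+ * be touched by the CPU between the CPU_PREP and CPU_FINI ioctls,
+ * which issue the matching dma_sync_sg_for_cpu()/_for_device() calls
+ * further down in this file.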
+ */ + if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) + dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); +} + +/* called with etnaviv_obj->lock held */ +static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + struct drm_device *dev = etnaviv_obj->base.dev; + struct page **p = drm_gem_get_pages(&etnaviv_obj->base); + + if (IS_ERR(p)) { + dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); + return PTR_ERR(p); + } + + etnaviv_obj->pages = p; + + return 0; +} + +static void put_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->sgt) { + etnaviv_gem_scatterlist_unmap(etnaviv_obj); + sg_free_table(etnaviv_obj->sgt); + kfree(etnaviv_obj->sgt); + etnaviv_obj->sgt = NULL; + } + if (etnaviv_obj->pages) { + drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages, + true, false); + + etnaviv_obj->pages = NULL; + } +} + +struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + int ret; + + lockdep_assert_held(&etnaviv_obj->lock); + + if (!etnaviv_obj->pages) { + ret = etnaviv_obj->ops->get_pages(etnaviv_obj); + if (ret < 0) + return ERR_PTR(ret); + } + + if (!etnaviv_obj->sgt) { + struct drm_device *dev = etnaviv_obj->base.dev; + int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + struct sg_table *sgt; + + sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages); + if (IS_ERR(sgt)) { + dev_err(dev->dev, "failed to allocate sgt: %ld\n", + PTR_ERR(sgt)); + return ERR_CAST(sgt); + } + + etnaviv_obj->sgt = sgt; + + etnaviv_gem_scatter_map(etnaviv_obj); + } + + return etnaviv_obj->pages; +} + +void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + lockdep_assert_held(&etnaviv_obj->lock); + /* when we start tracking the pin count, then do something here */ +} + +static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + pgprot_t vm_page_prot; + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (etnaviv_obj->flags & ETNA_BO_WC) { + vma->vm_page_prot = pgprot_writecombine(vm_page_prot); + } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) { + vma->vm_page_prot = pgprot_noncached(vm_page_prot); + } else { + /* + * Shunt off cached objs to shmem file so they have their own + * address_space (so unmap_mapping_range does what we want, + * in particular in the case of mmap'd dmabufs) + */ + fput(vma->vm_file); + get_file(obj->filp); + vma->vm_pgoff = 0; + vma->vm_file = obj->filp; + + vma->vm_page_prot = vm_page_prot; + } + + return 0; +} + +int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct etnaviv_gem_object *obj; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) { + DBG("mmap failed: %d", ret); + return ret; + } + + obj = to_etnaviv_bo(vma->vm_private_data); + return etnaviv_gem_mmap_obj(vma->vm_private_data, vma); +} + +int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct page **pages, *page; + pgoff_t pgoff; + int ret; + + /* + * Make sure we don't parallel update on a fault, nor move or remove + * something from beneath our feet. Note that vm_insert_page() is + * specifically coded to take care of this, so we don't have to. 
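+ * The object lock taken below also serialises against get_pages()
+ * running from another path, so the pages array cannot change while
+ * the faulting page is looked up.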
+ */ + ret = mutex_lock_interruptible(&etnaviv_obj->lock); + if (ret) + goto out; + + /* make sure we have pages attached now */ + pages = etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + + /* We don't use vmf->pgoff since that has the fake offset: */ + pgoff = ((unsigned long)vmf->virtual_address - + vma->vm_start) >> PAGE_SHIFT; + + page = pages[pgoff]; + + VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT); + + ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + +out: + switch (ret) { + case -EAGAIN: + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset) +{ + int ret; + + /* Make it mmapable */ + ret = drm_gem_create_mmap_offset(obj); + if (ret) + dev_err(obj->dev->dev, "could not allocate mmap offset\n"); + else + *offset = drm_vma_node_offset_addr(&obj->vma_node); + + return ret; +} + +static struct etnaviv_vram_mapping * +etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj, + struct etnaviv_iommu *mmu) +{ + struct etnaviv_vram_mapping *mapping; + + list_for_each_entry(mapping, &obj->vram_list, obj_node) { + if (mapping->mmu == mmu) + return mapping; + } + + return NULL; +} + +int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, + struct drm_gem_object *obj, u32 *iova) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_vram_mapping *mapping; + struct page **pages; + int ret = 0; + + mutex_lock(&etnaviv_obj->lock); + mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); + if (mapping) { + /* + * Holding the object lock prevents the use count changing + * beneath us. If the use count is zero, the MMU might be + * reaping this object, so take the lock and re-check that + * the MMU owns this mapping to close this race. + */ + if (mapping->use == 0) { + mutex_lock(&gpu->mmu->lock); + if (mapping->mmu == gpu->mmu) + mapping->use += 1; + else + mapping = NULL; + mutex_unlock(&gpu->mmu->lock); + if (mapping) + goto out; + } else { + mapping->use += 1; + goto out; + } + } + + pages = etnaviv_gem_get_pages(etnaviv_obj); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + + /* + * See if we have a reaped vram mapping we can re-use before + * allocating a fresh mapping. 
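+ * (A reaped mapping is found by its NULL mmu pointer; re-using the
+ * struct merely avoids a fresh allocation.)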
+ */
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
+	if (!mapping) {
+		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping) {
+			/* bail out via the unlock path, not with the lock held */
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		INIT_LIST_HEAD(&mapping->scan_node);
+		mapping->object = etnaviv_obj;
+	} else {
+		list_del(&mapping->obj_node);
+	}
+
+	mapping->mmu = gpu->mmu;
+	mapping->use = 1;
+
+	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
+				    mapping);
+	if (ret < 0)
+		kfree(mapping);
+	else
+		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+
+out:
+	mutex_unlock(&etnaviv_obj->lock);
+
+	if (!ret) {
+		/* Take a reference on the object */
+		drm_gem_object_reference(obj);
+		*iova = mapping->iova;
+	}
+
+	return ret;
+}
+
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct etnaviv_vram_mapping *mapping;
+
+	mutex_lock(&etnaviv_obj->lock);
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+
+	WARN_ON(mapping->use == 0);
+	mapping->use -= 1;
+	mutex_unlock(&etnaviv_obj->lock);
+
+	drm_gem_object_unreference_unlocked(obj);
+}
+
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+
+	mutex_lock(&etnaviv_obj->lock);
+	if (!etnaviv_obj->vaddr) {
+		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
+
+		if (IS_ERR(pages)) {
+			/* drop the lock on the error path, too */
+			mutex_unlock(&etnaviv_obj->lock);
+			return ERR_CAST(pages);
+		}
+
+		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	}
+	mutex_unlock(&etnaviv_obj->lock);
+
+	return etnaviv_obj->vaddr;
+}
+
+static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
+{
+	if (op & ETNA_PREP_READ)
+		return DMA_FROM_DEVICE;
+	else if (op & ETNA_PREP_WRITE)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_BIDIRECTIONAL;
+}
+
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
+		struct timespec *timeout)
+{
+	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+	struct drm_device *dev = obj->dev;
+	bool write = !!(op & ETNA_PREP_WRITE);
+	int ret;
+
+	if (op & ETNA_PREP_NOSYNC) {
+		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+							  write))
+			return -EBUSY;
+	} else {
+		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
+
+		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+							  write, true, remain);
+		if (ret <= 0)
+			return ret == 0 ?
-ETIMEDOUT : ret; + } + + if (etnaviv_obj->flags & ETNA_BO_CACHED) { + if (!etnaviv_obj->sgt) { + void *ret; + + mutex_lock(&etnaviv_obj->lock); + ret = etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + if (IS_ERR(ret)) + return PTR_ERR(ret); + } + + dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, + etnaviv_obj->sgt->nents, + etnaviv_op_to_dma_dir(op)); + etnaviv_obj->last_cpu_prep_op = op; + } + + return 0; +} + +int etnaviv_gem_cpu_fini(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + if (etnaviv_obj->flags & ETNA_BO_CACHED) { + /* fini without a prep is almost certainly a userspace error */ + WARN_ON(etnaviv_obj->last_cpu_prep_op == 0); + dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl, + etnaviv_obj->sgt->nents, + etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op)); + etnaviv_obj->last_cpu_prep_op = 0; + } + + return 0; +} + +int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, + struct timespec *timeout) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout); +} + +#ifdef CONFIG_DEBUG_FS +static void etnaviv_gem_describe_fence(struct fence *fence, + const char *type, struct seq_file *m) +{ + if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + seq_printf(m, "\t%9s: %s %s seq %u\n", + type, + fence->ops->get_driver_name(fence), + fence->ops->get_timeline_name(fence), + fence->seqno); +} + +static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct reservation_object *robj = etnaviv_obj->resv; + struct reservation_object_list *fobj; + struct fence *fence; + unsigned long off = drm_vma_node_start(&obj->vma_node); + + seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", + etnaviv_obj->flags, is_active(etnaviv_obj) ? 
'A' : 'I', + obj->name, obj->refcount.refcount.counter, + off, etnaviv_obj->vaddr, obj->size); + + rcu_read_lock(); + fobj = rcu_dereference(robj->fence); + if (fobj) { + unsigned int i, shared_count = fobj->shared_count; + + for (i = 0; i < shared_count; i++) { + fence = rcu_dereference(fobj->shared[i]); + etnaviv_gem_describe_fence(fence, "Shared", m); + } + } + + fence = rcu_dereference(robj->fence_excl); + if (fence) + etnaviv_gem_describe_fence(fence, "Exclusive", m); + rcu_read_unlock(); +} + +void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, + struct seq_file *m) +{ + struct etnaviv_gem_object *etnaviv_obj; + int count = 0; + size_t size = 0; + + mutex_lock(&priv->gem_lock); + list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) { + struct drm_gem_object *obj = &etnaviv_obj->base; + + seq_puts(m, " "); + etnaviv_gem_describe(obj, m); + count++; + size += obj->size; + } + mutex_unlock(&priv->gem_lock); + + seq_printf(m, "Total %d objects, %zu bytes\n", count, size); +} +#endif + +static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->vaddr) + vunmap(etnaviv_obj->vaddr); + put_pages(etnaviv_obj); +} + +static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { + .get_pages = etnaviv_gem_shmem_get_pages, + .release = etnaviv_gem_shmem_release, +}; + +void etnaviv_gem_free_object(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + struct etnaviv_vram_mapping *mapping, *tmp; + + /* object should not be active */ + WARN_ON(is_active(etnaviv_obj)); + + list_del(&etnaviv_obj->gem_node); + + list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, + obj_node) { + struct etnaviv_iommu *mmu = mapping->mmu; + + WARN_ON(mapping->use); + + if (mmu) + etnaviv_iommu_unmap_gem(mmu, mapping); + + list_del(&mapping->obj_node); + kfree(mapping); + } + + drm_gem_free_mmap_offset(obj); + etnaviv_obj->ops->release(etnaviv_obj); + if (etnaviv_obj->resv == &etnaviv_obj->_resv) + reservation_object_fini(&etnaviv_obj->_resv); + drm_gem_object_release(obj); + + kfree(etnaviv_obj); +} + +int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&priv->gem_lock); + list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list); + mutex_unlock(&priv->gem_lock); + + return 0; +} + +static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, + struct reservation_object *robj, const struct etnaviv_gem_ops *ops, + struct drm_gem_object **obj) +{ + struct etnaviv_gem_object *etnaviv_obj; + unsigned sz = sizeof(*etnaviv_obj); + bool valid = true; + + /* validate flags */ + switch (flags & ETNA_BO_CACHE_MASK) { + case ETNA_BO_UNCACHED: + case ETNA_BO_CACHED: + case ETNA_BO_WC: + break; + default: + valid = false; + } + + if (!valid) { + dev_err(dev->dev, "invalid cache flag: %x\n", + (flags & ETNA_BO_CACHE_MASK)); + return -EINVAL; + } + + etnaviv_obj = kzalloc(sz, GFP_KERNEL); + if (!etnaviv_obj) + return -ENOMEM; + + etnaviv_obj->flags = flags; + etnaviv_obj->ops = ops; + if (robj) { + etnaviv_obj->resv = robj; + } else { + etnaviv_obj->resv = &etnaviv_obj->_resv; + reservation_object_init(&etnaviv_obj->_resv); + } + + mutex_init(&etnaviv_obj->lock); + INIT_LIST_HEAD(&etnaviv_obj->vram_list); + + *obj = &etnaviv_obj->base; + + return 0; +} + +static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, + u32 size, u32 flags) 
+{
+	struct drm_gem_object *obj = NULL;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+
+	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
+				   &etnaviv_gem_shmem_ops, &obj);
+	if (ret)
+		goto fail;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret == 0) {
+		struct address_space *mapping;
+
+		/*
+		 * Our buffers are kept pinned, so allocating them
+		 * from the MOVABLE zone is a really bad idea, and
+		 * conflicts with CMA. See comments above new_inode()
+		 * for why this is required _and_ expected if you're
+		 * going to pin these pages.
+		 */
+		mapping = file_inode(obj->filp)->i_mapping;
+		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+	}
+
+	if (ret)
+		goto fail;
+
+	return obj;
+
+fail:
+	if (obj)
+		drm_gem_object_unreference_unlocked(obj);
+
+	return ERR_PTR(ret);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		u32 size, u32 flags, u32 *handle)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = __etnaviv_gem_new(dev, size, flags);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	ret = etnaviv_gem_obj_add(dev, obj);
+	if (ret < 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+	ret = drm_gem_handle_create(file, obj, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
+		u32 size, u32 flags)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = __etnaviv_gem_new(dev, size, flags);
+	if (IS_ERR(obj))
+		return obj;
+
+	ret = etnaviv_gem_obj_add(dev, obj);
+	if (ret < 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	return obj;
+}
+
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
+	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
+	struct etnaviv_gem_object **res)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
+	if (ret)
+		return ret;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	*res = to_etnaviv_bo(obj);
+
+	return 0;
+}
+
+struct get_pages_work {
+	struct work_struct work;
+	struct mm_struct *mm;
+	struct task_struct *task;
+	struct etnaviv_gem_object *etnaviv_obj;
+};
+
+static struct page **etnaviv_gem_userptr_do_get_pages(
+	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
+{
+	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+	struct page **pvec;
+	uintptr_t ptr;
+
+	pvec = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!pvec)
+		return ERR_PTR(-ENOMEM);
+
+	pinned = 0;
+	ptr = etnaviv_obj->userptr.ptr;
+
+	down_read(&mm->mmap_sem);
+	while (pinned < npages) {
+		ret = get_user_pages(task, mm, ptr, npages - pinned,
+				     !etnaviv_obj->userptr.ro, 0,
+				     pvec + pinned, NULL);
+		if (ret < 0)
+			break;
+
+		ptr += ret * PAGE_SIZE;
+		pinned += ret;
+	}
+	up_read(&mm->mmap_sem);
+
+	if (ret < 0) {
+		release_pages(pvec, pinned, 0);
+		drm_free_large(pvec);
+		return ERR_PTR(ret);
+	}
+
+	return pvec;
+}
+
+static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
+{
+	struct get_pages_work *work = container_of(_work, typeof(*work), work);
+	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
+	struct page **pvec;
+
+	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
+
+	mutex_lock(&etnaviv_obj->lock);
+	if (IS_ERR(pvec)) {
+		etnaviv_obj->userptr.work = ERR_CAST(pvec);
+	} else {
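+		/* success: publish the pinned pages and clear the async marker */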
etnaviv_obj->userptr.work = NULL; + etnaviv_obj->pages = pvec; + } + + mutex_unlock(&etnaviv_obj->lock); + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + + mmput(work->mm); + put_task_struct(work->task); + kfree(work); +} + +static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) +{ + struct page **pvec = NULL; + struct get_pages_work *work; + struct mm_struct *mm; + int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; + + if (etnaviv_obj->userptr.work) { + if (IS_ERR(etnaviv_obj->userptr.work)) { + ret = PTR_ERR(etnaviv_obj->userptr.work); + etnaviv_obj->userptr.work = NULL; + } else { + ret = -EAGAIN; + } + return ret; + } + + mm = get_task_mm(etnaviv_obj->userptr.task); + pinned = 0; + if (mm == current->mm) { + pvec = drm_malloc_ab(npages, sizeof(struct page *)); + if (!pvec) { + mmput(mm); + return -ENOMEM; + } + + pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages, + !etnaviv_obj->userptr.ro, pvec); + if (pinned < 0) { + drm_free_large(pvec); + mmput(mm); + return pinned; + } + + if (pinned == npages) { + etnaviv_obj->pages = pvec; + mmput(mm); + return 0; + } + } + + release_pages(pvec, pinned, 0); + drm_free_large(pvec); + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) { + mmput(mm); + return -ENOMEM; + } + + get_task_struct(current); + drm_gem_object_reference(&etnaviv_obj->base); + + work->mm = mm; + work->task = current; + work->etnaviv_obj = etnaviv_obj; + + etnaviv_obj->userptr.work = &work->work; + INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages); + + etnaviv_queue_work(etnaviv_obj->base.dev, &work->work); + + return -EAGAIN; +} + +static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->sgt) { + etnaviv_gem_scatterlist_unmap(etnaviv_obj); + sg_free_table(etnaviv_obj->sgt); + kfree(etnaviv_obj->sgt); + } + if (etnaviv_obj->pages) { + int npages = etnaviv_obj->base.size >> PAGE_SHIFT; + + release_pages(etnaviv_obj->pages, npages, 0); + drm_free_large(etnaviv_obj->pages); + } + put_task_struct(etnaviv_obj->userptr.task); +} + +static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = { + .get_pages = etnaviv_gem_userptr_get_pages, + .release = etnaviv_gem_userptr_release, +}; + +int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, + uintptr_t ptr, u32 size, u32 flags, u32 *handle) +{ + struct etnaviv_gem_object *etnaviv_obj; + int ret; + + ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL, + &etnaviv_gem_userptr_ops, &etnaviv_obj); + if (ret) + return ret; + + etnaviv_obj->userptr.ptr = ptr; + etnaviv_obj->userptr.task = current; + etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE); + get_task_struct(current); + + ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); + if (ret) { + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + return ret; + } + + ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + + return ret; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h new file mode 100644 index 000000000000..a300b4b3d545 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_GEM_H__ +#define __ETNAVIV_GEM_H__ + +#include +#include "etnaviv_drv.h" + +struct etnaviv_gem_ops; +struct etnaviv_gem_object; + +struct etnaviv_gem_userptr { + uintptr_t ptr; + struct task_struct *task; + struct work_struct *work; + bool ro; +}; + +struct etnaviv_vram_mapping { + struct list_head obj_node; + struct list_head scan_node; + struct list_head mmu_node; + struct etnaviv_gem_object *object; + struct etnaviv_iommu *mmu; + struct drm_mm_node vram_node; + unsigned int use; + u32 iova; +}; + +struct etnaviv_gem_object { + struct drm_gem_object base; + const struct etnaviv_gem_ops *ops; + struct mutex lock; + + u32 flags; + + struct list_head gem_node; + struct etnaviv_gpu *gpu; /* non-null if active */ + atomic_t gpu_active; + u32 access; + + struct page **pages; + struct sg_table *sgt; + void *vaddr; + + /* normally (resv == &_resv) except for imported bo's */ + struct reservation_object *resv; + struct reservation_object _resv; + + struct list_head vram_list; + + /* cache maintenance */ + u32 last_cpu_prep_op; + + struct etnaviv_gem_userptr userptr; +}; + +static inline +struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj) +{ + return container_of(obj, struct etnaviv_gem_object, base); +} + +struct etnaviv_gem_ops { + int (*get_pages)(struct etnaviv_gem_object *); + void (*release)(struct etnaviv_gem_object *); +}; + +static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) +{ + return atomic_read(&etnaviv_obj->gpu_active) != 0; +} + +#define MAX_CMDS 4 + +/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, + * associated with the cmdstream submission for synchronization (and + * make it easier to unwind when things go wrong, etc). This only + * lasts for the duration of the submit-ioctl. + */ +struct etnaviv_gem_submit { + struct drm_device *dev; + struct etnaviv_gpu *gpu; + struct ww_acquire_ctx ticket; + u32 fence; + unsigned int nr_bos; + struct { + u32 flags; + struct etnaviv_gem_object *obj; + u32 iova; + } bos[0]; +}; + +int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, + struct timespec *timeout); +int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags, + struct reservation_object *robj, const struct etnaviv_gem_ops *ops, + struct etnaviv_gem_object **res); +int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj); +struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj); +void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj); + +#endif /* __ETNAVIV_GEM_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c new file mode 100644 index 000000000000..e94db4f95770 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include "etnaviv_drv.h" +#include "etnaviv_gem.h" + + +struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */ + + return etnaviv_obj->sgt; +} + +void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) +{ + return etnaviv_gem_vaddr(obj); +} + +void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + /* TODO msm_gem_vunmap() */ +} + +int etnaviv_gem_prime_pin(struct drm_gem_object *obj) +{ + if (!obj->import_attach) { + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&etnaviv_obj->lock); + etnaviv_gem_get_pages(etnaviv_obj); + mutex_unlock(&etnaviv_obj->lock); + } + return 0; +} + +void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) +{ + if (!obj->import_attach) { + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + mutex_lock(&etnaviv_obj->lock); + etnaviv_gem_put_pages(to_etnaviv_bo(obj)); + mutex_unlock(&etnaviv_obj->lock); + } +} + +static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) +{ + if (etnaviv_obj->vaddr) + dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, + etnaviv_obj->vaddr); + + /* Don't drop the pages for imported dmabuf, as they are not + * ours, just free the array we allocated: + */ + if (etnaviv_obj->pages) + drm_free_large(etnaviv_obj->pages); + + drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); +} + +static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { + /* .get_pages should never be called */ + .release = etnaviv_gem_prime_release, +}; + +struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt) +{ + struct etnaviv_gem_object *etnaviv_obj; + size_t size = PAGE_ALIGN(attach->dmabuf->size); + int ret, npages; + + ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC, + attach->dmabuf->resv, + &etnaviv_gem_prime_ops, &etnaviv_obj); + if (ret < 0) + return ERR_PTR(ret); + + npages = size / PAGE_SIZE; + + etnaviv_obj->sgt = sgt; + etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (!etnaviv_obj->pages) { + ret = -ENOMEM; + goto fail; + } + + ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages, + NULL, npages); + if (ret) + goto fail; + + ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); + if (ret) + goto fail; + + return &etnaviv_obj->base; + +fail: + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c new file mode 100644 index 000000000000..1aba01a999df --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -0,0 +1,443 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include "etnaviv_drv.h" +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" + +/* + * Cmdstream submission: + */ + +#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE) +/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */ +#define BO_LOCKED 0x4000 +#define BO_PINNED 0x2000 + +static inline void __user *to_user_ptr(u64 address) +{ + return (void __user *)(uintptr_t)address; +} + +static struct etnaviv_gem_submit *submit_create(struct drm_device *dev, + struct etnaviv_gpu *gpu, size_t nr) +{ + struct etnaviv_gem_submit *submit; + size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); + + submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + if (submit) { + submit->dev = dev; + submit->gpu = gpu; + + /* initially, until copy_from_user() and bo lookup succeeds: */ + submit->nr_bos = 0; + + ww_acquire_init(&submit->ticket, &reservation_ww_class); + } + + return submit; +} + +static int submit_lookup_objects(struct etnaviv_gem_submit *submit, + struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos, + unsigned nr_bos) +{ + struct drm_etnaviv_gem_submit_bo *bo; + unsigned i; + int ret = 0; + + spin_lock(&file->table_lock); + + for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) { + struct drm_gem_object *obj; + + if (bo->flags & BO_INVALID_FLAGS) { + DRM_ERROR("invalid flags: %x\n", bo->flags); + ret = -EINVAL; + goto out_unlock; + } + + submit->bos[i].flags = bo->flags; + + /* normally use drm_gem_object_lookup(), but for bulk lookup + * all under single table_lock just hit object_idr directly: + */ + obj = idr_find(&file->object_idr, bo->handle); + if (!obj) { + DRM_ERROR("invalid handle %u at index %u\n", + bo->handle, i); + ret = -EINVAL; + goto out_unlock; + } + + /* + * Take a refcount on the object. The file table lock + * prevents the object_idr's refcount on this being dropped. 
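+	 * (idr_find() itself takes no reference, hence the explicit
+	 * drm_gem_object_reference() below.)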
+ */ + drm_gem_object_reference(obj); + + submit->bos[i].obj = to_etnaviv_bo(obj); + } + +out_unlock: + submit->nr_bos = i; + spin_unlock(&file->table_lock); + + return ret; +} + +static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i) +{ + if (submit->bos[i].flags & BO_LOCKED) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + ww_mutex_unlock(&etnaviv_obj->resv->lock); + submit->bos[i].flags &= ~BO_LOCKED; + } +} + +static int submit_lock_objects(struct etnaviv_gem_submit *submit) +{ + int contended, slow_locked = -1, i, ret = 0; + +retry: + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + if (slow_locked == i) + slow_locked = -1; + + contended = i; + + if (!(submit->bos[i].flags & BO_LOCKED)) { + ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock, + &submit->ticket); + if (ret == -EALREADY) + DRM_ERROR("BO at index %u already on submit list\n", + i); + if (ret) + goto fail; + submit->bos[i].flags |= BO_LOCKED; + } + } + + ww_acquire_done(&submit->ticket); + + return 0; + +fail: + for (; i >= 0; i--) + submit_unlock_object(submit, i); + + if (slow_locked > 0) + submit_unlock_object(submit, slow_locked); + + if (ret == -EDEADLK) { + struct etnaviv_gem_object *etnaviv_obj; + + etnaviv_obj = submit->bos[contended].obj; + + /* we lost out in a seqno race, lock and retry.. */ + ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock, + &submit->ticket); + if (!ret) { + submit->bos[contended].flags |= BO_LOCKED; + slow_locked = contended; + goto retry; + } + } + + return ret; +} + +static int submit_fence_sync(const struct etnaviv_gem_submit *submit) +{ + unsigned int context = submit->gpu->fence_context; + int i, ret = 0; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; + + ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write); + if (ret) + break; + } + + return ret; +} + +static void submit_unpin_objects(struct etnaviv_gem_submit *submit) +{ + int i; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + if (submit->bos[i].flags & BO_PINNED) + etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base); + + submit->bos[i].iova = 0; + submit->bos[i].flags &= ~BO_PINNED; + } +} + +static int submit_pin_objects(struct etnaviv_gem_submit *submit) +{ + int i, ret = 0; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + u32 iova; + + ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base, + &iova); + if (ret) + break; + + submit->bos[i].flags |= BO_PINNED; + submit->bos[i].iova = iova; + } + + return ret; +} + +static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx, + struct etnaviv_gem_object **obj, u32 *iova) +{ + if (idx >= submit->nr_bos) { + DRM_ERROR("invalid buffer index: %u (out of %u)\n", + idx, submit->nr_bos); + return -EINVAL; + } + + if (obj) + *obj = submit->bos[idx].obj; + if (iova) + *iova = submit->bos[idx].iova; + + return 0; +} + +/* process the reloc's and patch up the cmdstream as needed: */ +static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, + u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs, + u32 nr_relocs) +{ + u32 i, last_offset = 0; + u32 *ptr = stream; + int ret; + + for (i = 0; i < nr_relocs; i++) { + const struct drm_etnaviv_gem_submit_reloc *r = relocs + i; + struct etnaviv_gem_object 
*bobj; + u32 iova, off; + + if (unlikely(r->flags)) { + DRM_ERROR("invalid reloc flags\n"); + return -EINVAL; + } + + if (r->submit_offset % 4) { + DRM_ERROR("non-aligned reloc offset: %u\n", + r->submit_offset); + return -EINVAL; + } + + /* offset in dwords: */ + off = r->submit_offset / 4; + + if ((off >= size ) || + (off < last_offset)) { + DRM_ERROR("invalid offset %u at reloc %u\n", off, i); + return -EINVAL; + } + + ret = submit_bo(submit, r->reloc_idx, &bobj, &iova); + if (ret) + return ret; + + if (r->reloc_offset >= + bobj->base.size - sizeof(*ptr)) { + DRM_ERROR("relocation %u outside object", i); + return -EINVAL; + } + + ptr[off] = iova + r->reloc_offset; + + last_offset = off; + } + + return 0; +} + +static void submit_cleanup(struct etnaviv_gem_submit *submit) +{ + unsigned i; + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + + submit_unlock_object(submit, i); + drm_gem_object_unreference_unlocked(&etnaviv_obj->base); + } + + ww_acquire_fini(&submit->ticket); + kfree(submit); +} + +int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct etnaviv_drm_private *priv = dev->dev_private; + struct drm_etnaviv_gem_submit *args = data; + struct drm_etnaviv_gem_submit_reloc *relocs; + struct drm_etnaviv_gem_submit_bo *bos; + struct etnaviv_gem_submit *submit; + struct etnaviv_cmdbuf *cmdbuf; + struct etnaviv_gpu *gpu; + void *stream; + int ret; + + if (args->pipe >= ETNA_MAX_PIPES) + return -EINVAL; + + gpu = priv->gpu[args->pipe]; + if (!gpu) + return -ENXIO; + + if (args->stream_size % 4) { + DRM_ERROR("non-aligned cmdstream buffer size: %u\n", + args->stream_size); + return -EINVAL; + } + + if (args->exec_state != ETNA_PIPE_3D && + args->exec_state != ETNA_PIPE_2D && + args->exec_state != ETNA_PIPE_VG) { + DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state); + return -EINVAL; + } + + /* + * Copy the command submission and bo array to kernel space in + * one go, and do this outside of any locks. 
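+	 * (copy_from_user() may fault and sleep, so it must not run under
+	 * the ww mutexes taken in submit_lock_objects().)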
+ */ + bos = drm_malloc_ab(args->nr_bos, sizeof(*bos)); + relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs)); + stream = drm_malloc_ab(1, args->stream_size); + cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8, + args->nr_bos); + if (!bos || !relocs || !stream || !cmdbuf) { + ret = -ENOMEM; + goto err_submit_cmds; + } + + cmdbuf->exec_state = args->exec_state; + cmdbuf->ctx = file->driver_priv; + + ret = copy_from_user(bos, to_user_ptr(args->bos), + args->nr_bos * sizeof(*bos)); + if (ret) { + ret = -EFAULT; + goto err_submit_cmds; + } + + ret = copy_from_user(relocs, to_user_ptr(args->relocs), + args->nr_relocs * sizeof(*relocs)); + if (ret) { + ret = -EFAULT; + goto err_submit_cmds; + } + + ret = copy_from_user(stream, to_user_ptr(args->stream), + args->stream_size); + if (ret) { + ret = -EFAULT; + goto err_submit_cmds; + } + + submit = submit_create(dev, gpu, args->nr_bos); + if (!submit) { + ret = -ENOMEM; + goto err_submit_cmds; + } + + ret = submit_lookup_objects(submit, file, bos, args->nr_bos); + if (ret) + goto err_submit_objects; + + ret = submit_lock_objects(submit); + if (ret) + goto err_submit_objects; + + if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4, + relocs, args->nr_relocs)) { + ret = -EINVAL; + goto err_submit_objects; + } + + ret = submit_fence_sync(submit); + if (ret) + goto err_submit_objects; + + ret = submit_pin_objects(submit); + if (ret) + goto out; + + ret = submit_reloc(submit, stream, args->stream_size / 4, + relocs, args->nr_relocs); + if (ret) + goto out; + + memcpy(cmdbuf->vaddr, stream, args->stream_size); + cmdbuf->user_size = ALIGN(args->stream_size, 8); + + ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); + if (ret == 0) + cmdbuf = NULL; + + args->fence = submit->fence; + +out: + submit_unpin_objects(submit); + + /* + * If we're returning -EAGAIN, it may be due to the userptr code + * wanting to run its workqueue outside of any locks. Flush our + * workqueue to ensure that it is run in a timely manner. + */ + if (ret == -EAGAIN) + flush_workqueue(priv->wq); + +err_submit_objects: + submit_cleanup(submit); + +err_submit_cmds: + /* if we still own the cmdbuf */ + if (cmdbuf) + etnaviv_gpu_cmdbuf_free(cmdbuf); + if (stream) + drm_free_large(stream); + if (bos) + drm_free_large(bos); + if (relocs) + drm_free_large(relocs); + + return ret; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c new file mode 100644 index 000000000000..d39093dc37e6 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -0,0 +1,1644 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include "etnaviv_dump.h" +#include "etnaviv_gpu.h" +#include "etnaviv_gem.h" +#include "etnaviv_mmu.h" +#include "etnaviv_iommu.h" +#include "etnaviv_iommu_v2.h" +#include "common.xml.h" +#include "state.xml.h" +#include "state_hi.xml.h" +#include "cmdstream.xml.h" + +static const struct platform_device_id gpu_ids[] = { + { .name = "etnaviv-gpu,2d" }, + { }, +}; + +static bool etnaviv_dump_core = true; +module_param_named(dump_core, etnaviv_dump_core, bool, 0600); + +/* + * Driver functions: + */ + +int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) +{ + switch (param) { + case ETNAVIV_PARAM_GPU_MODEL: + *value = gpu->identity.model; + break; + + case ETNAVIV_PARAM_GPU_REVISION: + *value = gpu->identity.revision; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_0: + *value = gpu->identity.features; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_1: + *value = gpu->identity.minor_features0; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_2: + *value = gpu->identity.minor_features1; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_3: + *value = gpu->identity.minor_features2; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_4: + *value = gpu->identity.minor_features3; + break; + + case ETNAVIV_PARAM_GPU_STREAM_COUNT: + *value = gpu->identity.stream_count; + break; + + case ETNAVIV_PARAM_GPU_REGISTER_MAX: + *value = gpu->identity.register_max; + break; + + case ETNAVIV_PARAM_GPU_THREAD_COUNT: + *value = gpu->identity.thread_count; + break; + + case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE: + *value = gpu->identity.vertex_cache_size; + break; + + case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT: + *value = gpu->identity.shader_core_count; + break; + + case ETNAVIV_PARAM_GPU_PIXEL_PIPES: + *value = gpu->identity.pixel_pipes; + break; + + case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE: + *value = gpu->identity.vertex_output_buffer_size; + break; + + case ETNAVIV_PARAM_GPU_BUFFER_SIZE: + *value = gpu->identity.buffer_size; + break; + + case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT: + *value = gpu->identity.instruction_count; + break; + + case ETNAVIV_PARAM_GPU_NUM_CONSTANTS: + *value = gpu->identity.num_constants; + break; + + default: + DBG("%s: invalid param: %u", dev_name(gpu->dev), param); + return -EINVAL; + } + + return 0; +} + +static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) +{ + if (gpu->identity.minor_features0 & + chipMinorFeatures0_MORE_MINOR_FEATURES) { + u32 specs[2]; + + specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); + specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); + + gpu->identity.stream_count = + (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT; + gpu->identity.register_max = + (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) + >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT; + gpu->identity.thread_count = + (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT; + gpu->identity.vertex_cache_size = + (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) + >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT; + gpu->identity.shader_core_count = + (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT; + gpu->identity.pixel_pipes = + (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) + >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT; + gpu->identity.vertex_output_buffer_size = + (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) + >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT; + + 
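+		/* the remaining limits live in VIVS_HI_CHIP_SPECS_2 */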
gpu->identity.buffer_size = + (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) + >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT; + gpu->identity.instruction_count = + (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) + >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT; + gpu->identity.num_constants = + (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) + >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT; + } + + /* Fill in the stream count if not specified */ + if (gpu->identity.stream_count == 0) { + if (gpu->identity.model >= 0x1000) + gpu->identity.stream_count = 4; + else + gpu->identity.stream_count = 1; + } + + /* Convert the register max value */ + if (gpu->identity.register_max) + gpu->identity.register_max = 1 << gpu->identity.register_max; + else if (gpu->identity.model == 0x0400) + gpu->identity.register_max = 32; + else + gpu->identity.register_max = 64; + + /* Convert thread count */ + if (gpu->identity.thread_count) + gpu->identity.thread_count = 1 << gpu->identity.thread_count; + else if (gpu->identity.model == 0x0400) + gpu->identity.thread_count = 64; + else if (gpu->identity.model == 0x0500 || + gpu->identity.model == 0x0530) + gpu->identity.thread_count = 128; + else + gpu->identity.thread_count = 256; + + if (gpu->identity.vertex_cache_size == 0) + gpu->identity.vertex_cache_size = 8; + + if (gpu->identity.shader_core_count == 0) { + if (gpu->identity.model >= 0x1000) + gpu->identity.shader_core_count = 2; + else + gpu->identity.shader_core_count = 1; + } + + if (gpu->identity.pixel_pipes == 0) + gpu->identity.pixel_pipes = 1; + + /* Convert vertex buffer size */ + if (gpu->identity.vertex_output_buffer_size) { + gpu->identity.vertex_output_buffer_size = + 1 << gpu->identity.vertex_output_buffer_size; + } else if (gpu->identity.model == 0x0400) { + if (gpu->identity.revision < 0x4000) + gpu->identity.vertex_output_buffer_size = 512; + else if (gpu->identity.revision < 0x4200) + gpu->identity.vertex_output_buffer_size = 256; + else + gpu->identity.vertex_output_buffer_size = 128; + } else { + gpu->identity.vertex_output_buffer_size = 512; + } + + switch (gpu->identity.instruction_count) { + case 0: + if ((gpu->identity.model == 0x2000 && + gpu->identity.revision == 0x5108) || + gpu->identity.model == 0x880) + gpu->identity.instruction_count = 512; + else + gpu->identity.instruction_count = 256; + break; + + case 1: + gpu->identity.instruction_count = 1024; + break; + + case 2: + gpu->identity.instruction_count = 2048; + break; + + default: + gpu->identity.instruction_count = 256; + break; + } + + if (gpu->identity.num_constants == 0) + gpu->identity.num_constants = 168; +} + +static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) +{ + u32 chipIdentity; + + chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); + + /* Special case for older graphics cores. */ + if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) { + gpu->identity.model = 0x500; /* gc500 */ + gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity); + } else { + + gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); + gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); + + /* + * !!!! HACK ALERT !!!! + * Because people change device IDs without letting software + * know about it - here is the hack to make it all look the + * same. Only for GC400 family.
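+ * As an illustrative (assumed) example: a core reporting model + * 0x0428 would be masked down to plain 0x0400 by the code below.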
+ */ + if ((gpu->identity.model & 0xff00) == 0x0400 && + gpu->identity.model != 0x0420) { + gpu->identity.model = gpu->identity.model & 0x0400; + } + + /* Another special case */ + if (gpu->identity.model == 0x300 && + gpu->identity.revision == 0x2201) { + u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); + u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); + + if (chipDate == 0x20080814 && chipTime == 0x12051100) { + /* + * This IP has an ECO; put the correct + * revision in it. + */ + gpu->identity.revision = 0x1051; + } + } + } + + dev_info(gpu->dev, "model: GC%x, revision: %x\n", + gpu->identity.model, gpu->identity.revision); + + gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); + + /* Disable fast clear on GC700. */ + if (gpu->identity.model == 0x700) + gpu->identity.features &= ~chipFeatures_FAST_CLEAR; + + if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) || + (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) { + + /* + * GC500 rev 1.x and GC300 rev < 2.0 don't have these + * registers. + */ + gpu->identity.minor_features0 = 0; + gpu->identity.minor_features1 = 0; + gpu->identity.minor_features2 = 0; + gpu->identity.minor_features3 = 0; + } else + gpu->identity.minor_features0 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); + + if (gpu->identity.minor_features0 & + chipMinorFeatures0_MORE_MINOR_FEATURES) { + gpu->identity.minor_features1 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1); + gpu->identity.minor_features2 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); + gpu->identity.minor_features3 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); + } + + /* GC600 idle register reports zero bits where modules aren't present */ + if (gpu->identity.model == chipModel_GC600) { + gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | + VIVS_HI_IDLE_STATE_RA | + VIVS_HI_IDLE_STATE_SE | + VIVS_HI_IDLE_STATE_PA | + VIVS_HI_IDLE_STATE_SH | + VIVS_HI_IDLE_STATE_PE | + VIVS_HI_IDLE_STATE_DE | + VIVS_HI_IDLE_STATE_FE; + } else { + gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP; + } + + etnaviv_hw_specs(gpu); +} + +static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock) +{ + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock | + VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD); + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); +} + +static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) +{ + u32 control, idle; + unsigned long timeout; + bool failed = true; + + /* TODO + * + * - clock gating + * - pulse eater + * - what about VG? + */ + + /* We hope that the GPU resets in under one second */ + timeout = jiffies + msecs_to_jiffies(1000); + + while (time_is_after_jiffies(timeout)) { + control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); + + /* enable clock */ + etnaviv_gpu_load_clock(gpu, control); + + /* Wait for stable clock. Vivante's code waited for 1ms */ + usleep_range(1000, 10000); + + /* isolate the GPU. */ + control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* set soft reset. */ + control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* wait for reset. */ + msleep(1); + + /* reset soft reset bit. */ + control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* reset GPU isolation. */ + control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + + /* read idle register.
*/ + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + + /* try resetting again if FE is not idle */ + if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) { + dev_dbg(gpu->dev, "FE is not idle\n"); + continue; + } + + /* read reset register. */ + control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); + + /* is the GPU idle? */ + if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) || + ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) { + dev_dbg(gpu->dev, "GPU is not idle\n"); + continue; + } + + failed = false; + break; + } + + if (failed) { + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); + + dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n", + idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ", + control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ", + control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not "); + + return -EBUSY; + } + + /* We rely on the GPU running, so program the clock */ + control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); + + /* enable clock */ + etnaviv_gpu_load_clock(gpu, control); + + return 0; +} + +static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) +{ + u16 prefetch; + + if (gpu->identity.model == chipModel_GC320 && + gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 && + (gpu->identity.revision == 0x5007 || + gpu->identity.revision == 0x5220)) { + u32 mc_memory_debug; + + mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; + + if (gpu->identity.revision == 0x5007) + mc_memory_debug |= 0x0c; + else + mc_memory_debug |= 0x08; + + gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug); + } + + /* + * Update GPU AXI cache attribute to "cacheable, no allocate". + * This is necessary to prevent the iMX6 SoC locking up. + */ + gpu_write(gpu, VIVS_HI_AXI_CONFIG, + VIVS_HI_AXI_CONFIG_AWCACHE(2) | + VIVS_HI_AXI_CONFIG_ARCACHE(2)); + + /* GC2000 rev 5108 needs a special bus config */ + if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) { + u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); + bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | + VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); + bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) | + VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0); + gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config); + } + + /* set base addresses */ + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base); + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base); + + /* setup the MMU page table pointers */ + etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain); + + /* Start command processor */ + prefetch = etnaviv_buffer_init(gpu); + + gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U); + gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, + gpu->buffer->paddr - gpu->memory_base); + gpu_write(gpu, VIVS_FE_COMMAND_CONTROL, + VIVS_FE_COMMAND_CONTROL_ENABLE | + VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch)); +} + +int etnaviv_gpu_init(struct etnaviv_gpu *gpu) +{ + int ret, i; + struct iommu_domain *iommu; + enum etnaviv_iommu_version version; + bool mmuv2; + + ret = pm_runtime_get_sync(gpu->dev); + if (ret < 0) + return ret; + + etnaviv_hw_identify(gpu); + + if (gpu->identity.model == 0) { + dev_err(gpu->dev, "Unknown GPU model\n"); + pm_runtime_put_autosuspend(gpu->dev); + return -ENXIO; + } + + ret = etnaviv_hw_reset(gpu); + if (ret) + goto fail; + + /* Setup
IOMMU.. eventually we will (I think) do this once per context + * and have separate page tables per context. For now, to keep things + * simple and to get something working, just use a single address space: + */ + mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION; + dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2); + + if (!mmuv2) { + iommu = etnaviv_iommu_domain_alloc(gpu); + version = ETNAVIV_IOMMU_V1; + } else { + iommu = etnaviv_iommu_v2_domain_alloc(gpu); + version = ETNAVIV_IOMMU_V2; + } + + if (!iommu) { + ret = -ENOMEM; + goto fail; + } + + /* TODO: we will leak memory here - fix it! */ + + gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); + if (!gpu->mmu) { + ret = -ENOMEM; + goto fail; + } + + /* Create buffer: */ + gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0); + if (!gpu->buffer) { + ret = -ENOMEM; + dev_err(gpu->dev, "could not create command buffer\n"); + goto fail; + } + if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { + ret = -EINVAL; + dev_err(gpu->dev, + "command buffer outside valid memory window\n"); + goto free_buffer; + } + + /* Setup event management */ + spin_lock_init(&gpu->event_spinlock); + init_completion(&gpu->event_free); + for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { + gpu->event[i].used = false; + complete(&gpu->event_free); + } + + /* Now program the hardware */ + mutex_lock(&gpu->lock); + etnaviv_gpu_hw_init(gpu); + mutex_unlock(&gpu->lock); + + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return 0; + +free_buffer: + etnaviv_gpu_cmdbuf_free(gpu->buffer); + gpu->buffer = NULL; +fail: + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return ret; +} + +#ifdef CONFIG_DEBUG_FS +struct dma_debug { + u32 address[2]; + u32 state[2]; +}; + +static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug) +{ + u32 i; + + debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); + + for (i = 0; i < 500; i++) { + debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE); + + if (debug->address[0] != debug->address[1]) + break; + + if (debug->state[0] != debug->state[1]) + break; + } +} + +int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) +{ + struct dma_debug debug; + u32 dma_lo, dma_hi, axi, idle; + int ret; + + seq_printf(m, "%s Status:\n", dev_name(gpu->dev)); + + ret = pm_runtime_get_sync(gpu->dev); + if (ret < 0) + return ret; + + dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); + dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); + axi = gpu_read(gpu, VIVS_HI_AXI_STATUS); + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + + verify_dma(gpu, &debug); + + seq_puts(m, "\tfeatures\n"); + seq_printf(m, "\t minor_features0: 0x%08x\n", + gpu->identity.minor_features0); + seq_printf(m, "\t minor_features1: 0x%08x\n", + gpu->identity.minor_features1); + seq_printf(m, "\t minor_features2: 0x%08x\n", + gpu->identity.minor_features2); + seq_printf(m, "\t minor_features3: 0x%08x\n", + gpu->identity.minor_features3); + + seq_puts(m, "\tspecs\n"); + seq_printf(m, "\t stream_count: %d\n", + gpu->identity.stream_count); + seq_printf(m, "\t register_max: %d\n", + gpu->identity.register_max); + seq_printf(m, "\t thread_count: %d\n", + gpu->identity.thread_count); + seq_printf(m, "\t vertex_cache_size: %d\n", + gpu->identity.vertex_cache_size); + seq_printf(m, "\t shader_core_count: %d\n", + gpu->identity.shader_core_count); + seq_printf(m, "\t pixel_pipes: %d\n",
+ gpu->identity.pixel_pipes); + seq_printf(m, "\t vertex_output_buffer_size: %d\n", + gpu->identity.vertex_output_buffer_size); + seq_printf(m, "\t buffer_size: %d\n", + gpu->identity.buffer_size); + seq_printf(m, "\t instruction_count: %d\n", + gpu->identity.instruction_count); + seq_printf(m, "\t num_constants: %d\n", + gpu->identity.num_constants); + + seq_printf(m, "\taxi: 0x%08x\n", axi); + seq_printf(m, "\tidle: 0x%08x\n", idle); + idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP; + if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) + seq_puts(m, "\t FE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_DE) == 0) + seq_puts(m, "\t DE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_PE) == 0) + seq_puts(m, "\t PE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_SH) == 0) + seq_puts(m, "\t SH is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_PA) == 0) + seq_puts(m, "\t PA is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_SE) == 0) + seq_puts(m, "\t SE is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_RA) == 0) + seq_puts(m, "\t RA is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_TX) == 0) + seq_puts(m, "\t TX is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_VG) == 0) + seq_puts(m, "\t VG is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_IM) == 0) + seq_puts(m, "\t IM is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_FP) == 0) + seq_puts(m, "\t FP is not idle\n"); + if ((idle & VIVS_HI_IDLE_STATE_TS) == 0) + seq_puts(m, "\t TS is not idle\n"); + if (idle & VIVS_HI_IDLE_STATE_AXI_LP) + seq_puts(m, "\t AXI low power mode\n"); + + if (gpu->identity.features & chipFeatures_DEBUG_MODE) { + u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0); + u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1); + u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE); + + seq_puts(m, "\tMC\n"); + seq_printf(m, "\t read0: 0x%08x\n", read0); + seq_printf(m, "\t read1: 0x%08x\n", read1); + seq_printf(m, "\t write: 0x%08x\n", write); + } + + seq_puts(m, "\tDMA "); + + if (debug.address[0] == debug.address[1] && + debug.state[0] == debug.state[1]) { + seq_puts(m, "seems to be stuck\n"); + } else if (debug.address[0] == debug.address[1]) { + seq_puts(m, "address is constant\n"); + } else { + seq_puts(m, "is running\n"); + } + + seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]); + seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]); + seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]); + seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]); + seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n", + dma_lo, dma_hi); + + ret = 0; + + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return ret; +} +#endif + +/* + * Power Management: + */ +static int enable_clk(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_core) + clk_prepare_enable(gpu->clk_core); + if (gpu->clk_shader) + clk_prepare_enable(gpu->clk_shader); + + return 0; +} + +static int disable_clk(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_core) + clk_disable_unprepare(gpu->clk_core); + if (gpu->clk_shader) + clk_disable_unprepare(gpu->clk_shader); + + return 0; +} + +static int enable_axi(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_bus) + clk_prepare_enable(gpu->clk_bus); + + return 0; +} + +static int disable_axi(struct etnaviv_gpu *gpu) +{ + if (gpu->clk_bus) + clk_disable_unprepare(gpu->clk_bus); + + return 0; +} + +/* + * Hangcheck detection for locked gpu: + */ +static void recover_worker(struct work_struct *work) +{ + struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, + recover_work); + unsigned long flags; +
unsigned int i; + + dev_err(gpu->dev, "hangcheck recover!\n"); + + if (pm_runtime_get_sync(gpu->dev) < 0) + return; + + mutex_lock(&gpu->lock); + + /* Only catch the first event, or when manually re-armed */ + if (etnaviv_dump_core) { + etnaviv_core_dump(gpu); + etnaviv_dump_core = false; + } + + etnaviv_hw_reset(gpu); + + /* complete all events, the GPU won't do it after the reset */ + spin_lock_irqsave(&gpu->event_spinlock, flags); + for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { + if (!gpu->event[i].used) + continue; + fence_signal(gpu->event[i].fence); + gpu->event[i].fence = NULL; + gpu->event[i].used = false; + complete(&gpu->event_free); + /* + * Decrement the PM count for each stuck event. This is safe + * even in atomic context as we use ASYNC RPM here. + */ + pm_runtime_put_autosuspend(gpu->dev); + } + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + gpu->completed_fence = gpu->active_fence; + + etnaviv_gpu_hw_init(gpu); + gpu->switch_context = true; + + mutex_unlock(&gpu->lock); + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + /* Retire the buffer objects in a work */ + etnaviv_queue_work(gpu->drm, &gpu->retire_work); +} + +static void hangcheck_timer_reset(struct etnaviv_gpu *gpu) +{ + DBG("%s", dev_name(gpu->dev)); + mod_timer(&gpu->hangcheck_timer, + round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES)); +} + +static void hangcheck_handler(unsigned long data) +{ + struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data; + u32 fence = gpu->completed_fence; + bool progress = false; + + if (fence != gpu->hangcheck_fence) { + gpu->hangcheck_fence = fence; + progress = true; + } + + if (!progress) { + u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); + int change = dma_addr - gpu->hangcheck_dma_addr; + + if (change < 0 || change > 16) { + gpu->hangcheck_dma_addr = dma_addr; + progress = true; + } + } + + if (!progress && fence_after(gpu->active_fence, fence)) { + dev_err(gpu->dev, "hangcheck detected gpu lockup!\n"); + dev_err(gpu->dev, " completed fence: %u\n", fence); + dev_err(gpu->dev, " active fence: %u\n", + gpu->active_fence); + etnaviv_queue_work(gpu->drm, &gpu->recover_work); + } + + /* if still more pending work, reset the hangcheck timer: */ + if (fence_after(gpu->active_fence, gpu->hangcheck_fence)) + hangcheck_timer_reset(gpu); +} + +static void hangcheck_disable(struct etnaviv_gpu *gpu) +{ + del_timer_sync(&gpu->hangcheck_timer); + cancel_work_sync(&gpu->recover_work); +} + +/* fence object management */ +struct etnaviv_fence { + struct etnaviv_gpu *gpu; + struct fence base; +}; + +static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence) +{ + return container_of(fence, struct etnaviv_fence, base); +} + +static const char *etnaviv_fence_get_driver_name(struct fence *fence) +{ + return "etnaviv"; +} + +static const char *etnaviv_fence_get_timeline_name(struct fence *fence) +{ + struct etnaviv_fence *f = to_etnaviv_fence(fence); + + return dev_name(f->gpu->dev); +} + +static bool etnaviv_fence_enable_signaling(struct fence *fence) +{ + return true; +} + +static bool etnaviv_fence_signaled(struct fence *fence) +{ + struct etnaviv_fence *f = to_etnaviv_fence(fence); + + return fence_completed(f->gpu, f->base.seqno); +} + +static void etnaviv_fence_release(struct fence *fence) +{ + struct etnaviv_fence *f = to_etnaviv_fence(fence); + + kfree_rcu(f, base.rcu); +} + +static const struct fence_ops etnaviv_fence_ops = { + .get_driver_name = etnaviv_fence_get_driver_name, + .get_timeline_name = 
etnaviv_fence_get_timeline_name, + .enable_signaling = etnaviv_fence_enable_signaling, + .signaled = etnaviv_fence_signaled, + .wait = fence_default_wait, + .release = etnaviv_fence_release, +}; + +static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) +{ + struct etnaviv_fence *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->gpu = gpu; + + fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock, + gpu->fence_context, ++gpu->next_fence); + + return &f->base; +} + +int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, + unsigned int context, bool exclusive) +{ + struct reservation_object *robj = etnaviv_obj->resv; + struct reservation_object_list *fobj; + struct fence *fence; + int i, ret; + + if (!exclusive) { + ret = reservation_object_reserve_shared(robj); + if (ret) + return ret; + } + + /* + * If we have any shared fences, then the exclusive fence + * should be ignored as it will already have been signalled. + */ + fobj = reservation_object_get_list(robj); + if (!fobj || fobj->shared_count == 0) { + /* Wait on any existing exclusive fence which isn't our own */ + fence = reservation_object_get_excl(robj); + if (fence && fence->context != context) { + ret = fence_wait(fence, true); + if (ret) + return ret; + } + } + + if (!exclusive || !fobj) + return 0; + + for (i = 0; i < fobj->shared_count; i++) { + fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(robj)); + if (fence->context != context) { + ret = fence_wait(fence, true); + if (ret) + return ret; + } + } + + return 0; +} + +/* + * event management: + */ + +static unsigned int event_alloc(struct etnaviv_gpu *gpu) +{ + unsigned long ret, flags; + unsigned int i, event = ~0U; + + ret = wait_for_completion_timeout(&gpu->event_free, + msecs_to_jiffies(10 * 10000)); + if (!ret) + dev_err(gpu->dev, "wait_for_completion_timeout failed"); + + spin_lock_irqsave(&gpu->event_spinlock, flags); + + /* find first free event */ + for (i = 0; i < ARRAY_SIZE(gpu->event); i++) { + if (gpu->event[i].used == false) { + gpu->event[i].used = true; + event = i; + break; + } + } + + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + + return event; +} + +static void event_free(struct etnaviv_gpu *gpu, unsigned int event) +{ + unsigned long flags; + + spin_lock_irqsave(&gpu->event_spinlock, flags); + + if (gpu->event[event].used == false) { + dev_warn(gpu->dev, "event %u is already marked as free", + event); + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + } else { + gpu->event[event].used = false; + spin_unlock_irqrestore(&gpu->event_spinlock, flags); + + complete(&gpu->event_free); + } +} + +/* + * Cmdstream submission/retirement: + */ + +struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size, + size_t nr_bos) +{ + struct etnaviv_cmdbuf *cmdbuf; + size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]), + sizeof(*cmdbuf)); + + cmdbuf = kzalloc(sz, GFP_KERNEL); + if (!cmdbuf) + return NULL; + + cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr, + GFP_KERNEL); + if (!cmdbuf->vaddr) { + kfree(cmdbuf); + return NULL; + } + + cmdbuf->gpu = gpu; + cmdbuf->size = size; + + return cmdbuf; +} + +void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf) +{ + dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size, + cmdbuf->vaddr, cmdbuf->paddr); + kfree(cmdbuf); +} + +static void retire_worker(struct work_struct *work) +{ + struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, + retire_work); + u32 fence = 
gpu->completed_fence; + struct etnaviv_cmdbuf *cmdbuf, *tmp; + unsigned int i; + + mutex_lock(&gpu->lock); + list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { + if (!fence_is_signaled(cmdbuf->fence)) + break; + + list_del(&cmdbuf->node); + fence_put(cmdbuf->fence); + + for (i = 0; i < cmdbuf->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i]; + + atomic_dec(&etnaviv_obj->gpu_active); + /* drop the refcount taken in etnaviv_gpu_submit */ + etnaviv_gem_put_iova(gpu, &etnaviv_obj->base); + } + + etnaviv_gpu_cmdbuf_free(cmdbuf); + } + + gpu->retired_fence = fence; + + mutex_unlock(&gpu->lock); + + wake_up_all(&gpu->fence_event); +} + +int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, + u32 fence, struct timespec *timeout) +{ + int ret; + + if (fence_after(fence, gpu->next_fence)) { + DRM_ERROR("waiting on invalid fence: %u (of %u)\n", + fence, gpu->next_fence); + return -EINVAL; + } + + if (!timeout) { + /* No timeout was requested: just test for completion */ + ret = fence_completed(gpu, fence) ? 0 : -EBUSY; + } else { + unsigned long remaining = etnaviv_timeout_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout(gpu->fence_event, + fence_completed(gpu, fence), + remaining); + if (ret == 0) { + DBG("timeout waiting for fence: %u (retired: %u completed: %u)", + fence, gpu->retired_fence, + gpu->completed_fence); + ret = -ETIMEDOUT; + } else if (ret != -ERESTARTSYS) { + ret = 0; + } + } + + return ret; +} + +/* + * Wait for an object to become inactive. This, on its own, is not race + * free: the object is moved by the retire worker off the active list, and + * then the iova is put. Moreover, the object could be re-submitted just + * after we notice that it's become inactive. + * + * Although the retirement happens under the gpu lock, we don't want to hold + * that lock in this function while waiting. + */ +int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, + struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout) +{ + unsigned long remaining; + long ret; + + if (!timeout) + return !is_active(etnaviv_obj) ?
0 : -EBUSY; + + remaining = etnaviv_timeout_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout(gpu->fence_event, + !is_active(etnaviv_obj), + remaining); + if (ret > 0) { + struct etnaviv_drm_private *priv = gpu->drm->dev_private; + + /* Synchronise with the retire worker */ + flush_workqueue(priv->wq); + return 0; + } else if (ret == -ERESTARTSYS) { + return -ERESTARTSYS; + } else { + return -ETIMEDOUT; + } +} + +int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu) +{ + return pm_runtime_get_sync(gpu->dev); +} + +void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu) +{ + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); +} + +/* add bo's to gpu's ring, and kick gpu: */ +int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, + struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) +{ + struct fence *fence; + unsigned int event, i; + int ret; + + ret = etnaviv_gpu_pm_get_sync(gpu); + if (ret < 0) + return ret; + + mutex_lock(&gpu->lock); + + /* + * TODO + * + * - flush + * - data endian + * - prefetch + * + */ + + event = event_alloc(gpu); + if (unlikely(event == ~0U)) { + DRM_ERROR("no free event\n"); + ret = -EBUSY; + goto out_unlock; + } + + fence = etnaviv_gpu_fence_alloc(gpu); + if (!fence) { + event_free(gpu, event); + ret = -ENOMEM; + goto out_unlock; + } + + gpu->event[event].fence = fence; + submit->fence = fence->seqno; + gpu->active_fence = submit->fence; + + if (gpu->lastctx != cmdbuf->ctx) { + gpu->mmu->need_flush = true; + gpu->switch_context = true; + gpu->lastctx = cmdbuf->ctx; + } + + etnaviv_buffer_queue(gpu, event, cmdbuf); + + cmdbuf->fence = fence; + list_add_tail(&cmdbuf->node, &gpu->active_cmd_list); + + /* We're committed to adding this command buffer, hold a PM reference */ + pm_runtime_get_noresume(gpu->dev); + + for (i = 0; i < submit->nr_bos; i++) { + struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + u32 iova; + + /* Each cmdbuf takes a refcount on the iova */ + etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova); + cmdbuf->bo[i] = etnaviv_obj; + atomic_inc(&etnaviv_obj->gpu_active); + + if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) + reservation_object_add_excl_fence(etnaviv_obj->resv, + fence); + else + reservation_object_add_shared_fence(etnaviv_obj->resv, + fence); + } + cmdbuf->nr_bos = submit->nr_bos; + hangcheck_timer_reset(gpu); + ret = 0; + +out_unlock: + mutex_unlock(&gpu->lock); + + etnaviv_gpu_pm_put(gpu); + + return ret; +} + +/* + * Init/Cleanup: + */ +static irqreturn_t irq_handler(int irq, void *data) +{ + struct etnaviv_gpu *gpu = data; + irqreturn_t ret = IRQ_NONE; + + u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE); + + if (intr != 0) { + int event; + + pm_runtime_mark_last_busy(gpu->dev); + + dev_dbg(gpu->dev, "intr 0x%08x\n", intr); + + if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) { + dev_err(gpu->dev, "AXI bus error\n"); + intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR; + } + + while ((event = ffs(intr)) != 0) { + struct fence *fence; + + event -= 1; + + intr &= ~(1 << event); + + dev_dbg(gpu->dev, "event %u\n", event); + + fence = gpu->event[event].fence; + gpu->event[event].fence = NULL; + fence_signal(fence); + + /* + * Events can be processed out of order. Eg, + * - allocate and queue event 0 + * - allocate event 1 + * - event 0 completes, we process it + * - allocate and queue event 0 + * - event 1 and event 0 complete + * we can end up processing event 0 first, then 1. 
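+ * The fence_after() check below keeps completed_fence monotonic in + * that case; e.g. handling the fence with seqno 1 after seqno 2 + * leaves it at 2.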
+ */ + if (fence_after(fence->seqno, gpu->completed_fence)) + gpu->completed_fence = fence->seqno; + + event_free(gpu, event); + + /* + * We need to balance the runtime PM count caused by + * each submission. Upon submission, we increment + * the runtime PM counter, and allocate one event. + * So here, we put the runtime PM count for each + * completed event. + */ + pm_runtime_put_autosuspend(gpu->dev); + } + + /* Retire the buffer objects in a work */ + etnaviv_queue_work(gpu->drm, &gpu->retire_work); + + ret = IRQ_HANDLED; + } + + return ret; +} + +static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) +{ + int ret; + + ret = enable_clk(gpu); + if (ret) + return ret; + + ret = enable_axi(gpu); + if (ret) { + disable_clk(gpu); + return ret; + } + + return 0; +} + +static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu) +{ + int ret; + + ret = disable_axi(gpu); + if (ret) + return ret; + + ret = disable_clk(gpu); + if (ret) + return ret; + + return 0; +} + +static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) +{ + if (gpu->buffer) { + unsigned long timeout; + + /* Replace the last WAIT with END */ + etnaviv_buffer_end(gpu); + + /* + * We know that only the FE is busy here, this should + * happen quickly (as the WAIT is only 200 cycles). If + * we fail, just warn and continue. + */ + timeout = jiffies + msecs_to_jiffies(100); + do { + u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE); + + if ((idle & gpu->idle_mask) == gpu->idle_mask) + break; + + if (time_is_before_jiffies(timeout)) { + dev_warn(gpu->dev, + "timed out waiting for idle: idle=0x%x\n", + idle); + break; + } + + udelay(5); + } while (1); + } + + return etnaviv_gpu_clk_disable(gpu); +} + +#ifdef CONFIG_PM +static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) +{ + u32 clock; + int ret; + + ret = mutex_lock_killable(&gpu->lock); + if (ret) + return ret; + + clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); + + etnaviv_gpu_load_clock(gpu, clock); + etnaviv_gpu_hw_init(gpu); + + gpu->switch_context = true; + + mutex_unlock(&gpu->lock); + + return 0; +} +#endif + +static int etnaviv_gpu_bind(struct device *dev, struct device *master, + void *data) +{ + struct drm_device *drm = data; + struct etnaviv_drm_private *priv = drm->dev_private; + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + int ret; + +#ifdef CONFIG_PM + ret = pm_runtime_get_sync(gpu->dev); +#else + ret = etnaviv_gpu_clk_enable(gpu); +#endif + if (ret < 0) + return ret; + + gpu->drm = drm; + gpu->fence_context = fence_context_alloc(1); + spin_lock_init(&gpu->fence_spinlock); + + INIT_LIST_HEAD(&gpu->active_cmd_list); + INIT_WORK(&gpu->retire_work, retire_worker); + INIT_WORK(&gpu->recover_work, recover_worker); + init_waitqueue_head(&gpu->fence_event); + + setup_timer(&gpu->hangcheck_timer, hangcheck_handler, + (unsigned long)gpu); + + priv->gpu[priv->num_gpus++] = gpu; + + pm_runtime_mark_last_busy(gpu->dev); + pm_runtime_put_autosuspend(gpu->dev); + + return 0; +} + +static void etnaviv_gpu_unbind(struct device *dev, struct device *master, + void *data) +{ + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + + DBG("%s", dev_name(gpu->dev)); + + hangcheck_disable(gpu); + +#ifdef CONFIG_PM + pm_runtime_get_sync(gpu->dev); + pm_runtime_put_sync_suspend(gpu->dev); +#else + etnaviv_gpu_hw_suspend(gpu); +#endif + + if (gpu->buffer) { + etnaviv_gpu_cmdbuf_free(gpu->buffer); + gpu->buffer = NULL; + } + + if (gpu->mmu) { + etnaviv_iommu_destroy(gpu->mmu); + gpu->mmu = NULL; + } + + gpu->drm = NULL; +} + +static 
const struct component_ops gpu_ops = { + .bind = etnaviv_gpu_bind, + .unbind = etnaviv_gpu_unbind, +}; + +static const struct of_device_id etnaviv_gpu_match[] = { + { + .compatible = "vivante,gc" + }, + { /* sentinel */ } +}; + +static int etnaviv_gpu_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct etnaviv_gpu *gpu; + int err = 0; + + gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); + if (!gpu) + return -ENOMEM; + + gpu->dev = &pdev->dev; + mutex_init(&gpu->lock); + + /* + * Set the GPU base address to the start of physical memory. This + * ensures that if we have up to 2GB, the v1 MMU can address the + * highest memory. This is important as command buffers may be + * allocated outside of this limit. + */ + gpu->memory_base = PHYS_OFFSET; + + /* Map registers: */ + gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); + if (IS_ERR(gpu->mmio)) + return PTR_ERR(gpu->mmio); + + /* Get Interrupt: */ + gpu->irq = platform_get_irq(pdev, 0); + if (gpu->irq < 0) { + err = gpu->irq; + dev_err(dev, "failed to get irq: %d\n", err); + goto fail; + } + + err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0, + dev_name(gpu->dev), gpu); + if (err) { + dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err); + goto fail; + } + + /* Get Clocks: */ + gpu->clk_bus = devm_clk_get(&pdev->dev, "bus"); + DBG("clk_bus: %p", gpu->clk_bus); + if (IS_ERR(gpu->clk_bus)) + gpu->clk_bus = NULL; + + gpu->clk_core = devm_clk_get(&pdev->dev, "core"); + DBG("clk_core: %p", gpu->clk_core); + if (IS_ERR(gpu->clk_core)) + gpu->clk_core = NULL; + + gpu->clk_shader = devm_clk_get(&pdev->dev, "shader"); + DBG("clk_shader: %p", gpu->clk_shader); + if (IS_ERR(gpu->clk_shader)) + gpu->clk_shader = NULL; + + /* TODO: figure out max mapped size */ + dev_set_drvdata(dev, gpu); + + /* + * We treat the device as initially suspended. The runtime PM + * autosuspend delay is rather arbitrary: no measurements have + * yet been performed to determine an appropriate value.
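+ * If 200 ms turns out to be a poor choice, it can be overridden at + * run time through the standard power/autosuspend_delay_ms sysfs + * attribute.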
+ */ + pm_runtime_use_autosuspend(gpu->dev); + pm_runtime_set_autosuspend_delay(gpu->dev, 200); + pm_runtime_enable(gpu->dev); + + err = component_add(&pdev->dev, &gpu_ops); + if (err < 0) { + dev_err(&pdev->dev, "failed to register component: %d\n", err); + goto fail; + } + + return 0; + +fail: + return err; +} + +static int etnaviv_gpu_platform_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &gpu_ops); + pm_runtime_disable(&pdev->dev); + return 0; +} + +#ifdef CONFIG_PM +static int etnaviv_gpu_rpm_suspend(struct device *dev) +{ + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + u32 idle, mask; + + /* If we have outstanding fences, we're not idle */ + if (gpu->completed_fence != gpu->active_fence) + return -EBUSY; + + /* Check whether the hardware (except FE) is idle */ + mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE; + idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask; + if (idle != mask) + return -EBUSY; + + return etnaviv_gpu_hw_suspend(gpu); +} + +static int etnaviv_gpu_rpm_resume(struct device *dev) +{ + struct etnaviv_gpu *gpu = dev_get_drvdata(dev); + int ret; + + ret = etnaviv_gpu_clk_enable(gpu); + if (ret) + return ret; + + /* Re-initialise the basic hardware state */ + if (gpu->drm && gpu->buffer) { + ret = etnaviv_gpu_hw_resume(gpu); + if (ret) { + etnaviv_gpu_clk_disable(gpu); + return ret; + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops etnaviv_gpu_pm_ops = { + SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, + NULL) +}; + +struct platform_driver etnaviv_gpu_driver = { + .driver = { + .name = "etnaviv-gpu", + .owner = THIS_MODULE, + .pm = &etnaviv_gpu_pm_ops, + .of_match_table = etnaviv_gpu_match, + }, + .probe = etnaviv_gpu_platform_probe, + .remove = etnaviv_gpu_platform_remove, + .id_table = gpu_ids, +}; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h new file mode 100644 index 000000000000..c75d50359ab0 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_GPU_H__ +#define __ETNAVIV_GPU_H__ + +#include +#include + +#include "etnaviv_drv.h" + +struct etnaviv_gem_submit; + +struct etnaviv_chip_identity { + /* Chip model. */ + u32 model; + + /* Revision value.*/ + u32 revision; + + /* Supported feature fields. */ + u32 features; + + /* Supported minor feature fields. */ + u32 minor_features0; + + /* Supported minor feature 1 fields. */ + u32 minor_features1; + + /* Supported minor feature 2 fields. */ + u32 minor_features2; + + /* Supported minor feature 3 fields. */ + u32 minor_features3; + + /* Number of streams supported. */ + u32 stream_count; + + /* Total number of temporary registers per thread. */ + u32 register_max; + + /* Maximum number of threads. */ + u32 thread_count; + + /* Number of shader cores. */ + u32 shader_core_count; + + /* Size of the vertex cache. 
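+ * A hardware value of 0 is patched up to 8 by etnaviv_hw_specs().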
*/ + u32 vertex_cache_size; + + /* Number of entries in the vertex output buffer. */ + u32 vertex_output_buffer_size; + + /* Number of pixel pipes. */ + u32 pixel_pipes; + + /* Number of instructions. */ + u32 instruction_count; + + /* Number of constants. */ + u32 num_constants; + + /* Buffer size */ + u32 buffer_size; +}; + +struct etnaviv_event { + bool used; + struct fence *fence; +}; + +struct etnaviv_cmdbuf; + +struct etnaviv_gpu { + struct drm_device *drm; + struct device *dev; + struct mutex lock; + struct etnaviv_chip_identity identity; + struct etnaviv_file_private *lastctx; + bool switch_context; + + /* 'ring'-buffer: */ + struct etnaviv_cmdbuf *buffer; + + /* bus base address of memory */ + u32 memory_base; + + /* event management: */ + struct etnaviv_event event[30]; + struct completion event_free; + spinlock_t event_spinlock; + + /* list of currently in-flight command buffers */ + struct list_head active_cmd_list; + + u32 idle_mask; + + /* Fencing support */ + u32 next_fence; + u32 active_fence; + u32 completed_fence; + u32 retired_fence; + wait_queue_head_t fence_event; + unsigned int fence_context; + spinlock_t fence_spinlock; + + /* worker for handling active-list retiring: */ + struct work_struct retire_work; + + void __iomem *mmio; + int irq; + + struct etnaviv_iommu *mmu; + + /* Power Control: */ + struct clk *clk_bus; + struct clk *clk_core; + struct clk *clk_shader; + + /* Hang Detection: */ +#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */ +#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD) + struct timer_list hangcheck_timer; + u32 hangcheck_fence; + u32 hangcheck_dma_addr; + struct work_struct recover_work; +}; + +struct etnaviv_cmdbuf { + /* device this cmdbuf is allocated for */ + struct etnaviv_gpu *gpu; + /* user context key, must be unique between all active users */ + struct etnaviv_file_private *ctx; + /* cmdbuf properties */ + void *vaddr; + dma_addr_t paddr; + u32 size; + u32 user_size; + /* fence after which this buffer is to be disposed */ + struct fence *fence; + /* target exec state */ + u32 exec_state; + /* per GPU in-flight list */ + struct list_head node; + /* BOs attached to this command buffer */ + unsigned int nr_bos; + struct etnaviv_gem_object *bo[0]; +}; + +static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data) +{ + etnaviv_writel(data, gpu->mmio + reg); +} + +static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg) +{ + return etnaviv_readl(gpu->mmio + reg); +} + +static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence) +{ + return fence_after_eq(gpu->completed_fence, fence); +} + +static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence) +{ + return fence_after_eq(gpu->retired_fence, fence); +} + +int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value); + +int etnaviv_gpu_init(struct etnaviv_gpu *gpu); + +#ifdef CONFIG_DEBUG_FS +int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m); +#endif + +int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, + unsigned int context, bool exclusive); + +void etnaviv_gpu_retire(struct etnaviv_gpu *gpu); +int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, + u32 fence, struct timespec *timeout); +int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, + struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout); +int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, + struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf); +struct etnaviv_cmdbuf
*etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, + u32 size, size_t nr_bos); +void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf); +int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu); +void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu); + +extern struct platform_driver etnaviv_gpu_driver; + +#endif /* __ETNAVIV_GPU_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c new file mode 100644 index 000000000000..522cfd447892 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include +#include +#include +#include +#include +#include + +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" +#include "etnaviv_iommu.h" +#include "state_hi.xml.h" + +#define PT_SIZE SZ_2M +#define PT_ENTRIES (PT_SIZE / sizeof(u32)) + +#define GPU_MEM_START 0x80000000 + +struct etnaviv_iommu_domain_pgtable { + u32 *pgtable; + dma_addr_t paddr; +}; + +struct etnaviv_iommu_domain { + struct iommu_domain domain; + struct device *dev; + void *bad_page_cpu; + dma_addr_t bad_page_dma; + struct etnaviv_iommu_domain_pgtable pgtable; + spinlock_t map_lock; +}; + +static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain) +{ + return container_of(domain, struct etnaviv_iommu_domain, domain); +} + +static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable, + size_t size) +{ + pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL); + if (!pgtable->pgtable) + return -ENOMEM; + + return 0; +} + +static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable, + size_t size) +{ + dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr); +} + +static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable, + unsigned long iova) +{ + /* calculate index into page table */ + unsigned int index = (iova - GPU_MEM_START) / SZ_4K; + phys_addr_t paddr; + + paddr = pgtable->pgtable[index]; + + return paddr; +} + +static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable, + unsigned long iova, phys_addr_t paddr) +{ + /* calculate index into page table */ + unsigned int index = (iova - GPU_MEM_START) / SZ_4K; + + pgtable->pgtable[index] = paddr; +} + +static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain) +{ + u32 *p; + int ret, i; + + etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev, + SZ_4K, + &etnaviv_domain->bad_page_dma, + GFP_KERNEL); + if (!etnaviv_domain->bad_page_cpu) + return -ENOMEM; + + p = etnaviv_domain->bad_page_cpu; + for (i = 0; i < SZ_4K / 4; i++) + *p++ = 0xdead55aa; + + ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE); + if (ret < 0) { + dma_free_coherent(etnaviv_domain->dev, SZ_4K, + etnaviv_domain->bad_page_cpu, + etnaviv_domain->bad_page_dma); + return ret; + } + + for (i = 0; i < PT_ENTRIES; i++) + etnaviv_domain->pgtable.pgtable[i] = + etnaviv_domain->bad_page_dma; + +
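/* PT_SIZE (2 MiB) of 32-bit PTEs gives 512Ki entries; at 4 KiB per entry this covers the 2 GiB aperture that starts at GPU_MEM_START. */ +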
spin_lock_init(&etnaviv_domain->map_lock); + + return 0; +} + +static void etnaviv_domain_free(struct iommu_domain *domain) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + pgtable_free(&etnaviv_domain->pgtable, PT_SIZE); + + dma_free_coherent(etnaviv_domain->dev, SZ_4K, + etnaviv_domain->bad_page_cpu, + etnaviv_domain->bad_page_dma); + + kfree(etnaviv_domain); +} + +static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + if (size != SZ_4K) + return -EINVAL; + + spin_lock(&etnaviv_domain->map_lock); + pgtable_write(&etnaviv_domain->pgtable, iova, paddr); + spin_unlock(&etnaviv_domain->map_lock); + + return 0; +} + +static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + if (size != SZ_4K) + return -EINVAL; + + spin_lock(&etnaviv_domain->map_lock); + pgtable_write(&etnaviv_domain->pgtable, iova, + etnaviv_domain->bad_page_dma); + spin_unlock(&etnaviv_domain->map_lock); + + return SZ_4K; +} + +static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + return pgtable_read(&etnaviv_domain->pgtable, iova); +} + +static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain) +{ + return PT_SIZE; +} + +static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + + memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE); +} + +static struct etnaviv_iommu_ops etnaviv_iommu_ops = { + .ops = { + .domain_free = etnaviv_domain_free, + .map = etnaviv_iommuv1_map, + .unmap = etnaviv_iommuv1_unmap, + .iova_to_phys = etnaviv_iommu_iova_to_phys, + .pgsize_bitmap = SZ_4K, + }, + .dump_size = etnaviv_iommuv1_dump_size, + .dump = etnaviv_iommuv1_dump, +}; + +void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu, + struct iommu_domain *domain) +{ + struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain); + u32 pgtable; + + /* set page table address in MC */ + pgtable = (u32)etnaviv_domain->pgtable.paddr; + + gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable); + gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable); +} + +struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) +{ + struct etnaviv_iommu_domain *etnaviv_domain; + int ret; + + etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL); + if (!etnaviv_domain) + return NULL; + + etnaviv_domain->dev = gpu->dev; + + etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; + etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; + etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; + etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; + + ret = __etnaviv_iommu_init(etnaviv_domain); + if (ret) + goto out_free; + + return &etnaviv_domain->domain; + +out_free: + kfree(etnaviv_domain); + return NULL; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h new file mode 100644 index 000000000000..cf45503f6b6f --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h 
@@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ETNAVIV_IOMMU_H__ +#define __ETNAVIV_IOMMU_H__ + +#include +struct etnaviv_gpu; + +struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu); +void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu, + struct iommu_domain *domain); +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu); + +#endif /* __ETNAVIV_IOMMU_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c new file mode 100644 index 000000000000..fbb4aed3dc80 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include + +#include "etnaviv_gpu.h" +#include "etnaviv_iommu.h" +#include "state_hi.xml.h" + + +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu) +{ + /* TODO */ + return NULL; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h new file mode 100644 index 000000000000..603ea41c5389 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2014 Christian Gmeiner + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ETNAVIV_IOMMU_V2_H__ +#define __ETNAVIV_IOMMU_V2_H__ + +#include +struct etnaviv_gpu; + +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu); + +#endif /* __ETNAVIV_IOMMU_V2_H__ */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c new file mode 100644 index 000000000000..6743bc648dc8 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2015 Etnaviv Project + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "etnaviv_drv.h" +#include "etnaviv_gem.h" +#include "etnaviv_gpu.h" +#include "etnaviv_mmu.h" + +static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev, + unsigned long iova, int flags, void *arg) +{ + DBG("*** fault: iova=%08lx, flags=%d", iova, flags); + return 0; +} + +int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, + struct sg_table *sgt, unsigned len, int prot) +{ + struct iommu_domain *domain = iommu->domain; + struct scatterlist *sg; + unsigned int da = iova; + unsigned int i, j; + int ret; + + if (!domain || !sgt) + return -EINVAL; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + u32 pa = sg_dma_address(sg) - sg->offset; + size_t bytes = sg_dma_len(sg) + sg->offset; + + VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + + ret = iommu_map(domain, da, pa, bytes, prot); + if (ret) + goto fail; + + da += bytes; + } + + return 0; + +fail: + da = iova; + + for_each_sg(sgt->sgl, sg, i, j) { + size_t bytes = sg_dma_len(sg) + sg->offset; + + iommu_unmap(domain, da, bytes); + da += bytes; + } + return ret; +} + +int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova, + struct sg_table *sgt, unsigned len) +{ + struct iommu_domain *domain = iommu->domain; + struct scatterlist *sg; + unsigned int da = iova; + int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes = sg_dma_len(sg) + sg->offset; + size_t unmapped; + + unmapped = iommu_unmap(domain, da, bytes); + if (unmapped < bytes) + return unmapped; + + VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); + + BUG_ON(!PAGE_ALIGNED(bytes)); + + da += bytes; + } + + return 0; +} + +static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu, + struct etnaviv_vram_mapping *mapping) +{ + struct etnaviv_gem_object *etnaviv_obj = mapping->object; + + etnaviv_iommu_unmap(mmu, mapping->vram_node.start, + etnaviv_obj->sgt, etnaviv_obj->base.size); + drm_mm_remove_node(&mapping->vram_node); +} + +int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, + struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, + struct etnaviv_vram_mapping *mapping) +{ + struct etnaviv_vram_mapping *free = NULL; + struct sg_table *sgt = etnaviv_obj->sgt; + struct drm_mm_node *node; + int ret; + + lockdep_assert_held(&etnaviv_obj->lock); + + mutex_lock(&mmu->lock); + + /* v1 MMU can optimize single entry (contiguous) scatterlists */ + if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) { + u32 iova; + + iova = sg_dma_address(sgt->sgl) - memory_base; + 
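/* A contiguous buffer that already lies wholly inside the 2 GiB window above memory_base can be addressed directly, without consuming any MMU address space. */ +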
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) { + mapping->iova = iova; + list_add_tail(&mapping->mmu_node, &mmu->mappings); + mutex_unlock(&mmu->lock); + return 0; + } + } + + node = &mapping->vram_node; + while (1) { + struct etnaviv_vram_mapping *m, *n; + struct list_head list; + bool found; + + ret = drm_mm_insert_node_in_range(&mmu->mm, node, + etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL, + DRM_MM_SEARCH_DEFAULT); + + if (ret != -ENOSPC) + break; + + /* + * If we did not search from the start of the MMU region, + * try again in case there are free slots. + */ + if (mmu->last_iova) { + mmu->last_iova = 0; + mmu->need_flush = true; + continue; + } + + /* Try to retire some entries */ + drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0); + + found = 0; + INIT_LIST_HEAD(&list); + list_for_each_entry(free, &mmu->mappings, mmu_node) { + /* If this vram node has not been used, skip this. */ + if (!free->vram_node.mm) + continue; + + /* + * If the iova is pinned, then it's in-use, + * so we must keep its mapping. + */ + if (free->use) + continue; + + list_add(&free->scan_node, &list); + if (drm_mm_scan_add_block(&free->vram_node)) { + found = true; + break; + } + } + + if (!found) { + /* Nothing found, clean up and fail */ + list_for_each_entry_safe(m, n, &list, scan_node) + BUG_ON(drm_mm_scan_remove_block(&m->vram_node)); + break; + } + + /* + * drm_mm does not allow any other operations while + * scanning, so we have to remove all blocks first. + * If drm_mm_scan_remove_block() returns false, we + * can leave the block pinned. + */ + list_for_each_entry_safe(m, n, &list, scan_node) + if (!drm_mm_scan_remove_block(&m->vram_node)) + list_del_init(&m->scan_node); + + /* + * Unmap the blocks which need to be reaped from the MMU. + * Clear the mmu pointer to prevent the get_iova finding + * this mapping. + */ + list_for_each_entry_safe(m, n, &list, scan_node) { + etnaviv_iommu_remove_mapping(mmu, m); + m->mmu = NULL; + list_del_init(&m->mmu_node); + list_del_init(&m->scan_node); + } + + /* + * We removed enough mappings so that the new allocation will + * succeed. Ensure that the MMU will be flushed before the + * associated commit requesting this mapping, and retry the + * allocation one more time. 
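+ * (The flag is acted upon when the next command stream is queued + * to the GPU.)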
+		 */
+		mmu->need_flush = true;
+	}
+
+	if (ret < 0) {
+		mutex_unlock(&mmu->lock);
+		return ret;
+	}
+
+	mmu->last_iova = node->start + etnaviv_obj->base.size;
+	mapping->iova = node->start;
+	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+				IOMMU_READ | IOMMU_WRITE);
+
+	if (ret < 0) {
+		drm_mm_remove_node(node);
+		mutex_unlock(&mmu->lock);
+		return ret;
+	}
+
+	list_add_tail(&mapping->mmu_node, &mmu->mappings);
+	mutex_unlock(&mmu->lock);
+
+	return ret;
+}
+
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_vram_mapping *mapping)
+{
+	WARN_ON(mapping->use);
+
+	mutex_lock(&mmu->lock);
+
+	/* If the vram node is on the mm, unmap and remove the node */
+	if (mapping->vram_node.mm == &mmu->mm)
+		etnaviv_iommu_remove_mapping(mmu, mapping);
+
+	list_del(&mapping->mmu_node);
+	mutex_unlock(&mmu->lock);
+}
+
+void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+{
+	drm_mm_takedown(&mmu->mm);
+	iommu_domain_free(mmu->domain);
+	kfree(mmu);
+}
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+	struct iommu_domain *domain, enum etnaviv_iommu_version version)
+{
+	struct etnaviv_iommu *mmu;
+
+	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+	if (!mmu)
+		return ERR_PTR(-ENOMEM);
+
+	mmu->domain = domain;
+	mmu->gpu = gpu;
+	mmu->version = version;
+	mutex_init(&mmu->lock);
+	INIT_LIST_HEAD(&mmu->mappings);
+
+	drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
+		    domain->geometry.aperture_end -
+		    domain->geometry.aperture_start + 1);
+
+	iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+
+	return mmu;
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+{
+	struct etnaviv_iommu_ops *ops;
+
+	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+	return ops->dump_size(iommu->domain);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+{
+	struct etnaviv_iommu_ops *ops;
+
+	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+	ops->dump(iommu->domain, buf);
+}
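A note on the eviction loop in etnaviv_iommu_map_gem() above: it drives the drm_mm scan API, whose calling convention is strict. No other drm_mm operation may happen between drm_mm_init_scan() and the last drm_mm_scan_remove_block(), and every block fed to the scan must be backed out again. A condensed sketch of that protocol against the 2015-era drm_mm interface; struct mapping and the evictable()/can_evict()/evict() helpers are hypothetical stand-ins for the driver's own bookkeeping:

#include <drm/drm_mm.h>
#include <linux/list.h>

struct mapping {
	struct drm_mm_node node;	/* range inside the managed address space */
	struct list_head list;		/* on the driver's global mapping list */
	struct list_head scan_node;	/* on the local scan list below */
};

static struct list_head *evictable(void);	/* hypothetical: current mappings */
static bool can_evict(struct mapping *m);	/* hypothetical: not pinned/in use */
static void evict(struct mapping *m);		/* hypothetical: unmap + drm_mm_remove_node() */

static int alloc_with_eviction(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size)
{
	struct mapping *m, *n;
	bool found = false;
	LIST_HEAD(scan);

	if (!drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT))
		return 0;

	/* declare the hole we need; the scan now owns the drm_mm */
	drm_mm_init_scan(mm, size, 0, 0);

	/* feed candidates until drm_mm_scan_add_block() reports that
	 * evicting everything scanned so far would open a hole */
	list_for_each_entry(m, evictable(), list) {
		if (!can_evict(m))
			continue;
		list_add(&m->scan_node, &scan);
		if (drm_mm_scan_add_block(&m->node)) {
			found = true;
			break;
		}
	}

	/* back out of the scan; list_add() above prepended, so walking
	 * head-to-tail removes blocks in reverse order of addition.
	 * Only blocks that overlap the chosen hole return true here. */
	list_for_each_entry_safe(m, n, &scan, scan_node)
		if (!drm_mm_scan_remove_block(&m->node))
			list_del_init(&m->scan_node);

	if (!found)
		return -ENOSPC;		/* nothing evictable was enough */

	/* evict the overlapping blocks, then the insert must succeed */
	list_for_each_entry_safe(m, n, &scan, scan_node) {
		evict(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT);
}

etnaviv_iommu_map_gem() additionally retries from the start of the range and forces an MMU flush before reusing freed space, since the GPU may still hold stale translations for the evicted mappings.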
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
new file mode 100644
index 000000000000..fff215a47630
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_MMU_H__
+#define __ETNAVIV_MMU_H__
+
+#include <linux/iommu.h>
+
+enum etnaviv_iommu_version {
+	ETNAVIV_IOMMU_V1 = 0,
+	ETNAVIV_IOMMU_V2,
+};
+
+struct etnaviv_gpu;
+struct etnaviv_vram_mapping;
+
+struct etnaviv_iommu_ops {
+	struct iommu_ops ops;
+	size_t (*dump_size)(struct iommu_domain *);
+	void (*dump)(struct iommu_domain *, void *);
+};
+
+struct etnaviv_iommu {
+	struct etnaviv_gpu *gpu;
+	struct iommu_domain *domain;
+
+	enum etnaviv_iommu_version version;
+
+	/* memory manager for GPU address area */
+	struct mutex lock;
+	struct list_head mappings;
+	struct drm_mm mm;
+	u32 last_iova;
+	bool need_flush;
+};
+
+struct etnaviv_gem_object;
+
+int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
+	int cnt);
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+	struct sg_table *sgt, unsigned len, int prot);
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+	struct sg_table *sgt, unsigned len);
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+	struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+	struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+	struct iommu_domain *domain, enum etnaviv_iommu_version version);
+
+#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
new file mode 100644
index 000000000000..368218304566
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -0,0 +1,351 @@
+#ifndef STATE_XML
+#define STATE_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- state.xml ( 18882 bytes, from 2015-03-25 11:42:32) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) +- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) +- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06) +- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19) +- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) + +Copyright (C) 2015 +*/ + + +#define VARYING_COMPONENT_USE_UNUSED 0x00000000 +#define VARYING_COMPONENT_USE_USED 0x00000001 +#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002 +#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003 +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0 +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK) +#define VIVS_FE 0x00000000 + +#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0)) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK) +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 
0xff000000 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24 +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK) + +#define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640 + +#define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644 + +#define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001 +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002 + +#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c + +#define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650 + +#define VIVS_FE_COMMAND_ADDRESS 0x00000654 + +#define VIVS_FE_COMMAND_CONTROL 0x00000658 +#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff +#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0 +#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK) +#define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000 + +#define VIVS_FE_DMA_STATUS 0x0000065c + +#define VIVS_FE_DMA_DEBUG_STATE 0x00000660 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400 +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12 +#define 
VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000 +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000 +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000 +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000 + +#define VIVS_FE_DMA_ADDRESS 0x00000664 + +#define VIVS_FE_DMA_LOW 0x00000668 + +#define VIVS_FE_DMA_HIGH 0x0000066c + +#define VIVS_FE_AUTO_FLUSH 0x00000670 + +#define VIVS_FE_UNK00678 0x00000678 + +#define VIVS_FE_UNK0067C 0x0000067c + +#define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0)) +#define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004 +#define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008 + +#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0)) + +#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0)) + +#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0)) +#define VIVS_FE_UNK00700__ESIZE 0x00000004 +#define VIVS_FE_UNK00700__LEN 0x00000010 + +#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0)) +#define VIVS_FE_UNK00740__ESIZE 0x00000004 +#define VIVS_FE_UNK00740__LEN 0x00000010 + +#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0)) +#define VIVS_FE_UNK00780__ESIZE 0x00000004 +#define VIVS_FE_UNK00780__LEN 0x00000010 + +#define VIVS_GL 0x00000000 + +#define VIVS_GL_PIPE_SELECT 0x00003800 +#define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001 +#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0 +#define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK) + +#define VIVS_GL_EVENT 0x00003804 +#define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f +#define VIVS_GL_EVENT_EVENT_ID__SHIFT 0 +#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK) +#define VIVS_GL_EVENT_FROM_FE 0x00000020 +#define VIVS_GL_EVENT_FROM_PE 0x00000040 +#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00 +#define VIVS_GL_EVENT_SOURCE__SHIFT 8 +#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK) + +#define VIVS_GL_SEMAPHORE_TOKEN 0x00003808 +#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f +#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0 +#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK) +#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00 +#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8 +#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK) + +#define VIVS_GL_FLUSH_CACHE 0x0000380c +#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001 +#define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002 +#define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004 +#define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008 +#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010 +#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020 +#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040 + +#define VIVS_GL_FLUSH_MMU 0x00003810 +#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001 +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002 +#define 
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004 +#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008 +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010 + +#define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814 + +#define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK) +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK) +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16 +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK) +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000 + +#define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0 +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK) + +#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK) 
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK) +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28 +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK) + +#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0)) +#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004 +#define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) 
& VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK) +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30 +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK) + +#define VIVS_GL_UNK03834 0x00003834 + +#define VIVS_GL_UNK03838 0x00003838 + +#define VIVS_GL_API_MODE 0x0000384c +#define VIVS_GL_API_MODE_OPENGL 0x00000000 +#define VIVS_GL_API_MODE_OPENVG 0x00000001 +#define VIVS_GL_API_MODE_OPENCL 0x00000002 + +#define VIVS_GL_CONTEXT_POINTER 0x00003850 + +#define VIVS_GL_UNK03A00 0x00003a00 + +#define VIVS_GL_STALL_TOKEN 0x00003c00 +#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f +#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0 +#define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK) +#define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00 +#define VIVS_GL_STALL_TOKEN_TO__SHIFT 8 +#define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK) +#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000 +#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000 + +#define VIVS_DUMMY 0x00000000 + +#define VIVS_DUMMY_DUMMY 0x0003fffc + + +#endif /* STATE_XML */ diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h new file mode 100644 index 000000000000..0064f2640396 --- /dev/null +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -0,0 +1,407 @@ +#ifndef STATE_HI_XML +#define STATE_HI_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://0x04.net/cgit/index.cgi/rules-ng-ng +git clone git://0x04.net/rules-ng-ng + +The rules-ng-ng source files this header was generated from are: +- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) +- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) + +Copyright (C) 2015 +*/ + + +#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001 +#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002 +#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003 +#define VIVS_HI 0x00000000 + +#define VIVS_HI_CLOCK_CONTROL 0x00000000 +#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001 +#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002 +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2 +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK) +#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200 +#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400 +#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800 +#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000 +#define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000 +#define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000 +#define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000 +#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000 +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000 +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20 +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK) + +#define VIVS_HI_IDLE_STATE 0x00000004 +#define VIVS_HI_IDLE_STATE_FE 0x00000001 +#define VIVS_HI_IDLE_STATE_DE 0x00000002 +#define VIVS_HI_IDLE_STATE_PE 0x00000004 +#define VIVS_HI_IDLE_STATE_SH 0x00000008 +#define VIVS_HI_IDLE_STATE_PA 0x00000010 +#define VIVS_HI_IDLE_STATE_SE 0x00000020 +#define VIVS_HI_IDLE_STATE_RA 0x00000040 +#define VIVS_HI_IDLE_STATE_TX 0x00000080 +#define VIVS_HI_IDLE_STATE_VG 0x00000100 +#define VIVS_HI_IDLE_STATE_IM 0x00000200 +#define VIVS_HI_IDLE_STATE_FP 0x00000400 +#define VIVS_HI_IDLE_STATE_TS 0x00000800 +#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000 + +#define VIVS_HI_AXI_CONFIG 0x00000008 +#define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f +#define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0 +#define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK) +#define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0 +#define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4 +#define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK) +#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00 +#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8 +#define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK) +#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000 +#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12 +#define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK) + +#define VIVS_HI_AXI_STATUS 0x0000000c +#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f +#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0 +#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK) +#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0 +#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4 +#define 
VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK) +#define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100 +#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200 + +#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010 +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0 +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK) +#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000 + +#define VIVS_HI_INTR_ENBL 0x00000014 +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0 +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK) + +#define VIVS_HI_CHIP_IDENTITY 0x00000018 +#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000 +#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24 +#define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) +#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000 +#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16 +#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK) +#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000 +#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12 +#define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK) + +#define VIVS_HI_CHIP_FEATURE 0x0000001c + +#define VIVS_HI_CHIP_MODEL 0x00000020 + +#define VIVS_HI_CHIP_REV 0x00000024 + +#define VIVS_HI_CHIP_DATE 0x00000028 + +#define VIVS_HI_CHIP_TIME 0x0000002c + +#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034 + +#define VIVS_HI_CACHE_CONTROL 0x00000038 + +#define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c + +#define VIVS_HI_PROFILE_READ_BYTES8 0x00000040 + +#define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044 + +#define VIVS_HI_CHIP_SPECS 0x00000048 +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0 +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0 +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4 +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00 +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8 +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000 +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12 +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000 +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20 +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000 +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25 +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & 
VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000 +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28 +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) + +#define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c + +#define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050 + +#define VIVS_HI_PROFILE_READ_BURSTS 0x00000058 + +#define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c + +#define VIVS_HI_PROFILE_READ_LASTS 0x00000060 + +#define VIVS_HI_GP_OUT0 0x00000064 + +#define VIVS_HI_GP_OUT1 0x00000068 + +#define VIVS_HI_GP_OUT2 0x0000006c + +#define VIVS_HI_AXI_CONTROL 0x00000070 +#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001 + +#define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074 + +#define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078 + +#define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c + +#define VIVS_HI_CHIP_SPECS_2 0x00000080 +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0 +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00 +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8 +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000 +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16 +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) + +#define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084 + +#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 + +#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 + +#define VIVS_PM 0x00000000 + +#define VIVS_PM_POWER_CONTROLS 0x00000100 +#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001 +#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002 +#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004 +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0 +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4 +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK) +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000 +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16 +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK) + +#define VIVS_PM_MODULE_CONTROLS 0x00000104 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004 + +#define VIVS_PM_MODULE_STATUS 0x00000108 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 + +#define VIVS_PM_PULSE_EATER 0x0000010c + +#define VIVS_MMUv2 0x00000000 + +#define VIVS_MMUv2_SAFE_ADDRESS 0x00000180 + +#define VIVS_MMUv2_CONFIGURATION 0x00000184 +#define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001 +#define 
VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0 +#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000 +#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001 +#define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008 +#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010 +#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4 +#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010 +#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10 +#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK) + +#define VIVS_MMUv2_STATUS 0x00000188 +#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003 +#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0 +#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK) +#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030 +#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4 +#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK) +#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300 +#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8 +#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK) +#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000 +#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12 +#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK) + +#define VIVS_MMUv2_CONTROL 0x0000018c +#define VIVS_MMUv2_CONTROL_ENABLE 0x00000001 + +#define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0)) +#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004 +#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004 + +#define VIVS_MC 0x00000000 + +#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400 + +#define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404 + +#define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408 + +#define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c + +#define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410 + +#define VIVS_MC_DEBUG_MEMORY 0x00000414 +#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008 +#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000 +#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000 + +#define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418 + +#define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c + +#define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420 + +#define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424 + +#define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428 + +#define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c + +#define VIVS_MC_MEMORY_FLUSH 0x00000430 + +#define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438 + +#define VIVS_MC_DEBUG_READ0 0x0000043c + +#define VIVS_MC_DEBUG_READ1 0x00000440 + +#define VIVS_MC_DEBUG_WRITE 0x00000444 + +#define VIVS_MC_PROFILE_RA_READ 0x00000448 + +#define VIVS_MC_PROFILE_TX_READ 0x0000044c + +#define VIVS_MC_PROFILE_FE_READ 0x00000450 + +#define VIVS_MC_PROFILE_PE_READ 0x00000454 + +#define VIVS_MC_PROFILE_DE_READ 0x00000458 + +#define VIVS_MC_PROFILE_SH_READ 0x0000045c + +#define VIVS_MC_PROFILE_PA_READ 0x00000460 + +#define VIVS_MC_PROFILE_SE_READ 0x00000464 + +#define VIVS_MC_PROFILE_MC_READ 0x00000468 + +#define VIVS_MC_PROFILE_HI_READ 0x0000046c + +#define VIVS_MC_PROFILE_CONFIG0 0x00000470 +#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f +#define 
VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f +#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8 +#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000 +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000 +#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000 +#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24 +#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000 +#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000 + +#define VIVS_MC_PROFILE_CONFIG1 0x00000474 +#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f +#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0 +#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003 +#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004 +#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005 +#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006 +#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007 +#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008 +#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f +#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8 +#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000 +#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100 +#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00 +#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16 +#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000 +#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000 +#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000 +#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000 +#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000 +#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000 +#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000 +#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000 +#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000 +#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000 +#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000 +#define 
VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG2 0x00000478
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
+#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
+#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+
+#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
+
+#define VIVS_MC_BUS_CONFIG 0x00000480
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
+
+#define VIVS_MC_START_COMPOSITION 0x00000554
+
+#define VIVS_MC_128B_MERGE 0x00000558
+
+
+#endif /* STATE_HI_XML */
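The next file is the etnaviv userspace ABI. To make the calling convention concrete, here is a hypothetical userspace snippet that allocates a page-sized, write-combined buffer through the GEM_NEW ioctl defined below; the render node path is an assumption, and a real client would wrap the call in drmIoctl() to retry on EINTR:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "etnaviv_drm.h"	/* the UAPI header below */

int main(void)
{
	struct drm_etnaviv_gem_new req;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* node name varies */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.size = 4096;		/* in: buffer size */
	req.flags = ETNA_BO_WC;		/* in: write-combined cache mode */

	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req) < 0) {
		perror("DRM_IOCTL_ETNAVIV_GEM_NEW");
		close(fd);
		return 1;
	}

	printf("new GEM handle: %u\n", req.handle);	/* out: GEM handle */
	close(fd);
	return 0;
}

The zeroed struct is not just hygiene: as the header comment below explains, a zero value in any newly appended field must keep a backwards-compatible meaning, which is what lets these ioctls grow over time.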
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
new file mode 100644
index 000000000000..4cc989ad6851
--- /dev/null
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_DRM_H__
+#define __ETNAVIV_DRM_H__
+
+#include "drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
+ *     user/kernel compatibility
+ *  2) Keep fields aligned to their size
+ *  3) Because of how drm_ioctl() works, we can add new fields at
+ *     the end of an ioctl if some care is taken: drm_ioctl() will
+ *     zero out the new fields at the tail of the ioctl, so a zero
+ *     value should have a backwards compatible meaning.  And for
+ *     output params, userspace won't see the newly added output
+ *     fields.. so that has to be somehow ok.
+ */
+
+/* timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls).  The following struct is logically the
+ * same as 'struct timespec' but 32/64b ABI safe.
+ */
+struct drm_etnaviv_timespec {
+	__s64 tv_sec;		/* seconds */
+	__s64 tv_nsec;		/* nanoseconds */
+};
+
+#define ETNAVIV_PARAM_GPU_MODEL      0x01
+#define ETNAVIV_PARAM_GPU_REVISION   0x02
+#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
+#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
+#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
+#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
+#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
+
+#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
+#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
+#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
+#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
+#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
+#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
+#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
+#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
+#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
+#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
+
+#define ETNA_MAX_PIPES 4
+
+struct drm_etnaviv_param {
+	__u32 pipe;		/* in */
+	__u32 param;		/* in, ETNAVIV_PARAM_x */
+	__u64 value;		/* out (get_param) or in (set_param) */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define ETNA_BO_CACHE_MASK 0x000f0000
+/* cache modes */
+#define ETNA_BO_CACHED     0x00010000
+#define ETNA_BO_WC         0x00020000
+#define ETNA_BO_UNCACHED   0x00040000
+/* map flags */
+#define ETNA_BO_FORCE_MMU  0x00100000
+
+struct drm_etnaviv_gem_new {
+	__u64 size;		/* in */
+	__u32 flags;		/* in, mask of ETNA_BO_x */
+	__u32 handle;		/* out */
+};
+
+struct drm_etnaviv_gem_info {
+	__u32 handle;		/* in */
+	__u32 pad;
+	__u64 offset;		/* out, offset to pass to mmap() */
+};
+
+#define ETNA_PREP_READ   0x01
+#define ETNA_PREP_WRITE  0x02
+#define ETNA_PREP_NOSYNC 0x04
+
+struct drm_etnaviv_gem_cpu_prep {
+	__u32 handle;		/* in */
+	__u32 op;		/* in, mask of ETNA_PREP_x */
+	struct drm_etnaviv_timespec timeout;	/* in */
+};
+
+struct drm_etnaviv_gem_cpu_fini {
+	__u32 handle;		/* in */
+	__u32 flags;		/* in, placeholder for now, no defined values */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ * relocbuf->gpuaddr + reloc_offset
+ *
+ * NOTE that reloc's must be sorted by order of increasing submit_offset,
+ * otherwise EINVAL.
+ */
+struct drm_etnaviv_gem_submit_reloc {
+	__u32 submit_offset;	/* in, offset from submit_bo */
+	__u32 reloc_idx;	/* in, index of reloc_bo buffer */
+	__u64 reloc_offset;	/* in, offset from start of reloc_bo */
+	__u32 flags;		/* in, placeholder for now, no defined values */
+};
+
+/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As an optimization, the current buffer (gpu virtual address) can be
+ * passed back through the 'presumed' field.  If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped.  This can
+ * avoid the kernel needing to map/access the cmdstream bo in the common
+ * case.
+ */
+#define ETNA_SUBMIT_BO_READ  0x0001
+#define ETNA_SUBMIT_BO_WRITE 0x0002
+struct drm_etnaviv_gem_submit_bo {
+	__u32 flags;		/* in, mask of ETNA_SUBMIT_BO_x */
+	__u32 handle;		/* in, GEM handle */
+	__u64 presumed;		/* in/out, presumed buffer address */
+};
+
+/* Each cmdstream submit consists of a table of buffers involved, and
+ * one or more cmdstream buffers.
This allows for conditional execution + * (context-restore), and IB buffers needed for per tile/bin draw cmds. + */ +#define ETNA_PIPE_3D 0x00 +#define ETNA_PIPE_2D 0x01 +#define ETNA_PIPE_VG 0x02 +struct drm_etnaviv_gem_submit { + __u32 fence; /* out */ + __u32 pipe; /* in */ + __u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */ + __u32 nr_bos; /* in, number of submit_bo's */ + __u32 nr_relocs; /* in, number of submit_reloc's */ + __u32 stream_size; /* in, cmdstream size */ + __u64 bos; /* in, ptr to array of submit_bo's */ + __u64 relocs; /* in, ptr to array of submit_reloc's */ + __u64 stream; /* in, ptr to cmdstream */ +}; + +/* The normal way to synchronize with the GPU is just to CPU_PREP on + * a buffer if you need to access it from the CPU (other cmdstream + * submission from same or other contexts, PAGE_FLIP ioctl, etc, all + * handle the required synchronization under the hood). This ioctl + * mainly just exists as a way to implement the gallium pipe_fence + * APIs without requiring a dummy bo to synchronize on. + */ +#define ETNA_WAIT_NONBLOCK 0x01 +struct drm_etnaviv_wait_fence { + __u32 pipe; /* in */ + __u32 fence; /* in */ + __u32 flags; /* in, mask of ETNA_WAIT_x */ + __u32 pad; + struct drm_etnaviv_timespec timeout; /* in */ +}; + +#define ETNA_USERPTR_READ 0x01 +#define ETNA_USERPTR_WRITE 0x02 +struct drm_etnaviv_gem_userptr { + __u64 user_ptr; /* in, page aligned user pointer */ + __u64 user_size; /* in, page aligned user size */ + __u32 flags; /* in, flags */ + __u32 handle; /* out, non-zero handle */ +}; + +struct drm_etnaviv_gem_wait { + __u32 pipe; /* in */ + __u32 handle; /* in, bo to be waited for */ + __u32 flags; /* in, mask of ETNA_WAIT_x */ + __u32 pad; + struct drm_etnaviv_timespec timeout; /* in */ +}; + +#define DRM_ETNAVIV_GET_PARAM 0x00 +/* placeholder: +#define DRM_ETNAVIV_SET_PARAM 0x01 + */ +#define DRM_ETNAVIV_GEM_NEW 0x02 +#define DRM_ETNAVIV_GEM_INFO 0x03 +#define DRM_ETNAVIV_GEM_CPU_PREP 0x04 +#define DRM_ETNAVIV_GEM_CPU_FINI 0x05 +#define DRM_ETNAVIV_GEM_SUBMIT 0x06 +#define DRM_ETNAVIV_WAIT_FENCE 0x07 +#define DRM_ETNAVIV_GEM_USERPTR 0x08 +#define DRM_ETNAVIV_GEM_WAIT 0x09 +#define DRM_ETNAVIV_NUM_IOCTLS 0x0a + +#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param) +#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new) +#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info) +#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep) +#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini) +#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit) +#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence) +#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr) +#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait) + +#endif /* __ETNAVIV_DRM_H__ */ -- cgit v1.2.3 From 887dc9f2cef6e98dcccf807da5e6faf4f60ba483 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 15 Dec 2015 20:56:44 -0800 Subject: inet: tcp: fix inetpeer_set_addr_v4() David Ahern added a vif field in the a4 part of 
inetpeer_addr struct. This broke IPv4 TCP fast open client side and more generally tcp metrics cache, because inetpeer_addr_cmp() is now comparing two u32 instead of one. inetpeer_set_addr_v4() needs to properly init vif field, otherwise the comparison result depends on uninitialized data. Fixes: 192132b9a034 ("net: Add support for VRFs to inetpeer cache") Reported-by: Yuchung Cheng Signed-off-by: Eric Dumazet Cc: Neal Cardwell Signed-off-by: David S. Miller --- include/net/inetpeer.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 4a6009d4486b..235c7811a86a 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -78,6 +78,7 @@ void inet_initpeers(void) __init; static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) { iaddr->a4.addr = ip; + iaddr->a4.vif = 0; iaddr->family = AF_INET; } -- cgit v1.2.3 From 7bbadd2d1009575dad675afc16650ebb5aa10612 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Mon, 14 Dec 2015 23:30:43 +0100 Subject: net: fix warnings in 'make htmldocs' by moving macro definition out of field declaration Docbook does not like the definition of macros inside a field declaration and adds a warning. Move the definition out. Fixes: 79462ad02e86180 ("net: add validation for the socket syscall protocol argument") Reported-by: kbuild test robot Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/sock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/sock.h b/include/net/sock.h index 28790fe18206..14d3c0734007 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -403,8 +403,8 @@ struct sock { sk_no_check_rx : 1, sk_userlocks : 4, sk_protocol : 8, -#define SK_PROTOCOL_MAX U8_MAX sk_type : 16; +#define SK_PROTOCOL_MAX U8_MAX kmemcheck_bitfield_end(flags); int sk_wmem_queued; gfp_t sk_allocation; -- cgit v1.2.3 From 454d5d882c7e412b840e3c99010fe81a9862f6fb Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Fri, 30 Oct 2015 14:58:08 +0000 Subject: xen: Add RING_COPY_REQUEST() Using RING_GET_REQUEST() on a shared ring is easy to use incorrectly (i.e., by not considering that the other end may alter the data in the shared ring while it is being inspected). Safe usage of a request generally requires taking a local copy. Provide a RING_COPY_REQUEST() macro to use instead of RING_GET_REQUEST() and an open-coded memcpy(). This takes care of ensuring that the copy is done correctly regardless of any possible compiler optimizations. Use a volatile source to prevent the compiler from reordering or omitting the copy. This is part of XSA155. CC: stable@vger.kernel.org Signed-off-by: David Vrabel Signed-off-by: Konrad Rzeszutek Wilk --- include/xen/interface/io/ring.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include') diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 7d28aff605c7..7dc685b4057d 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -181,6 +181,20 @@ struct __name##_back_ring { \ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) +/* + * Get a local copy of a request. + * + * Use this in preference to RING_GET_REQUEST() so all processing is + * done on a local copy that cannot be modified by the other end. 
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _req is a struct which consists of only bitfields.
+ */
+#define RING_COPY_REQUEST(_r, _idx, _req) do {				\
+	/* Use volatile to force the copy into _req. */			\
+	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);	\
+} while (0)
+
 #define RING_GET_RESPONSE(_r, _idx)					\
 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
-- cgit v1.2.3
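The race RING_COPY_REQUEST() closes is a classic double fetch: the backend validates a field in the shared ring, the untrusted frontend rewrites it, and the backend then acts on the new value. A small, self-contained userspace illustration of the same pattern; all names here are hypothetical, and in the kernel the shared slot would be a slot in the granted ring page:

#include <stdio.h>

struct request {
	unsigned int op;	/* opcode chosen by the untrusted side */
	unsigned int len;
};

#define NR_OPS 4

/* stands in for a ring slot the other end can rewrite at any time */
static struct request shared_slot = { .op = 2, .len = 64 };

static int dispatch(unsigned int op)
{
	return (int)op;		/* placeholder for a handler table */
}

/* BROKEN: 'op' is fetched twice, so the peer can change it between
 * the bounds check and the use; the check protects nothing. */
static int handle_unsafe(void)
{
	if (shared_slot.op >= NR_OPS)		/* fetch #1 */
		return -1;
	return dispatch(shared_slot.op);	/* fetch #2 */
}

/* SAFE: snapshot the whole request once; the volatile-qualified source
 * keeps the compiler from optimizing the local copy back into repeated
 * reads of shared memory, which is exactly what RING_COPY_REQUEST()
 * relies on. */
static int handle_safe(void)
{
	struct request req = *(volatile struct request *)&shared_slot;

	if (req.op >= NR_OPS)
		return -1;
	return dispatch(req.op);
}

int main(void)
{
	printf("unsafe: %d, safe: %d\n", handle_unsafe(), handle_safe());
	return 0;
}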
v2: fix typo in commit message Reviewed-by: Jammy Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/drm_pci.c | 20 ++++++++++++++++++++ include/drm/drmP.h | 1 + 2 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index fcd2a86acd2c..a1fff1179a97 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -410,6 +410,26 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) } EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask); +int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw) +{ + struct pci_dev *root; + u32 lnkcap; + + *mlw = 0; + if (!dev->pdev) + return -EINVAL; + + root = dev->pdev->bus->self; + + pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); + + *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap); + return 0; +} +EXPORT_SYMBOL(drm_pcie_get_max_link_width); + #else int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 30d4a5a495e2..bd5d2c536ddc 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1098,6 +1098,7 @@ static inline int drm_pci_set_busid(struct drm_device *dev, #define DRM_PCIE_SPEED_80 4 extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); +extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw); /* platform section */ extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); -- cgit v1.2.3 From b75e1513fcdbc6e8988a57bc0d9fc21f8c4d7b2c Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 28 Aug 2015 11:23:03 +0300 Subject: drm/omap: remove unused plugin defines Remove unused defines related to the SGX plugin. Signed-off-by: Tomi Valkeinen Acked-by: Laurent Pinchart --- drivers/gpu/drm/omapdrm/omap_drv.h | 6 ------ include/uapi/drm/omap_drm.h | 6 ------ 2 files changed, 12 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 718f032aa052..9e0030731c37 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -38,12 +38,6 @@ struct omap_drm_usergart; -/* max # of mapper-id's that can be assigned.. todo, come up with a better * (but still inexpensive) way to store/access per-buffer mapper private * data..
- */ -#define MAX_MAPPERS 2 - /* parameters which describe (unrotated) coordinates of scanout within a fb: */ struct omap_drm_window { uint32_t rotation; diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 0750c01bb480..38a3bd847e15 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h @@ -101,9 +101,6 @@ struct drm_omap_gem_info { #define DRM_OMAP_GET_PARAM 0x00 #define DRM_OMAP_SET_PARAM 0x01 -/* placeholder for plugin-api -#define DRM_OMAP_GET_BASE 0x02 -*/ #define DRM_OMAP_GEM_NEW 0x03 #define DRM_OMAP_GEM_CPU_PREP 0x04 #define DRM_OMAP_GEM_CPU_FINI 0x05 @@ -112,9 +109,6 @@ struct drm_omap_gem_info { #define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param) #define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param) -/* placeholder for plugin-api -#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base) -*/ #define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new) #define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep) #define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini) -- cgit v1.2.3 From 69a0f89c0641668d402573a05b327ac8ed6d2560 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 30 Dec 2015 22:20:30 +0100 Subject: drm/dp/mst: constify drm_dp_mst_topology_cbs structures The drm_dp_mst_topology_cbs structures are never modified, so declare them as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/intel_dp_mst.c | 2 +- drivers/gpu/drm/radeon/radeon_dp_mst.c | 2 +- include/drm/drm_dp_mst_helper.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index e8d369d0a713..9ae1a4fc5bb1 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -512,7 +512,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -static struct drm_dp_mst_topology_cbs mst_cbs = { +static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 94323f51ffcf..8a0222573f6a 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -329,7 +329,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } -struct drm_dp_mst_topology_cbs mst_cbs = { +const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 5340099741ae..16663713cc9f 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -420,7 +420,7 @@ struct drm_dp_payload { struct drm_dp_mst_topology_mgr { struct device *dev; - struct drm_dp_mst_topology_cbs *cbs; + const struct drm_dp_mst_topology_cbs *cbs; int max_dpcd_transaction_bytes; struct 
drm_dp_aux *aux; /* auxch for this topology mgr to use */ int max_payloads; -- cgit v1.2.3 From 1f16ee7fa13649f4e55aa48ad31c3eb0722a62d3 Mon Sep 17 00:00:00 2001 From: Mykola Lysenko Date: Fri, 18 Dec 2015 17:14:43 -0500 Subject: drm/dp/mst: always send reply for UP request We should always send a reply for an UP request so that the downstream device can clean up its resources appropriately. Previously, the reply for an UP request was sent only once. Acked-by: Dave Airlie Signed-off-by: Mykola Lysenko Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/drm_dp_mst_topology.c | 30 +++++++++++------------------- include/drm/drm_dp_mst_helper.h | 2 -- 2 files changed, 11 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 7710de6e8a55..ca92a3217465 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1494,26 +1494,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) } /* called holding qlock */ -static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) { - struct drm_dp_sideband_msg_tx *txmsg; int ret; /* construct a chunk from the first msg in the tx_msg queue */ - if (list_empty(&mgr->tx_msg_upq)) { - mgr->tx_up_in_progress = false; - return; - } - - txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); ret = process_single_tx_qlock(mgr, txmsg, true); - if (ret == 1) { - /* up txmsgs aren't put in slots - so free after we send it */ - list_del(&txmsg->next); - kfree(txmsg); - } else if (ret) + + if (ret != 1) DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); - mgr->tx_up_in_progress = true; + + txmsg->dst->tx_slots[txmsg->seqno] = NULL; } static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, @@ -1907,11 +1899,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, drm_dp_encode_up_ack_reply(txmsg, req_type); mutex_lock(&mgr->qlock); - list_add_tail(&txmsg->next, &mgr->tx_msg_upq); - if (!mgr->tx_up_in_progress) { - process_single_up_tx_qlock(mgr); - } + + process_single_up_tx_qlock(mgr, txmsg); + mutex_unlock(&mgr->qlock); + + kfree(txmsg); return 0; } @@ -2843,7 +2836,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); mutex_init(&mgr->destroy_connector_lock); - INIT_LIST_HEAD(&mgr->tx_msg_upq); INIT_LIST_HEAD(&mgr->tx_msg_downq); INIT_LIST_HEAD(&mgr->destroy_connector_list); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 74b5888bbc73..4fc55a87dfee 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -451,9 +451,7 @@ struct drm_dp_mst_topology_mgr { the mstb tx_slots and txmsg->state once they are queued */ struct mutex qlock; struct list_head tx_msg_downq; - struct list_head tx_msg_upq; bool tx_down_in_progress; - bool tx_up_in_progress; /* payload info + lock for it */ struct mutex payload_lock; -- cgit v1.2.3 From 4cd39917ddb2fb5691e05b13b13f1f2398343b3e Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 4 Jan 2016 12:53:16 +0100 Subject: drm/atomic: Add __drm_atomic_helper_connector_reset, v2. This is useful for drivers that subclass connector_state, like tegra.
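For instance (a hedged sketch with invented foo_* names, not the actual tegra code), a driver embedding drm_connector_state in its own state struct can now reuse the common reset logic:

#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>

/* hypothetical driver state subclassing the core connector state */
struct foo_connector_state {
	struct drm_connector_state base;
	int dither_mode;		/* invented driver-private field */
};

static void foo_connector_reset(struct drm_connector *connector)
{
	struct foo_connector_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	/* base is the first member, so freeing the old base state releases
	 * the whole subclassed allocation (assuming it was allocated the
	 * same way) */
	kfree(connector->state);
	__drm_atomic_helper_connector_reset(connector,
					    state ? &state->base : NULL);
}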
Changes since v1: - Docbook updates. Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1451908400-25147-2-git-send-email-maarten.lankhorst@linux.intel.com Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_atomic_helper.c | 30 ++++++++++++++++++++++++++---- include/drm/drm_atomic_helper.h | 2 ++ 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 268d37f26960..26d258d0618b 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2605,6 +2605,28 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, } EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); +/** + * __drm_atomic_helper_connector_reset - reset state on connector + * @connector: drm connector + * @conn_state: connector state to assign + * + * Initializes the newly allocated @conn_state and assigns it to + * #connector ->state, usually required when initializing the drivers + * or when called from the ->reset hook. + * + * This is useful for drivers that subclass the connector state. + */ +void +__drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state) +{ + if (conn_state) + conn_state->connector = connector; + + connector->state = conn_state; +} +EXPORT_SYMBOL(__drm_atomic_helper_connector_reset); + /** * drm_atomic_helper_connector_reset - default ->reset hook for connectors * @connector: drm connector @@ -2615,11 +2637,11 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); */ void drm_atomic_helper_connector_reset(struct drm_connector *connector) { - kfree(connector->state); - connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL); + struct drm_connector_state *conn_state = + kzalloc(sizeof(*conn_state), GFP_KERNEL); - if (connector->state) - connector->state->connector = connector; + kfree(connector->state); + __drm_atomic_helper_connector_reset(connector, conn_state); } EXPORT_SYMBOL(drm_atomic_helper_connector_reset); diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index a286cce98720..89d008dc08e2 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -126,6 +126,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); void drm_atomic_helper_connector_reset(struct drm_connector *connector); void __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, -- cgit v1.2.3 From 4cd9fa529d77dde8f760adb3d934dfac6e169b1e Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 4 Jan 2016 12:53:18 +0100 Subject: drm/atomic: add connector mask to drm_crtc_state. It can be useful to iterate over connectors without grabbing connection_mutex. It can also be used to see how many connectors are on a crtc without iterating over the list. 
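As an illustration (hypothetical foo_* driver code, not part of this patch), an atomic_check implementation can bound the number of attached connectors without taking connection_mutex; the vc4 conversion later in this series uses exactly this pattern:

#include <linux/bitops.h>
#include <linux/errno.h>

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	/* count the connectors in the new state without walking any lists */
	unsigned int num_connectors = hweight32(state->connector_mask);

	/* this invented hardware can only drive one connector per CRTC */
	if (num_connectors > 1)
		return -EINVAL;

	return 0;
}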
Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1451908400-25147-4-git-send-email-maarten.lankhorst@linux.intel.com Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_atomic.c | 11 +++++++++++ include/drm/drm_crtc.h | 3 +++ 2 files changed, 14 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 6a21e5c378c1..14b321580517 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1063,10 +1063,21 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, { struct drm_crtc_state *crtc_state; + if (conn_state->crtc && conn_state->crtc != crtc) { + crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state, + conn_state->crtc); + + crtc_state->connector_mask &= + ~(1 << drm_connector_index(conn_state->connector)); + } + if (crtc) { crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); + + crtc_state->connector_mask |= + 1 << drm_connector_index(conn_state->connector); } conn_state->crtc = crtc; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 3b040b355472..e3c4d486e1af 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -306,6 +306,7 @@ struct drm_plane_helper_funcs; * @active_changed: crtc_state->active has been toggled. * @connectors_changed: connectors to this crtc have been updated * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes + * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors * @last_vblank_count: for helpers and drivers to capture the vblank of the * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings @@ -339,6 +340,8 @@ struct drm_crtc_state { */ u32 plane_mask; + u32 connector_mask; + /* last_vblank_count: for vblank waits before cleanup */ u32 last_vblank_count; -- cgit v1.2.3 From 4cba68507cf58db99752cf79198beb4a85a9f8ce Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 8 Dec 2015 09:49:20 +0100 Subject: drm/atomic-helper: Reject legacy flips on a disabled pipe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want this for consistency with existing page_flip semantics. Since this spurred quite a discussion on IRC, also document why we reject event generation when the pipe is off: It's not that it's hard to implement, but userspace has a track record which proves that it's way too easy to accidentally abuse and cause havoc. We want to make sure userspace doesn't get away with that. v2: Somehow thought we do reject events already, but that code only existed in my imagination ... Also suggestions from Thierry.
Cc: Daniel Stone Cc: Ville Syrjälä Cc: Thierry Reding Reviewed-by: Daniel Stone Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1449564561-3896-4-git-send-email-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_atomic.c | 16 ++++++++++++++++ drivers/gpu/drm/drm_atomic_helper.c | 9 +++++++++ include/drm/drm_crtc.h | 3 ++- 3 files changed, 27 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 14b321580517..6050c80f1dbc 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -508,6 +508,22 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, return -EINVAL; } + /* + * Reject event generation for when a CRTC is off and stays off. + * It wouldn't be hard to implement this, but userspace has a track + * record of happily burning through 100% cpu (or worse, crash) when the + * display pipe is suspended. To avoid all that fun just reject updates + * that ask for events since likely that indicates a bug in the + * compositor's drawing loop. This is consistent with the vblank IOCTL + * and legacy page_flip IOCTL which also reject service on a disabled + * pipe. + */ + if (state->event && !state->active && !crtc->state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n", + crtc->base.id); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 26d258d0618b..738104b68d26 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2284,6 +2284,15 @@ retry: goto fail; drm_atomic_set_fb_for_plane(plane_state, fb); + /* Make sure we don't accidentally do a full modeset. */ + state->allow_modeset = false; + if (!crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n", + crtc->base.id); + ret = -EINVAL; + goto fail; + } + ret = drm_atomic_async_commit(state); if (ret != 0) goto fail; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index e3c4d486e1af..c65a212db77e 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -551,7 +551,8 @@ struct drm_crtc_funcs { * ->page_flip() operation is already pending the callback should return * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode * or just runtime disabled through DPMS respectively the new atomic - * "ACTIVE" state) should result in an -EINVAL error code. + * "ACTIVE" state) should result in an -EINVAL error code. Note that + * drm_atomic_helper_page_flip() checks this already for atomic drivers. */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, -- cgit v1.2.3 From df7d678bea8ba8904bdb293c8e96aa9488f7dbee Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Mon, 4 Jan 2016 07:53:36 +0100 Subject: drm/docs: more leftovers from the big vtable documentation pile Another pile of vfuncs from the old gpu.tmpl xml documentation that I've forgotten to delete. I spotted a few more things to clarify/extend in the new kerneldoc while going through this once more. v2: Spelling fixes (Thierry). v3: More spelling fixes and use Thierry's proposal to clarify why drivers need to validate modes both in ->mode_fixup and ->mode_valid. 
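To make that double-validation requirement concrete, here is a hedged sketch (the foo_* names and the 150 MHz pixel-clock ceiling are invented) of a driver enforcing the same limit in both hooks:

#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>

#define FOO_MAX_PIXCLK_KHZ	150000	/* invented hardware limit */

/* filters the mode list that is advertised to userspace */
static int foo_connector_mode_valid(struct drm_connector *connector,
				    struct drm_display_mode *mode)
{
	if (mode->clock > FOO_MAX_PIXCLK_KHZ)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/* userspace can still set modes that ->mode_valid() never saw, so the
 * same limit must be enforced again here */
static bool foo_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	return mode->clock <= FOO_MAX_PIXCLK_KHZ;
}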
Cc: Laurent Pinchart Cc: Thierry Reding Acked-by: Thierry Reding Signed-off-by: Daniel Vetter --- Documentation/DocBook/gpu.tmpl | 188 ------------------------------- include/drm/drm_modeset_helper_vtables.h | 44 +++++++- 2 files changed, 41 insertions(+), 191 deletions(-) (limited to 'include') diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 225a246c5f53..faa5e0d4208d 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -1578,194 +1578,6 @@ void intel_crt_init(struct drm_device *dev) To use it, a driver must provide bottom functions for all of the three KMS entities. - - Legacy CRTC Helper Operations - - - bool (*mode_fixup)(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Let CRTCs adjust the requested mode or reject it completely. This - operation returns true if the mode is accepted (possibly after being - adjusted) or false if it is rejected. - - - The mode_fixup operation should reject the - mode if it can't reasonably use it. The definition of "reasonable" - is currently fuzzy in this context. One possible behaviour would be - to set the adjusted mode to the panel timings when a fixed-mode - panel is used with hardware capable of scaling. Another behaviour - would be to accept any input mode and adjust it to the closest mode - supported by the hardware (FIXME: This needs to be clarified). - - - - int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) - - Move the CRTC on the current frame buffer (stored in - crtc->fb) to position (x,y). Any of the frame - buffer, x position or y position may have been modified. - - - This helper operation is optional. If not provided, the - drm_crtc_helper_set_config function will fall - back to the mode_set helper operation. - - - FIXME: Why are x and y passed as arguments, as they can be accessed - through crtc->x and - crtc->y? - - - - void (*prepare)(struct drm_crtc *crtc); - - Prepare the CRTC for mode setting. This operation is called after - validating the requested mode. Drivers use it to perform - device-specific operations required before setting the new mode. - - - - int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); - - Set a new mode, position and frame buffer. Depending on the device - requirements, the mode can be stored internally by the driver and - applied in the commit operation, or - programmed to the hardware immediately. - - - The mode_set operation returns 0 on success - or a negative error code if an error occurs. - - - - void (*commit)(struct drm_crtc *crtc); - - Commit a mode. This operation is called after setting the new mode. - Upon return the device must use the new mode and be fully - operational. - - - - - - Encoder Helper Operations - - - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Let encoders adjust the requested mode or reject it completely. This - operation returns true if the mode is accepted (possibly after being - adjusted) or false if it is rejected. See the - mode_fixup CRTC helper - operation for an explanation of the allowed adjustments. - - - - void (*prepare)(struct drm_encoder *encoder); - - Prepare the encoder for mode setting. This operation is called after - validating the requested mode. 
Drivers use it to perform - device-specific operations required before setting the new mode. - - - - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Set a new mode. Depending on the device requirements, the mode can - be stored internally by the driver and applied in the - commit operation, or programmed to the - hardware immediately. - - - - void (*commit)(struct drm_encoder *encoder); - - Commit a mode. This operation is called after setting the new mode. - Upon return the device must use the new mode and be fully - operational. - - - - - - Connector Helper Operations - - - struct drm_encoder *(*best_encoder)(struct drm_connector *connector); - - Return a pointer to the best encoder for the connecter. Device that - map connectors to encoders 1:1 simply return the pointer to the - associated encoder. This operation is mandatory. - - - - int (*get_modes)(struct drm_connector *connector); - - Fill the connector's probed_modes list - by parsing EDID data with drm_add_edid_modes, - adding standard VESA DMT modes with drm_add_modes_noedid, - or calling drm_mode_probed_add directly for every - supported mode and return the number of modes it has detected. This - operation is mandatory. - - - Note that the caller function will automatically add standard VESA - DMT modes up to 1024x768 if the get_modes - helper operation returns no mode and if the connector status is - connector_status_connected. There is no need to call - drm_add_edid_modes manually in that case. - - - The vrefresh value is computed by - drm_helper_probe_single_connector_modes. - - - When parsing EDID data, drm_add_edid_modes fills the - connector display_info - width_mm and - height_mm fields. When creating modes - manually the get_modes helper operation must - set the display_info - width_mm and - height_mm fields if they haven't been set - already (for instance at initialization time when a fixed-size panel is - attached to the connector). The mode width_mm - and height_mm fields are only used internally - during EDID parsing and should not be set when creating modes manually. - - - - int (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); - - Verify whether a mode is valid for the connector. Return MODE_OK for - supported modes and one of the enum drm_mode_status values (MODE_*) - for unsupported modes. This operation is optional. - - - As the mode rejection reason is currently not used beside for - immediately removing the unsupported mode, an implementation can - return MODE_BAD regardless of the exact reason why the mode is not - valid. - - - Note that the mode_valid helper operation is - only called for modes detected by the device, and - not for modes set by the user through the CRTC - set_config operation. - - - - Atomic Modeset Helper Functions Reference diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 29e0dc50031d..a126a0d7aed4 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -131,6 +131,20 @@ struct drm_crtc_helper_funcs { * Atomic drivers which need to inspect and adjust more state should * instead use the @atomic_check callback. 
* + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. + * * RETURNS: * * True if an acceptable configuration is possible, false if the modeset @@ -188,7 +202,9 @@ struct drm_crtc_helper_funcs { * This callback is used by the legacy CRTC helpers to set a new * framebuffer and scanout position. It is optional and used as an * optimized fast-path instead of a full mode set operation with all the - * resulting flickering. Since it can't update other planes it's + * resulting flickering. If it is not present + * drm_crtc_helper_set_config() will fall back to a full modeset, using + * the ->mode_set() callback. Since it can't update other planes it's * incompatible with atomic modeset support. * * This callback is only used by the CRTC helpers and deprecated. @@ -439,6 +455,20 @@ struct drm_encoder_helper_funcs { * Atomic drivers which need to inspect and adjust more state should * instead use the @atomic_check callback. * + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. + * * RETURNS: * * True if an acceptable configuration is possible, false if the modeset @@ -640,8 +670,16 @@ struct drm_connector_helper_funcs { * In this function drivers then parse the modes in the EDID and add * them by calling drm_add_edid_modes(). But connectors that driver a * fixed panel can also manually add specific modes using - * drm_mode_probed_add(). Finally drivers that support audio probably - * want to update the ELD data, too, using drm_edid_to_eld(). + * drm_mode_probed_add(). Drivers which manually add modes should also + * make sure that the @display_info, @width_mm and @height_mm fields of the + * struct #drm_connector are filled in. + * + * Virtual drivers that just want some standard VESA mode with a given + * resolution can call drm_add_modes_noedid(), and mark the preferred + * one using drm_set_preferred_mode(). 
+ * + * Finally drivers that support audio probably want to update the ELD + * data, too, using drm_edid_to_eld(). * * This function is only called after the ->detect() hook has indicated * that a sink is connected and when the EDID isn't overridden through -- cgit v1.2.3 From 14de6c44d149c68df1800ded42bbab51485ef67a Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Mon, 4 Jan 2016 12:53:20 +0100 Subject: drm/atomic: Remove drm_atomic_connectors_for_crtc. Now that connector_mask is reliable there's no need for this function any more. Signed-off-by: Maarten Lankhorst Link: http://patchwork.freedesktop.org/patch/msgid/1451908400-25147-6-git-send-email-maarten.lankhorst@linux.intel.com Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_atomic.c | 30 ------------------------------ drivers/gpu/drm/drm_atomic_helper.c | 10 ++++------ drivers/gpu/drm/vc4/vc4_crtc.c | 2 +- include/drm/drm_atomic.h | 4 ---- 4 files changed, 5 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 6050c80f1dbc..3f74193885f1 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1198,36 +1198,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_add_affected_planes); -/** - * drm_atomic_connectors_for_crtc - count number of connected outputs - * @state: atomic state - * @crtc: DRM crtc - * - * This function counts all connectors which will be connected to @crtc - * according to @state. Useful to recompute the enable state for @crtc. - */ -int -drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - struct drm_connector *connector; - struct drm_connector_state *conn_state; - - int i, num_connected_connectors = 0; - - for_each_connector_in_state(state, connector, conn_state, i) { - if (conn_state->crtc == crtc) - num_connected_connectors++; - } - - DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d:%s]\n", - state, num_connected_connectors, - crtc->base.id, crtc->name); - - return num_connected_connectors; -} -EXPORT_SYMBOL(drm_atomic_connectors_for_crtc); - /** * drm_atomic_legacy_backoff - locking backoff for legacy ioctls * @state: atomic state diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 738104b68d26..57cccd68ca52 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -463,7 +463,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, * crtc only changed its mode but has the same set of connectors. 
*/ for_each_crtc_in_state(state, crtc, crtc_state, i) { - int num_connectors; + bool has_connectors = + !!crtc_state->connector_mask; /* * We must set ->active_changed after walking connectors for @@ -492,10 +493,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, if (ret != 0) return ret; - num_connectors = drm_atomic_connectors_for_crtc(state, - crtc); - - if (crtc_state->enable != !!num_connectors) { + if (crtc_state->enable != has_connectors) { DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", crtc->base.id, crtc->name); @@ -1754,7 +1752,7 @@ static int update_output_state(struct drm_atomic_state *state, if (crtc == set->crtc) continue; - if (!drm_atomic_connectors_for_crtc(state, crtc)) { + if (!crtc_state->connector_mask) { ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); if (ret < 0) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 8d0d70e51ef2..018145e0b87d 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -328,7 +328,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, /* The pixelvalve can only feed one encoder (and encoders are * 1:1 with connectors.) */ - if (drm_atomic_connectors_for_crtc(state->state, crtc) > 1) + if (hweight32(state->connector_mask) > 1) return -EINVAL; drm_atomic_crtc_state_for_each_plane(plane, state) { diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index d8576ac55693..d3eaa5df187a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -130,10 +130,6 @@ int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); -int -drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc); - void drm_atomic_legacy_backoff(struct drm_atomic_state *state); void -- cgit v1.2.3 From ab74961810ba7935b5f9643da10daaa36690f48e Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 11 Jan 2016 15:35:20 +0100 Subject: drm/ttm: add ttm_bo_move_to_lru_tail function v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows the drivers to move a BO to the end of the LRU without removing and adding it again. v2: Make it more robust, handle pinned and swappable BOs as well. Reviewed-by: Thomas Hellstrom Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 21 +++++++++++++++++++++ include/drm/ttm/ttm_bo_api.h | 10 ++++++++++ 2 files changed, 31 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3ea9a01c960c..4cbf26555093 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -228,6 +228,27 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) } EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man; + + lockdep_assert_held(&bo->resv->lock.base); + + if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { + list_del_init(&bo->swap); + list_del_init(&bo->lru); + + } else { + if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) + list_move_tail(&bo->swap, &bo->glob->swap_lru); + + man = &bdev->man[bo->mem.mem_type]; + list_move_tail(&bo->lru, &man->lru); + } +} +EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); + /* * Call bo->mutex locked.
*/ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c768ddfbe53c..afae2316bd43 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -383,6 +383,16 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); */ extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); +/** + * ttm_bo_move_to_lru_tail + * + * @bo: The buffer object. + * + * Move this BO to the tail of all lru lists used to lookup and reserve an + * object. This function must be called with struct ttm_bo_global::lru_lock + * held, and is used to make a BO less likely to be considered for eviction. + */ +extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue -- cgit v1.2.3
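Closing with a usage sketch (the foo_* caller is invented; only ttm_bo_move_to_lru_tail() itself comes from the patch above): a driver that has just touched a BO can bump it to the LRU tail so it is considered last for eviction. The BO must be reserved (the helper lockdep-asserts bo->resv) and the global LRU lock must be held:

static void foo_bo_mark_recently_used(struct ttm_buffer_object *bo)
{
	/* caller already holds the reservation (bo->resv) here */
	spin_lock(&bo->glob->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->glob->lru_lock);
}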