| author | Jani Nikula <jani.nikula@intel.com> | 2019-11-15 13:17:39 +0200 |
|---|---|---|
| committer | Jani Nikula <jani.nikula@intel.com> | 2019-11-15 13:17:39 +0200 |
| commit | e205ceeb253723f4e4bdce619844ed678ae48276 | |
| tree | b6f5e3b21f915320a86733504faf8206569b8057 /drivers | |
| parent | drm/i915/guc: Properly capture & release GuC interrupts on Gen11+ | |
| parent | Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux int... | |
Merge drm/drm-next into drm-intel-next-queued
Backmerge to get dfce90259d74 ("Backmerge i915 security patches from
commit 'ea0b163b13ff' into drm-next") and thus 100d46bd72ec ("Merge
Intel Gen8/Gen9 graphics fixes from Jon Bloomfield.").
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers')
1232 files changed, 37977 insertions, 17323 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3b2525908dd8..a1a858ad4d18 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 		pcc_data[pcc_ss_id]->refcount--;
 		if (!pcc_data[pcc_ss_id]->refcount) {
 			pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
-			pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
 			kfree(pcc_data[pcc_ss_id]);
+			pcc_data[pcc_ss_id] = NULL;
 		}
 	}
 }
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 8f9a28a870b0..8b0de8a3c647 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -403,7 +403,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
 	pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
 		p->flags, p->processor_PD, p->memory_PD);
 
-	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
 		target = find_mem_target(p->memory_PD);
 		if (!target) {
 			pr_debug("HMAT: Memory Domain missing from SRAT\n");
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 1413324982f0..14e68f202f81 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
 	nfit_device_lock(dev);
 	nd_desc = dev_get_drvdata(dev);
 	if (!nd_desc) {
-		device_unlock(dev);
+		nfit_device_unlock(dev);
 		return rc;
 	}
 	acpi_desc = to_acpi_desc(nd_desc);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 08da9c29f1e9..62114a03a51a 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb,
 				   unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	int cpu = policy->cpu;
 
 	if (event == CPUFREQ_CREATE_POLICY) {
-		acpi_thermal_cpufreq_init(cpu);
-		acpi_processor_ppc_init(cpu);
+		acpi_thermal_cpufreq_init(policy);
+		acpi_processor_ppc_init(policy);
 	} else if (event == CPUFREQ_REMOVE_POLICY) {
-		acpi_processor_ppc_exit(cpu);
-		acpi_thermal_cpufreq_exit(cpu);
+		acpi_processor_ppc_exit(policy);
+		acpi_thermal_cpufreq_exit(policy);
 	}
 
 	return 0;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2261713d1aec..5909e8fa4013 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 	pr->performance_platform_limit = (int)ppc;
 
 	if (ppc >= pr->performance->state_count ||
-	    unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+	    unlikely(!freq_qos_request_active(&pr->perflib_req)))
 		return 0;
 
-	ret = dev_pm_qos_update_request(&pr->perflib_req,
+	ret = freq_qos_update_request(&pr->perflib_req,
 			pr->performance->states[ppc].core_frequency * 1000);
 	if (ret < 0) {
 		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
@@ -157,26 +157,36 @@ void acpi_processor_ignore_ppc_init(void)
 		ignore_ppc = 0;
 }
 
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
-	struct acpi_processor *pr = per_cpu(processors, cpu);
-	int ret;
+	unsigned int cpu;
 
-	ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-				     &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
-				     INT_MAX);
-	if (ret < 0) {
-		pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-		       ret);
-		return;
+	for_each_cpu(cpu, policy->related_cpus) {
+		struct acpi_processor *pr = per_cpu(processors, cpu);
+		int ret;
+
+		if (!pr)
+			continue;
+
+		ret = freq_qos_add_request(&policy->constraints,
+					   &pr->perflib_req,
+					   FREQ_QOS_MAX, INT_MAX);
+		if (ret < 0)
+			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+			       cpu, ret);
 	}
 }
 
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_processor *pr = per_cpu(processors, cpu);
+	unsigned int cpu;
 
-	dev_pm_qos_remove_request(&pr->perflib_req);
+	for_each_cpu(cpu, policy->related_cpus) {
+		struct acpi_processor *pr = per_cpu(processors, cpu);
+
+		if (pr)
+			freq_qos_remove_request(&pr->perflib_req);
+	}
 }
 
 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index ec2638f1df4f..41feb88ee92d 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
 		pr = per_cpu(processors, i);
 
-		if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+		if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
 			continue;
 
 		policy = cpufreq_cpu_get(i);
@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
 		cpufreq_cpu_put(policy);
 
-		ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+		ret = freq_qos_update_request(&pr->thermal_req, max_freq);
 		if (ret < 0) {
 			pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
 				pr->id, ret);
@@ -125,26 +125,36 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 	return 0;
 }
 
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
-	struct acpi_processor *pr = per_cpu(processors, cpu);
-	int ret;
-
-	ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-				     &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
-				     INT_MAX);
-	if (ret < 0) {
-		pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-		       ret);
-		return;
+	unsigned int cpu;
+
+	for_each_cpu(cpu, policy->related_cpus) {
+		struct acpi_processor *pr = per_cpu(processors, cpu);
+		int ret;
+
+		if (!pr)
+			continue;
+
+		ret = freq_qos_add_request(&policy->constraints,
+					   &pr->thermal_req,
+					   FREQ_QOS_MAX, INT_MAX);
+		if (ret < 0)
+			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+			       cpu, ret);
 	}
 }
 
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_processor *pr = per_cpu(processors, cpu);
+	unsigned int cpu;
+
+	for_each_cpu(cpu, policy->related_cpus) {
+		struct acpi_processor *pr = per_cpu(processors, policy->cpu);
 
-	dev_pm_qos_remove_request(&pr->thermal_req);
+		if (pr)
+			freq_qos_remove_request(&pr->thermal_req);
+	}
 }
 
 #else /* ! CONFIG_CPU_FREQ */
 
 static int cpufreq_get_max_state(unsigned int cpu)
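The ACPI hunks above are part of this merge's switch from per-device PM QoS (`DEV_PM_QOS_MAX_FREQUENCY`) to per-policy frequency QoS. A minimal consumer sketch of the new pattern, assuming a long-lived request object (the `my_*` names are made up for illustration, not code from this merge):

```c
/* Hedged sketch: cap the max frequency of every CPU in a cpufreq policy
 * via the freq_qos API this series introduces. my_cap/my_cap_* are
 * hypothetical names; the freq_qos_* calls are the real API.
 */
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct freq_qos_request my_cap; /* must outlive the constraint */

static int my_cap_add(struct cpufreq_policy *policy, s32 max_khz)
{
	/* Requests now attach to policy->constraints, not a struct device. */
	return freq_qos_add_request(&policy->constraints, &my_cap,
				    FREQ_QOS_MAX, max_khz);
}

static void my_cap_update(s32 max_khz)
{
	if (freq_qos_request_active(&my_cap))
		freq_qos_update_request(&my_cap, max_khz);
}

static void my_cap_remove(void)
{
	if (freq_qos_request_active(&my_cap))
		freq_qos_remove_request(&my_cap);
}
```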
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9fa77d72ef27..2af937a8b1c5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -362,19 +362,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 		},
 	},
 	/*
-	 * https://bugzilla.kernel.org/show_bug.cgi?id=196907
-	 * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
-	 * S0 Idle firmware interface.
-	 */
-	{
-	.callback = init_default_s3,
-	.ident = "Dell XPS13 9360",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
-		},
-	},
-	/*
 	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
 	 * the Low Power S0 Idle firmware interface (see
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f39f075abff9..fe1523664816 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -409,9 +409,11 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
 	 */
 	rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
 	if (IS_ERR(rstc)) {
-		if (PTR_ERR(rstc) != -EPROBE_DEFER)
-			dev_err(&dev->dev, "Can't get amba reset!\n");
-		return PTR_ERR(rstc);
+		ret = PTR_ERR(rstc);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&dev->dev, "can't get reset: %d\n",
+				ret);
+		goto err_reset;
 	}
 	reset_control_deassert(rstc);
 	reset_control_put(rstc);
@@ -472,6 +474,12 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
 	release_resource(&dev->res);
 err_out:
 	return ret;
+
+ err_reset:
+	amba_put_disable_pclk(dev);
+	iounmap(tmp);
+	dev_pm_domain_detach(&dev->dev, true);
+	goto err_release;
 }
 
 /*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c0a491277aca..265d9dd46a5e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -57,6 +57,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/seq_file.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/pid_namespace.h>
 #include <linux/security.h>
@@ -66,6 +67,7 @@
 #include <linux/task_work.h>
 
 #include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
 
 #include <asm/cacheflush.h>
 
@@ -95,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
 #define SZ_1K 0x400
 #endif
 
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif
-
 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
 
 enum {
@@ -2876,7 +2874,7 @@ static void binder_transaction(struct binder_proc *proc,
 	e->target_handle = tr->target.handle;
 	e->data_size = tr->data_size;
 	e->offsets_size = tr->offsets_size;
-	e->context_name = proc->context->name;
+	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
 
 	if (reply) {
 		binder_inner_proc_lock(proc);
@@ -5175,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
-	if ((vma->vm_end - vma->vm_start) > SZ_4M)
-		vma->vm_end = vma->vm_start + SZ_4M;
-
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
 		     __func__, proc->pid, vma->vm_start, vma->vm_end,
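The binder change above stops storing a pointer to the context name in the transaction log and copies the string instead, so the log entry stays valid if the context goes away. An illustrative sketch of the pattern (the `log_entry`/`MY_NAME_LEN` names are invented for the example):

```c
/* Sketch, not binder code: keep an owned copy rather than a pointer to
 * avoid a use-after-free when the source string is freed later.
 */
#include <linux/string.h>

#define MY_NAME_LEN 255 /* hypothetical limit, analogous to BINDERFS_MAX_NAME */

struct log_entry {
	char name[MY_NAME_LEN + 1]; /* owned copy, always NUL-terminated */
};

static void log_entry_set_name(struct log_entry *e, const char *src)
{
	/* strscpy() truncates safely and NUL-terminates, unlike strncpy(). */
	strscpy(e->name, src, sizeof(e->name));
}
```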
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6d79a1b0d446..eb76a823fbb2 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -156,7 +157,7 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 }
 
 /**
- * binder_alloc_buffer_lookup() - get buffer given user ptr
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	alloc->buffer = (void __user *)vma->vm_start;
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+				   SZ_4M);
+	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
 			       GFP_KERNEL);
 	if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		failure_string = "alloc page array";
 		goto err_alloc_pages_failed;
 	}
-	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer) {
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index bd47f7f72075..ae991097d14d 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -130,7 +130,7 @@ struct binder_transaction_log_entry {
 	int return_error_line;
 	uint32_t return_error;
 	uint32_t return_error_param;
-	const char *context_name;
+	char context_name[BINDERFS_MAX_NAME + 1];
 };
 
 struct binder_transaction_log {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dd92faf197d5..05c2b32dcc4d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1600,7 +1600,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
 	 */
 	if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
 		return;
-	if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+	/* Skip applying the quirk on Denverton and beyond */
+	if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
 		return;
 
 	/*
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index e742780950de..8befce036af8 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -153,17 +153,13 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
 {
 	int rc, i;
 
-	if (hpriv->ahci_regulator) {
-		rc = regulator_enable(hpriv->ahci_regulator);
-		if (rc)
-			return rc;
-	}
+	rc = regulator_enable(hpriv->ahci_regulator);
+	if (rc)
+		return rc;
 
-	if (hpriv->phy_regulator) {
-		rc = regulator_enable(hpriv->phy_regulator);
-		if (rc)
-			goto disable_ahci_pwrs;
-	}
+	rc = regulator_enable(hpriv->phy_regulator);
+	if (rc)
+		goto disable_ahci_pwrs;
 
 	for (i = 0; i < hpriv->nports; i++) {
 		if (!hpriv->target_pwrs[i])
@@ -181,11 +177,9 @@ disable_target_pwrs:
 		if (hpriv->target_pwrs[i])
 			regulator_disable(hpriv->target_pwrs[i]);
 
-	if (hpriv->phy_regulator)
-		regulator_disable(hpriv->phy_regulator);
+	regulator_disable(hpriv->phy_regulator);
 disable_ahci_pwrs:
-	if (hpriv->ahci_regulator)
-		regulator_disable(hpriv->ahci_regulator);
+	regulator_disable(hpriv->ahci_regulator);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -207,10 +201,8 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
 		regulator_disable(hpriv->target_pwrs[i]);
 	}
 
-	if (hpriv->ahci_regulator)
-		regulator_disable(hpriv->ahci_regulator);
-	if (hpriv->phy_regulator)
-		regulator_disable(hpriv->phy_regulator);
+	regulator_disable(hpriv->ahci_regulator);
+	regulator_disable(hpriv->phy_regulator);
 }
 EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
 
 /**
@@ -359,7 +351,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
 	struct regulator *target_pwr;
 	int rc = 0;
 
-	target_pwr = regulator_get_optional(dev, "target");
+	target_pwr = regulator_get(dev, "target");
 
 	if (!IS_ERR(target_pwr))
 		hpriv->target_pwrs[port] = target_pwr;
@@ -436,16 +428,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
 		hpriv->clks[i] = clk;
 	}
 
-	hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+	hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
 	if (IS_ERR(hpriv->ahci_regulator)) {
 		rc = PTR_ERR(hpriv->ahci_regulator);
-		if (rc == -EPROBE_DEFER)
+		if (rc != 0)
 			goto err_out;
-		rc = 0;
-		hpriv->ahci_regulator = NULL;
 	}
 
-	hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+	hpriv->phy_regulator = devm_regulator_get(dev, "phy");
 	if (IS_ERR(hpriv->phy_regulator)) {
 		rc = PTR_ERR(hpriv->phy_regulator);
 		if (rc == -EPROBE_DEFER)
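The libahci change above works because plain `regulator_get()`/`devm_regulator_get()` hand back a dummy regulator when the firmware describes no supply, which is why all the NULL checks can be dropped. A hedged sketch of the pattern (the `"vdd"` supply name and driver context are assumptions):

```c
/* Sketch of the "dummy regulator" pattern libahci now relies on. */
#include <linux/regulator/consumer.h>

static int my_probe_power(struct device *dev)
{
	struct regulator *vdd;

	vdd = devm_regulator_get(dev, "vdd"); /* never NULL; dummy if absent */
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);          /* e.g. -EPROBE_DEFER */

	/* Enabling the dummy supply is a harmless no-op, so no NULL check. */
	return regulator_enable(vdd);
}
```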
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 76d0f9de767b..58e09ffe8b9c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4791,27 +4791,6 @@ void ata_scsi_hotplug(struct work_struct *work)
 		return;
 	}
 
-	/*
-	 * XXX - UGLY HACK
-	 *
-	 * The block layer suspend/resume path is fundamentally broken due
-	 * to freezable kthreads and workqueue and may deadlock if a block
-	 * device gets removed while resume is in progress.  I don't know
-	 * what the solution is short of removing freezable kthreads and
-	 * workqueues altogether.
-	 *
-	 * The following is an ugly hack to avoid kicking off device
-	 * removal while freezer is active.  This is a joke but does avoid
-	 * this particular deadlock scenario.
-	 *
-	 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
-	 * http://marc.info/?l=linux-kernel&m=138695698516487
-	 */
-#ifdef CONFIG_FREEZER
-	while (pm_freezing)
-		msleep(10);
-#endif
-
 	DPRINTK("ENTER\n");
 	mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2db62d98e395..7bd9cd366d41 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,6 +9,7 @@
 */
 
 #include <linux/acpi.h>
+#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fwnode.h>
@@ -3179,6 +3180,8 @@ void device_shutdown(void)
 	wait_for_device_probe();
 	device_block_probing();
 
+	cpufreq_suspend();
+
 	spin_lock(&devices_kset->list_lock);
 	/*
	 * Walk the devices list backward, shutting down each in turn.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 6bea4f3f8040..55907c27075b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
 	pfn >>= PAGE_SHIFT;
 	if (!pfn_valid(pfn))
 		return -ENXIO;
+	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+	if (!pfn_to_online_page(pfn))
+		return -EIO;
 	ret = soft_offline_page(pfn_to_page(pfn), 0);
 	return ret == 0 ? count : ret;
 }
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b6c6c7d97d5b..b230beb6ccb4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -241,12 +241,8 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
 }
 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 
-/**
- * platform_get_irq_byname - get an IRQ for a device by name
- * @dev: platform device
- * @name: IRQ name
- */
-int platform_get_irq_byname(struct platform_device *dev, const char *name)
+static int __platform_get_irq_byname(struct platform_device *dev,
+				     const char *name)
 {
 	struct resource *r;
 
@@ -262,12 +258,48 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
 	if (r)
 		return r->start;
 
-	dev_err(&dev->dev, "IRQ %s not found\n", name);
 	return -ENXIO;
 }
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an IRQ like platform_get_irq(), but then by name rather then by index.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+	int ret;
+
+	ret = __platform_get_irq_byname(dev, name);
+	if (ret < 0 && ret != -EPROBE_DEFER)
+		dev_err(&dev->dev, "IRQ %s not found\n", name);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
 
 /**
+ * platform_get_irq_byname_optional - get an optional IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
+ * does not print an error message if an IRQ can not be obtained.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname_optional(struct platform_device *dev,
+				     const char *name)
+{
+	return __platform_get_irq_byname(dev, name);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
+
+/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
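With the split above, drivers that can genuinely run without a named interrupt can use the new `_optional` variant and avoid a spurious error in the log. A short usage sketch (the `"wakeup"` IRQ name and `my_probe` are assumptions for the example):

```c
/* Sketch of a driver consuming platform_get_irq_byname_optional(). */
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq_byname_optional(pdev, "wakeup");
	if (irq < 0 && irq != -ENXIO)
		return irq;	/* real failure, e.g. -EPROBE_DEFER */
	if (irq > 0)
		dev_info(&pdev->dev, "optional wakeup IRQ %d\n", irq);

	return 0;	/* -ENXIO just means the IRQ is not wired up */
}
```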
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 6c90fd7e2ff8..350dcafd751f 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	switch (type) {
-	case DEV_PM_QOS_RESUME_LATENCY:
+	if (type == DEV_PM_QOS_RESUME_LATENCY) {
 		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
 			: pm_qos_read_value(&qos->resume_latency);
-		break;
-	case DEV_PM_QOS_MIN_FREQUENCY:
-		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
-			: pm_qos_read_value(&qos->min_frequency);
-		break;
-	case DEV_PM_QOS_MAX_FREQUENCY:
-		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
-			: pm_qos_read_value(&qos->max_frequency);
-		break;
-	default:
+	} else {
 		WARN_ON(1);
 		ret = 0;
 	}
@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 			req->dev->power.set_latency_tolerance(req->dev, value);
 		}
 		break;
-	case DEV_PM_QOS_MIN_FREQUENCY:
-		ret = pm_qos_update_target(&qos->min_frequency,
-					   &req->data.pnode, action, value);
-		break;
-	case DEV_PM_QOS_MAX_FREQUENCY:
-		ret = pm_qos_update_target(&qos->max_frequency,
-					   &req->data.pnode, action, value);
-		break;
 	case DEV_PM_QOS_FLAGS:
 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
 					  action, value);
@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
 	c->type = PM_QOS_MIN;
 
-	c = &qos->min_frequency;
-	plist_head_init(&c->list);
-	c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-	c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-	c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-	c->type = PM_QOS_MAX;
-	c->notifiers = ++n;
-	BLOCKING_INIT_NOTIFIER_HEAD(n);
-
-	c = &qos->max_frequency;
-	plist_head_init(&c->list);
-	c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-	c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-	c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-	c->type = PM_QOS_MIN;
-	c->notifiers = ++n;
-	BLOCKING_INIT_NOTIFIER_HEAD(n);
-
 	INIT_LIST_HEAD(&qos->flags.list);
 
 	spin_lock_irq(&dev->power.lock);
@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		memset(req, 0, sizeof(*req));
 	}
 
-	c = &qos->min_frequency;
-	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-		apply_constraint(req, PM_QOS_REMOVE_REQ,
-				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	}
-
-	c = &qos->max_frequency;
-	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-		apply_constraint(req, PM_QOS_REMOVE_REQ,
-				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	}
-
 	f = &qos->flags;
 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
-	case DEV_PM_QOS_MIN_FREQUENCY:
-	case DEV_PM_QOS_MAX_FREQUENCY:
 		curr_value = req->data.pnode.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
 		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
 						       notifier);
 		break;
-	case DEV_PM_QOS_MIN_FREQUENCY:
-		ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
-						       notifier);
-		break;
-	case DEV_PM_QOS_MAX_FREQUENCY:
-		ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
-						       notifier);
-		break;
 	default:
 		WARN_ON(1);
 		ret = -EINVAL;
@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
 							 notifier);
 		break;
-	case DEV_PM_QOS_MIN_FREQUENCY:
-		ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
-							 notifier);
-		break;
-	case DEV_PM_QOS_MAX_FREQUENCY:
-		ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
-							 notifier);
-		break;
 	default:
 		WARN_ON(1);
 		ret = -EINVAL;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b248763a672..a18155cdce41 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
 
 	if (nc->tentative && connection->agreed_pro_version < 92) {
 		rcu_read_unlock();
-		mutex_unlock(&sock->mutex);
 		drbd_err(connection, "--dry-run is not supported by peer");
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac07e8c94c79..a94ee45440b3 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd)
 	if (refcount_dec_and_mutex_lock(&nbd->refs,
 					&nbd_index_mutex)) {
 		idr_remove(&nbd_index_idr, nbd->index);
-		mutex_unlock(&nbd_index_mutex);
 		nbd_dev_remove(nbd);
+		mutex_unlock(&nbd_index_mutex);
 	}
 }
 
@@ -385,17 +385,16 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	struct nbd_device *nbd = cmd->nbd;
 	struct nbd_config *config;
 
+	if (!mutex_trylock(&cmd->lock))
+		return BLK_EH_RESET_TIMER;
+
 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
 		cmd->status = BLK_STS_TIMEOUT;
+		mutex_unlock(&cmd->lock);
 		goto done;
 	}
 	config = nbd->config;
 
-	if (!mutex_trylock(&cmd->lock)) {
-		nbd_config_put(nbd);
-		return BLK_EH_RESET_TIMER;
-	}
-
 	if (config->num_connections > 1) {
 		dev_err_ratelimited(nbd_to_dev(nbd),
 				    "Connection timed out, retrying (%d/%d alive)\n",
@@ -711,6 +710,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 		ret = -ENOENT;
 		goto out;
 	}
+	if (cmd->status != BLK_STS_OK) {
+		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+			req);
+		ret = -ENOENT;
+		goto out;
+	}
 	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
 		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
 			req);
@@ -792,7 +797,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+	mutex_lock(&cmd->lock);
 	cmd->status = BLK_STS_IOERR;
+	mutex_unlock(&cmd->lock);
+
 	blk_mq_complete_request(req);
 	return true;
 }
@@ -972,6 +980,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+				     int *err)
+{
+	struct socket *sock;
+
+	*err = 0;
+	sock = sockfd_lookup(fd, err);
+	if (!sock)
+		return NULL;
+
+	if (sock->ops->shutdown == sock_no_shutdown) {
+		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+		*err = -EINVAL;
+		return NULL;
+	}
+
+	return sock;
+}
+
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
 			  bool netlink)
 {
@@ -981,7 +1008,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
 	struct nbd_sock *nsock;
 	int err;
 
-	sock = sockfd_lookup(arg, &err);
+	sock = nbd_get_socket(nbd, arg, &err);
 	if (!sock)
 		return err;
 
@@ -1033,7 +1060,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 	int i;
 	int err;
 
-	sock = sockfd_lookup(arg, &err);
+	sock = nbd_get_socket(nbd, arg, &err);
 	if (!sock)
 		return err;
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index eabc116832a7..3d7fdea872f8 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 		zone->wp = zone->start;
 		break;
 	default:
-		cmd->error = BLK_STS_NOTSUPP;
-		break;
+		return BLK_STS_NOTSUPP;
 	}
 	return BLK_STS_OK;
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7c4350c0fb77..39136675dae5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -6639,10 +6639,13 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
 	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
 	ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
 			   ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
-	if (ret > 0)
+	if (ret > 0) {
 		ret = rbd_dev->acquire_err;
-	else if (!ret)
-		ret = -ETIMEDOUT;
+	} else {
+		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+		if (!ret)
+			ret = -ETIMEDOUT;
+	}
 
 	if (ret) {
 		rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d58a359a6622..4285e75e52c3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
 static ssize_t backing_dev_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
+	struct file *file;
 	struct zram *zram = dev_to_zram(dev);
-	struct file *file = zram->backing_dev;
 	char *p;
 	ssize_t ret;
 
 	down_read(&zram->init_lock);
-	if (!zram->backing_dev) {
+	file = zram->backing_dev;
+	if (!file) {
 		memcpy(buf, "none\n", 5);
 		up_read(&zram->init_lock);
 		return 5;
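The `nbd_get_socket()` helper above rejects sockets whose protocol provides no real shutdown callback, because nbd relies on shutting a socket down to unblock stalled I/O. An illustrative standalone sketch of the same check (not the merged code; note it also drops the socket reference on the error path, which the hunk above leaves to the caller):

```c
/* Sketch: refuse sockets that cannot be shut down from kernel context. */
#include <linux/net.h>
#include <net/sock.h>

static struct socket *my_get_socket(unsigned long fd, int *err)
{
	struct socket *sock = sockfd_lookup(fd, err);

	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		/* e.g. sockets with a stub shutdown op */
		*err = -EINVAL;
		sockfd_put(sock); /* drop the reference taken above */
		return NULL;
	}

	return sock;
}
```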
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index ad50efb470aa..2b6670daf7fc 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -74,6 +74,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
 * @clk_disable_quirk: module specific clock disable quirk
 * @reset_done_quirk: module specific reset done quirk
 * @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
 */
 struct sysc {
 	struct device *dev;
@@ -100,6 +101,7 @@ struct sysc {
 	void (*clk_disable_quirk)(struct sysc *sysc);
 	void (*reset_done_quirk)(struct sysc *sysc);
 	void (*module_enable_quirk)(struct sysc *sysc);
+	void (*module_disable_quirk)(struct sysc *sysc);
 };
 
 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -959,6 +961,9 @@ static int sysc_disable_module(struct device *dev)
 	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
 		return 0;
 
+	if (ddata->module_disable_quirk)
+		ddata->module_disable_quirk(ddata);
+
 	regbits = ddata->cap->regbits;
 
 	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
@@ -1248,6 +1253,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 		   SYSC_MODULE_QUIRK_SGX),
 	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
 		   SYSC_MODULE_QUIRK_WDT),
+	/* Watchdog on am3 and am4 */
+	SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+		   SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
 
 #ifdef DEBUG
 	SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -1440,14 +1448,14 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
 			  !(val & 0x10), 100,
 			  MAX_MODULE_SOFTRESET_WAIT);
 	if (error)
-		dev_warn(ddata->dev, "wdt disable spr failed\n");
+		dev_warn(ddata->dev, "wdt disable step1 failed\n");
 
-	sysc_write(ddata, wps, 0x5555);
+	sysc_write(ddata, spr, 0x5555);
 	error = readl_poll_timeout(ddata->module_va + wps, val,
 				   !(val & 0x10), 100,
 				   MAX_MODULE_SOFTRESET_WAIT);
 	if (error)
-		dev_warn(ddata->dev, "wdt disable wps failed\n");
+		dev_warn(ddata->dev, "wdt disable step2 failed\n");
 }
 
 static void sysc_init_module_quirks(struct sysc *ddata)
@@ -1471,8 +1479,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
 	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
 		ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
 
-	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
 		ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+		ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+	}
 }
 
 static int sysc_clockdomain_init(struct sysc *ddata)
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 87083b3a2769..37c22667e831 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -297,7 +297,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
 		regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
 		if (mcfr & AT91_PMC_MAINRDY)
 			return 0;
-		usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+		if (system_state < SYSTEM_RUNNING)
+			udelay(MAINF_LOOP_MIN_WAIT);
+		else
+			usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
 	} while (time_before(prep_time, timeout));
 
 	return -ETIMEDOUT;
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 9790ddfa5b3c..86238d5ecb4d 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -43,6 +43,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
 };
 
 static const struct clk_programmable_layout sam9x60_programmable_layout = {
+	.pres_mask = 0xff,
 	.pres_shift = 8,
 	.css_mask = 0x1f,
 	.have_slck_mck = 0,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 9bfe9a28294a..fac0ca56d42d 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -76,7 +76,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)
 
 	writel(tmp | osc->bits->cr_osc32en, sckcr);
 
-	usleep_range(osc->startup_usec, osc->startup_usec + 1);
+	if (system_state < SYSTEM_RUNNING)
+		udelay(osc->startup_usec);
+	else
+		usleep_range(osc->startup_usec, osc->startup_usec + 1);
 
 	return 0;
 }
@@ -187,7 +190,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
 
 	writel(readl(sckcr) | osc->bits->cr_rcen, sckcr);
 
-	usleep_range(osc->startup_usec, osc->startup_usec + 1);
+	if (system_state < SYSTEM_RUNNING)
+		udelay(osc->startup_usec);
+	else
+		usleep_range(osc->startup_usec, osc->startup_usec + 1);
 
 	return 0;
 }
@@ -288,7 +294,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
 
 	writel(tmp, sckcr);
 
-	usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+	if (system_state < SYSTEM_RUNNING)
+		udelay(SLOWCK_SW_TIME_USEC);
+	else
+		usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
 
 	return 0;
 }
@@ -533,7 +542,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
 		return 0;
 	}
 
-	usleep_range(osc->startup_usec, osc->startup_usec + 1);
+	if (system_state < SYSTEM_RUNNING)
+		udelay(osc->startup_usec);
+	else
+		usleep_range(osc->startup_usec, osc->startup_usec + 1);
 
 	osc->prepared = true;
 
 	return 0;
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 1c1bb39bb04e..b1318e6b655b 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -266,10 +266,11 @@ static int aspeed_g6_clk_enable(struct clk_hw *hw)
 
 	/* Enable clock */
 	if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
-		regmap_write(gate->map, get_clock_reg(gate), clk);
-	} else {
-		/* Use set to clear register */
+		/* Clock is clear to enable, so use set to clear register */
 		regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
+	} else {
+		/* Clock is set to enable, so use write to set register */
+		regmap_write(gate->map, get_clock_reg(gate), clk);
 	}
 
 	if (gate->reset_idx >= 0) {
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 067ab876911d..172589e94f60 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -638,7 +638,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
 					   clks[IMX8MM_CLK_A53_DIV],
 					   clks[IMX8MM_CLK_A53_SRC],
 					   clks[IMX8MM_ARM_PLL_OUT],
-					   clks[IMX8MM_CLK_24M]);
+					   clks[IMX8MM_SYS_PLL1_800M]);
 
 	imx_check_clocks(clks, ARRAY_SIZE(clks));
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 47a4b44ba3cb..58b5acee3830 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -610,7 +610,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
 					   clks[IMX8MN_CLK_A53_DIV],
 					   clks[IMX8MN_CLK_A53_SRC],
 					   clks[IMX8MN_ARM_PLL_OUT],
-					   clks[IMX8MN_CLK_24M]);
+					   clks[IMX8MN_SYS_PLL1_800M]);
 
 	imx_check_clocks(clks, ARRAY_SIZE(clks));
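The at91 hunks above all apply the same fix: `usleep_range()` may sleep, which is not allowed before the scheduler is up, so early boot falls back to a busy-wait `udelay()`. A minimal sketch of the pattern, with a made-up helper name:

```c
/* Sketch of the system_state-aware delay used by the at91 clk fixes. */
#include <linux/delay.h>
#include <linux/kernel.h>

static void my_clk_startup_delay(unsigned int usec)
{
	if (system_state < SYSTEM_RUNNING)
		udelay(usec);			/* atomic-safe busy wait */
	else
		usleep_range(usec, usec + 1);	/* can sleep: be scheduler-friendly */
}
```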
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ea4c791f106d..b3af61cc6fb9 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -343,6 +343,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
 		.offset = HHI_SYS_CPU_CLK_CNTL0,
 		.mask = 0x3,
 		.shift = 0,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_clk_dyn0_sel",
@@ -353,8 +354,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
 			{ .hw = &g12a_fclk_div3.hw },
 		},
 		.num_parents = 3,
-		/* This sub-tree is used a parking clock */
-		.flags = CLK_SET_RATE_NO_REPARENT,
+		.flags = CLK_SET_RATE_PARENT,
 	},
 };
 
@@ -410,6 +410,7 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
 		.offset = HHI_SYS_CPU_CLK_CNTL0,
 		.mask = 0x1,
 		.shift = 2,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_clk_dyn0",
@@ -466,6 +467,7 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
 		.offset = HHI_SYS_CPU_CLK_CNTL0,
 		.mask = 0x1,
 		.shift = 10,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_clk_dyn",
@@ -485,6 +487,7 @@ static struct clk_regmap g12a_cpu_clk = {
 		.offset = HHI_SYS_CPU_CLK_CNTL0,
 		.mask = 0x1,
 		.shift = 11,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_clk",
@@ -504,6 +507,7 @@ static struct clk_regmap g12b_cpu_clk = {
 		.offset = HHI_SYS_CPU_CLK_CNTL0,
 		.mask = 0x1,
 		.shift = 11,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpu_clk",
@@ -523,6 +527,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
 		.offset = HHI_SYS_CPUB_CLK_CNTL,
 		.mask = 0x3,
 		.shift = 0,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpub_clk_dyn0_sel",
@@ -533,6 +538,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
 			{ .hw = &g12a_fclk_div3.hw },
 		},
 		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
 	},
 };
 
@@ -567,6 +573,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
 		.offset = HHI_SYS_CPUB_CLK_CNTL,
 		.mask = 0x1,
 		.shift = 2,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpub_clk_dyn0",
@@ -644,6 +651,7 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
 		.offset = HHI_SYS_CPUB_CLK_CNTL,
 		.mask = 0x1,
 		.shift = 10,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpub_clk_dyn",
@@ -663,6 +671,7 @@ static struct clk_regmap g12b_cpub_clk = {
 		.offset = HHI_SYS_CPUB_CLK_CNTL,
 		.mask = 0x1,
 		.shift = 11,
+		.flags = CLK_MUX_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "cpub_clk",
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 7cfb998eeb3e..1f9c056e684c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -935,6 +935,7 @@ static struct clk_regmap gxbb_sar_adc_clk_div = {
 			&gxbb_sar_adc_clk_sel.hw
 		},
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 	},
 };
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 7670cc596c74..31466cd1842f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -165,12 +165,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
 	GATE_BUS_CPU,
 	GATE_SCLK_CPU,
 	CLKOUT_CMU_CPU,
+	CPLL_CON0,
+	DPLL_CON0,
 	EPLL_CON0,
 	EPLL_CON1,
 	EPLL_CON2,
 	RPLL_CON0,
 	RPLL_CON1,
 	RPLL_CON2,
+	IPLL_CON0,
+	SPLL_CON0,
+	VPLL_CON0,
+	MPLL_CON0,
 	SRC_TOP0,
 	SRC_TOP1,
 	SRC_TOP2,
@@ -1172,8 +1178,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
 			GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
 
-	GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
-
 	/* CDREX */
 	GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex",
 			GATE_BUS_CDREX0, 0, 0, 0),
@@ -1248,6 +1252,15 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
 	{ DIV2_RATIO0, 0, 0x30 },	/* DIV dout_gscl_blk_300 */
 };
 
+static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
+	GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
+	{ GATE_IP_G3D, 0x3ff, 0x3ff },	/* G3D gates */
+	{ SRC_TOP5, 0, BIT(16) },	/* MUX mout_user_aclk_g3d */
+};
+
 static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
 	DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
 };
@@ -1320,6 +1333,14 @@ static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
 	.pd_name	= "GSC",
 };
 
+static const struct exynos5_subcmu_info exynos5x_g3d_subcmu = {
+	.gate_clks	= exynos5x_g3d_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_g3d_gate_clks),
+	.suspend_regs	= exynos5x_g3d_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_g3d_suspend_regs),
+	.pd_name	= "G3D",
+};
+
 static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
 	.div_clks	= exynos5x_mfc_div_clks,
 	.nr_div_clks	= ARRAY_SIZE(exynos5x_mfc_div_clks),
@@ -1351,6 +1372,7 @@ static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
 static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
 	&exynos5x_disp_subcmu,
 	&exynos5x_gsc_subcmu,
+	&exynos5x_g3d_subcmu,
 	&exynos5x_mfc_subcmu,
 	&exynos5x_mscl_subcmu,
 };
@@ -1358,6 +1380,7 @@ static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
 static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
 	&exynos5x_disp_subcmu,
 	&exynos5x_gsc_subcmu,
+	&exynos5x_g3d_subcmu,
 	&exynos5x_mfc_subcmu,
 	&exynos5x_mscl_subcmu,
 	&exynos5800_mau_subcmu,
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 7824c2ba3d8e..4b1aa9382ad2 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -13,6 +13,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/slab.h>
 
 #include <dt-bindings/clock/exynos5433.h>
 
@@ -5584,6 +5585,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
 
 	data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
 						    info->nr_clk_regs);
+	if (!data->clk_save)
+		return -ENOMEM;
 	data->nr_clk_save = info->nr_clk_regs;
 	data->clk_suspend = info->suspend_regs;
 	data->nr_clk_suspend = info->nr_suspend_regs;
@@ -5592,12 +5595,19 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
 	if (data->nr_pclks > 0) {
 		data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
 					   data->nr_pclks, GFP_KERNEL);
-
+		if (!data->pclks) {
+			kfree(data->clk_save);
+			return -ENOMEM;
+		}
 		for (i = 0; i < data->nr_pclks; i++) {
 			struct clk *clk = of_clk_get(dev->of_node, i);
 
-			if (IS_ERR(clk))
+			if (IS_ERR(clk)) {
+				kfree(data->clk_save);
+				while (--i >= 0)
+					clk_put(data->pclks[i]);
 				return PTR_ERR(clk);
+			}
 			data->pclks[i] = clk;
 		}
 	}
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index dcac1391767f..ef29582676f6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1224,7 +1224,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
 
 	/* Enforce d1 = 0, d2 = 0 for Audio PLL */
 	val = readl(reg + SUN9I_A80_PLL_AUDIO_REG);
-	val &= (BIT(16) & BIT(18));
+	val &= ~(BIT(16) | BIT(18));
 	writel(val, reg + SUN9I_A80_PLL_AUDIO_REG);
 
 	/* Enforce P = 1 for both CPU cluster PLLs */
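The sun9i-a80 one-liner above is worth spelling out: `BIT(16) & BIT(18)` is `0`, so the old code zeroed the whole register instead of clearing two bits. A standalone sketch of the corrected bit arithmetic:

```c
/* Sketch: clearing two bits needs the complement of their OR. */
#include <linux/bits.h>
#include <linux/types.h>

static inline u32 clear_d1_d2(u32 val)
{
	/* wrong: val & (BIT(16) & BIT(18))  ==  val & 0  ==  0        */
	/* right: keep everything except bits 16 and 18               */
	return val & ~(BIT(16) | BIT(18));
}
```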
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index d3a43381a792..27201fd26e44 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1080,8 +1080,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
 						 rate_hw, rate_ops,
 						 gate_hw, &clk_gate_ops,
 						 clkflags |
-						 data->div[i].critical ?
-						 CLK_IS_CRITICAL : 0);
+						 (data->div[i].critical ?
+						 CLK_IS_CRITICAL : 0));
 
 		WARN_ON(IS_ERR(clk_data->clks[i]));
 	}
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index a01ca9395179..f65e16c4f3c4 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
 	struct clk_init_data init = { NULL };
 	const char **parent_names = NULL;
 	struct clk *clk;
-	int ret;
 
 	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
 	if (!clk_hw) {
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
 	clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
 
 	if (!IS_ERR(clk)) {
-		ret = ti_clk_add_alias(NULL, clk, node->name);
-		if (ret) {
-			clk_unregister(clk);
-			goto cleanup;
-		}
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 		kfree(parent_names);
 		return;
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 975995eea15c..b0c0690a5a12 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -100,11 +100,12 @@ static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
 	 * can be from a timer that requires pm_runtime access, which
 	 * will eventually bring us here with timekeeping_suspended,
 	 * during both suspend entry and resume paths. This happens
-	 * at least on am43xx platform.
+	 * at least on am43xx platform. Account for flakeyness
+	 * with udelay() by multiplying the timeout value by 2.
 	 */
 	if (unlikely(_early_timeout || timekeeping_suspended)) {
 		if (time->cycles++ < timeout) {
-			udelay(1);
+			udelay(1 * 2);
 			return false;
 		}
 	} else {
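The clk-sunxi.c fix above is a classic precedence bug: `?:` binds more loosely than `|`, so without parentheses the whole `clkflags | critical` expression became the ternary condition and the flags were lost. A standalone sketch (function name invented for the example):

```c
/* Sketch of the precedence pitfall fixed in clk-sunxi.c. */
#include <linux/clk-provider.h>

static unsigned long combine_flags(unsigned long clkflags, bool critical)
{
	/* buggy:  clkflags | critical ? CLK_IS_CRITICAL : 0
	 * parses: (clkflags | critical) ? CLK_IS_CRITICAL : 0
	 */
	return clkflags | (critical ? CLK_IS_CRITICAL : 0);
}
```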
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 354b27d14a19..62812f80b5cc 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -328,12 +328,13 @@ static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
 	return 0;
 }
 
+static const unsigned int sh_mtu2_channel_offsets[] = {
+	0x300, 0x380, 0x000,
+};
+
 static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
 				 struct sh_mtu2_device *mtu)
 {
-	static const unsigned int channel_offsets[] = {
-		0x300, 0x380, 0x000,
-	};
 	char name[6];
 	int irq;
 	int ret;
@@ -356,7 +357,7 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
 		return ret;
 	}
 
-	ch->base = mtu->mapbase + channel_offsets[index];
+	ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
 	ch->index = index;
 
 	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
@@ -408,7 +409,12 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
 	}
 
 	/* Allocate and setup the channels. */
-	mtu->num_channels = 3;
+	ret = platform_irq_count(pdev);
+	if (ret < 0)
+		goto err_unmap;
+
+	mtu->num_channels = min_t(unsigned int, ret,
+				  ARRAY_SIZE(sh_mtu2_channel_offsets));
 
 	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
 				GFP_KERNEL);
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index a562f491b0f8..9318edcd8963 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -268,15 +268,12 @@ static int __init mtk_syst_init(struct device_node *node)
 
 	ret = timer_of_init(node, &to);
 	if (ret)
-		goto err;
+		return ret;
 
 	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
 					TIMER_SYNC_TICKS, 0xffffffff);
 
 	return 0;
-err:
-	timer_of_cleanup(&to);
-	return ret;
 }
 
 static int __init mtk_gpt_init(struct device_node *node)
@@ -293,7 +290,7 @@ static int __init mtk_gpt_init(struct device_node *node)
 
 	ret = timer_of_init(node, &to);
 	if (ret)
-		goto err;
+		return ret;
 
 	/* Configure clock source */
 	mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
@@ -311,9 +308,6 @@ static int __init mtk_gpt_init(struct device_node *node)
 	mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
 
 	return 0;
-err:
-	timer_of_cleanup(&to);
-	return ret;
 }
TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
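Instead of hard-coding three channels, sh_mtu2 above now sizes the channel array from the interrupts actually described by the platform. A hedged sketch of the same idea (only the `platform_irq_count()`/`min_t()` calls are the real API; the rest is invented):

```c
/* Sketch: derive channel count from wired-up IRQs, capped by hardware. */
#include <linux/kernel.h>
#include <linux/platform_device.h>

#define MY_MAX_CHANNELS 3 /* hypothetical hardware limit */

static int my_count_channels(struct platform_device *pdev)
{
	int nirq = platform_irq_count(pdev); /* may return -EPROBE_DEFER */

	if (nirq < 0)
		return nirq;

	/* Never instantiate more channels than the hardware provides. */
	return min_t(unsigned int, nirq, MY_MAX_CHANNELS);
}
```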
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c52d6fa32aac..48a224a6b178 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -720,7 +720,7 @@ static ssize_t store_##file_name					\
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
-	ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+	ret = freq_qos_update_request(policy->object##_freq_req, val);\
 	return ret >= 0 ? count : ret;					\
 }
 
@@ -1202,19 +1202,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 		goto err_free_real_cpus;
 	}
 
+	freq_constraints_init(&policy->constraints);
+
 	policy->nb_min.notifier_call = cpufreq_notifier_min;
 	policy->nb_max.notifier_call = cpufreq_notifier_max;
 
-	ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
-				      DEV_PM_QOS_MIN_FREQUENCY);
+	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+				    &policy->nb_min);
 	if (ret) {
 		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
 			ret, cpumask_pr_args(policy->cpus));
 		goto err_kobj_remove;
 	}
 
-	ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
-				      DEV_PM_QOS_MAX_FREQUENCY);
+	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+				    &policy->nb_max);
 	if (ret) {
 		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
 			ret, cpumask_pr_args(policy->cpus));
@@ -1232,8 +1234,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	return policy;
 
 err_min_qos_notifier:
-	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-				   DEV_PM_QOS_MIN_FREQUENCY);
+	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+				 &policy->nb_min);
 err_kobj_remove:
 	cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
@@ -1250,7 +1252,6 @@ err_free_policy:
 
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
-	struct device *dev = get_cpu_device(policy->cpu);
 	unsigned long flags;
 	int cpu;
 
@@ -1262,10 +1263,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	dev_pm_qos_remove_notifier(dev, &policy->nb_max,
-				   DEV_PM_QOS_MAX_FREQUENCY);
-	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-				   DEV_PM_QOS_MIN_FREQUENCY);
+	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+				 &policy->nb_max);
+	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+				 &policy->nb_min);
+
+	/* Cancel any pending policy->update work before freeing the policy. */
+	cancel_work_sync(&policy->update);
 
 	if (policy->max_freq_req) {
 		/*
@@ -1274,10 +1278,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 		 */
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 					     CPUFREQ_REMOVE_POLICY, policy);
-		dev_pm_qos_remove_request(policy->max_freq_req);
+		freq_qos_remove_request(policy->max_freq_req);
 	}
 
-	dev_pm_qos_remove_request(policy->min_freq_req);
+	freq_qos_remove_request(policy->min_freq_req);
 	kfree(policy->min_freq_req);
 
 	cpufreq_policy_put_kobj(policy);
@@ -1357,8 +1361,6 @@ static int cpufreq_online(unsigned int cpu)
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
 	if (new_policy) {
-		struct device *dev = get_cpu_device(cpu);
-
 		for_each_cpu(j, policy->related_cpus) {
 			per_cpu(cpufreq_cpu_data, j) = policy;
 			add_cpu_dev_symlink(policy, j);
@@ -1369,36 +1371,31 @@ static int cpufreq_online(unsigned int cpu)
 		if (!policy->min_freq_req)
 			goto out_destroy_policy;
 
-		ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
-					     DEV_PM_QOS_MIN_FREQUENCY,
-					     policy->min);
+		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   policy->min);
 		if (ret < 0) {
 			/*
-			 * So we don't call dev_pm_qos_remove_request() for an
+			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
 			kfree(policy->min_freq_req);
 			policy->min_freq_req = NULL;
-
-			dev_err(dev, "Failed to add min-freq constraint (%d)\n",
-				ret);
 			goto out_destroy_policy;
 		}
 
 		/*
		 * This must be initialized right here to avoid calling
-		 * dev_pm_qos_remove_request() on uninitialized request in case
+		 * freq_qos_remove_request() on uninitialized request in case
		 * of errors.
		 */
 		policy->max_freq_req = policy->min_freq_req + 1;
 
-		ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
-					     DEV_PM_QOS_MAX_FREQUENCY,
-					     policy->max);
+		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   policy->max);
 		if (ret < 0) {
 			policy->max_freq_req = NULL;
-			dev_err(dev, "Failed to add max-freq constraint (%d)\n",
-				ret);
 			goto out_destroy_policy;
 		}
 
@@ -2374,7 +2371,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
 		       struct cpufreq_policy *new_policy)
 {
 	struct cpufreq_governor *old_gov;
-	struct device *cpu_dev = get_cpu_device(policy->cpu);
 	int ret;
 
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2386,8 +2382,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
	 * PM QoS framework collects all the requests from users and provide us
	 * the final aggregated value here.
	 */
-	new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
-	new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+	new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+	new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
 
 	/* verify the cpu speed can be set within this limit */
 	ret = cpufreq_driver->verify(new_policy);
@@ -2518,7 +2514,7 @@ static int cpufreq_boost_set_sw(int state)
 			break;
 		}
 
-		ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+		ret = freq_qos_update_request(policy->max_freq_req, policy->max);
 		if (ret < 0)
 			break;
 	}
@@ -2737,14 +2733,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-	.shutdown = cpufreq_suspend,
-};
-
 struct kobject *cpufreq_global_kobject;
 EXPORT_SYMBOL(cpufreq_global_kobject);
 
@@ -2756,8 +2744,6 @@ static int __init cpufreq_core_init(void)
 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 
-	register_syscore_ops(&cpufreq_syscore_ops);
-
 	return 0;
 }
module_param(off, int, 0444);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9f02de9a1b47..8ab31702cf6a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -847,11 +847,9 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
 	value |= HWP_MAX_PERF(min_perf);
 	value |= HWP_MIN_PERF(min_perf);
 
-	/* Set EPP/EPB to min */
+	/* Set EPP to min */
 	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
-	else
-		intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
 
 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
@@ -1088,10 +1086,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 static struct cpufreq_driver intel_pstate;
 
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
 {
 	int max_state, turbo_max, freq, i, perf_pct;
-	struct dev_pm_qos_request *req;
+	struct freq_qos_request *req;
 	struct cpufreq_policy *policy;
 
 	for_each_possible_cpu(i) {
@@ -1112,7 +1110,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
 		else
 			turbo_max = cpu->pstate.turbo_pstate;
 
-		if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+		if (type == FREQ_QOS_MIN) {
 			perf_pct = global.min_perf_pct;
 		} else {
 			req++;
@@ -1122,7 +1120,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
 		freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
 		freq *= cpu->pstate.scaling;
 
-		if (dev_pm_qos_update_request(req, freq) < 0)
+		if (freq_qos_update_request(req, freq) < 0)
 			pr_warn("Failed to update freq constraint: CPU%d\n", i);
 	}
 }
@@ -1153,7 +1151,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	if (intel_pstate_driver == &intel_pstate)
 		intel_pstate_update_policies();
 	else
-		update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+		update_qos_request(FREQ_QOS_MAX);
 
 	mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1187,7 +1185,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	if (intel_pstate_driver == &intel_pstate)
 		intel_pstate_update_policies();
 	else
-		update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+		update_qos_request(FREQ_QOS_MIN);
 
 	mutex_unlock(&intel_pstate_driver_lock);
 
@@ -2381,7 +2379,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	int max_state, turbo_max, min_freq, max_freq, ret;
-	struct dev_pm_qos_request *req;
+	struct freq_qos_request *req;
 	struct cpudata *cpu;
 	struct device *dev;
 
@@ -2416,15 +2414,15 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
 	max_freq *= cpu->pstate.scaling;
 
-	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
-				     min_freq);
+	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+				   min_freq);
 	if (ret < 0) {
 		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
 		goto free_req;
 	}
 
-	ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
-				     max_freq);
+	ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+				   max_freq);
 	if (ret < 0) {
 		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
 		goto remove_min_req;
@@ -2435,7 +2433,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	return 0;
 
 remove_min_req:
-	dev_pm_qos_remove_request(req);
+	freq_qos_remove_request(req);
 free_req:
 	kfree(req);
 pstate_exit:
@@ -2446,12 +2444,12 @@ pstate_exit:
 
 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct dev_pm_qos_request *req;
+	struct freq_qos_request *req;
 
 	req = policy->driver_data;
 
-	dev_pm_qos_remove_request(req + 1);
-	dev_pm_qos_remove_request(req);
+	freq_qos_remove_request(req + 1);
+	freq_qos_remove_request(req);
 	kfree(req);
 
 	return intel_pstate_cpu_exit(policy);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index bc9dd30395c4..037fe23bc6ed 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
 static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 {
 	struct cpufreq_policy *policy;
-	struct dev_pm_qos_request *req;
+	struct freq_qos_request *req;
 	u8 node, slow_mode;
 	int cpu, ret;
 
@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 
 	req = policy->driver_data;
 
-	ret = dev_pm_qos_update_request(req,
+	ret = freq_qos_update_request(req,
 			policy->freq_table[slow_mode].frequency);
 	if (ret < 0)
 		pr_warn("Failed to update freq constraint: %d\n", ret);
@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = {
 
 void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
 {
-	struct dev_pm_qos_request *req;
+	struct freq_qos_request *req;
 	int ret;
 
 	if (!cbe_cpufreq_has_pmi)
@@ -113,9 +113,8 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
 	if (!req)
 		return;
 
-	ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
-				     DEV_PM_QOS_MAX_FREQUENCY,
-				     policy->freq_table[0].frequency);
+	ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+				   policy->freq_table[0].frequency);
 	if (ret < 0) {
 		pr_err("Failed to add freq constraint (%d)\n", ret);
 		kfree(req);
@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
 
 void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
 {
-	struct dev_pm_qos_request *req = policy->driver_data;
+	struct freq_qos_request *req = policy->driver_data;
 
 	if (cbe_cpufreq_has_pmi) {
-		dev_pm_qos_remove_request(req);
+		freq_qos_remove_request(req);
 		kfree(req);
 	}
 }
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index 932390b028f1..b0ce9bc78113 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -95,6 +95,10 @@ static int __init haltpoll_init(void)
 	int ret;
 	struct cpuidle_driver *drv = &haltpoll_driver;
 
+	/* Do not load haltpoll if idle= is passed */
+	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
 	cpuidle_poll_state_init(drv);
 
 	if (!kvm_para_available() ||
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 774d991d7cca..aca75237bbcf 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1297,7 +1297,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
 	tp->write_seq = snd_isn;
 	tp->snd_nxt = snd_isn;
 	tp->snd_una = snd_isn;
-	inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+	inet_sk(sk)->inet_id = prandom_u32();
 	assign_rxopt(sk, opt);
 
 	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
--git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 0891ab829b1b..98bc5a4cd5e7 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return peekmsg(sk, msg, len, nonblock, flags); if (sk_can_busy_loop(sk) && - skb_queue_empty(&sk->sk_receive_queue) && + skb_queue_empty_lockless(&sk->sk_receive_queue) && sk->sk_state == TCP_ESTABLISHED) sk_busy_loop(sk, nonblock); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 433d91d710e4..d377b4ca66bf 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) size_t ret = 0; dmabuf = dentry->d_fsdata; - mutex_lock(&dmabuf->lock); + dma_resv_lock(dmabuf->resv, NULL); if (dmabuf->name) ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); - mutex_unlock(&dmabuf->lock); + dma_resv_unlock(dmabuf->resv); return dynamic_dname(dentry, buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? name : ""); @@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) if (IS_ERR(name)) return PTR_ERR(name); - mutex_lock(&dmabuf->lock); + dma_resv_lock(dmabuf->resv, NULL); if (!list_empty(&dmabuf->attachments)) { ret = -EBUSY; kfree(name); @@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) dmabuf->name = name; out_unlock: - mutex_unlock(&dmabuf->lock); + dma_resv_unlock(dmabuf->resv); return ret; } @@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); - mutex_lock(&dmabuf->lock); + dma_resv_lock(dmabuf->resv, NULL); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); - mutex_unlock(&dmabuf->lock); + dma_resv_unlock(dmabuf->resv); } static const struct file_operations dma_buf_fops = { @@ -525,6 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) return ERR_PTR(-EINVAL); } + if (WARN_ON(exp_info->ops->cache_sgt_mapping && + exp_info->ops->dynamic_mapping)) + return ERR_PTR(-EINVAL); + if (!try_module_get(exp_info->owner)) return ERR_PTR(-ENOENT); @@ -645,10 +649,11 @@ void dma_buf_put(struct dma_buf *dmabuf) EXPORT_SYMBOL_GPL(dma_buf_put); /** - * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, + * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally, * calls attach() of dma_buf_ops to allow device-specific attach functionality - * @dmabuf: [in] buffer to attach device to. - * @dev: [in] device to be attached. + * @dmabuf: [in] buffer to attach device to. + * @dev: [in] device to be attached. + * @dynamic_mapping: [in] calling convention for map/unmap * * Returns struct dma_buf_attachment pointer for this attachment. Attachments * must be cleaned up by calling dma_buf_detach(). @@ -662,8 +667,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put); * accessible to @dev, and cannot be moved to a more suitable place. This is * indicated with the error code -EBUSY. 
*/ -struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev) +struct dma_buf_attachment * +dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, + bool dynamic_mapping) { struct dma_buf_attachment *attach; int ret; @@ -677,25 +683,69 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, attach->dev = dev; attach->dmabuf = dmabuf; - - mutex_lock(&dmabuf->lock); + attach->dynamic_mapping = dynamic_mapping; if (dmabuf->ops->attach) { ret = dmabuf->ops->attach(dmabuf, attach); if (ret) goto err_attach; } + dma_resv_lock(dmabuf->resv, NULL); list_add(&attach->node, &dmabuf->attachments); + dma_resv_unlock(dmabuf->resv); - mutex_unlock(&dmabuf->lock); + /* When either the importer or the exporter can't handle dynamic + * mappings we cache the mapping here to avoid issues with the + * reservation object lock. + */ + if (dma_buf_attachment_is_dynamic(attach) != + dma_buf_is_dynamic(dmabuf)) { + struct sg_table *sgt; + + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_lock(attach->dmabuf->resv, NULL); + + sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL); + if (!sgt) + sgt = ERR_PTR(-ENOMEM); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto err_unlock; + } + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_unlock(attach->dmabuf->resv); + attach->sgt = sgt; + attach->dir = DMA_BIDIRECTIONAL; + } return attach; err_attach: kfree(attach); - mutex_unlock(&dmabuf->lock); + return ERR_PTR(ret); + +err_unlock: + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_unlock(attach->dmabuf->resv); + + dma_buf_detach(dmabuf, attach); return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach); + +/** + * dma_buf_attach - Wrapper for dma_buf_dynamic_attach + * @dmabuf: [in] buffer to attach device to. + * @dev: [in] device to be attached. + * + * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static + * mapping. 
+ */ +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev) +{ + return dma_buf_dynamic_attach(dmabuf, dev, false); +} EXPORT_SYMBOL_GPL(dma_buf_attach); /** @@ -711,15 +761,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) if (WARN_ON(!dmabuf || !attach)) return; - if (attach->sgt) + if (attach->sgt) { + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_lock(attach->dmabuf->resv, NULL); + dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir); - mutex_lock(&dmabuf->lock); + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_unlock(attach->dmabuf->resv); + } + + dma_resv_lock(dmabuf->resv, NULL); list_del(&attach->node); + dma_resv_unlock(dmabuf->resv); if (dmabuf->ops->detach) dmabuf->ops->detach(dmabuf, attach); - mutex_unlock(&dmabuf->lock); kfree(attach); } EXPORT_SYMBOL_GPL(dma_buf_detach); @@ -749,6 +806,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, if (WARN_ON(!attach || !attach->dmabuf)) return ERR_PTR(-EINVAL); + if (dma_buf_attachment_is_dynamic(attach)) + dma_resv_assert_held(attach->dmabuf->resv); + if (attach->sgt) { /* * Two mappings with different directions for the same @@ -761,6 +821,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, return attach->sgt; } + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_assert_held(attach->dmabuf->resv); + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); if (!sg_table) sg_table = ERR_PTR(-ENOMEM); @@ -793,9 +856,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) return; + if (dma_buf_attachment_is_dynamic(attach)) + dma_resv_assert_held(attach->dmabuf->resv); + if (attach->sgt == sg_table) return; + if (dma_buf_is_dynamic(attach->dmabuf)) + dma_resv_assert_held(attach->dmabuf->resv); + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); } EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); @@ -1171,13 +1240,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) "size", "flags", "mode", "count", "ino"); list_for_each_entry(buf_obj, &db_list.head, list_node) { - ret = mutex_lock_interruptible(&buf_obj->lock); - if (ret) { - seq_puts(s, - "\tERROR locking buffer object: skipping\n"); - continue; - } + ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); + if (ret) + goto error_unlock; seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", buf_obj->size, @@ -1223,19 +1289,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); attach_count++; } + dma_resv_unlock(buf_obj->resv); seq_printf(s, "Total %d devices attached\n\n", attach_count); count++; size += buf_obj->size; - mutex_unlock(&buf_obj->lock); } seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); mutex_unlock(&db_list.lock); return 0; + +error_unlock: + mutex_unlock(&db_list.lock); + return ret; } DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 9ba74ab7e912..c27e206a764c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma, if (!sdma->script_number) sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + if (sdma->script_number > sizeof(struct sdma_script_start_addrs) + / sizeof(s32)) { + dev_err(sdma->dev, + "SDMA script number %d not match with firmware.\n", + sdma->script_number); + return; + } + for (i = 0; 
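The dma-buf hunks above replace dmabuf->lock with the reservation object and introduce dma_buf_dynamic_attach(); a dynamic importer is now expected to hold the dma_resv lock around map/unmap, which the new dma_resv_assert_held() checks enforce. A minimal importer-side sketch under that convention; example_map is a hypothetical helper:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

static struct sg_table *example_map(struct dma_buf *dmabuf,
				    struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Third argument: this importer uses the dynamic convention. */
	attach = dma_buf_dynamic_attach(dmabuf, dev, true);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Dynamic importers map under the reservation lock. */
	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	if (IS_ERR(sgt))
		dma_buf_detach(dmabuf, attach);
	return sgt;
}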
i < sdma->script_number; i++) if (addr_arr[i] > 0) saddr_arr[i] = addr_arr[i]; diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 8e90a405939d..ef73f65224b1 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan) /* remove all transactions, including active transaction */ spin_lock_irqsave(&bchan->vc.lock, flag); + /* + * If we have transactions queued, then some might be committed to the + * hardware in the desc fifo. The only way to reset the desc fifo is + * to do a hardware reset (either by pipe or the entire block). + * bam_chan_init_hw() will trigger a pipe reset, and also reinit the + * pipe. If the pipe is left disabled (default state after pipe reset) + * and is accessed by a connected hardware engine, a fatal error in + * the BAM will occur. There is a small window where this could happen + * with bam_chan_init_hw(), but it is assumed that the caller has + * stopped activity on any attached hardware engine. Make sure to do + * this first so that the BAM hardware doesn't cause memory corruption + * by accessing freed resources. + */ + if (!list_empty(&bchan->desc_list)) { + async_desc = list_first_entry(&bchan->desc_list, + struct bam_async_desc, desc_node); + bam_chan_init_hw(bchan, async_desc->dir); + } + list_for_each_entry_safe(async_desc, tmp, &bchan->desc_list, desc_node) { list_add(&async_desc->vd.node, &bchan->vc.desc_issued); diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 525dc7338fe3..8546ad034720 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -134,6 +134,10 @@ #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) +/* SPRD DMA_SRC_BLK_STEP register definition */ +#define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28) +#define SPRD_DMA_LLIST_HIGH_SHIFT 28 + /* define DMA channel mode & trigger mode mask */ #define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0) #define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0) @@ -208,6 +212,7 @@ struct sprd_dma_dev { struct sprd_dma_chn channels[0]; }; +static void sprd_dma_free_desc(struct virt_dma_desc *vd); static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param); static struct of_dma_filter_info sprd_dma_info = { .filter_fn = sprd_dma_filter_fn, @@ -609,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) static void sprd_dma_free_chan_resources(struct dma_chan *chan) { struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct virt_dma_desc *cur_vd = NULL; unsigned long flags; spin_lock_irqsave(&schan->vc.lock, flags); + if (schan->cur_desc) + cur_vd = &schan->cur_desc->vd; + sprd_dma_stop(schan); spin_unlock_irqrestore(&schan->vc.lock, flags); + if (cur_vd) + sprd_dma_free_desc(cur_vd); + vchan_free_chan_resources(&schan->vc); pm_runtime_put(chan->device->dev); } @@ -717,6 +729,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, u32 int_mode = flags & SPRD_DMA_INT_MASK; int src_datawidth, dst_datawidth, src_step, dst_step; u32 temp, fix_mode = 0, fix_en = 0; + phys_addr_t llist_ptr; if (dir == DMA_MEM_TO_DEV) { src_step = sprd_dma_get_step(slave_cfg->src_addr_width); @@ -814,13 +827,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, * Set the link-list pointer point to next link-list * configuration's physical address. 
*/ - hw->llist_ptr = schan->linklist.phy_addr + temp; + llist_ptr = schan->linklist.phy_addr + temp; + hw->llist_ptr = lower_32_bits(llist_ptr); + hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) & + SPRD_DMA_LLIST_HIGH_MASK; } else { hw->llist_ptr = 0; + hw->src_blk_step = 0; } hw->frg_step = 0; - hw->src_blk_step = 0; hw->des_blk_step = 0; return 0; } @@ -1023,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan) static int sprd_dma_terminate_all(struct dma_chan *chan) { struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct virt_dma_desc *cur_vd = NULL; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&schan->vc.lock, flags); + if (schan->cur_desc) + cur_vd = &schan->cur_desc->vd; + sprd_dma_stop(schan); vchan_get_all_descriptors(&schan->vc, &head); spin_unlock_irqrestore(&schan->vc.lock, flags); + if (cur_vd) + sprd_dma_free_desc(cur_vd); + vchan_dma_desc_free_list(&schan->vc, &head); return 0; } diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 5f8adf5c1f20..6e1268552f74 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -40,6 +40,7 @@ #define ADMA_CH_CONFIG_MAX_BURST_SIZE 16 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf) #define ADMA_CH_CONFIG_MAX_BUFS 8 +#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4) #define ADMA_CH_FIFO_CTRL 0x2c #define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8) @@ -77,6 +78,7 @@ struct tegra_adma; * @ch_req_tx_shift: Register offset for AHUB transmit channel select. * @ch_req_rx_shift: Register offset for AHUB receive channel select. * @ch_base_offset: Register offset of DMA channel registers. + * @has_outstanding_reqs: If DMA channel can have outstanding requests. * @ch_fifo_ctrl: Default value for channel FIFO CTRL register. * @ch_req_mask: Mask for Tx or Rx channel select. * @ch_req_max: Maximum number of Tx or Rx channels available. 
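The sprd-dma hunks above widen the link-list pointer beyond 32 bits by keeping the low word in llist_ptr and packing the high bits into bits 31:28 of src_blk_step. A sketch of just that split; the mask and shift are the ones defined in the hunk:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define SPRD_DMA_LLIST_HIGH_MASK	GENMASK(31, 28)
#define SPRD_DMA_LLIST_HIGH_SHIFT	28

static void example_set_llist_ptr(u32 *llist_ptr, u32 *src_blk_step,
				  phys_addr_t addr)
{
	*llist_ptr = lower_32_bits(addr);
	/* Bits 35:32 of the address land in the register's top nibble. */
	*src_blk_step = (upper_32_bits(addr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
			SPRD_DMA_LLIST_HIGH_MASK;
}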
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data { unsigned int ch_req_max; unsigned int ch_reg_size; unsigned int nr_channels; + bool has_outstanding_reqs; }; /* @@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, ADMA_CH_CTRL_FLOWCTRL_EN; ch_regs->config |= cdata->adma_get_burst_config(burst_size); ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); + if (cdata->has_outstanding_reqs) + ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8); ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl; ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; @@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = { .ch_req_tx_shift = 28, .ch_req_rx_shift = 24, .ch_base_offset = 0, + .has_outstanding_reqs = false, .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT, .ch_req_mask = 0xf, .ch_req_max = 10, @@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = { .ch_req_tx_shift = 27, .ch_req_rx_shift = 22, .ch_base_offset = 0x10000, + .has_outstanding_reqs = true, .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT, .ch_req_mask = 0x1f, .ch_req_max = 20, diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 2f946f55076c..8c2f7ebe998c 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { struct cppi41_channel *c = to_cpp41_chan(chan); + struct dma_async_tx_descriptor *txd = NULL; + struct cppi41_dd *cdd = c->cdd; struct cppi41_desc *d; struct scatterlist *sg; unsigned int i; + int error; + + error = pm_runtime_get(cdd->ddev.dev); + if (error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); + + return NULL; + } + + if (cdd->is_suspended) + goto err_out_not_ready; d = c->desc; for_each_sg(sgl, sg, sg_len, i) { @@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( d++; } - return &c->txd; + txd = &c->txd; + +err_out_not_ready: + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); + + return txd; } static void cppi41_compute_td_desc(struct cppi41_desc *d) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index e7dc3c4dc8e0..5d56f1e4d332 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -68,6 +68,9 @@ #define XILINX_DMA_DMACR_CIRC_EN BIT(1) #define XILINX_DMA_DMACR_RUNSTOP BIT(0) #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) +#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24) +#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16) +#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8) #define XILINX_DMA_REG_DMASR 0x0004 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) @@ -1354,7 +1357,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) node); hw = &segment->hw; - xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, + xilinx_prep_dma_addr_t(hw->buf_addr)); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -2117,8 +2121,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, chan->config.gen_lock = cfg->gen_lock; chan->config.master = cfg->master; + dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; if (cfg->gen_lock && chan->genlock) { dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; + dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; } @@ -2134,11 +2140,13 @@ int xilinx_vdma_channel_set_config(struct 
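The cppi41 hunk above wraps descriptor preparation in a runtime PM reference so the queue manager stays awake while the descriptor is written. The detail worth noting is the error path: pm_runtime_get() raises the usage count even when it fails, so the failure branch must drop it with pm_runtime_put_noidle(). A compact sketch of the pattern; example_prep is hypothetical:

#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int example_prep(struct device *dev, bool is_suspended)
{
	int ret = -EAGAIN;
	int error;

	error = pm_runtime_get(dev);
	if (error < 0) {
		/* get() bumped the usage count even on failure. */
		pm_runtime_put_noidle(dev);
		return error;
	}

	if (!is_suspended)
		ret = 0;	/* descriptor setup would happen here */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}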
dma_chan *dchan, chan->config.delay = cfg->delay; if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { + dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; chan->config.coalesc = cfg->coalesc; } if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { + dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; chan->config.delay = cfg->delay; } diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index d413a0bdc9ad..0bb62857ffb2 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes) if (!ghes_pvt) return; + if (atomic_dec_return(&ghes_init)) + return; + mci = ghes_pvt->mci; + ghes_pvt = NULL; edac_mc_del_mc(mci->pdev); edac_mc_free(mci); } diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 35ed56b9c34f..1e21fc3e9851 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -408,7 +408,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) bytes = ~0ull; else if (size & 0x8000) bytes = (u64)(size & 0x7fff) << 10; - else if (size != 0x7fff) + else if (size != 0x7fff || dm->length < 0x20) bytes = (u64)size << 20; else bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 178ee8106828..b248870a9806 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -182,6 +182,7 @@ config RESET_ATTACK_MITIGATION config EFI_RCI2_TABLE bool "EFI Runtime Configuration Interface Table Version 2 Support" + depends on X86 || COMPILE_TEST help Displays the content of the Runtime Configuration Interface Table version 2 on Dell EMC PowerEdge systems as a binary diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index addf0749dd8b..b1af0de2e100 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -381,7 +381,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, pcie->device_id.vendor_id, pcie->device_id.device_id); p = pcie->device_id.class_code; - printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); + printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]); } if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 8d3e778e988b..e98bbf8e56d9 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -267,6 +267,9 @@ static __init int efivar_ssdt_load(void) void *data; int ret; + if (!efivar_ssdt[0]) + return 0; + ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); list_for_each_entry_safe(entry, aux, &entries, list) { @@ -551,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, sizeof(*seed) + size); if (seed != NULL) { pr_notice("seeding entropy pool\n"); - add_device_randomness(seed->bits, seed->size); + add_bootloader_randomness(seed->bits, seed->size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 0460c7581220..ee0661ddb25b 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -52,6 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o 
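The ghes_edac hunk above makes unregister symmetric with the shared, refcounted registration: the MC device is torn down only when the last GHES instance goes away, and ghes_pvt is cleared so a stale pointer cannot be reused. A one-line sketch of the guard, assuming ghes_init is the driver's atomic registration counter:

#include <linux/atomic.h>

static bool example_is_last_user(atomic_t *refcount)
{
	/* Non-zero result: other users remain, skip the teardown. */
	return atomic_dec_return(refcount) == 0;
}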
random.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += arm64-stub.o +CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) # diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index e8f7aefb6813..41213bf5fcf5 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -195,6 +195,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, unsigned long dram_base, efi_loaded_image_t *image) { + unsigned long kernel_base; efi_status_t status; /* @@ -204,9 +205,18 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * loaded. These assumptions are made by the decompressor, * before any memory map is available. */ - dram_base = round_up(dram_base, SZ_128M); + kernel_base = round_up(dram_base, SZ_128M); - status = reserve_kernel_base(sys_table, dram_base, reserve_addr, + /* + * Note that some platforms (notably, the Raspberry Pi 2) put + * spin-tables and other pieces of firmware at the base of RAM, + * abusing the fact that the window of TEXT_OFFSET bytes at the + * base of the kernel image is only partially used at the moment. + * (Up to 5 pages are used for the swapper page tables) + */ + kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE; + + status = reserve_kernel_base(sys_table, kernel_base, reserve_addr, reserve_size); if (status != EFI_SUCCESS) { pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n"); @@ -220,7 +230,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, *image_size = image->image_size; status = efi_relocate_kernel(sys_table, image_addr, *image_size, *image_size, - dram_base + MAX_UNCOMP_KERNEL_SIZE, 0); + kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0); if (status != EFI_SUCCESS) { pr_efi_err(sys_table, "Failed to relocate kernel.\n"); efi_free(sys_table, *reserve_size, *reserve_addr); diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 3caae7f2cf56..35dbc2791c97 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -260,11 +260,11 @@ fail: } /* - * Allocate at the lowest possible address. + * Allocate at the lowest possible address that is not below 'min'. */ -efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, - unsigned long *addr) +efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr, unsigned long min) { unsigned long map_size, desc_size, buff_size; efi_memory_desc_t *map; @@ -311,13 +311,8 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, start = desc->phys_addr; end = start + desc->num_pages * EFI_PAGE_SIZE; - /* - * Don't allocate at 0x0. It will confuse code that - * checks pointers against NULL. Skip the first 8 - * bytes so we start at a nice even number. - */ - if (start == 0x0) - start += 8; + if (start < min) + start = min; start = round_up(start, align); if ((start + size) > end) @@ -698,7 +693,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, unsigned long image_size, unsigned long alloc_size, unsigned long preferred_addr, - unsigned long alignment) + unsigned long alignment, + unsigned long min_addr) { unsigned long cur_image_addr; unsigned long new_addr = 0; @@ -731,8 +727,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, * possible. 
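The libstub hunks above generalize efi_low_alloc() into efi_low_alloc_above(), turning the old hard-coded "never hand out address 0x0, skip the first 8 bytes" rule into a caller-supplied floor. Presumably the old entry point survives as a thin wrapper passing 0x8 as that floor; a sketch under that assumption:

static inline efi_status_t
efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size,
	      unsigned long align, unsigned long *addr)
{
	/*
	 * Keep the historical behaviour: never allocate at 0x0, so
	 * callers comparing pointers against NULL are not confused.
	 */
	return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
}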
*/ if (status != EFI_SUCCESS) { - status = efi_low_alloc(sys_table_arg, alloc_size, alignment, - &new_addr); + status = efi_low_alloc_above(sys_table_arg, alloc_size, + alignment, &new_addr, min_addr); } if (status != EFI_SUCCESS) { pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n"); diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c index 3e290f96620a..76b0c354a027 100644 --- a/drivers/firmware/efi/rci2-table.c +++ b/drivers/firmware/efi/rci2-table.c @@ -76,7 +76,7 @@ static u16 checksum(void) return chksum; } -int __init efi_rci2_sysfs_init(void) +static int __init efi_rci2_sysfs_init(void) { struct kobject *tables_kobj; int ret = -ENOMEM; diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 877745c3aaf2..7baf48c01e72 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/efi.h> +#include <linux/security.h> #include <linux/slab.h> #include <linux/uaccess.h> @@ -717,6 +718,13 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd, static int efi_test_open(struct inode *inode, struct file *file) { + int ret = security_locked_down(LOCKDOWN_EFI_TEST); + + if (ret) + return ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; /* * nothing special to do here * We do accept multiple open files at the same time as we diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 1d3f5ca3eaaf..31f9f0e369b9 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -40,7 +40,7 @@ int __init efi_tpm_eventlog_init(void) { struct linux_efi_tpm_eventlog *log_tbl; struct efi_tcg2_final_events_table *final_tbl; - unsigned int tbl_size; + int tbl_size; int ret = 0; if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) { @@ -75,16 +75,29 @@ int __init efi_tpm_eventlog_init(void) goto out; } - tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log - + sizeof(final_tbl->version) - + sizeof(final_tbl->nr_events), - final_tbl->nr_events, - log_tbl->log); + tbl_size = 0; + if (final_tbl->nr_events != 0) { + void *events = (void *)efi.tpm_final_log + + sizeof(final_tbl->version) + + sizeof(final_tbl->nr_events); + + tbl_size = tpm2_calc_event_log_size(events, + final_tbl->nr_events, + log_tbl->log); + } + + if (tbl_size < 0) { + pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n"); + ret = -EINVAL; + goto out_calc; + } + memblock_reserve((unsigned long)final_tbl, tbl_size + sizeof(*final_tbl)); - early_memunmap(final_tbl, sizeof(*final_tbl)); efi_tpm_final_log_size = tbl_size; +out_calc: + early_memunmap(final_tbl, sizeof(*final_tbl)); out: early_memunmap(log_tbl, sizeof(*log_tbl)); return ret; diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c index dda525c0f968..5c6f2a74f104 100644 --- a/drivers/firmware/google/vpd_decode.c +++ b/drivers/firmware/google/vpd_decode.c @@ -52,7 +52,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf, if (max_len - consumed < *entry_len) return VPD_FAIL; - consumed += decoded_len; + consumed += *entry_len; *_consumed = consumed; return VPD_OK; } diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index fe7a73f52329..bb287f35cf40 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -530,11 +530,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip) } for_each_set_bit(n, ®, SPRD_EIC_PER_BANK_NR) { 
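The efi_test hunk above gates the character device twice at open time: kernel lockdown is consulted first, then the caller must hold CAP_SYS_ADMIN. A minimal sketch of that check; example_open_gate is a hypothetical name:

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/security.h>

static int example_open_gate(void)
{
	int ret = security_locked_down(LOCKDOWN_EFI_TEST);

	if (ret)
		return ret;		/* rejected under lockdown */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;		/* privileged users only */
	return 0;
}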
- girq = irq_find_mapping(chip->irq.domain, - bank * SPRD_EIC_PER_BANK_NR + n); + u32 offset = bank * SPRD_EIC_PER_BANK_NR + n; + + girq = irq_find_mapping(chip->irq.domain, offset); generic_handle_irq(girq); - sprd_eic_toggle_trigger(chip, girq, n); + sprd_eic_toggle_trigger(chip, girq, offset); } } } diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index 4d835f9089df..86a10c808ef6 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -293,8 +293,9 @@ static void intel_mid_irq_handler(struct irq_desc *desc) chip->irq_eoi(data); } -static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv) +static int intel_mid_irq_init_hw(struct gpio_chip *chip) { + struct intel_mid_gpio *priv = gpiochip_get_data(chip); void __iomem *reg; unsigned base; @@ -309,6 +310,8 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv) reg = gpio_reg(&priv->chip, base, GEDR); writel(~0, reg); } + + return 0; } static int __maybe_unused intel_gpio_runtime_idle(struct device *dev) @@ -372,6 +375,7 @@ static int intel_gpio_probe(struct pci_dev *pdev, girq = &priv->chip.irq; girq->chip = &intel_mid_irqchip; + girq->init_hw = intel_mid_irq_init_hw; girq->parent_handler = intel_mid_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents, @@ -384,9 +388,8 @@ static int intel_gpio_probe(struct pci_dev *pdev, girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_simple_irq; - intel_mid_irq_init_hw(priv); - pci_set_drvdata(pdev, priv); + retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv); if (retval) { dev_err(&pdev->dev, "gpiochip_add error %d\n", retval); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 6bb9741ad036..e9e47c0d5be7 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -294,8 +294,9 @@ static struct irq_chip lp_irqchip = { .flags = IRQCHIP_SKIP_SET_WAKE, }; -static void lp_gpio_irq_init_hw(struct lp_gpio *lg) +static int lp_gpio_irq_init_hw(struct gpio_chip *chip) { + struct lp_gpio *lg = gpiochip_get_data(chip); unsigned long reg; unsigned base; @@ -307,6 +308,8 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg) reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT); outl(0xffffffff, reg); } + + return 0; } static int lp_gpio_probe(struct platform_device *pdev) @@ -364,6 +367,7 @@ static int lp_gpio_probe(struct platform_device *pdev) girq = &gc->irq; girq->chip = &lp_irqchip; + girq->init_hw = lp_gpio_irq_init_hw; girq->parent_handler = lp_gpio_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents, @@ -373,9 +377,7 @@ static int lp_gpio_probe(struct platform_device *pdev) return -ENOMEM; girq->parents[0] = (unsigned)irq_rc->start; girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_simple_irq; - - lp_gpio_irq_init_hw(lg); + girq->handler = handle_bad_irq; } ret = devm_gpiochip_add_data(dev, gc, lg); diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c index 47d05e357e61..faf86ea9c51a 100644 --- a/drivers/gpio/gpio-max77620.c +++ b/drivers/gpio/gpio-max77620.c @@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio, case 0: val = MAX77620_CNFG_GPIO_DBNC_None; break; - case 1 ... 8: + case 1000 ... 8000: val = MAX77620_CNFG_GPIO_DBNC_8ms; break; - case 9 ... 16: + case 9000 ... 16000: val = MAX77620_CNFG_GPIO_DBNC_16ms; break; - case 17 ... 32: + case 17000 ... 
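The intel-mid and lynxpoint hunks above stop calling their IRQ hardware setup by hand before registration and instead publish it through the new gpio_irq_chip->init_hw() callback, letting gpiolib run it at the right point inside gpiochip_add_data(). A sketch of the shape; the device struct and register offsets are purely illustrative:

#include <linux/gpio/driver.h>
#include <linux/io.h>

#define EXAMPLE_IRQ_MASK	0x10	/* hypothetical registers */
#define EXAMPLE_IRQ_STATUS	0x14

struct example_gpio {
	void __iomem *base;
};

static int example_irq_init_hw(struct gpio_chip *chip)
{
	struct example_gpio *priv = gpiochip_get_data(chip);

	writel(0, priv->base + EXAMPLE_IRQ_MASK);	/* mask all lines */
	writel(~0U, priv->base + EXAMPLE_IRQ_STATUS);	/* clear stale state */
	return 0;
}

/* During probe, before registering the chip:
 *	chip->irq.init_hw = example_irq_init_hw;
 */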
32000: val = MAX77620_CNFG_GPIO_DBNC_32ms; break; default: diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c index 4f27ddfe1e2f..3302125e5265 100644 --- a/drivers/gpio/gpio-merrifield.c +++ b/drivers/gpio/gpio-merrifield.c @@ -397,7 +397,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id { const struct mrfld_gpio_pinrange *range; const char *pinctrl_dev_name; - struct gpio_irq_chip *girq; struct mrfld_gpio *priv; u32 gpio_base, irq_base; void __iomem *base; @@ -445,21 +444,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id raw_spin_lock_init(&priv->lock); - girq = &priv->chip.irq; - girq->chip = &mrfld_irqchip; - girq->parent_handler = mrfld_irq_handler; - girq->num_parents = 1; - girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents, - sizeof(*girq->parents), - GFP_KERNEL); - if (!girq->parents) - return -ENOMEM; - girq->parents[0] = pdev->irq; - girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_bad_irq; - - mrfld_irq_init_hw(priv); - pci_set_drvdata(pdev, priv); retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv); if (retval) { @@ -481,6 +465,18 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id } } + retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base, + handle_bad_irq, IRQ_TYPE_NONE); + if (retval) { + dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n"); + return retval; + } + + mrfld_irq_init_hw(priv); + + gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq, + mrfld_irq_handler); + return 0; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 1eea2c6c2e1d..80ea49f570f4 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -317,7 +317,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, transitory = flags & OF_GPIO_TRANSITORY; ret = gpiod_request(desc, label); - if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE)) + if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE)) return desc; if (ret) return ERR_PTR(ret); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index bdbc1649eafa..104ed299d5ea 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -86,6 +86,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, struct lock_class_key *lock_key, struct lock_class_key *request_key); static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); +static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip); static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); @@ -1406,6 +1407,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, machine_gpiochip_add(chip); + ret = gpiochip_irqchip_init_hw(chip); + if (ret) + goto err_remove_acpi_chip; + ret = gpiochip_irqchip_init_valid_mask(chip); if (ret) goto err_remove_acpi_chip; @@ -1622,6 +1627,16 @@ static struct gpio_chip *find_chip_by_name(const char *name) * The following is irqchip helper code for gpiochips. 
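The gpiolib-of fix above is a flag-namespace bug: 'flags' holds OF_GPIO_* bits parsed from the device tree, while GPIOD_FLAGS_BIT_NONEXCLUSIVE lives in the caller's enum gpiod_flags word, 'dflags'. The corrected -EBUSY tolerance test, isolated as a sketch:

#include <linux/errno.h>
#include <linux/gpio/consumer.h>

static bool example_tolerate_busy(int ret, enum gpiod_flags dflags)
{
	/* A shared (nonexclusive) request may legitimately find the
	 * line already taken; any other -EBUSY stays an error. */
	return ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE);
}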
*/ +static int gpiochip_irqchip_init_hw(struct gpio_chip *gc) +{ + struct gpio_irq_chip *girq = &gc->irq; + + if (!girq->init_hw) + return 0; + + return girq->init_hw(gc); +} + static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) { struct gpio_irq_chip *girq = &gc->irq; @@ -2446,8 +2461,13 @@ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, { return 0; } - static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {} + +static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip) +{ + return 0; +} + static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip) { return 0; @@ -3070,8 +3090,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) if (!ret) goto set_output_value; /* Emulate open drain by not actively driving the line high */ - if (value) - return gpiod_direction_input(desc); + if (value) { + ret = gpiod_direction_input(desc); + goto set_output_flag; + } } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { ret = gpio_set_config(gc, gpio_chip_hwgpio(desc), @@ -3079,8 +3101,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) if (!ret) goto set_output_value; /* Emulate open source by not actively driving the line low */ - if (!value) - return gpiod_direction_input(desc); + if (!value) { + ret = gpiod_direction_input(desc); + goto set_output_flag; + } } else { gpio_set_config(gc, gpio_chip_hwgpio(desc), PIN_CONFIG_DRIVE_PUSH_PULL); @@ -3088,6 +3112,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) set_output_value: return gpiod_direction_output_raw_commit(desc, value); + +set_output_flag: + /* + * When emulating open-source or open-drain functionalities by not + * actively driving the line (setting mode to input) we still need to + * set the IS_OUT flag or otherwise we won't be able to set the line + * value anymore. + */ + if (ret == 0) + set_bit(FLAG_IS_OUT, &desc->flags); + return ret; } EXPORT_SYMBOL_GPL(gpiod_direction_output); @@ -3448,8 +3483,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value) if (value) { ret = chip->direction_input(chip, offset); - if (!ret) - clear_bit(FLAG_IS_OUT, &desc->flags); } else { ret = chip->direction_output(chip, offset, 0); if (!ret) @@ -3479,8 +3512,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value set_bit(FLAG_IS_OUT, &desc->flags); } else { ret = chip->direction_input(chip, offset); - if (!ret) - clear_bit(FLAG_IS_OUT, &desc->flags); } trace_gpio_direction(desc_to_gpio(desc), !value, ret); if (ret < 0) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e4bf9f8e2430..76db8bc0dd1f 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -97,6 +97,20 @@ config DRM_KMS_FB_HELPER help FBDEV helpers for KMS drivers. +config DRM_DEBUG_DP_MST_TOPOLOGY_REFS + bool "Enable refcount backtrace history in the DP MST helpers" + select STACKDEPOT + depends on DRM_KMS_HELPER + depends on DEBUG_KERNEL + depends on EXPERT + help + Enables debug tracing for topology refs in DRM's DP MST helpers. A + history of each topology reference/dereference will be printed to the + kernel log once a port or branch device's topology refcount reaches 0. + + This has the potential to use a lot of memory and print some very + large kernel messages. If in doubt, say "N". + config DRM_FBDEV_EMULATION bool "Enable legacy fbdev support for your modesetting driver" depends on DRM @@ -169,6 +183,13 @@ config DRM_TTM GPU memory types. 
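The gpiod_direction_output() hunks above fix emulated open-drain and open-source lines: driving the "passive" level switches the pin to input, but the descriptor must keep FLAG_IS_OUT set or later gpiod_set_value() calls would no longer treat it as an output. A sketch of that emulation step; FLAG_IS_OUT and struct gpio_desc are gpiolib-internal, shown here only to mirror the hunk:

static int example_emulate_passive_level(struct gpio_desc *desc)
{
	/* Stop actively driving the line... */
	int ret = gpiod_direction_input(desc);

	/* ...but keep accounting the descriptor as an output. */
	if (ret == 0)
		set_bit(FLAG_IS_OUT, &desc->flags);
	return ret;
}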
Will be enabled automatically if a device driver uses it. +config DRM_TTM_DMA_PAGE_POOL + bool + depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU) + default y + help + Choose this if you need the TTM dma page pool + config DRM_VRAM_HELPER tristate depends on DRM @@ -236,9 +257,9 @@ config DRM_AMDGPU tristate "AMD GPU" depends on DRM && PCI && MMU select FW_LOADER - select DRM_KMS_HELPER + select DRM_KMS_HELPER select DRM_SCHED - select DRM_TTM + select DRM_TTM select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE @@ -267,6 +288,7 @@ config DRM_VKMS tristate "Virtual KMS (EXPERIMENTAL)" depends on DRM select DRM_KMS_HELPER + select CRC32 default n help Virtual Kernel Mode-Setting (VKMS) is used for testing or for @@ -407,7 +429,7 @@ config DRM_R128 config DRM_I810 tristate "Intel I810" - # !PREEMPT because of missing ioctl locking + # !PREEMPTION because of missing ioctl locking depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN) help Choose this option if you have an Intel I810 graphics card. If M is diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 00962a659009..ca0e435559d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ - amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ - amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o + amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ + amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ + amdgpu_umc.o smu_v11_0_i2c.o amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o @@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \ - arct_reg_init.o navi12_reg_init.o + arct_reg_init.o navi12_reg_init.o mxgpu_nv.o # add DF block amdgpu-y += \ @@ -83,7 +84,7 @@ amdgpu-y += \ # add UMC block amdgpu-y += \ - umc_v6_1.o + umc_v6_1.o umc_v6_0.o # add IH block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bd37df5dd6d0..bcc5d40a8d5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -73,6 +73,7 @@ #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" #include "amdgpu_sdma.h" +#include "amdgpu_nbio.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_csa.h" @@ -106,6 +107,8 @@ struct amdgpu_mgpu_info uint32_t num_apu; }; +#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256 + /* * Modules parameters. 
*/ @@ -122,6 +125,7 @@ extern int amdgpu_disp_priority; extern int amdgpu_hw_i2c; extern int amdgpu_pcie_gen2; extern int amdgpu_msi; +extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; extern int amdgpu_dpm; extern int amdgpu_fw_load_type; extern int amdgpu_aspm; @@ -135,6 +139,7 @@ extern int amdgpu_vm_fragment_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_vm_update_mode; +extern int amdgpu_exp_hw_support; extern int amdgpu_dc; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; @@ -146,11 +151,7 @@ extern uint amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern uint amdgpu_pp_feature_mask; -extern int amdgpu_ngg; -extern int amdgpu_prim_buf_per_se; -extern int amdgpu_pos_buf_per_se; -extern int amdgpu_cntl_sb_buf_per_se; -extern int amdgpu_param_buf_per_se; +extern uint amdgpu_force_long_training; extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; @@ -167,6 +168,12 @@ extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; extern int amdgpu_noretry; +extern int amdgpu_force_asic_type; +#ifdef CONFIG_HSA_AMD +extern int sched_policy; +#else +static const int sched_policy = KFD_SCHED_POLICY_HWS; +#endif #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; @@ -283,6 +290,9 @@ struct amdgpu_ip_block_version { const struct amd_ip_funcs *funcs; }; +#define HW_REV(_Major, _Minor, _Rev) \ + ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev))) + struct amdgpu_ip_block { struct amdgpu_ip_block_status status; const struct amdgpu_ip_block_version *version; @@ -425,7 +435,6 @@ struct amdgpu_fpriv { }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); -int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev); int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); @@ -477,7 +486,6 @@ struct amdgpu_cs_parser { uint64_t bytes_moved_vis_threshold; uint64_t bytes_moved; uint64_t bytes_moved_vis; - struct amdgpu_bo_list_entry *evictable; /* user fence */ struct amdgpu_bo_list_entry uf_entry; @@ -624,6 +632,11 @@ struct amdgpu_fw_vram_usage { u64 size; struct amdgpu_bo *reserved_bo; void *va; + + /* Offset on the top of VRAM, used as c2p write buffer. 
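The HW_REV() macro added above packs a three-part hardware revision into a single u32 so IP versions can be compared with plain integer ordering; a worked expansion:

/* major in bits 23:16, minor in 15:8, revision in 7:0 */
uint32_t rev = HW_REV(2, 0, 1);			/* == 0x00020001 */

/* Monotonic compares then come for free: */
bool newer = HW_REV(2, 1, 0) > HW_REV(2, 0, 1);	/* true */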
+ */ + u64 mem_train_fb_loc; + bool mem_train_support; }; /* @@ -644,71 +657,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); - -/* - * amdgpu nbio functions - * - */ -struct nbio_hdp_flush_reg { - u32 ref_and_mask_cp0; - u32 ref_and_mask_cp1; - u32 ref_and_mask_cp2; - u32 ref_and_mask_cp3; - u32 ref_and_mask_cp4; - u32 ref_and_mask_cp5; - u32 ref_and_mask_cp6; - u32 ref_and_mask_cp7; - u32 ref_and_mask_cp8; - u32 ref_and_mask_cp9; - u32 ref_and_mask_sdma0; - u32 ref_and_mask_sdma1; - u32 ref_and_mask_sdma2; - u32 ref_and_mask_sdma3; - u32 ref_and_mask_sdma4; - u32 ref_and_mask_sdma5; - u32 ref_and_mask_sdma6; - u32 ref_and_mask_sdma7; -}; - struct amdgpu_mmio_remap { u32 reg_offset; resource_size_t bus_addr; }; -struct amdgpu_nbio_funcs { - const struct nbio_hdp_flush_reg *hdp_flush_reg; - u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); - u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); - u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); - u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); - u32 (*get_rev_id)(struct amdgpu_device *adev); - void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); - void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); - u32 (*get_memsize)(struct amdgpu_device *adev); - void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index, int doorbell_size); - void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, - int doorbell_index, int instance); - void (*enable_doorbell_aperture)(struct amdgpu_device *adev, - bool enable); - void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, - bool enable); - void (*ih_doorbell_range)(struct amdgpu_device *adev, - bool use_doorbell, int doorbell_index); - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, - bool enable); - void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, - bool enable); - void (*get_clockgating_state)(struct amdgpu_device *adev, - u32 *flags); - void (*ih_control)(struct amdgpu_device *adev); - void (*init_registers)(struct amdgpu_device *adev); - void (*detect_hw_virt)(struct amdgpu_device *adev); - void (*remap_hdp_registers)(struct amdgpu_device *adev); -}; - struct amdgpu_df_funcs { void (*sw_init)(struct amdgpu_device *adev); + void (*sw_fini)(struct amdgpu_device *adev); void (*enable_broadcast_mode)(struct amdgpu_device *adev, bool enable); u32 (*get_fb_channel_number)(struct amdgpu_device *adev); @@ -813,6 +769,7 @@ struct amdgpu_device { uint8_t *bios; uint32_t bios_size; struct amdgpu_bo *stolen_vga_memory; + struct amdgpu_bo *discovery_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -921,6 +878,12 @@ struct amdgpu_device { u32 cg_flags; u32 pg_flags; + /* nbio */ + struct amdgpu_nbio nbio; + + /* mmhub */ + struct amdgpu_mmhub mmhub; + /* gfx */ struct amdgpu_gfx gfx; @@ -974,9 +937,7 @@ struct amdgpu_device { /* soc15 register offset based on ip, instance and segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; - const struct amdgpu_nbio_funcs *nbio_funcs; const struct amdgpu_df_funcs *df_funcs; - const struct amdgpu_mmhub_funcs *mmhub_funcs; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; @@ 
-1009,8 +970,6 @@ struct amdgpu_device { int asic_reset_res; struct work_struct xgmi_reset_work; - bool in_baco_reset; - long gfx_timeout; long sdma_timeout; long video_timeout; @@ -1018,6 +977,9 @@ struct amdgpu_device { uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; + + /* device pstate */ + int pstate; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1032,6 +994,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, void amdgpu_device_fini(struct amdgpu_device *adev); int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write); uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 07eb29885372..d3da9dde4ee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -63,45 +63,10 @@ void amdgpu_amdkfd_fini(void) void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { - const struct kfd2kgd_calls *kfd2kgd; - - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_KAVERI: - case CHIP_HAWAII: - kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions(); - break; -#endif - case CHIP_CARRIZO: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); - break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); - break; - case CHIP_ARCTURUS: - kfd2kgd = amdgpu_amdkfd_arcturus_get_functions(); - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions(); - break; - default: - dev_info(adev->dev, "kfd not supported on this ASIC\n"); - return; - } + bool vf = amdgpu_sriov_vf(adev); adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->pdev, adev->asic_type, vf); if (adev->kfd.dev) amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; @@ -165,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->gfx.mec.queue_bitmap, KGD_MAX_QUEUES); - /* remove the KIQ bit as well */ - if (adev->gfx.kiq.ring.sched.ready) - clear_bit(amdgpu_gfx_mec_queue_to_bit(adev, - adev->gfx.kiq.ring.me - 1, - adev->gfx.kiq.ring.pipe, - adev->gfx.kiq.ring.queue), - gpu_resources.queue_bitmap); - /* According to linux/bitmap.h we shouldn't use bitmap_clear if * nbits is not compile time constant */ @@ -202,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->doorbell_index.last_non_cp; } - kgd2kfd_device_init(adev->kfd.dev, &gpu_resources); + kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources); } } @@ -709,38 +666,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) return 0; } -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void) -{ - return NULL; -} 
- struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g) + unsigned int asic_type, bool vf) { return NULL; } bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) { return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e519df3fd2b6..069d5d230810 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -57,7 +57,7 @@ struct kgd_mem { unsigned int mapped_to_gpu_memory; uint64_t va; - uint32_t mapping_flags; + uint32_t alloc_flags; atomic_t invalid; struct amdkfd_process_info *process_info; @@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void); - bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev); @@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); +/* Read user wptr from a specified user address space with page fault + * disabled. The memory must be pinned and mapped to the hardware when + * this is called in hqd_load functions, so it should never fault in + * the first place. This resolves a circular lock dependency involving + * four locks, including the DQM lock and mmap_sem. + */ #define read_user_wptr(mmptr, wptr, dst) \ ({ \ bool valid = false; \ if ((mmptr) && (wptr)) { \ + pagefault_disable(); \ if ((mmptr) == current->mm) { \ valid = !get_user((dst), (wptr)); \ } else if (current->mm == NULL) { \ @@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s valid = !get_user((dst), (wptr)); \ unuse_mm(mmptr); \ } \ + pagefault_enable(); \ } \ valid; \ }) @@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); int kgd2kfd_init(void); void kgd2kfd_exit(void); struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g); + unsigned int asic_type, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index c79aaebeeaf0..b6713e0ed1b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -19,10 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
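The read_user_wptr() change above brackets the user-space read with pagefault_disable()/pagefault_enable(): the write-pointer page is expected to be pinned by the time hqd_load runs, so never sleeping in a fault breaks the lock-ordering cycle with mmap_sem that the new comment documents. The core of the pattern as a sketch:

#include <linux/types.h>
#include <linux/uaccess.h>

static bool example_read_pinned_u64(u64 __user *wptr, u64 *dst)
{
	bool valid;

	pagefault_disable();
	valid = !get_user(*dst, wptr);	/* must not fault: page is pinned */
	pagefault_enable();

	return valid;
}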
*/ - -#undef pr_fmt -#define pr_fmt(fmt) "kfd2kgd: " fmt - #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> @@ -69,11 +65,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[8] = { + uint32_t sdma_engine_reg_base[8] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, SOC15_REG_OFFSET(SDMA1, 0, @@ -91,111 +87,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, SOC15_REG_OFFSET(SDMA7, 0, mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL }; - uint32_t retval; - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } -static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, - u32 instance, u32 offset) -{ - switch (instance) { - case 0: - return (adev->reg_offset[SDMA0_HWIP][0][0] + offset); - case 1: - return (adev->reg_offset[SDMA1_HWIP][0][1] + offset); - case 2: - return (adev->reg_offset[SDMA2_HWIP][0][1] + offset); - case 3: - return (adev->reg_offset[SDMA3_HWIP][0][1] + offset); - case 4: - return (adev->reg_offset[SDMA4_HWIP][0][1] + offset); - case 5: - return (adev->reg_offset[SDMA5_HWIP][0][1] + offset); - case 6: - return (adev->reg_offset[SDMA6_HWIP][0][1] + offset); - case 7: - return (adev->reg_offset[SDMA7_HWIP][0][1] + offset); - default: - break; - } - return 0; -} - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev, - m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -205,7 +172,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) @@ -215,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -235,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); 
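/* The get_sdma_rlc_reg_offset() helpers introduced above all follow one
 * formula: a per-engine base (the SOC15 offset of that engine's RLC0 block,
 * relative to the shared mmSDMA0_RLC0_* register names) plus a fixed
 * per-queue stride. Sketch with an illustrative base macro; the real values
 * come from SOC15_REG_OFFSET() and the generated register headers.
 */
#define RLC0_ENGINE_BASE(e)	(0x1000 + (e) * 0x800)	/* hypothetical */
#define RLC_QUEUE_STRIDE	(mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL)

static u32 sdma_rlc_reg_offset_sketch(unsigned int engine_id,
				      unsigned int queue_id)
{
	/* Adding this offset to any mmSDMA0_RLC0_* register name yields
	 * the matching register of RLC queue queue_id on engine_id.
	 */
	return RLC0_ENGINE_BASE(engine_id) + queue_id * RLC_QUEUE_STRIDE;
}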
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -255,40 +223,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -static const struct kfd2kgd_calls kfd2kgd = { +const struct kfd2kgd_calls arcturus_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, @@ -304,20 +274,11 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; - -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index d10f483f5e27..61cd707158e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -19,18 +19,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE 
SOFTWARE. */ -#undef pr_fmt -#define pr_fmt(fmt) "kfd2kgd: " fmt - -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" -#include "soc15_hw_ip.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" #include "navi10_enum.h" @@ -42,6 +33,7 @@ #include "v10_structs.h" #include "nv.h" #include "nvd.h" +#include "gfxhub_v2_0.h" enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -50,63 +42,6 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -#if 0 -static uint32_t get_watch_base_addr(struct amdgpu_device *adev); -#endif -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -139,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .get_tile_config = amdgpu_amdkfd_get_tile_config, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions() -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -250,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, ATC_VMID0_PASID_MAPPING__VALID_MASK; pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping); - /* - * need to do this twice, once for gfx and once for mmhub - * for ATC add 16 to VMID for mmhub, for IH different registers. - * ATC_VMID0..15 registers are separate from ATC_VMID16..31. 
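/* Several of these callbacks live in per-ASIC files only because
 * REG_GET_FIELD()/REG_SET_FIELD() expand against generation-specific
 * <REG>__<FIELD>_MASK and __SHIFT definitions. Their semantics, sketched
 * with explicit mask/shift parameters instead of the token pasting the
 * real amdgpu macros use:
 */
static inline u32 field_get(u32 val, u32 mask, u32 shift)
{
	return (val & mask) >> shift;
}

static inline u32 field_set(u32 val, u32 mask, u32 shift, u32 field_val)
{
	/* Clear the field, then merge in the new value. */
	return (val & ~mask) | ((field_val << shift) & mask);
}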
- */ pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, @@ -306,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[2] = { + uint32_t sdma_engine_reg_base[2] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, /* On gfx10, mmSDMA1_xxx registers are defined NOT based @@ -322,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL }; - uint32_t retval; - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } @@ -488,72 +387,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = m->sdma_engine_id ? 
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -563,28 +457,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - pr_debug("sdma dump engine id %d queue_id %d\n", 
engine_id, queue_id); - pr_debug("sdma base addr %x\n", sdma_base_addr); - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -618,14 +510,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -746,59 +638,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = 
RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) - + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) @@ -830,6 +715,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid; + uint16_t queried_pasid; + bool ret; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; if (amdgpu_emu_mode == 0 && ring->sched.ready) @@ -838,13 +725,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid) - == pasid) { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); - break; - } + + ret = get_atc_vmid_pasid_mapping_info(kgd, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + amdgpu_gmc_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, 0); + break; } } @@ -914,7 +801,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint64_t base = page_table_base | AMDGPU_PTE_VALID; if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", @@ -922,18 +808,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, return; } - /* TODO: take advantage of per-process address space size. For - * now, all processes share the same address space size, like - * on GFX8 and older. 
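/* The ATC_VMID<n>_PASID_MAPPING register packs the PASID field and a VALID
 * bit into one dword, which is why the old _valid/_pasid callback pair can
 * be merged into a single read as above. Decode sketch, assuming the value
 * was already fetched with one RREG32():
 */
static bool decode_pasid_mapping_sketch(u32 value, u16 *p_pasid)
{
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}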
- */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), - upper_32_bits(adev->vm_manager.max_pfn - 1)); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + /* SDMA is on gfxhub as well for Navi1* series */ + gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base); } + +const struct kfd2kgd_calls gfx_v10_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + get_atc_vmid_pasid_mapping_info, + .get_tile_config = amdgpu_amdkfd_get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .get_hive_id = amdgpu_amdkfd_get_hive_id, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 5f459bf5f622..6e6f0a99ec06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
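/* What gfxhub_v2_0_setup_vm_pt_regs() now encapsulates is visible in the
 * deleted lines above: per-VMID page-table start/end/base registers, each
 * indexed by vmid * 2 because the LO/HI 32-bit halves of every context are
 * adjacent. Condensed sketch with the long GCVM register names shortened:
 */
uint64_t base = page_table_base | AMDGPU_PTE_VALID;
uint64_t end = adev->vm_manager.max_pfn - 1;

WREG32(START_ADDR_LO32 + vmid * 2, 0);	/* address range starts at 0 */
WREG32(START_ADDR_HI32 + vmid * 2, 0);
WREG32(END_ADDR_LO32 + vmid * 2, lower_32_bits(end));
WREG32(END_ADDR_HI32 + vmid * 2, upper_32_bits(end));
WREG32(BASE_ADDR_LO32 + vmid * 2, lower_32_bits(base));
WREG32(BASE_ADDR_HI32 + vmid * 2, upper_32_bits(base));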
*/ -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" @@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, - uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); - -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); - -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); - -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); -static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m) +static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m) { uint32_t retval; retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET + m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", + m->sdma_engine_id, m->sdma_queue_id, retval); return retval; } @@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; unsigned long end_jiffies; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t data; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - if (m->sdma_engine_id) { - data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); - } else { - data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); - } data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdma_rlc_rb_rptr); if (read_user_wptr(mm, wptr, data)) - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data); else - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdma_rlc_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR, m->sdma_rlc_virtual_addr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdma_rlc_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdma_rlc_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdma_rlc_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + 
mmSDMA0_RLC0_RB_RPTR); return 0; } @@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static void set_scratch_backing_va(struct kgd_dev *kgd, @@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd) return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); } + +const struct kfd2kgd_calls gfx_v7_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 6d2f61449606..bfbddedb2380 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -20,9 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
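/* Every SDMA load/destroy path above uses the same bounded-poll idiom:
 * disable the ring buffer, then poll CONTEXT_STATUS for the IDLE bit until
 * a jiffies deadline. Extracted as a standalone sketch with a hypothetical
 * helper name:
 */
static int sdma_rlc_wait_idle(struct amdgpu_device *adev,
			      u32 sdma_rlc_reg_offset,
			      unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
	u32 status;

	while (true) {
		status = RREG32(sdma_rlc_reg_offset +
				mmSDMA0_RLC0_CONTEXT_STATUS);
		if (status & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			return 0;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		/* Sleep 0.5-1 ms between polls instead of busy-waiting. */
		usleep_range(500, 1000);
	}
}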
*/ -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" @@ -44,62 +41,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m) +static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m) { uint32_t retval; retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET + m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET; - pr_debug("sdma base address: 0x%x\n", retval); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", + m->sdma_engine_id, m->sdma_queue_id, retval); return retval; } @@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; unsigned long end_jiffies; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t data; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - if (m->sdma_engine_id) { - data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); - } else { - data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); - } data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 
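/* Note that the hqd_sdma_load paths also drop the old RESUME_CTX
 * programming: before enabling the doorbell, each generation used to clear
 * the RESUME_CTX field of its engine's GFX_CONTEXT_CNTL register. The
 * deleted sequence, reconstructed from the SOC15 hunks above (the CIK/VI
 * variants used the mmSDMA{0,1}_GFX_CONTEXT_CNTL names directly):
 */
data = RREG32(sdmax_gfx_context_cntl);
data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, RESUME_CTX, 0);
WREG32(sdmax_gfx_context_cntl, data);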
data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); if (read_user_wptr(mm, wptr, data)) - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data); else - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR, m->sdmax_rlcx_virtual_addr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t 
vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} + value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int kgd_address_watch_disable(struct kgd_dev *kgd) @@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) RREG32(mmVM_INVALIDATE_RESPONSE); return 0; } + +const struct kfd2kgd_calls gfx_v8_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + get_atc_vmid_pasid_mapping_info, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e262f2ac07a3..47c853ef1051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -19,17 +19,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ - -#define pr_fmt(fmt) "kfd2kgd: " fmt - -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "soc15_hw_ip.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" #include "vega10_enum.h" @@ -50,9 +43,6 @@ #include "gmc_v9_0.h" -#define V9_PIPE_PER_MEC (4) -#define V9_QUEUES_PER_PIPE_MEC (8) - enum hqd_dequeue_request_type { NO_ACTION = 0, DRAIN_PIPE, @@ -226,22 +216,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[2] = { + uint32_t sdma_engine_reg_base[2] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL }; - uint32_t retval; + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); - - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } @@ -388,71 +377,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = m->sdma_engine_id ? 
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -462,7 +447,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) @@ -472,15 +458,15 @@ static int kgd_hqd_sdma_dump(struct 
kgd_dev *kgd, return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -514,14 +500,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -584,59 +570,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); - return 
reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) - + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid, @@ -671,6 +650,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid, i; + uint16_t queried_pasid; + bool ret; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; uint32_t flush_type = 0; @@ -686,14 +667,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid) - == pasid) { - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - i, flush_type); - break; - } + + ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + for (i = 0; i < adev->num_vmhubs; i++) + amdgpu_gmc_flush_gpu_tlb(adev, vmid, + i, flush_type); + break; } } @@ -777,15 +758,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, return 0; } -void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid) -{ - /* No longer needed on GFXv9. The scratch base address is - * passed to the shader by the CP. It's the user mode driver's - * responsibility. 
- */ -} - void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { @@ -811,7 +783,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } -static const struct kfd2kgd_calls kfd2kgd = { +const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, @@ -827,19 +799,11 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 26d8879bff9d..d9e9ad22b2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid); void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6d021ecc8d59..ae6f5446262c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -19,9 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ - -#define pr_fmt(fmt) "kfd2kgd: " fmt - #include <linux/dma-buf.h> #include <linux/list.h> #include <linux/pagemap.h> @@ -33,11 +30,6 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" -/* Special VM and GART address alignment needed for VI pre-Fiji due to - * a HW bug. 
- */ -#define VI_BO_SIZE_ALIGN (0x8000) - /* BO flag to indicate a KFD userptr BO */ #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) @@ -349,13 +341,46 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; - ret = amdgpu_vm_update_directories(adev, vm); + ret = amdgpu_vm_update_pdes(adev, vm, false); if (ret) return ret; return amdgpu_sync_fence(NULL, sync, vm->last_update, false); } +static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) +{ + struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); + bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT; + uint32_t mapping_flags; + + mapping_flags = AMDGPU_VM_PAGE_READABLE; + if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE) + mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; + if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE) + mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) { + if (bo_adev == adev) + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; + else + mapping_flags |= AMDGPU_VM_MTYPE_UC; + } else { + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + break; + default: + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + + return amdgpu_gem_va_map_flags(adev, mapping_flags); +} + /* add_bo_to_vm - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added @@ -404,8 +429,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, } bo_va_entry->va = va; - bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev, - mem->mapping_flags); + bo_va_entry->pte_flags = get_pte_flags(adev, mem); bo_va_entry->kgd_dev = (void *)adev; list_add(&bo_va_entry->bo_list, list_bo_va); @@ -586,7 +610,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates, true); + false, &ctx->duplicates); if (!ret) ctx->reserved = true; else { @@ -659,7 +683,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, } ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates, true); + false, &ctx->duplicates); if (!ret) ctx->reserved = true; else @@ -1079,10 +1103,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( uint64_t user_addr = 0; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; - int byte_align; u32 domain, alloc_domain; u64 alloc_flags; - uint32_t mapping_flags; int ret; /* @@ -1135,25 +1157,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if ((*mem)->aql_queue) size = size >> 1; - /* Workaround for TLB bug on older VI chips */ - byte_align = (adev->family == AMDGPU_FAMILY_VI && - adev->asic_type != CHIP_FIJI && - adev->asic_type != CHIP_POLARIS10 && - adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12 && - adev->asic_type != CHIP_VEGAM) ? 
- VI_BO_SIZE_ALIGN : 1; - - mapping_flags = AMDGPU_VM_PAGE_READABLE; - if (flags & ALLOC_MEM_FLAGS_WRITABLE) - mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; - if (flags & ALLOC_MEM_FLAGS_EXECUTABLE) - mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; - if (flags & ALLOC_MEM_FLAGS_COHERENT) - mapping_flags |= AMDGPU_VM_MTYPE_UC; - else - mapping_flags |= AMDGPU_VM_MTYPE_NC; - (*mem)->mapping_flags = mapping_flags; + (*mem)->alloc_flags = flags; amdgpu_sync_create(&(*mem)->sync); @@ -1168,7 +1172,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( memset(&bp, 0, sizeof(bp)); bp.size = size; - bp.byte_align = byte_align; + bp.byte_align = 1; bp.domain = alloc_domain; bp.flags = alloc_flags; bp.type = bo_type; @@ -1626,9 +1630,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); - (*mem)->mapping_flags = - AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | - AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; + (*mem)->alloc_flags = + ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? + ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) | + ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE; (*mem)->bo = amdgpu_bo_ref(bo); (*mem)->va = va; @@ -1797,8 +1802,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) } /* Reserve all BOs and page tables for validation */ - ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates, - true); + ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); WARN(!list_empty(&duplicates), "Duplicates should be empty"); if (ret) goto out_free; @@ -1996,7 +2000,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) } ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, - false, &duplicate_save, true); + false, &duplicate_save); if (ret) { pr_debug("Memory eviction: TTM Reserve Failed. Try again\n"); goto ttm_reserve_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1c9d40f97a9b..72232fccf61a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) if (adev->is_atom_fw) { amdgpu_atomfirmware_scratch_regs_init(adev); amdgpu_atomfirmware_allocate_fb_scratch(adev); + ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev); + if (ret) { + DRM_ERROR("Failed to get mem train fb location.\n"); + return ret; + } } else { amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index daf687428cdb..ff4eb96bdfb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -27,6 +27,7 @@ #include "amdgpu_atomfirmware.h" #include "atom.h" #include "atombios.h" +#include "soc15_hw_ip.h" bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) { @@ -120,65 +121,14 @@ union vram_info { struct atom_vram_info_header_v2_3 v23; struct atom_vram_info_header_v2_4 v24; }; -/* - * Return vram width from integrated system info table, if available, - * or 0 if not. 
- */ -int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; - u16 data_offset, size; - union igp_info *igp_info; - union vram_info *vram_info; - u32 mem_channel_number; - u32 mem_channel_width; - u8 frev, crev; - - if (adev->flags & AMD_IS_APU) - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - integratedsysteminfo); - else - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - vram_info); - /* get any igp specific overrides */ - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, - &frev, &crev, &data_offset)) { - if (adev->flags & AMD_IS_APU) { - igp_info = (union igp_info *) - (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 11: - mem_channel_number = igp_info->v11.umachannelnumber; - /* channel width is 64 */ - return mem_channel_number * 64; - default: - return 0; - } - } else { - vram_info = (union vram_info *) - (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 3: - mem_channel_number = vram_info->v23.vram_module[0].channel_num; - mem_channel_width = vram_info->v23.vram_module[0].channel_width; - return mem_channel_number * (1 << mem_channel_width); - case 4: - mem_channel_number = vram_info->v24.vram_module[0].channel_num; - mem_channel_width = vram_info->v24.vram_module[0].channel_width; - return mem_channel_number * (1 << mem_channel_width); - default: - return 0; - } - } - } - - return 0; -} +union vram_module { + struct atom_vram_module_v9 v9; + struct atom_vram_module_v10 v10; +}; -static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, - int atom_mem_type) +static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, + int atom_mem_type) { int vram_type; @@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, return vram_type; } -/* - * Return vram type from either integrated system info table - * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not - */ -int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) + + +int +amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, + int *vram_width, int *vram_type, + int *vram_vendor) { struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; + int index, i = 0; u16 data_offset, size; union igp_info *igp_info; union vram_info *vram_info; + union vram_module *vram_module; u8 frev, crev; u8 mem_type; + u8 mem_vendor; + u32 mem_channel_number; + u32 mem_channel_width; + u32 module_id; if (adev->flags & AMD_IS_APU) index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, @@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) else index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { @@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset); switch (crev) { case 11: + mem_channel_number = igp_info->v11.umachannelnumber; + /* channel width is 64 */ + if (vram_width) + *vram_width = mem_channel_number * 64; mem_type = igp_info->v11.memorytype; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + break; default: - return 0; + return -EINVAL; } } else { vram_info = (union vram_info *) 
(mode_info->atom_context->bios + data_offset); + module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; switch (crev) { case 3: - mem_type = vram_info->v23.vram_module[0].memory_type; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (module_id > vram_info->v23.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v23.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v9.vram_module_size); + i++; + } + mem_type = vram_module->v9.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v9.channel_num; + mem_channel_width = vram_module->v9.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; case 4: - mem_type = vram_info->v24.vram_module[0].memory_type; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (module_id > vram_info->v24.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v24.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v10.vram_module_size); + i++; + } + mem_type = vram_module->v10.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v10.channel_num; + mem_channel_width = vram_module->v10.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; default: - return 0; + return -EINVAL; } } + } return 0; @@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) } return -EINVAL; } + +/* + * Check if VBIOS supports GDDR6 training data save/restore + */ +static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev) +{ + uint16_t data_offset; + int index; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, + NULL, NULL, &data_offset)) { + struct atom_firmware_info_v3_1 *firmware_info = + (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + + data_offset); + + DRM_DEBUG("atom firmware capability:0x%08x.\n", + le32_to_cpu(firmware_info->firmware_capability)); + + if (le32_to_cpu(firmware_info->firmware_capability) & + ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) + return true; + } + + return false; +} + +static int gddr6_mem_train_support(struct amdgpu_device *adev) +{ + int ret; + uint32_t major, minor, revision, hw_v; + + if (gddr6_mem_train_vbios_support(adev)) { + amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision); + hw_v = HW_REV(major, minor, revision); + /* + * Treat revision 0 as a special case: the MP0 and MMHUB registers are + * missing on some Navi10 A0 parts, which keeps the driver from discovering + * the hwip information since none of the functions get initialized; it + * should not cause any problems. + */ + switch (hw_v) { + case HW_REV(11, 0, 0): + case HW_REV(11, 0, 5): + ret = 1; + break; + default: + DRM_ERROR("memory training is supported by the vbios but not by psp hw(%08x)!\n", hw_v); + ret = -1; + break; + } + } else { + ret = 0; + hw_v = -1; + } + + + DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret); + return ret; +}
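
A standalone sketch of the offset rounding done by amdgpu_atomfirmware_get_mem_train_fb_loc() below: the raw byte offset is derived from start_address_in_kb, stepped back one full MiB whenever it sits within the first 4 KiB above a 1 MiB boundary, and then aligned down. This is an editorial illustration only, assuming ONE_KiB and ONE_MiB carry their usual values of 1 << 10 and 1 << 20 and that the reserved block starts at or above 1 MiB:

#include <stdint.h>
#include <stdio.h>

#define ONE_KiB (1ULL << 10)
#define ONE_MiB (1ULL << 20)

/* Mirrors the rounding in the kernel helper that follows. */
static uint64_t mem_train_offset(uint32_t start_address_in_kb)
{
	uint64_t offset = (uint64_t)start_address_in_kb * ONE_KiB;

	/* within 4 KiB above a 1 MiB boundary: back off one MiB first */
	if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))
		offset -= ONE_MiB;

	return offset & ~(ONE_MiB - 1);
}

int main(void)
{
	/* 0x300000 KiB = 0xC0000000 bytes, exactly on a 1 MiB boundary,
	 * so the result steps back to 0xBFF00000. */
	printf("0x%llx\n", (unsigned long long)mem_train_offset(0x300000));
	return 0;
}
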
+ +int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) +{ + struct atom_context *ctx = adev->mode_info.atom_context; + unsigned char *bios = ctx->bios; + struct vram_reserve_block *reserved_block; + int index, block_number; + uint8_t frev, crev; + uint16_t data_offset, size; + uint32_t start_address_in_kb; + uint64_t offset; + int ret; + + adev->fw_vram_usage.mem_train_support = false; + + if (adev->asic_type != CHIP_NAVI10 && + adev->asic_type != CHIP_NAVI14) + return 0; + + if (amdgpu_sriov_vf(adev)) + return 0; + + ret = gddr6_mem_train_support(adev); + if (ret == -1) + return -EINVAL; + else if (ret == 0) + return 0; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + vram_usagebyfirmware); + ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, + &data_offset); + if (ret == 0) { + DRM_ERROR("parse data header failed.\n"); + return -EINVAL; + } + + DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x," " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset); + /* only support 2.1+ */ + if (((uint16_t)frev << 8 | crev) < 0x0201) { + DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev); + return -EINVAL; + } + + reserved_block = (struct vram_reserve_block *) + (bios + data_offset + sizeof(struct atom_common_table_header)); + block_number = ((unsigned int)size - sizeof(struct atom_common_table_header)) + / sizeof(struct vram_reserve_block); + reserved_block += (block_number > 0) ? block_number-1 : 0; + DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n", + block_number, + le32_to_cpu(reserved_block->start_address_in_kb), + le16_to_cpu(reserved_block->used_by_firmware_in_kb), + le16_to_cpu(reserved_block->used_by_driver_in_kb)); + if (reserved_block->used_by_firmware_in_kb > 0) { + start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb); + offset = (uint64_t)start_address_in_kb * ONE_KiB; + if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1)) { + offset -= ONE_MiB; + } + + offset &= ~(ONE_MiB - 1); + adev->fw_vram_usage.mem_train_fb_loc = offset; + adev->fw_vram_usage.mem_train_support = true; + DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset); + ret = 0; + } else { + DRM_ERROR("used_by_firmware_in_kb is 0!\n"); + ret = -EINVAL; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 5ec6f92f353c..f871af5ea6f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -29,8 +29,9 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); -int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); -int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, + int *vram_width, int *vram_type, int *vram_vendor); +int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 
3e35a8f2c5e5..a97fb759e2f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void) bool d3_supported = false; struct pci_dev *parent_pdev; - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - vga_count++; - - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); - - parent_pdev = pci_upstream_bridge(pdev); - d3_supported |= parent_pdev && parent_pdev->bridge_d3; - amdgpu_atpx_get_quirks(pdev); - } - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 649e68c4479b..d1495e1c9289 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, { unsigned long start_jiffies; unsigned long end_jiffies; - struct dma_fence *fence = NULL; + struct dma_fence *fence; int i, r; start_jiffies = jiffies; @@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, if (r) goto exit_do_move; r = dma_fence_wait(fence, false); + dma_fence_put(fence); if (r) goto exit_do_move; - dma_fence_put(fence); } end_jiffies = jiffies; r = jiffies_to_msecs(end_jiffies - start_jiffies); exit_do_move: - if (fence) - dma_fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 7bcf86c61999..85b0515c0fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, return 0; error_free: - while (i--) { + for (i = 0; i < last_entry; ++i) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); + + amdgpu_bo_unref(&bo); + } + for (i = first_userptr; i < num_entries; ++i) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); amdgpu_bo_unref(&bo); @@ -270,7 +275,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, r = amdgpu_bo_create_list_entry_array(&args->in, &info); if (r) - goto error_free; + return r; switch (args->in.operation) { case AMDGPU_BO_LIST_OP_CREATE: @@ -283,8 +288,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL); mutex_unlock(&fpriv->bo_list_lock); if (r < 0) { - amdgpu_bo_list_put(list); - return r; + goto error_put_list; } handle = r; @@ -306,9 +310,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, mutex_unlock(&fpriv->bo_list_lock); if (IS_ERR(old)) { - amdgpu_bo_list_put(list); r = PTR_ERR(old); - goto error_free; + goto error_put_list; } amdgpu_bo_list_put(old); @@ -325,8 +328,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, return 0; +error_put_list: + amdgpu_bo_list_put(list); + error_free: - if (info) - kvfree(info); + kvfree(info); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index d8729285f731..a62cbc8199de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1019,8 +1019,12 @@ 
amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) */ if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_connector *list_connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *list_amdgpu_connector; - list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(list_connector, + &iter) { if (connector == list_connector) continue; list_amdgpu_connector = to_amdgpu_connector(list_connector); @@ -1037,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) } } } + drm_connector_list_iter_end(&iter); } } } @@ -1494,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *amdgpu_dig_connector; struct drm_encoder *encoder; @@ -1508,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev, return; /* see if we already added it */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->connector_id == connector_id) { amdgpu_connector->devices |= supported_device; + drm_connector_list_iter_end(&iter); return; } if (amdgpu_connector->ddc_bus && i2c_bus->valid) { @@ -1526,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, } } } + drm_connector_list_iter_end(&iter); /* check if it's a dp bridge */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2e53feed40e2..a169ff16277f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -35,6 +35,7 @@ #include "amdgpu_trace.h" #include "amdgpu_gmc.h" #include "amdgpu_gem.h" +#include "amdgpu_ras.h" static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, struct drm_amdgpu_cs_chunk_fence *data, @@ -449,75 +450,12 @@ retry: return r; } -/* Last resort, try to evict something from the current working set */ -static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, - struct amdgpu_bo *validated) -{ - uint32_t domain = validated->allowed_domains; - struct ttm_operation_ctx ctx = { true, false }; - int r; - - if (!p->evictable) - return false; - - for (;&p->evictable->tv.head != &p->validated; - p->evictable = list_prev_entry(p->evictable, tv.head)) { - - struct amdgpu_bo_list_entry *candidate = p->evictable; - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - bool update_bytes_moved_vis; - uint32_t other; - - /* If we reached our current BO we can forget it */ - if (bo == validated) - break; - - /* We can't move pinned BOs here */ - if (bo->pin_count) - continue; - - other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - - /* Check if this BO is in one of the domains we need space for */ - if (!(other & domain)) - continue; - - /* Check if we can move this BO somewhere else */ - other = bo->allowed_domains & ~domain; - if (!other) - continue; - - /* Good we can try to move this BO somewhere else */ - update_bytes_moved_vis = - !amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo); - 
amdgpu_bo_placement_from_domain(bo, other); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - p->bytes_moved += ctx.bytes_moved; - if (update_bytes_moved_vis) - p->bytes_moved_vis += ctx.bytes_moved; - - if (unlikely(r)) - break; - - p->evictable = list_prev_entry(p->evictable, tv.head); - list_move(&candidate->tv.head, &p->validated); - - return true; - } - - return false; -} - static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) { struct amdgpu_cs_parser *p = param; int r; - do { - r = amdgpu_cs_bo_validate(p, bo); - } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo)); + r = amdgpu_cs_bo_validate(p, bo); if (r) return r; @@ -536,7 +474,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, list_for_each_entry(lobj, validated, tv.head) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); - bool binding_userptr = false; struct mm_struct *usermm; usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); @@ -553,20 +490,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, lobj->user_pages); - binding_userptr = true; } - if (p->evictable == lobj) - p->evictable = NULL; - r = amdgpu_cs_validate(p, bo); if (r) return r; - if (binding_userptr) { - kvfree(lobj->user_pages); - lobj->user_pages = NULL; - } + kvfree(lobj->user_pages); + lobj->user_pages = NULL; } return 0; } @@ -650,7 +581,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, - &duplicates, false); + &duplicates); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); @@ -661,9 +592,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, &p->bytes_moved_vis_threshold); p->bytes_moved = 0; p->bytes_moved_vis = 0; - p->evictable = list_last_entry(&p->validated, - struct amdgpu_bo_list_entry, - tv.head); r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, amdgpu_cs_validate, p); @@ -915,7 +843,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_update_directories(adev, vm); + r = amdgpu_vm_update_pdes(adev, vm, false); if (r) return r; @@ -1359,6 +1287,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) bool reserved_buffers = false; int i, r; + if (amdgpu_ras_intr_triggered()) + return -EHWPOISON; + if (!adev->accel_working) return -EBUSY; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 35a8d3c96fc9..08047bc4d588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, list_add(&csa_tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) { DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6614d8a6f4c8..2cdaf3b2a721 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) continue; } - for (i = 0; i < num_entities; i++) + for (i = 0; i < num_entities; i++) { + mutex_lock(&ctx->adev->lock_reset); drm_sched_entity_fini(&ctx->entities[0][i].entity); + mutex_unlock(&ctx->adev->lock_reset); + } } } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 5652cc72ed3a..8e6726e0d035 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; int r = 0, i; + /* Avoid accidentally unparking the sched thread during GPU reset */ + mutex_lock(&adev->lock_reset); + /* hold on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) kthread_unpark(ring->sched.thread); } + mutex_unlock(&adev->lock_reset); + return 0; } @@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) if (!fences) return -ENOMEM; + /* Avoid accidentally unparking the sched thread during GPU reset */ + mutex_lock(&adev->lock_reset); + /* stop the scheduler */ kthread_park(ring->sched.thread); @@ -1075,10 +1083,11 @@ failure: /* restart the scheduler */ kthread_unpark(ring->sched.thread); + mutex_unlock(&adev->lock_reset); + ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (fences) - kfree(fences); + kfree(fences); return 0; } @@ -1090,8 +1099,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) { adev->debugfs_preempt = debugfs_create_file("amdgpu_preempt_ib", 0600, - adev->ddev->primary->debugfs_root, - (void *)adev, &fops_ib_preempt); + adev->ddev->primary->debugfs_root, adev, + &fops_ib_preempt); if (!(adev->debugfs_preempt)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n"); return -EIO; @@ -1103,8 +1112,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { - if (adev->debugfs_preempt) - debugfs_remove(adev->debugfs_preempt); + debugfs_remove(adev->debugfs_preempt); } #else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5a1939dbd4e3..58f6b3b92831 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -65,6 +65,8 @@ #include "amdgpu_ras.h" #include "amdgpu_pmu.h" +#include <linux/suspend.h> + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 -static const char *amdgpu_asic_name[] = { +const char *amdgpu_asic_name[] = { "TAHITI", "PITCAIRN", "VERDE", @@ -151,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev) return false; } +/** + * VRAM access helper functions. + * + * amdgpu_device_vram_access - read/write a buffer in vram + * + * @adev: amdgpu_device pointer + * @pos: offset of the buffer in vram + * @buf: virtual address of the buffer in system memory + * @size: read/write size; the buffer at @buf must be at least @size bytes + * @write: true - write to vram, otherwise - read from vram + */ +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write) +{ + uint64_t last; + unsigned long flags; + + last = size - 4; + for (last += pos; pos <= last; pos += 4) { + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); + if (write) + WREG32_NO_KIQ(mmMM_DATA, *buf++); + else + *buf++ = RREG32_NO_KIQ(mmMM_DATA); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } +}
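
amdgpu_device_vram_access() above moves one dword per iteration through the shared MM_INDEX/MM_INDEX_HI/MM_DATA window, holding mmio_idx_lock around each index+data pair because the window is a single shared resource; amdgpu_discovery_read_binary() is converted to use it later in this patch. A userspace sketch of the same indexed-window pattern against a mocked aperture (illustration only; the vram array and mm_index variable here are stand-ins, not the real MMIO interface):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t vram[64 * 1024];	/* mock 64 KiB aperture */
static uint64_t mm_index;	/* mock MM_INDEX_HI:MM_INDEX pair */

static void set_index(uint32_t lo, uint32_t hi)
{
	/* bit 31 of MM_INDEX selects the high aperture; strip it here */
	mm_index = ((uint64_t)hi << 31) | (lo & 0x7fffffff);
}

/* One dword moves through the window per index write, as in the helper. */
static void vram_access(uint64_t pos, uint32_t *buf, size_t size, int write)
{
	uint64_t last = pos + size - 4;

	for (; pos <= last; pos += 4) {
		set_index((uint32_t)pos | 0x80000000, pos >> 31);
		if (write)
			memcpy(&vram[mm_index], buf++, 4);
		else
			memcpy(buf++, &vram[mm_index], 4);
	}
}

int main(void)
{
	uint32_t out[2] = { 0xdeadbeef, 0x12345678 }, in[2];

	vram_access(0x100, out, sizeof(out), 1);
	vram_access(0x100, in, sizeof(in), 0);
	printf("0x%08x 0x%08x\n", in[0], in[1]);
	return 0;
}
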
/* * MMIO register access helper functions. */ @@ -1023,12 +1055,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) amdgpu_device_check_block_size(adev); - ret = amdgpu_device_get_job_timeout_settings(adev); - if (ret) { - dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); - return ret; - } - adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); return ret; @@ -1469,6 +1495,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + goto parse_soc_bounding_box; + adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); @@ -1496,7 +1525,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->gfx.config.num_packer_per_sc = le32_to_cpu(gpu_info_fw->num_packer_per_sc); } + +parse_soc_bounding_box: #ifdef CONFIG_DRM_AMD_DC_DCN2_0 + /* + * soc bounding box info is not integrated into the discovery table, + * so we always need to parse it from the gpu info firmware. + */ if (hdr->version_minor == 2) { const struct gpu_info_firmware_v1_2 *gpu_info_fw = (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + @@ -1613,6 +1648,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (r) return r; + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + amdgpu_discovery_get_gfx_info(adev); + amdgpu_amdkfd_device_probe(adev); if (amdgpu_sriov_vf(adev)) { @@ -1622,7 +1660,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } adev->pm.pp_feature = amdgpu_pp_feature_mask; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { @@ -1839,6 +1877,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) goto init_failed; + /* + * retired pages will be loaded from eeprom and reserved here; + * this should be called after amdgpu_device_ip_hw_init_phase2, since + * for some ASICs the RAS EEPROM code relies on the SMU being fully + * functional for I2C communication, which is only true at this point. + * recovery_init may fail, but it can free all resources allocated by + * itself and its failure should not stop the amdgpu init process. 
+ * + * Note: theoretically, this should be called before all vram allocations, + * to protect the retired pages from being used by anything else. + */ + amdgpu_ras_recovery_init(adev); + if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); @@ -2006,6 +2057,7 @@ out: */ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) { + struct amdgpu_gpu_instance *gpu_instance; int i = 0, r; for (i = 0; i < adev->num_ip_blocks; i++) { @@ -2031,8 +2083,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (r) DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); - /* set to low pstate by default */ - amdgpu_xgmi_set_pstate(adev, 0); + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + mutex_lock(&mgpu_info.mutex); + + /* + * Reset the device p-state to low, as it was booted with high. + * + * This should be performed only after all devices from the same + * hive have been initialized. + * + * However, the number of devices in the hive is not known in + * advance; it is counted one by one as the devices initialize. + * + * So we wait until all XGMI interlinked devices are initialized. + * This may add some delay, as those devices may come from + * different hives. But that should be OK. + */ + if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { + for (i = 0; i < mgpu_info.num_gpu; i++) { + gpu_instance = &(mgpu_info.gpu_ins[i]); + if (gpu_instance->adev->flags & AMD_IS_APU) + continue; + + r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0); + if (r) { + DRM_ERROR("pstate setting failed (%d).\n", r); + break; + } + } + } + + mutex_unlock(&mgpu_info.mutex); + } return 0; } @@ -2220,6 +2303,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) /* displays are handled in phase1 */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) continue; + /* PSP lost connection when err_event_athub occurs */ + if (amdgpu_ras_intr_triggered() && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { + adev->ip_blocks[i].status.hw = false; + continue; + } /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -2231,17 +2320,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) /* handle putting the SMC in the appropriate state */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { if (is_support_sw_smu(adev)) { - /* todo */ + r = smu_set_mp1_state(&adev->smu, adev->mp1_state); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_mp1_state) { r = adev->powerplay.pp_funcs->set_mp1_state( adev->powerplay.pp_handle, adev->mp1_state); - if (r) { - DRM_ERROR("SMC failed to set mp1 state %d, %d\n", - adev->mp1_state, r); - return r; - } + } + if (r) { + DRM_ERROR("SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); + return r; } } @@ -2556,6 +2645,73 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) adev->asic_reset_res, adev->ddev->unique); } +static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) +{ + char *input = amdgpu_lockup_timeout; + char *timeout_setting = NULL; + int index = 0; + long timeout; + int ret = 0; + + /* + * By default the timeout for non-compute jobs is 10000 ms, and no + * timeout is enforced on compute jobs. In SR-IOV or passthrough mode, + * the timeout for compute jobs is 10000 ms by default. + */ + adev->gfx_timeout = msecs_to_jiffies(10000); + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + else + adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; + + if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; + + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + } else { + timeout = msecs_to_jiffies(timeout); + } + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; + } + } + /* + * There is only one value specified and + * it should apply to all non-compute jobs. + */ + if (index == 1) { + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + } + } + + return ret; +}
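
The string parsed above is the amdgpu.lockup_timeout module parameter: up to four comma-separated millisecond values applied in the order gfx, compute, sdma, video, where 0 keeps the per-engine default, a negative value disables the timeout, and a single value is applied to all non-compute engines. A small userspace sketch of that position-to-engine mapping (an editorial illustration with made-up values, not part of the patch; strsep is the BSD/glibc extension whose kernel variant is used above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *engine[] = { "gfx", "compute", "sdma", "video" };
	long timeout[] = { 10000, -1, 10000, 10000 };	/* defaults; -1 = none */
	char input[] = "10000,60000,0,-1"; /* e.g. amdgpu.lockup_timeout=10000,60000,0,-1 */
	char *cur = input, *tok;
	int index = 0;

	while ((tok = strsep(&cur, ",")) && *tok && index < 4) {
		long v = strtol(tok, NULL, 0);

		if (v == 0) {		/* 0 keeps the default */
			index++;
			continue;
		}
		timeout[index++] = v < 0 ? -1 : v;	/* negative: no timeout */
	}
	/* the kernel additionally copies a single value to sdma and video */
	for (index = 0; index < 4; index++)
		printf("%s: %ld ms\n", engine[index], timeout[index]);
	return 0;
}
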
/** * amdgpu_device_init - initialize the driver @@ -2583,7 +2739,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ddev = ddev; adev->pdev = pdev; adev->flags = flags; - adev->asic_type = flags & AMD_ASIC_MASK; + + if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) + adev->asic_type = amdgpu_force_asic_type; + else + adev->asic_type = flags & AMD_ASIC_MASK; + adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; if (amdgpu_emu_mode == 1) adev->usec_timeout *= 2; @@ -2726,6 +2887,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + r = amdgpu_device_get_job_timeout_settings(adev); + if (r) { + dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); + return r; + } + /* doorbell bar mapping and doorbell index init */ amdgpu_device_doorbell_init(adev); @@ -2885,6 +3052,13 @@ fence_driver_init: DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); } + /* + * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost; + * otherwise the mgpu fan boost feature will be skipped because the + * gpu instance count is too low. + */ + amdgpu_register_gpu_instance(adev); + /* enable clockgating, etc. after ib tests, etc. since some blocks require * explicit gating rather than handling it automatically. 
*/ @@ -3007,6 +3181,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) struct amdgpu_device *adev; struct drm_crtc *crtc; struct drm_connector *connector; + struct drm_connector_list_iter iter; int r; if (dev == NULL || dev->dev_private == NULL) { @@ -3029,9 +3204,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) if (!amdgpu_device_has_dc_support(adev)) { /* turn off display hw */ drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_OFF); + drm_connector_list_iter_end(&iter); drm_modeset_unlock_all(dev); /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -3082,15 +3259,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - pci_save_state(dev->pdev); if (suspend) { + pci_save_state(dev->pdev); /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); - } else { - r = amdgpu_asic_reset(adev); - if (r) - DRM_ERROR("amdgpu asic reset failed\n"); } return 0; @@ -3110,6 +3283,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) { struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_device *adev = dev->dev_private; struct drm_crtc *crtc; int r = 0; @@ -3180,9 +3354,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) /* turn on display hw */ drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_ON); + drm_connector_list_iter_end(&iter); + drm_modeset_unlock_all(dev); } amdgpu_fbdev_set_suspend(adev, 0); @@ -3628,11 +3806,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, break; } } - - list_for_each_entry(tmp_adev, device_list_handle, - gmc.xgmi.head) { - amdgpu_ras_reserve_bad_pages(tmp_adev); - } } } @@ -3736,25 +3909,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) adev->mp1_state = PP_MP1_STATE_NONE; break; } - /* Block kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_pre_reset(adev); return true; } static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) { - /*unlock kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_post_reset(adev); amdgpu_vf_error_trans_all(adev); adev->mp1_state = PP_MP1_STATE_NONE; adev->in_gpu_reset = 0; mutex_unlock(&adev->lock_reset); } - /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * @@ -3774,11 +3940,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_hive_info *hive = NULL; struct amdgpu_device *tmp_adev = NULL; int i, r = 0; + bool in_ras_intr = amdgpu_ras_intr_triggered(); + + /* + * Flush RAM to disk so that after reboot + * the user can read log and see why the system rebooted. 
+ */ + if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) { + + DRM_WARN("Emergency reboot."); + + ksys_sync_helper(); + emergency_restart(); + } need_full_reset = job_signaled = false; INIT_LIST_HEAD(&device_list); - dev_info(adev->dev, "GPU reset begin!\n"); + dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop":"reset"); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -3805,9 +3984,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, return 0; } + /* Block kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_pre_reset(adev); + /* Build list of devices to reset */ if (adev->gmc.xgmi.num_physical_nodes > 1) { if (!hive) { + /*unlock kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_post_reset(adev); amdgpu_device_unlock_adev(adev); return -ENODEV; } @@ -3823,17 +4009,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } - /* - * Mark these ASICs to be reseted as untracked first - * And add them back after reset completed - */ - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) - amdgpu_unregister_gpu_instance(tmp_adev); - /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (tmp_adev != adev) { + amdgpu_device_lock_adev(tmp_adev, false); + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); + } + + /* + * Mark these ASICs to be reset as untracked first, + * and add them back after the reset completes. + */ + amdgpu_unregister_gpu_instance(tmp_adev); + /* disable ras on ALL IPs */ - if (amdgpu_device_ip_need_full_reset(tmp_adev)) + if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -3843,10 +4034,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, continue; drm_sched_stop(&ring->sched, job ? &job->base : NULL); + + if (in_ras_intr) + amdgpu_job_stop_all_jobs_on_sched(&ring->sched); } } + if (in_ras_intr) + goto skip_sched_resume; + /* * Must check guilty signal here since after this point all old * HW fences are force signaled. */ if (job && job->base.s_fence->parent && dma_fence_is_signaled(job->base.s_fence->parent)) job_signaled = true; - if (!amdgpu_device_ip_need_full_reset(adev)) - device_list_handle = &device_list; - if (job_signaled) { dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; } @@ -3881,7 +4075,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (tmp_adev == adev) continue; - amdgpu_device_lock_adev(tmp_adev, false); r = amdgpu_device_pre_asic_reset(tmp_adev, NULL, &need_full_reset); @@ -3909,6 +4102,7 @@ skip_hw_reset: /* Post ASIC reset for all devs. */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -3930,12 +4124,18 @@ skip_hw_reset: if (r) { /* bad news, how to tell it to userspace ? 
*/ - dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { - dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); } + } +skip_sched_resume: + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + /*unlock kfd: SRIOV would do it separately */ + if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_post_reset(tmp_adev); amdgpu_device_unlock_adev(tmp_adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 1481899f86c1..f95092741c38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = { static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) { - uint32_t *p = (uint32_t *)binary; uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; - uint64_t pos = vram_size - BINARY_MAX_SIZE; - unsigned long flags; - - while (pos < vram_size) { - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); - *p++ = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - pos += 4; - } + uint64_t pos = vram_size - DISCOVERY_TMR_SIZE; + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false); return 0; } @@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) uint16_t checksum; int r; - adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL); + adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); if (!adev->discovery) return -ENOMEM; @@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) } int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, - int *major, int *minor) + int *major, int *minor, int *revision) { struct binary_header *bhdr; struct ip_discovery_header *ihdr; @@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, *major = ip->major; if (minor) *minor = ip->minor; + if (revision) + *revision = ip->revision; return 0; } ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 85b8c4d4d576..ba78e15d9b05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,11 +24,13 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ +#define DISCOVERY_TMR_SIZE (64 << 10) + int amdgpu_discovery_init(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, - int *major, int *minor); + int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 1d4aaa9580f4..d2dd59a95e8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev) struct amdgpu_connector *amdgpu_connector; struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; + struct drm_connector_list_iter iter; uint32_t devices; int i = 0; + drm_connector_list_iter_begin(dev, &iter); DRM_INFO("AMDGPU Display Connectors\n"); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); DRM_INFO("Connector %d:\n", i); DRM_INFO(" %s\n", connector->name); @@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev) } i++; } + drm_connector_list_iter_end(&iter); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 61f108ec2b5c..e2eec7b66334 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -34,27 +34,12 @@ #include "amdgpu.h" #include "amdgpu_display.h" #include "amdgpu_gem.h" +#include "amdgpu_dma_buf.h" #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> #include <linux/dma-fence-array.h> /** - * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table - * implementation - * @obj: GEM buffer object (BO) - * - * Returns: - * A scatter/gather table for the pinned pages of the BO's memory. - */ -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int npages = bo->tbo.num_pages; - - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); -} - -/** * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation * @obj: GEM BO * @@ -179,92 +164,126 @@ err_fences_put: } /** - * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation - * @dma_buf: Shared DMA buffer + * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation + * + * @dmabuf: DMA-buf where we attach to + * @attach: attachment to add + * + * Add the attachment as user to the exported DMA-buf. + */ +static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int r; + + if (attach->dev->driver == adev->dev->driver) + return 0; + + r = amdgpu_bo_reserve(bo, false); + if (unlikely(r != 0)) + return r; + + /* + * We only create shared fences for internal use, but importers + * of the dmabuf rely on exclusive fences for implicitly + * tracking write hazards. As any of the current fences may + * correspond to a write, we need to convert all existing + * fences on the reservation object into a single exclusive + * fence. + */ + r = __dma_resv_make_exclusive(bo->tbo.base.resv); + if (r) + return r; + + bo->prime_shared_count++; + amdgpu_bo_unreserve(bo); + return 0; +} + +/** + * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation + * + * @dmabuf: DMA-buf where we remove the attachment from + * @attach: the attachment to remove + * + * Called when an attachment is removed from the DMA-buf. 
+ */ +static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + + if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) + bo->prime_shared_count--; +} + +/** + * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation * @attach: DMA-buf attachment + * @dir: DMA direction * * Makes sure that the shared DMA buffer can be accessed by the target device. * For now, simply pins it to the GTT domain, where it should be accessible by * all DMA devices. * * Returns: - * 0 on success or a negative error code on failure. + * sg_table filled with the DMA addresses to use or ERR_PTR with a negative error + * code. */ -static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) +static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, + enum dma_data_direction dir) { + struct dma_buf *dma_buf = attach->dmabuf; struct drm_gem_object *obj = dma_buf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct sg_table *sgt; long r; - r = drm_gem_map_attach(dma_buf, attach); - if (r) - return r; - - r = amdgpu_bo_reserve(bo, false); - if (unlikely(r != 0)) - goto error_detach; - - - if (attach->dev->driver != adev->dev->driver) { - /* - * We only create shared fences for internal use, but importers - * of the dmabuf rely on exclusive fences for implicitly - * tracking write hazards. As any of the current fences may - * correspond to a write, we need to convert all existing - * fences on the reservation object into a single exclusive - * fence. - */ - r = __dma_resv_make_exclusive(bo->tbo.base.resv); - if (r) - goto error_unreserve; - } - - /* pin buffer into GTT */ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); if (r) - goto error_unreserve; + return ERR_PTR(r); - if (attach->dev->driver != adev->dev->driver) - bo->prime_shared_count++; + sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages); + if (IS_ERR(sgt)) + return sgt; -error_unreserve: - amdgpu_bo_unreserve(bo); + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC)) + goto error_free; -error_detach: - if (r) - drm_gem_map_detach(dma_buf, attach); - return r; + return sgt; + +error_free: + sg_free_table(sgt); + kfree(sgt); + return ERR_PTR(-ENOMEM); } /** - * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation - * @dma_buf: Shared DMA buffer + * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation * @attach: DMA-buf attachment + * @sgt: sg_table to unmap + * @dir: DMA direction * * This is called when a shared DMA buffer no longer needs to be accessible by * another device. For now, simply unpins the buffer from GTT. 
*/ -static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) +static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) { - struct drm_gem_object *obj = dma_buf->priv; + struct drm_gem_object *obj = attach->dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - int ret = 0; - - ret = amdgpu_bo_reserve(bo, true); - if (unlikely(ret != 0)) - goto error; + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + sg_free_table(sgt); + kfree(sgt); amdgpu_bo_unpin(bo); - if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) - bo->prime_shared_count--; - amdgpu_bo_unreserve(bo); - -error: - drm_gem_map_detach(dma_buf, attach); } /** @@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, } const struct dma_buf_ops amdgpu_dmabuf_ops = { - .attach = amdgpu_dma_buf_map_attach, - .detach = amdgpu_dma_buf_map_detach, - .map_dma_buf = drm_gem_map_dma_buf, - .unmap_dma_buf = drm_gem_unmap_dma_buf, + .dynamic_mapping = true, + .attach = amdgpu_dma_buf_attach, + .detach = amdgpu_dma_buf_detach, + .map_dma_buf = amdgpu_dma_buf_map, + .unmap_dma_buf = amdgpu_dma_buf_unmap, .release = drm_gem_dmabuf_release, .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access, .mmap = drm_gem_dmabuf_mmap, @@ -321,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = { /** * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation - * @dev: DRM device * @gobj: GEM BO * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR. * @@ -350,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, } /** - * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table - * implementation + * amdgpu_dma_buf_create_obj - create BO for DMA-buf import + * * @dev: DRM device - * @attach: DMA-buf attachment - * @sg: Scatter/gather table + * @dma_buf: DMA-buf * - * Imports shared DMA buffer memory exported by another device. + * Creates an empty SG BO for DMA-buf import. * * Returns: * A new GEM BO of the given DRM device, representing the memory * described by the given DMA-buf attachment and scatter/gather table. */ -struct drm_gem_object * -amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg) +static struct drm_gem_object * +amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) { - struct dma_resv *resv = attach->dmabuf->resv; + struct dma_resv *resv = dma_buf->resv; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; int ret; memset(&bp, 0, sizeof(bp)); - bp.size = attach->dmabuf->size; + bp.size = dma_buf->size; bp.byte_align = PAGE_SIZE; bp.domain = AMDGPU_GEM_DOMAIN_CPU; bp.flags = 0; @@ -385,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, if (ret) goto error; - bo->tbo.sg = sg; - bo->tbo.ttm->sg = sg; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; - if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) + if (dma_buf->ops != &amdgpu_dmabuf_ops) bo->prime_shared_count = 1; dma_resv_unlock(resv); @@ -405,15 +419,15 @@ error: * @dev: DRM device * @dma_buf: Shared DMA buffer * - * The main work is done by the &drm_gem_prime_import helper, which in turn - * uses &amdgpu_gem_prime_import_sg_table. + * Import a dma_buf into the driver and potentially create a new GEM object. 
* * Returns: * GEM BO representing the shared DMA buffer for the given device. */ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) + struct dma_buf *dma_buf) { + struct dma_buf_attachment *attach; struct drm_gem_object *obj; if (dma_buf->ops == &amdgpu_dmabuf_ops) { @@ -428,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, } } - return drm_gem_prime_import(dev, dma_buf); + obj = amdgpu_dma_buf_create_obj(dev, dma_buf); + if (IS_ERR(obj)) + return obj; + + attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true); + if (IS_ERR(attach)) { + drm_gem_object_put(obj); + return ERR_CAST(attach); + } + + get_dma_buf(dma_buf); + obj->import_attach = attach; + return obj; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index 5012e6ab58f1..ec447a7b6b28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -25,11 +25,6 @@ #include <drm/drm_gem.h> -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object * -amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg); struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, int flags); struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 5803fcbae22f..9cc270efee7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) if (is_support_sw_smu(adev)) { ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, low ? &clk_freq : NULL, - !low ? &clk_freq : NULL); + !low ? &clk_freq : NULL, + true); if (ret) return 0; return clk_freq * 100; @@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) if (is_support_sw_smu(adev)) { ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, low ? &clk_freq : NULL, - !low ? &clk_freq : NULL); + !low ? &clk_freq : NULL, + true); if (ret) return 0; return clk_freq * 100; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 1c5c0fd76dbf..2cfb677272af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -298,12 +298,6 @@ enum amdgpu_pcie_gen { #define amdgpu_dpm_get_current_power_state(adev) \ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) -#define amdgpu_smu_get_current_power_state(adev) \ - ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu))) - -#define amdgpu_smu_set_power_state(adev) \ - ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu))) - #define amdgpu_dpm_get_pp_num_states(adev, data) \ ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index baf32484b820..557be89421a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -43,6 +43,8 @@ #include "amdgpu_amdkfd.h" +#include "amdgpu_ras.h" + /* * KMS wrapper. * - 3.0.0 - initial driver @@ -82,13 +84,12 @@ * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS. 
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask + * - 3.36.0 - Allow reading more status registers on si/cik */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 35 +#define KMS_DRIVER_MINOR 36 #define KMS_DRIVER_PATCHLEVEL 0 -#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256 - int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; int amdgpu_gart_size = -1; /* auto */ @@ -101,7 +102,7 @@ int amdgpu_disp_priority = 0; int amdgpu_hw_i2c = 0; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; -char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH]; +char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; int amdgpu_dpm = -1; int amdgpu_fw_load_type = -1; int amdgpu_aspm = -1; @@ -128,11 +129,7 @@ char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; /* OverDrive(bit 14) disabled by default*/ uint amdgpu_pp_feature_mask = 0xffffbfff; -int amdgpu_ngg = 0; -int amdgpu_prim_buf_per_se = 0; -int amdgpu_pos_buf_per_se = 0; -int amdgpu_cntl_sb_buf_per_se = 0; -int amdgpu_param_buf_per_se = 0; +uint amdgpu_force_long_training = 0; int amdgpu_job_hang_limit = 0; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; @@ -146,12 +143,13 @@ int amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; int amdgpu_noretry = 1; +int amdgpu_force_asic_type = -1; struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; -uint amdgpu_ras_mask = 0xfffffffb; +uint amdgpu_ras_mask = 0xffffffff; /** * DOC: vramlimit (int) @@ -244,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444); * * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or * multiple values specified. 0 and negative values are invalidated. They will be adjusted - * to default timeout. - * - With one value specified, the setting will apply to all non-compute jobs. - * - With multiple values specified, the first one will be for GFX. The second one is for Compute. - * And the third and fourth ones are for SDMA and Video. + * to the default timeout. + * + * - With one value specified, the setting will apply to all non-compute jobs. + * - With multiple values specified, the first one will be for GFX. + * The second one is for Compute. The third and fourth ones are + * for SDMA and Video. + * * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) * jobs is 10000. And there is no timeout enforced on compute jobs. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs." +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; " + "for passthrough or sriov, 10000 for all jobs." " 0: keep default value. negative: infinity timeout), " - "format is [Non-Compute] or [GFX,Compute,SDMA,Video]"); + "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); /** @@ -392,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444); /** + * DOC: forcelongtraining (uint) + * Force long memory training in resume. 
+ * The default is zero, which indicates short training in resume. + */ +MODULE_PARM_DESC(forcelongtraining, "force long memory training"); +module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444); + +/** * DOC: pcie_gen_cap (uint) * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h. * The default is 0 (automatic for each asic). @@ -449,42 +460,6 @@ MODULE_PARM_DESC(virtual_display, module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); /** - * DOC: ngg (int) - * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled). - */ -MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))"); -module_param_named(ngg, amdgpu_ngg, int, 0444); - -/** - * DOC: prim_buf_per_se (int) - * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)"); -module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444); - -/** - * DOC: pos_buf_per_se (int) - * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)"); -module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444); - -/** - * DOC: cntl_sb_buf_per_se (int) - * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)"); -module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444); - -/** - * DOC: param_buf_per_se (int) - * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte. - * The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)"); -module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444); - -/** * DOC: job_hang_limit (int) * Set how much time allow a job hang and not drop it. The default is 0. */ @@ -616,6 +591,16 @@ MODULE_PARM_DESC(noretry, "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); module_param_named(noretry, amdgpu_noretry, int, 0644); +/** + * DOC: force_asic_type (int) + * A non-negative value used to specify the asic type for all supported GPUs. 
+ */ +MODULE_PARM_DESC(force_asic_type, + "A non negative value used to specify the asic type for all supported GPUs"); +module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444); + + + #ifdef CONFIG_HSA_AMD /** * DOC: sched_policy (int) @@ -1016,12 +1001,14 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, /* Renoir */ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, /* Navi12 */ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, {0, 0, 0} }; @@ -1048,6 +1035,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } +#ifdef CONFIG_DRM_AMDGPU_SI + if (!amdgpu_si_support) { + switch (flags & AMD_ASIC_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + dev_info(&pdev->dev, + "SI support provided by radeon.\n"); + dev_info(&pdev->dev, + "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" + ); + return -ENODEV; + } + } +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + if (!amdgpu_cik_support) { + switch (flags & AMD_ASIC_MASK) { + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: + dev_info(&pdev->dev, + "CIK support provided by radeon.\n"); + dev_info(&pdev->dev, + "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" + ); + return -ENODEV; + } + } +#endif + /* Get rid of things like offb */ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); if (ret) @@ -1092,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - DRM_ERROR("Device removal is currently not supported outside of fbcon\n"); +#ifdef MODULE + if (THIS_MODULE->state != MODULE_STATE_GOING) +#endif + DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); drm_dev_put(dev); pci_disable_device(pdev); @@ -1105,6 +1130,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = dev->dev_private; + if (amdgpu_ras_intr_triggered()) + return; + /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown. * unfortunately we can't detect certain @@ -1139,8 +1167,13 @@ static int amdgpu_pmops_resume(struct device *dev) static int amdgpu_pmops_freeze(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_dev->dev_private; + int r; - return amdgpu_device_suspend(drm_dev, false, true); + r = amdgpu_device_suspend(drm_dev, false, true); + if (r) + return r; + return amdgpu_asic_reset(adev); } static int amdgpu_pmops_thaw(struct device *dev) @@ -1312,66 +1345,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) -{ - char *input = amdgpu_lockup_timeout; - char *timeout_setting = NULL; - int index = 0; - long timeout; - int ret = 0; - - /* - * By default timeout for non compute jobs is 10000. - * And there is no timeout enforced on compute jobs. 
- */ - adev->gfx_timeout = msecs_to_jiffies(10000); - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; - - if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { - while ((timeout_setting = strsep(&input, ",")) && - strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { - ret = kstrtol(timeout_setting, 0, &timeout); - if (ret) - return ret; - - if (timeout == 0) { - index++; - continue; - } else if (timeout < 0) { - timeout = MAX_SCHEDULE_TIMEOUT; - } else { - timeout = msecs_to_jiffies(timeout); - } - - switch (index++) { - case 0: - adev->gfx_timeout = timeout; - break; - case 1: - adev->compute_timeout = timeout; - break; - case 2: - adev->sdma_timeout = timeout; - break; - case 3: - adev->video_timeout = timeout; - break; - default: - break; - } - } - /* - * There is only one value specified and - * it should apply to all non-compute jobs. - */ - if (index == 1) - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - } - - return ret; -} - static bool amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, @@ -1410,8 +1383,6 @@ static struct drm_driver kms_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = amdgpu_gem_prime_export, .gem_prime_import = amdgpu_gem_prime_import, - .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, - .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_vmap = amdgpu_gem_prime_vmap, .gem_prime_vunmap = amdgpu_gem_prime_vunmap, .gem_prime_mmap = amdgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 571a6dfb473e..61fcf247a638 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; + drm_connector_list_iter_begin(dev, &iter); /* walk the list and link encoders to connectors */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev) } } } + drm_connector_list_iter_end(&iter); } void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) @@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices; @@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) amdgpu_connector->devices, encoder->encoder_type); } } + drm_connector_list_iter_end(&iter); } 
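/*
 * A minimal sketch of the connector-walk pattern these hunks convert to,
 * assuming only the drm_connector_list_iter helpers already used above;
 * the function name is illustrative and not part of this patch:
 */

#include <drm/drm_connector.h>
#include <drm/drm_print.h>

static void example_walk_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	/* The iterator takes a reference on each connector while walking,
	 * so the loop stays safe against concurrent connector hotplug. */
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		DRM_INFO("connector: %s\n", connector->name);
	drm_connector_list_iter_end(&iter);
}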
struct drm_connector * @@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector; + struct drm_connector *connector, *found = NULL; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_encoder->active_device & amdgpu_connector->devices) - return connector; + if (amdgpu_encoder->active_device & amdgpu_connector->devices) { + found = connector; + break; + } } - return NULL; + drm_connector_list_iter_end(&iter); + return found; } struct drm_connector * @@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector; + struct drm_connector *connector, *found = NULL; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_encoder->devices & amdgpu_connector->devices) - return connector; + if (amdgpu_encoder->devices & amdgpu_connector->devices) { + found = connector; + break; + } } - return NULL; + drm_connector_list_iter_end(&iter); + return found; } struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 23085b352cf2..377fe20bce23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, timeout = adev->gfx_timeout; break; case AMDGPU_RING_TYPE_COMPUTE: - /* - * For non-sriov case, no timeout enforce - * on compute ring by default. Unless user - * specifies a timeout for compute ring. 
- * - * For sriov case, always use the timeout - * as gfx ring - */ - if (!amdgpu_sriov_vf(ring->adev)) - timeout = adev->compute_timeout; - else - timeout = adev->gfx_timeout; + timeout = adev->compute_timeout; break; case AMDGPU_RING_TYPE_SDMA: timeout = adev->sdma_timeout; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 5e8bdded265f..19705e399905 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -71,7 +71,7 @@ */ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) { - struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page; + struct page *dummy_page = ttm_bo_glob.dummy_read_page; if (adev->dummy_page_addr) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8ceb44925947..4277125a79ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false); + r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); if (r) { dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); @@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, goto error; } - r = amdgpu_vm_update_directories(adev, vm); + r = amdgpu_vm_update_pdes(adev, vm, false); error: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } +/** + * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags + * + * @adev: amdgpu_device pointer + * @flags: GEM UAPI flags + * + * Returns the GEM UAPI flags mapped into hardware for the ASIC. 
+ */ +uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags) +{ + uint64_t pte_flag = 0; + + if (flags & AMDGPU_VM_PAGE_EXECUTABLE) + pte_flag |= AMDGPU_PTE_EXECUTABLE; + if (flags & AMDGPU_VM_PAGE_READABLE) + pte_flag |= AMDGPU_PTE_READABLE; + if (flags & AMDGPU_VM_PAGE_WRITEABLE) + pte_flag |= AMDGPU_PTE_WRITEABLE; + if (flags & AMDGPU_VM_PAGE_PRT) + pte_flag |= AMDGPU_PTE_PRT; + + if (adev->gmc.gmc_funcs->map_mtype) + pte_flag |= amdgpu_gmc_map_mtype(adev, + flags & AMDGPU_VM_MTYPE_MASK); + + return pte_flag; +} + int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -613,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false); + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) goto error_unref; @@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, va_flags); @@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case AMDGPU_VA_OP_REPLACE: - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, va_flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 0b66d2e6b5d5..e0f025dd1b14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags); int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f9bef3154b99..c9d1fada6188 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" +#include "amdgpu_ras.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe, me; + int i, queue, me; for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) { queue = i % adev->gfx.me.num_queue_per_pipe; - pipe = (i / adev->gfx.me.num_queue_per_pipe) - % adev->gfx.me.num_pipe_per_me; me = (i / adev->gfx.me.num_queue_per_pipe) / adev->gfx.me.num_pipe_per_me; @@ -320,8 +319,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, return r; } -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq) +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); amdgpu_ring_fini(ring); @@ -569,3 +567,102 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool 
enable) mutex_unlock(&adev->gfx.gfx_off_mutex); } + +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "gfx_err_count", + .debugfs_name = "gfx_err_inject", + }; + struct ras_ih_if ih_info = { + .cb = amdgpu_gfx_process_ras_data_cb, + }; + + if (!adev->gfx.ras_if) { + adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->gfx.ras_if) + return -ENOMEM; + adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; + adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gfx.ras_if->sub_block_index = 0; + strcpy(adev->gfx.ras_if->name, "gfx"); + } + fs_info.head = ih_info.head = *adev->gfx.ras_if; + + r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (r) + goto late_fini; + } else { + /* free gfx ras_if if ras is not supported */ + r = 0; + goto free; + } + + return 0; +late_fini: + amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info); +free: + kfree(adev->gfx.ras_if); + adev->gfx.ras_if = NULL; + return r; +} + +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && + adev->gfx.ras_if) { + struct ras_common_if *ras_if = adev->gfx.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_gfx_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + /* TODO ue will trigger an interrupt. + * + * When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->gfx.funcs->query_ras_error_count) + adev->gfx.funcs->query_ras_error_count(adev, err_data); + amdgpu_ras_reset_gpu(adev, 0); + } + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->gfx.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + DRM_ERROR("CP ECC ERROR IRQ\n"); + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 6ee4021910e2..a74ecd449775 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs { int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); }; -struct amdgpu_ngg_buf { - struct amdgpu_bo *bo; - uint64_t gpu_addr; - uint32_t size; - uint32_t bo_size; -}; - -enum { - NGG_PRIM = 0, - NGG_POS, - NGG_CNTL, - NGG_PARAM, - NGG_BUF_MAX -}; - -struct amdgpu_ngg { - struct amdgpu_ngg_buf buf[NGG_BUF_MAX]; - uint32_t gds_reserve_addr; - uint32_t gds_reserve_size; - bool init; -}; - struct sq_work { struct work_struct work; unsigned ih_data; @@ -289,6 +267,7 @@ struct amdgpu_gfx { uint32_t mec2_feature_version; bool mec_fw_write_wait; bool me_fw_write_wait; + bool cp_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct 
amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; @@ -311,9 +290,6 @@ struct amdgpu_gfx { uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; - /* NGG */ - struct amdgpu_ngg ngg; - /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ struct mutex gfx_off_mutex; @@ -355,8 +331,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_irq_src *irq); -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq); +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, @@ -384,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); - +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); +int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 5790db61fa2c..a12f33c0f5df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -27,6 +27,8 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include "amdgpu.h" +#include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" /** * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO @@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, gmc->fault_hash[hash].idx = gmc->last_fault++; return false; } + +int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->umc.funcs && adev->umc.funcs->ras_late_init) { + r = adev->umc.funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) { + r = adev->mmhub.funcs->ras_late_init(adev); + if (r) + return r; + } + + return amdgpu_xgmi_ras_late_init(adev); +} + +void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) +{ + amdgpu_umc_ras_fini(adev); + amdgpu_mmhub_ras_fini(adev); + amdgpu_xgmi_ras_fini(adev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b6e1d98ef01e..406736a1bd3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -99,12 +99,15 @@ struct amdgpu_gmc_funcs { unsigned pasid); /* enable/disable PRT support */ void (*set_prt)(struct amdgpu_device *adev, bool enable); - /* set pte flags based per asic */ - uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev, - uint32_t flags); + /* map mtype to hardware flags */ + uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags); /* get the pde for a given mc addr */ void (*get_vm_pde)(struct amdgpu_device *adev, int level, u64 *dst, u64 *flags); + /* get the pte flags to use for a BO VA mapping */ + void (*get_vm_pte)(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags); }; struct amdgpu_xgmi { @@ -120,21 +123,52 @@ struct amdgpu_xgmi { /* gpu list in the same hive */ struct list_head head; bool supported; + struct ras_common_if *ras_if; }; struct amdgpu_gmc { + /* FB's physical address in MMIO space (for CPU to + * map FB). 
This is different from the agp/ + * gart/vram_start/end fields, as the latter are from + * the GPU's view and aper_base is from the CPU's view. + */ resource_size_t aper_size; resource_size_t aper_base; /* for some chips with <= 32MB we need to lie * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; + /* AGP aperture start and end in MC address space + * The driver finds a hole in the MC address space + * to place AGP by setting the MC_VM_AGP_BOT/TOP registers. + * Under VMID0, logical address == MC address. The AGP + * aperture maps to physical bus or IOVA addresses. + * The AGP aperture is used to simulate FB in the ZFB case. + * The AGP aperture is also used for page tables in system + * memory (mainly for APUs). + * + */ u64 agp_size; u64 agp_start; u64 agp_end; + /* GART aperture start and end in MC address space + * The driver finds a hole in the MC address space + * to place GART by setting the VM_CONTEXT0_PAGE_TABLE_START/END_ADDR + * registers. + * Under VMID0, a logical address inside the GART aperture is + * translated through the gpuvm gart page table to access + * paged system memory + */ u64 gart_size; u64 gart_start; u64 gart_end; + /* Frame buffer aperture of this GPU device. Different from + * fb_start (see below), this only covers the local GPU device. + * The driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by the + * vbios) and calculates vram_start of this local device by adding an + * offset inside the XGMI hive. + * Under VMID0, logical address == MC address + */ u64 vram_start; u64 vram_end; /* FB region , it's same as local vram region in single GPU, in XGMI @@ -153,6 +187,7 @@ struct amdgpu_gmc { uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; + uint8_t vram_vendor; uint32_t srbm_soft_reset; bool prt_warning; uint64_t stolen_size; @@ -177,15 +212,14 @@ struct amdgpu_gmc { struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; - struct ras_common_if *umc_ras_if; - struct ras_common_if *mmhub_ras_if; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) +#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) -#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) +#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) /** * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR @@ -230,5 +264,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp); +int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); +void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 53734da1c2df..6f9289735e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, !dma_fence_is_later(updates, (*id)->flushed_updates)) updates = 
NULL; - if ((*id)->owner != vm->entity.fence_context || + if ((*id)->owner != vm->direct.fence_context || job->vm_pd_addr != (*id)->pd_gpu_addr || updates || !(*id)->last_flush || ((*id)->last_flush->context != fence_context && @@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct dma_fence *flushed; /* Check all the prerequisites to using this VMID */ - if ((*id)->owner != vm->entity.fence_context) + if ((*id)->owner != vm->direct.fence_context) continue; if ((*id)->pd_gpu_addr != job->vm_pd_addr) @@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } id->pd_gpu_addr = job->vm_pd_addr; - id->owner = vm->entity.fence_context; + id->owner = vm->direct.fence_context; if (job->vm_needs_flush) { dma_fence_put(id->last_flush); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2a3f5ec298db..30d540d23b77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -87,10 +87,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) struct drm_device *dev = adev->ddev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + struct drm_connector_list_iter iter; mutex_lock(&mode_config->mutex); - list_for_each_entry(connector, &mode_config->connector_list, head) + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) amdgpu_connector_hotplug(connector); + drm_connector_list_iter_end(&iter); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); @@ -153,6 +156,20 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg) ret = amdgpu_ih_process(adev, &adev->irq.ih); if (ret == IRQ_HANDLED) pm_runtime_mark_last_busy(dev->dev); + + /* For hardware that cannot enable the bif ring for both ras_controller_irq + * and ras_err_event_athub_irq ih cookies, the driver has to poll the status + * register to check whether the interrupt is triggered or not, and properly + * ack the interrupt if it is there. + */ + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_controller_intr_no_bifring) + adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); + + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + return ret; } @@ -228,10 +245,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.msi_enabled = false; if (amdgpu_msi_ok(adev)) { - int ret = pci_enable_msi(adev->pdev); - if (!ret) { + int nvec = pci_msix_vec_count(adev->pdev); + unsigned int flags; + + if (nvec <= 0) { + flags = PCI_IRQ_MSI; + } else { + flags = PCI_IRQ_MSI | PCI_IRQ_MSIX; + } + /* we only need one vector */ + nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); + if (nvec > 0) { adev->irq.msi_enabled = true; - dev_dbg(adev->dev, "amdgpu: using MSI.\n"); + dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n"); } } @@ -254,7 +280,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev) INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); adev->irq.installed = true; - r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); + /* Use vector 0 for MSI-X */ + r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0)); if (r) { adev->irq.installed = false; if (!amdgpu_device_has_dc_support(adev)) @@ -284,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) drm_irq_uninstall(adev->ddev); adev->irq.installed = false; 
if (adev->irq.msi_enabled) - pci_disable_msi(adev->pdev); + pci_free_irq_vectors(adev->pdev); if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); } @@ -369,7 +396,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, * amdgpu_irq_dispatch - dispatch IRQ to IP blocks * * @adev: amdgpu device pointer - * @entry: interrupt vector pointer + * @ih: interrupt ring instance * * Dispatches IRQ to IP blocks. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 9d76e0923a5a..4fb20e870e63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); struct dma_fence *fence = NULL, *finished; struct amdgpu_job *job; - int r; + int r = 0; job = to_amdgpu_job(sched_job); finished = &job->base.s_fence->finished; @@ -243,9 +243,49 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) job->fence = dma_fence_get(fence); amdgpu_job_free_resources(job); + + fence = r ? ERR_PTR(r) : fence; return fence; } +#define to_drm_sched_job(sched_job) \ + container_of((sched_job), struct drm_sched_job, queue_node) + +void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_job *s_job; + struct drm_sched_entity *s_entity = NULL; + int i; + + /* Signal all jobs not yet scheduled */ + for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + struct drm_sched_rq *rq = &sched->sched_rq[i]; + + if (!rq) + continue; + + spin_lock(&rq->lock); + list_for_each_entry(s_entity, &rq->entities, list) { + while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) { + struct drm_sched_fence *s_fence = s_job->s_fence; + + dma_fence_signal(&s_fence->scheduled); + dma_fence_set_error(&s_fence->finished, -EHWPOISON); + dma_fence_signal(&s_fence->finished); + } + } + spin_unlock(&rq->lock); + } + + /* Signal all jobs already scheduled to HW */ + list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + struct drm_sched_fence *s_fence = s_job->s_fence; + + dma_fence_set_error(&s_fence->finished, -EHWPOISON); + dma_fence_signal(&s_fence->finished); + } +} + const struct drm_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 51e62504c279..dc7ee9358dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f); int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence); + +void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index f2c097983f48..82c46c4faaad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) struct amdgpu_device *adev; int r, acpi_status; -#ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - dev_info(dev->dev, - 
"SI support provided by radeon.\n"); - dev_info(dev->dev, - "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n" - ); - return -ENODEV; - } - } -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: - dev_info(dev->dev, - "CIK support provided by radeon.\n"); - dev_info(dev->dev, - "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n" - ); - return -ENODEV; - } - } -#endif - adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); if (adev == NULL) { return -ENOMEM; @@ -225,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) pm_runtime_put_autosuspend(dev->dev); } - amdgpu_register_gpu_instance(adev); out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ @@ -619,9 +583,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_vram_gtt vram_gtt; vram_gtt.vram_size = adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size); - vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size - - atomic64_read(&adev->visible_pin_size); + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; + vram_gtt.vram_cpu_accessible_size = + min(adev->gmc.visible_vram_size - + atomic64_read(&adev->visible_pin_size), + vram_gtt.vram_size); vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); @@ -634,15 +601,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; mem.vram.usable_heap_size = adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size); + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = adev->gmc.visible_vram_size; - mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size - - atomic64_read(&adev->visible_pin_size); + mem.cpu_accessible_vram.usable_heap_size = + min(adev->gmc.visible_vram_size - + atomic64_read(&adev->visible_pin_size), + mem.vram.usable_heap_size); mem.cpu_accessible_vram.heap_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.cpu_accessible_vram.max_allocation = @@ -764,17 +734,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.vce_harvest_config = adev->vce.harvest_config; dev_info.gc_double_offchip_lds_buf = adev->gfx.config.double_offchip_lds_buf; - - if (amdgpu_ngg) { - dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr; - dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size; - dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr; - dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size; - dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr; - dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size; - dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr; - dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size; - } dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs; dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh; @@ -1003,6 +962,12 @@ int 
amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* Ensure IB tests are run on ring */ flush_delayed_work(&adev->delayed_init_work); + + if (amdgpu_ras_intr_triggered()) { + DRM_ERROR("RAS Intr triggered, device disabled!!"); + return -EHWPOISON; + } + file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c new file mode 100644 index 000000000000..676c48c02d77 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -0,0 +1,70 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "mmhub_err_count", + .debugfs_name = "mmhub_err_inject", + }; + + if (!adev->mmhub.ras_if) { + adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->mmhub.ras_if) + return -ENOMEM; + adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB; + adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mmhub.ras_if->sub_block_index = 0; + strcpy(adev->mmhub.ras_if->name, "mmhub"); + } + ih_info.head = fs_info.head = *adev->mmhub.ras_if; + r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) { + kfree(adev->mmhub.ras_if); + adev->mmhub.ras_if = NULL; + } + + return r; +} + +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && + adev->mmhub.ras_if) { + struct ras_common_if *ras_if = adev->mmhub.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 2d75ecfa199b..1cd78940cf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -23,9 +23,17 @@ struct amdgpu_mmhub_funcs { void (*ras_init)(struct amdgpu_device *adev); + int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); }; +struct amdgpu_mmhub { + struct ras_common_if *ras_if; + const struct amdgpu_mmhub_funcs *funcs; +}; 
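/*
 * A minimal sketch of how an IP block is expected to consume the helper
 * above during its own late-init phase; the caller name is illustrative
 * and not part of this patch:
 */

static int example_gmc_block_late_init(struct amdgpu_device *adev)
{
	int r;

	/* Registers the MMHUB RAS node with the framework. On ASICs
	 * without RAS support the helper frees adev->mmhub.ras_if again
	 * and still returns 0, so callers only fail on real errors. */
	r = amdgpu_mmhub_ras_late_init(adev);
	if (r)
		return r;

	return 0;
}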
+ +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev); +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 31d4deb5d294..392300f77b13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -136,6 +136,7 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn) * amdgpu_mn_read_lock - take the read side lock for this notifier * * @amn: our notifier + * @blockable: is the notifier blockable */ static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c new file mode 100644 index 000000000000..7d5c3a9de9ea --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "pcie_bif_err_count", + .debugfs_name = "pcie_bif_err_inject", + }; + + if (!adev->nbio.ras_if) { + adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->nbio.ras_if) + return -ENOMEM; + adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF; + adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->nbio.ras_if->sub_block_index = 0; + strcpy(adev->nbio.ras_if->name, "pcie_bif"); + } + ih_info.head = fs_info.head = *adev->nbio.ras_if; + r = amdgpu_ras_late_init(adev, adev->nbio.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0); + if (r) + goto late_fini; + r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + return 0; +late_fini: + amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info); +free: + kfree(adev->nbio.ras_if); + adev->nbio.ras_if = NULL; + return r; +} + +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) && + adev->nbio.ras_if) { + struct ras_common_if *ras_if = adev->nbio.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h new file mode 100644 index 000000000000..919bd566ba3c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __AMDGPU_NBIO_H__ +#define __AMDGPU_NBIO_H__ + +/* + * amdgpu nbio functions + */ +struct nbio_hdp_flush_reg { + u32 ref_and_mask_cp0; + u32 ref_and_mask_cp1; + u32 ref_and_mask_cp2; + u32 ref_and_mask_cp3; + u32 ref_and_mask_cp4; + u32 ref_and_mask_cp5; + u32 ref_and_mask_cp6; + u32 ref_and_mask_cp7; + u32 ref_and_mask_cp8; + u32 ref_and_mask_cp9; + u32 ref_and_mask_sdma0; + u32 ref_and_mask_sdma1; + u32 ref_and_mask_sdma2; + u32 ref_and_mask_sdma3; + u32 ref_and_mask_sdma4; + u32 ref_and_mask_sdma5; + u32 ref_and_mask_sdma6; + u32 ref_and_mask_sdma7; +}; + +struct amdgpu_nbio_funcs { + const struct nbio_hdp_flush_reg *hdp_flush_reg; + u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); + u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); + u32 (*get_rev_id)(struct amdgpu_device *adev); + void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); + void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + u32 (*get_memsize)(struct amdgpu_device *adev); + void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, int doorbell_size); + void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, + int doorbell_index, int instance); + void (*enable_doorbell_aperture)(struct amdgpu_device *adev, + bool enable); + void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, + bool enable); + void (*ih_doorbell_range)(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index); + void (*enable_doorbell_interrupt)(struct amdgpu_device *adev, + bool enable); + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, + bool enable); + void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, + bool enable); + void (*get_clockgating_state)(struct amdgpu_device *adev, + u32 *flags); + void (*ih_control)(struct amdgpu_device *adev); + void (*init_registers)(struct amdgpu_device *adev); + void (*detect_hw_virt)(struct amdgpu_device *adev); + void (*remap_hdp_registers)(struct amdgpu_device *adev); + void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); + void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); + int (*init_ras_controller_interrupt)(struct amdgpu_device *adev); + int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + int (*ras_late_init)(struct amdgpu_device *adev); +}; + +struct amdgpu_nbio { + const struct nbio_hdp_flush_reg *hdp_flush_reg; + struct amdgpu_irq_src ras_controller_irq; + struct amdgpu_irq_src ras_err_event_athub_irq; + struct ras_common_if *ras_if; + const struct amdgpu_nbio_funcs *funcs; +}; + +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev); +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 1fead0e8b890..e3f16b49e970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, } /** + * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location + * + * @adev: amdgpu device object + * @offset: offset of the BO + * @size: size of the BO + * @domain: where to place it + * 
@bo_ptr: used to initialize BOs in structures + * @cpu_addr: optional CPU address mapping + * + * Creates a kernel BO at a specific offset in the address space of the domain. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + unsigned int i; + int r; + + offset &= PAGE_MASK; + size = ALIGN(size, PAGE_SIZE); + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr, + NULL, cpu_addr); + if (r) + return r; + + /* + * Remove the original mem node and create a new one at the request + * position. + */ + if (cpu_addr) + amdgpu_bo_kunmap(*bo_ptr); + + ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + + for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { + (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; + (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; + } + r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, + &(*bo_ptr)->tbo.mem, &ctx); + if (r) + goto error; + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) + goto error; + } + + amdgpu_bo_unreserve(*bo_ptr); + return 0; + +error: + amdgpu_bo_unreserve(*bo_ptr); + amdgpu_bo_unref(bo_ptr); + return r; +} + +/** * amdgpu_bo_free_kernel - free BO for kernel use * * @bo: amdgpu BO to free @@ -451,9 +515,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, { struct ttm_operation_ctx ctx = { .interruptible = (bp->type != ttm_bo_type_kernel), - .no_wait_gpu = false, + .no_wait_gpu = bp->no_wait_gpu, .resv = bp->resv, - .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT + .flags = bp->type != ttm_bo_type_kernel ? 
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0 }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; @@ -1058,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev) int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, struct vm_area_struct *vma) { - return ttm_fbdev_mmap(vma, &bo->tbo); + if (vma->vm_pgoff != 0) + return -EACCES; + + return ttm_bo_mmap_obj(vma, &bo->tbo); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 658f4c9779b7..7e99f6c58c48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -41,6 +41,7 @@ struct amdgpu_bo_param { u32 preferred_domain; u64 flags; enum ttm_bo_type type; + bool no_wait_gpu; struct dma_resv *resv; }; @@ -237,6 +238,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, u64 *gpu_addr, void **cpu_addr); +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr); void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03930313c263..f205f56e3358 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, if (is_support_sw_smu(adev)) { if (adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); + pm = smu_get_current_power_state(&adev->smu); else pm = adev->pm.dpm.user_state; } else if (adev->powerplay.pp_funcs->get_current_power_state) { @@ -805,8 +805,7 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, } /** - * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk - * pp_dpm_pcie + * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie * * The amdgpu driver provides a sysfs API for adjusting what power levels * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, @@ -822,9 +821,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, * * To manually adjust these states, first select manual using * power_dpm_force_performance_level. - * Secondly,Enter a new value for each level by inputing a string that + * Secondly, enter a new value for each level by inputing a string that * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" - * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. + * E.g., + * + * .. code-block:: bash + * + * echo "4 5 6" > pp_dpm_sclk + * + * will enable sclk levels 4, 5, and 6. 
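 *
 * The selection can be verified by reading the same file back; the level
 * currently in use is tagged with a trailing asterisk. A quick check,
 * assuming the first card:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_dpm_sclk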
* * NOTE: change to the dcefclk max dpm level is not supported now */ @@ -902,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); @@ -949,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); @@ -989,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); @@ -1029,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); @@ -1069,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); @@ -1109,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); @@ -1301,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; if (is_support_sw_smu(adev)) - ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size); + ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); else if (adev->powerplay.pp_funcs->set_power_profile_mode) ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); if (!ret) @@ -2010,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, uint32_t limit = 0; if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, true); + smu_get_power_limit(&adev->smu, &limit, true, true); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); @@ -2028,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, uint32_t limit = 0; if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, false); + smu_get_power_limit(&adev->smu, &limit, false, true); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); @@ -2196,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * - fan1_input: fan speed in RPM * - * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM) + * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) * - * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable + * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable * * hwmon interfaces for GPU clocks: * @@ -2825,6 +2830,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_dpm_sclk\n"); return ret; } + + /* Arcturus does not support standalone mclk/socclk/fclk level setting */ + if (adev->asic_type == CHIP_ARCTURUS) { + dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_mclk.store = NULL; + + dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_socclk.store = NULL; + + dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_fclk.store = NULL; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); if (ret) { DRM_ERROR("failed to create device file pp_dpm_mclk\n"); @@ -3008,7 +3026,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; smu_handle_task(&adev->smu, smu_dpm->dpm_level, - AMD_PP_TASK_DISPLAY_CONFIG_CHANGE); + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, + true); } else { if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 4d71537a960d..bbe9ac7e843f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -34,6 +34,8 @@ #include "psp_v11_0.h" #include "psp_v12_0.h" +#include "amdgpu_ras.h" + static void psp_set_funcs(struct amdgpu_device *adev); static int psp_early_init(void *handle) @@ -88,6 +90,17 @@ static int psp_sw_init(void *handle) return ret; } + ret = psp_mem_training_init(psp); + if (ret) { + DRM_ERROR("Failed to initialize memory training!\n"); + return ret; + } + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + return 0; } @@ -95,6 +108,7 @@ static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + psp_mem_training_fini(&adev->psp); release_firmware(adev->psp.sos_fw); adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); @@ -151,10 +165,19 @@ psp_cmd_submit_buf(struct psp_context *psp, return ret; } + amdgpu_asic_invalidate_hdp(psp->adev, NULL); while (*((unsigned int *)psp->fence_buf) != index) { if (--timeout == 0) break; + /* + * Shouldn't wait for timeout when err_event_athub occurs, + * because gpu reset thread triggered and lock resource should + * be released for psp resume sequence. 
+ */ + if (amdgpu_ras_intr_triggered()) + break; msleep(1); + amdgpu_asic_invalidate_hdp(psp->adev, NULL); } /* In some cases, psp response status is not 0 even there is no @@ -168,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp, if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); - DRM_WARN("psp command failed and response status is (0x%X)\n", - psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); + DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n", + psp->cmd_buf_mem->cmd_id, + psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); if (!timeout) { mutex_unlock(&psp->mutex); return -EINVAL; @@ -253,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp) /* For ASICs support RLC autoload, psp will parse the toc * and calculate the total size of TMR needed */ - if (psp->toc_start_addr && + if (!amdgpu_sriov_vf(psp->adev) && + psp->toc_start_addr && psp->toc_bin_size && psp->fw_pri_buf) { ret = psp_load_toc(psp, &tmr_size); @@ -287,15 +312,9 @@ static int psp_tmr_load(struct psp_context *psp) ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - if (ret) - goto failed; kfree(cmd); - return 0; - -failed: - kfree(cmd); return ret; } @@ -772,6 +791,324 @@ static int psp_ras_initialize(struct psp_context *psp) } // ras end +// HDCP start +static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t hdcp_ta_mc, + uint64_t hdcp_mc_shared, + uint32_t hdcp_ta_size, + uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc); + cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = + lower_32_bits(hdcp_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = + upper_32_bits(hdcp_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_hdcp_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for hdcp ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->hdcp_context.hdcp_shared_bo, + &psp->hdcp_context.hdcp_shared_mc_addr, + &psp->hdcp_context.hdcp_shared_buf); + + return ret; +} + +static int psp_hdcp_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, + psp->ta_hdcp_ucode_size); + + psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->hdcp_context.hdcp_shared_mc_addr, + psp->ta_hdcp_ucode_size, + PSP_HDCP_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->hdcp_context.hdcp_initialized = 1; + psp->hdcp_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} +static int psp_hdcp_initialize(struct psp_context *psp) +{ + int ret; + + if (!psp->hdcp_context.hdcp_initialized) { + ret = psp_hdcp_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_hdcp_load(psp); + if (ret) + return ret; + + return 0; +} +static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t hdcp_session_id) +{ + cmd->cmd_id = 
GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id; +} + +static int psp_hdcp_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the unloading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t hdcp_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->hdcp_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_hdcp_terminate(struct psp_context *psp) +{ + int ret; + + if (!psp->hdcp_context.hdcp_initialized) + return 0; + + ret = psp_hdcp_unload(psp); + if (ret) + return ret; + + psp->hdcp_context.hdcp_initialized = 0; + + /* free hdcp shared memory */ + amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, + &psp->hdcp_context.hdcp_shared_mc_addr, + &psp->hdcp_context.hdcp_shared_buf); + + return 0; +} +// HDCP end + +// DTM start +static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t dtm_ta_mc, + uint64_t dtm_mc_shared, + uint32_t dtm_ta_size, + uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc); + cmd->cmd.cmd_load_ta.app_len = dtm_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_dtm_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for dtm ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->dtm_context.dtm_shared_bo, + &psp->dtm_context.dtm_shared_mc_addr, + &psp->dtm_context.dtm_shared_buf); + + return ret; +} + +static int psp_dtm_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); + + psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->dtm_context.dtm_shared_mc_addr, + psp->ta_dtm_ucode_size, + PSP_DTM_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->dtm_context.dtm_initialized = 1; + 
psp->dtm_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} + +static int psp_dtm_initialize(struct psp_context *psp) +{ + int ret; + + if (!psp->dtm_context.dtm_initialized) { + ret = psp_dtm_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_dtm_load(psp); + if (ret) + return ret; + + return 0; +} + +static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t dtm_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->dtm_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_dtm_terminate(struct psp_context *psp) +{ + int ret; + + if (!psp->dtm_context.dtm_initialized) + return 0; + + ret = psp_hdcp_unload(psp); + if (ret) + return ret; + + psp->dtm_context.dtm_initialized = 0; + + /* free hdcp shared memory */ + amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, + &psp->dtm_context.dtm_shared_mc_addr, + &psp->dtm_context.dtm_shared_buf); + + return 0; +} +// DTM end + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -845,6 +1182,16 @@ static int psp_hw_start(struct psp_context *psp) if (ret) dev_err(psp->adev->dev, "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "HDCP: Failed to initialize HDCP\n"); + + ret = psp_dtm_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "DTM: Failed to initialize DTM\n"); } return 0; @@ -950,21 +1297,7 @@ static void psp_print_fw_hdr(struct psp_context *psp, struct amdgpu_firmware_info *ucode) { struct amdgpu_device *adev = psp->adev; - const struct sdma_firmware_header_v1_0 *sdma_hdr = - (const struct sdma_firmware_header_v1_0 *) - adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; - const struct gfx_firmware_header_v1_0 *ce_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; - const struct gfx_firmware_header_v1_0 *pfp_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; - const struct gfx_firmware_header_v1_0 *me_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; - const struct gfx_firmware_header_v1_0 *mec_hdr = - (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - const struct rlc_firmware_header_v2_0 *rlc_hdr = - (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; - const struct smc_firmware_header_v1_0 *smc_hdr = - (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + struct common_firmware_header *hdr; switch (ucode->ucode_id) { case AMDGPU_UCODE_ID_SDMA0: @@ -975,25 +1308,33 @@ static void psp_print_fw_hdr(struct psp_context *psp, case AMDGPU_UCODE_ID_SDMA5: case AMDGPU_UCODE_ID_SDMA6: case AMDGPU_UCODE_ID_SDMA7: - amdgpu_ucode_print_sdma_hdr(&sdma_hdr->header); + hdr = (struct common_firmware_header *) + adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; + amdgpu_ucode_print_sdma_hdr(hdr); break; case 
AMDGPU_UCODE_ID_CP_CE: - amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_PFP: - amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_ME: - amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_CP_MEC1: - amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(hdr); break; case AMDGPU_UCODE_ID_RLC_G: - amdgpu_ucode_print_rlc_hdr(&rlc_hdr->header); + hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; + amdgpu_ucode_print_rlc_hdr(hdr); break; case AMDGPU_UCODE_ID_SMC: - amdgpu_ucode_print_smc_hdr(&smc_hdr->header); + hdr = (struct common_firmware_header *)adev->pm.fw->data; + amdgpu_ucode_print_smc_hdr(hdr); break; default: break; @@ -1079,10 +1420,6 @@ out: ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) /* skip mec JT when autoload is enabled */ continue; - /* Renoir only needs to load mec jump table one time */ - if (adev->asic_type == CHIP_RENOIR && - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) - continue; psp_print_fw_hdr(psp, ucode); @@ -1091,7 +1428,8 @@ out: return ret; /* Start rlc autoload after psp recieved all the gfx firmware */ - if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + if (psp->autoload_supported && ucode->ucode_id == + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); @@ -1216,8 +1554,11 @@ static int psp_hw_fini(void *handle) psp->xgmi_context.initialized == 1) psp_xgmi_terminate(psp); - if (psp->adev->psp.ta_fw) + if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); + psp_dtm_terminate(psp); + psp_hdcp_terminate(psp); + } psp_ring_destroy(psp, PSP_RING_TYPE__KM); @@ -1259,6 +1600,16 @@ static int psp_suspend(void *handle) DRM_ERROR("Failed to terminate ras ta\n"); return ret; } + ret = psp_hdcp_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate hdcp ta\n"); + return ret; + } + ret = psp_dtm_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate dtm ta\n"); + return ret; + } } ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); @@ -1278,6 +1629,12 @@ static int psp_resume(void *handle) DRM_INFO("PSP is resuming...\n"); + ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + mutex_lock(&adev->firmware.mutex); ret = psp_hw_start(psp); @@ -1317,9 +1674,6 @@ int psp_rlc_autoload_start(struct psp_context *psp) int ret; struct psp_gfx_cmd_resp *cmd; - if (amdgpu_sriov_vf(psp->adev)) - return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!cmd) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index bc0947f6bc8a..09c5474ebcc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -37,6 +37,9 @@ #define PSP_RAS_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 #define PSP_TMR_SIZE 0x400000 +#define PSP_HDCP_SHARED_MEM_SIZE 0x4000 +#define PSP_DTM_SHARED_MEM_SIZE 0x4000 +#define PSP_SHARED_MEM_SIZE 0x4000 struct psp_context; struct psp_xgmi_node_info; @@ -46,6 +49,8 @@ enum psp_bootloader_cmd { 
PSP_BL__LOAD_SYSDRV = 0x10000, PSP_BL__LOAD_SOSDRV = 0x20000, PSP_BL__LOAD_KEY_DATABASE = 0x80000, + PSP_BL__DRAM_LONG_TRAIN = 0x100000, + PSP_BL__DRAM_SHORT_TRAIN = 0x200000, }; enum psp_ring_type @@ -108,6 +113,9 @@ struct psp_funcs struct ta_ras_trigger_error_input *info); int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr); int (*rlc_autoload_start)(struct psp_context *psp); + int (*mem_training_init)(struct psp_context *psp); + void (*mem_training_fini)(struct psp_context *psp); + int (*mem_training)(struct psp_context *psp, uint32_t ops); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -142,6 +150,65 @@ struct psp_ras_context { struct amdgpu_ras *ras; }; +struct psp_hdcp_context { + bool hdcp_initialized; + uint32_t session_id; + struct amdgpu_bo *hdcp_shared_bo; + uint64_t hdcp_shared_mc_addr; + void *hdcp_shared_buf; +}; + +struct psp_dtm_context { + bool dtm_initialized; + uint32_t session_id; + struct amdgpu_bo *dtm_shared_bo; + uint64_t dtm_shared_mc_addr; + void *dtm_shared_buf; +}; + +#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 +#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 +#define GDDR6_MEM_TRAINING_OFFSET 0x8000 + +enum psp_memory_training_init_flag { + PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, + PSP_MEM_TRAIN_SUPPORT = 0x1, + PSP_MEM_TRAIN_INIT_FAILED = 0x2, + PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4, + PSP_MEM_TRAIN_INIT_SUCCESS = 0x8, +}; + +enum psp_memory_training_ops { + PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1, + PSP_MEM_TRAIN_SAVE = 0x2, + PSP_MEM_TRAIN_RESTORE = 0x4, + PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8, + PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG, + PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG, +}; + +struct psp_memory_training_context { + /*training data size*/ + u64 train_data_size; + /* + * sys_cache + * cpu virtual address + * system memory buffer that used to store the training data. + */ + void *sys_cache; + + /*vram offset of the p2c training data*/ + u64 p2c_train_data_offset; + struct amdgpu_bo *p2c_bo; + + /*vram offset of the c2p training data*/ + u64 c2p_train_data_offset; + struct amdgpu_bo *c2p_bo; + + enum psp_memory_training_init_flag init; + u32 training_cnt; +}; + struct psp_context { struct amdgpu_device *adev; @@ -206,9 +273,21 @@ struct psp_context uint32_t ta_ras_ucode_version; uint32_t ta_ras_ucode_size; uint8_t *ta_ras_start_addr; + + uint32_t ta_hdcp_ucode_version; + uint32_t ta_hdcp_ucode_size; + uint8_t *ta_hdcp_start_addr; + + uint32_t ta_dtm_ucode_version; + uint32_t ta_dtm_ucode_size; + uint8_t *ta_dtm_start_addr; + struct psp_xgmi_context xgmi_context; struct psp_ras_context ras; + struct psp_hdcp_context hdcp_context; + struct psp_dtm_context dtm_context; struct mutex mutex; + struct psp_memory_training_context mem_train_ctx; }; struct amdgpu_psp_funcs { @@ -251,6 +330,12 @@ struct amdgpu_psp_funcs { (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL) #define psp_rlc_autoload(psp) \ ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0) +#define psp_mem_training_init(psp) \ + ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0) +#define psp_mem_training_fini(psp) \ + ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0) +#define psp_mem_training(psp, ops) \ + ((psp)->funcs->mem_training ? 
(psp)->funcs->mem_training((psp), (ops)) : 0) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) @@ -279,6 +364,8 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable); +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rlc_autoload_start(struct psp_context *psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 016ea274b955..404483437bd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -25,10 +25,13 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/reboot.h> +#include <linux/syscalls.h> #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" const char *ras_error_string[] = { "none", @@ -65,11 +68,16 @@ const char *ras_block_string[] = { /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr); -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr); +enum amdgpu_ras_retire_page_reservation { + AMDGPU_RAS_RETIRE_PAGE_RESERVED, + AMDGPU_RAS_RETIRE_PAGE_PENDING, + AMDGPU_RAS_RETIRE_PAGE_FAULT, +}; + +atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); + +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr); static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -189,6 +197,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } + +static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, + struct ras_common_if *head); + /** * DOC: AMDGPU RAS debugfs control interface * @@ -208,31 +220,44 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * As their names indicate, inject operation will write the * value to the address. * - * Second member: struct ras_debug_if::op. + * The second member: struct ras_debug_if::op. * It has three kinds of operations. - * 0: disable RAS on the block. Take ::head as its data. - * 1: enable RAS on the block. Take ::head as its data. - * 2: inject errors on the block. Take ::inject as its data. + * + * - 0: disable RAS on the block. Take ::head as its data. + * - 1: enable RAS on the block. Take ::head as its data. + * - 2: inject errors on the block. Take ::inject as its data. * * How to use the interface? - * programs: - * copy the struct ras_debug_if in your codes and initialize it. - * write the struct to the control node. * - * bash: - * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl - * op: disable, enable, inject - * disable: only block is needed - * enable: block and error are needed - * inject: error, address, value are needed - * block: umc, smda, gfx, ......... - * see ras_block_string[] for details - * error: ue, ce - * ue: multi_uncorrectable - * ce: single_correctable - * sub_block: sub block index, pass 0 if there is no sub block + * Programs + * + * Copy the struct ras_debug_if in your codes and initialize it. + * Write the struct to the control node. + * + * Shells + * + * .. 
code-block:: bash + * + * echo op block [error [sub_block address value]] > .../ras/ras_ctrl + * + * Parameters: + * + * op: disable, enable, inject + * disable: only block is needed + * enable: block and error are needed + * inject: error, address, value are needed + * block: umc, sdma, gfx, ......... + * see ras_block_string[] for details + * error: ue, ce + * ue: multi_uncorrectable + * ce: single_correctable + * sub_block: + * sub block index, pass 0 if there is no sub block + * + * here are some examples for bash commands: + * + * .. code-block:: bash * - * here are some examples for bash commands, * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl @@ -245,8 +270,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * For inject, please check corresponding err count at * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count * - * NOTE: operation is only allowed on blocks which are supported. - * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * .. note:: + * Operations are only allowed on blocks which are supported. + * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * to see which blocks support RAS on a particular asic. + * */ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) @@ -276,6 +304,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * break; } + /* umc ce/ue error injection for a bad page is not allowed */ + if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && + amdgpu_ras_check_bad_page(adev, data.inject.address)) { + DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", + data.inject.address); + break; + } + /* data.inject.address is offset instead of absolute gpu address */ ret = amdgpu_ras_error_inject(adev, &data.inject); break; @@ -290,6 +326,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * return size; } +/** + * DOC: AMDGPU RAS debugfs EEPROM table reset interface + * + * Some boards contain an EEPROM which is used to persistently store a list of + * bad pages which experiences ECC errors in vram. This interface provides + * a way to reset the EEPROM, e.g., after testing error injection. + * + * Usage: + * + * .. code-block:: bash + * + * echo 1 > ../ras/ras_eeprom_reset + * + * will reset EEPROM table to 0 entries. + * + */ +static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + int ret; + + ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); + + return ret == 1 ? 
size : -EIO; +} + static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { .owner = THIS_MODULE, .read = NULL, @@ -297,6 +360,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_ras_debugfs_eeprom_write, + .llseek = default_llseek +}; + +/** + * DOC: AMDGPU RAS sysfs Error Count Interface + * + * It allows the user to read the error count for each IP block on the gpu through + * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count + * + * It outputs the multiple lines which report the uncorrected (ue) and corrected + * (ce) error counts. + * + * The format of one line is below, + * + * [ce|ue]: count + * + * Example: + * + * .. code-block:: bash + * + * ue: 0 + * ce: 1 + * + */ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, struct device_attribute *attr, char *buf) { @@ -475,15 +566,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) return 0; - ret = psp_ras_enable_features(&adev->psp, &info, enable); - if (ret) { - DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", - enable ? "enable":"disable", - ras_block_str(head->block), - ret); - if (ret == TA_RAS_STATUS__RESET_NEEDED) - return -EAGAIN; - return -EINVAL; + if (!amdgpu_ras_intr_triggered()) { + ret = psp_ras_enable_features(&adev->psp, &info, enable); + if (ret) { + DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", + enable ? "enable":"disable", + ras_block_str(head->block), + ret); + if (ret == TA_RAS_STATUS__RESET_NEEDED) + return -EAGAIN; + return -EINVAL; + } } /* setup the obj */ @@ -615,8 +708,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, adev->gfx.funcs->query_ras_error_count(adev, &err_data); break; case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub_funcs->query_ras_error_count) - adev->mmhub_funcs->query_ras_error_count(adev, &err_data); + if (adev->mmhub.funcs->query_ras_error_count) + adev->mmhub.funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__PCIE_BIF: + if (adev->nbio.funcs->query_ras_error_count) + adev->nbio.funcs->query_ras_error_count(adev, &err_data); break; default: break; @@ -628,12 +725,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; - if (err_data.ce_count) + if (err_data.ce_count) { dev_info(adev->dev, "%ld correctable errors detected in %s block\n", obj->err_data.ce_count, ras_block_str(info->head.block)); - if (err_data.ue_count) + } + if (err_data.ue_count) { dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", obj->err_data.ue_count, ras_block_str(info->head.block)); + } return 0; } @@ -664,6 +763,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, break; case AMDGPU_RAS_BLOCK__UMC: case AMDGPU_RAS_BLOCK__MMHUB: + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + case AMDGPU_RAS_BLOCK__PCIE_BIF: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; default: @@ -723,18 +824,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, static char *amdgpu_ras_badpage_flags_str(unsigned int flags) { switch (flags) { - case 0: + case AMDGPU_RAS_RETIRE_PAGE_RESERVED: return "R"; - case 1: + case AMDGPU_RAS_RETIRE_PAGE_PENDING: return "P"; - case 2: + case AMDGPU_RAS_RETIRE_PAGE_FAULT: default: return "F"; }; } -/* - * DOC: ras sysfs gpu_vram_bad_pages 
interface +/** + * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface * * It allows user to read the bad pages of vram on the gpu through * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages @@ -746,14 +847,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags) * * gpu pfn and gpu page size are printed in hex format. * flags can be one of below character, + * * R: reserved, this gpu page is reserved and not able to use. + * * P: pending for reserve, this gpu page is marked as bad, will be reserved - * in next window of page_reserve. + * in next window of page_reserve. + * * F: unable to reserve. this gpu page can't be reserved due to some reasons. * - * examples: - * 0x00000001 : 0x00001000 : R - * 0x00000002 : 0x00001000 : P + * Examples: + * + * .. code-block:: bash + * + * 0x00000001 : 0x00001000 : R + * 0x00000002 : 0x00001000 : P + * */ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, @@ -927,6 +1035,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) } /* sysfs end */ +/** + * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors + * + * Normally when there is an uncorrectable error, the driver will reset + * the GPU to recover. However, in the event of an unrecoverable error, + * the driver provides an interface to reboot the system automatically + * in that event. + * + * The following file in debugfs provides that interface: + * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot + * + * Usage: + * + * .. code-block:: bash + * + * echo true > .../ras/auto_reboot + * + */ /* debugfs begin */ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { @@ -934,8 +1060,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) struct drm_minor *minor = adev->ddev->primary; con->dir = debugfs_create_dir("ras", minor->debugfs_root); - con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_ctrl_ops); + debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, + adev, &amdgpu_ras_debugfs_ctrl_ops); + debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, + adev, &amdgpu_ras_debugfs_eeprom_ops); + + /* + * After one uncorrectable error happens, usually GPU recovery will + * be scheduled. But due to the known problem in GPU recovery failing + * to bring GPU back, below interface provides one direct way to + * user to reboot system automatically in such case within + * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine + * will never be called. 
+ */ + debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, + &con->reboot); } void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, @@ -980,10 +1119,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_remove(adev, &obj->head); } - debugfs_remove(con->ent); - debugfs_remove(con->dir); + debugfs_remove_recursive(con->dir); con->dir = NULL; - con->ent = NULL; } /* debugfs end */ @@ -1188,15 +1325,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, for (; i < data->count; i++) { (*bps)[i] = (struct ras_badpage){ - .bp = data->bps[i].bp, + .bp = data->bps[i].retired_page, .size = AMDGPU_GPU_PAGE_SIZE, - .flags = 0, + .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; if (data->last_reserved <= i) - (*bps)[i].flags = 1; - else if (data->bps[i].bo == NULL) - (*bps)[i].flags = 2; + (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; + else if (data->bps_bo[i] == NULL) + (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; } *count = data->count; @@ -1214,105 +1351,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) atomic_set(&ras->in_recovery, 0); } -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr) -{ - /* no need to free it actually. */ - amdgpu_bo_free_kernel(bo_ptr, NULL, NULL); - return 0; -} - -/* reserve vram with size@offset */ -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - struct amdgpu_bo *bo; - - if (bo_ptr) - *bo_ptr = NULL; - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; - - r = amdgpu_bo_create(adev, &bp, &bo); - if (r) - return -EINVAL; - - r = amdgpu_bo_reserve(bo, false); - if (r) - goto error_reserve; - - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } - - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx); - if (r) - goto error_pin; - - r = amdgpu_bo_pin_restricted(bo, - AMDGPU_GEM_DOMAIN_VRAM, - offset, - offset + size); - if (r) - goto error_pin; - - if (bo_ptr) - *bo_ptr = bo; - - amdgpu_bo_unreserve(bo); - return r; - -error_pin: - amdgpu_bo_unreserve(bo); -error_reserve: - amdgpu_bo_unref(&bo); - return r; -} - /* alloc/realloc bps array */ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, struct ras_err_handler_data *data, int pages) { unsigned int old_space = data->count + data->space_left; unsigned int new_space = old_space + pages; - unsigned int align_space = ALIGN(new_space, 1024); - void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); - - if (!tmp) + unsigned int align_space = ALIGN(new_space, 512); + void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + struct amdgpu_bo **bps_bo = + kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); + + if (!bps || !bps_bo) { + kfree(bps); + kfree(bps_bo); return -ENOMEM; + } if (data->bps) { - memcpy(tmp, data->bps, + memcpy(bps, data->bps, data->count * sizeof(*data->bps)); kfree(data->bps); } + if (data->bps_bo) { + memcpy(bps_bo, data->bps_bo, + data->count * 
sizeof(*data->bps_bo)); + kfree(data->bps_bo); + } - data->bps = tmp; + data->bps = bps; + data->bps_bo = bps_bo; data->space_left += align_space - old_space; return 0; } /* it deal with vram only. */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, - unsigned long *bps, int pages) + struct eeprom_table_record *bps, int pages) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; - int i = pages; int ret = 0; if (!con || !con->eh_data || !bps || pages <= 0) @@ -1329,24 +1407,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, goto out; } - while (i--) - data->bps[data->count++].bp = bps[i]; - + memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); + data->count += pages; data->space_left -= pages; + out: mutex_unlock(&con->recovery_lock); return ret; } +/* + * write error record array to eeprom, the function should be + * protected by recovery_lock + */ +static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_err_handler_data *data; + struct amdgpu_ras_eeprom_control *control; + int save_count; + + if (!con || !con->eh_data) + return 0; + + control = &con->eeprom_control; + data = con->eh_data; + save_count = data->count - control->num_recs; + /* only new entries are saved */ + if (save_count > 0) + if (amdgpu_ras_eeprom_process_recods(control, + &data->bps[control->num_recs], + true, + save_count)) { + DRM_ERROR("Failed to save EEPROM table data!"); + return -EIO; + } + + return 0; +} + +/* + * read error record array in eeprom and reserve enough space for + * storing new bad pages + */ +static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_ras_eeprom_control *control = + &adev->psp.ras.ras->eeprom_control; + struct eeprom_table_record *bps = NULL; + int ret = 0; + + /* no bad page record, skip eeprom access */ + if (!control->num_recs) + return ret; + + bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); + if (!bps) + return -ENOMEM; + + if (amdgpu_ras_eeprom_process_recods(control, bps, false, + control->num_recs)) { + DRM_ERROR("Failed to load EEPROM table records!"); + ret = -EIO; + goto out; + } + + ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); + +out: + kfree(bps); + return ret; +} + +/* + * check if an address belongs to bad page + * + * Note: this check is only for umc block + */ +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_err_handler_data *data; + int i; + bool ret = false; + + if (!con || !con->eh_data) + return ret; + + mutex_lock(&con->recovery_lock); + data = con->eh_data; + if (!data) + goto out; + + addr >>= AMDGPU_GPU_PAGE_SHIFT; + for (i = 0; i < data->count; i++) + if (addr == data->bps[i].retired_page) { + ret = true; + goto out; + } + +out: + mutex_unlock(&con->recovery_lock); + return ret; +} + /* called in gpu recovery/init */ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; uint64_t bp; - struct amdgpu_bo *bo; - int i; + struct amdgpu_bo *bo = NULL; + int i, ret = 0; if (!con || !con->eh_data) return 0; @@ -1357,18 +1531,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) goto out; /* reserve vram at driver post stage. 
*/ for (i = data->last_reserved; i < data->count; i++) { - bp = data->bps[i].bp; + bp = data->bps[i].retired_page; - if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT, - PAGE_SIZE, &bo)) - DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp); + /* There are two cases of reserve error should be ignored: + * 1) a ras bad page has been allocated (used by someone); + * 2) a ras bad page has been reserved (duplicate error injection + * for one page); + */ + if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL)) + DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp); - data->bps[i].bo = bo; + data->bps_bo[i] = bo; data->last_reserved = i + 1; + bo = NULL; } + + /* continue to save bad pages to eeprom even reesrve_vram fails */ + ret = amdgpu_ras_save_bad_pages(adev); out: mutex_unlock(&con->recovery_lock); - return 0; + return ret; } /* called when driver unload */ @@ -1388,11 +1573,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) goto out; for (i = data->last_reserved - 1; i >= 0; i--) { - bo = data->bps[i].bo; + bo = data->bps_bo[i]; - amdgpu_ras_release_vram(adev, &bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); - data->bps[i].bo = bo; + data->bps_bo[i] = bo; data->last_reserved = i; } out: @@ -1400,41 +1585,54 @@ out: return 0; } -static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) -{ - /* TODO - * write the array to eeprom when SMU disabled. - */ - return 0; -} - -static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) -{ - /* TODO - * read the array to eeprom when SMU disabled. - */ - return 0; -} - -static int amdgpu_ras_recovery_init(struct amdgpu_device *adev) +int amdgpu_ras_recovery_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data **data = &con->eh_data; + struct ras_err_handler_data **data; + int ret; - *data = kmalloc(sizeof(**data), - GFP_KERNEL|__GFP_ZERO); - if (!*data) - return -ENOMEM; + if (con) + data = &con->eh_data; + else + return 0; + + *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); + if (!*data) { + ret = -ENOMEM; + goto out; + } mutex_init(&con->recovery_lock); INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); atomic_set(&con->in_recovery, 0); con->adev = adev; - amdgpu_ras_load_bad_pages(adev); - amdgpu_ras_reserve_bad_pages(adev); + ret = amdgpu_ras_eeprom_init(&con->eeprom_control); + if (ret) + goto free; + + if (con->eeprom_control.num_recs) { + ret = amdgpu_ras_load_bad_pages(adev); + if (ret) + goto free; + ret = amdgpu_ras_reserve_bad_pages(adev); + if (ret) + goto release; + } return 0; + +release: + amdgpu_ras_release_bad_pages(adev); +free: + kfree((*data)->bps); + kfree((*data)->bps_bo); + kfree(*data); + con->eh_data = NULL; +out: + DRM_WARN("Failed to initialize ras recovery!\n"); + + return ret; } static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) @@ -1442,13 +1640,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data = con->eh_data; + /* recovery_init failed to init it, fini is useless */ + if (!data) + return 0; + cancel_work_sync(&con->recovery_work); - amdgpu_ras_save_bad_pages(adev); amdgpu_ras_release_bad_pages(adev); mutex_lock(&con->recovery_lock); con->eh_data = NULL; kfree(data->bps); + kfree(data->bps_bo); kfree(data); mutex_unlock(&con->recovery_lock); @@ -1500,6 +1702,7 @@ static void 
amdgpu_ras_check_supported(struct amdgpu_device *adev, int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int r; if (con) return 0; @@ -1527,31 +1730,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev) /* Might need to get this flag from vbios. */ con->flags = RAS_DEFAULT_FLAGS; - if (amdgpu_ras_recovery_init(adev)) - goto recovery_out; + if (adev->nbio.funcs->init_ras_controller_interrupt) { + r = adev->nbio.funcs->init_ras_controller_interrupt(adev); + if (r) + return r; + } + + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { + r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); + if (r) + return r; + } amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; if (amdgpu_ras_fs_init(adev)) goto fs_out; - /* ras init for each ras block */ - if (adev->umc.funcs->ras_init) - adev->umc.funcs->ras_init(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); return 0; fs_out: - amdgpu_ras_recovery_fini(adev); -recovery_out: amdgpu_ras_set_context(adev, NULL); kfree(con); return -EINVAL; } +/* helper function to handle common stuff in the ip late init phase */ +int amdgpu_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_fs_if *fs_info, + struct ras_ih_if *ih_info) +{ + int r; + + /* disable RAS feature per IP block if it is not supported */ + if (!amdgpu_ras_is_supported(adev, ras_block->block)) { + amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); + return 0; + } + + r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); + if (r) { + if (r == -EAGAIN) { + /* request gpu reset. will run again */ + amdgpu_ras_request_reset_on_boot(adev, + ras_block->block); + return 0; + } else if (adev->in_suspend || adev->in_gpu_reset) { + /* in resume phase, if we fail to enable ras, + * clean up all ras fs nodes and disable ras */ + goto cleanup; + } else + return r; + } + + /* in resume phase, no need to create ras fs node */ + if (adev->in_suspend || adev->in_gpu_reset) + return 0; + + if (ih_info->cb) { + r = amdgpu_ras_interrupt_add_handler(adev, ih_info); + if (r) + goto interrupt; + } + + amdgpu_ras_debugfs_create(adev, fs_info); + + r = amdgpu_ras_sysfs_create(adev, fs_info); + if (r) + goto sysfs; + + return 0; +cleanup: + amdgpu_ras_sysfs_remove(adev, ras_block); +sysfs: + amdgpu_ras_debugfs_remove(adev, ras_block); + if (ih_info->cb) + amdgpu_ras_interrupt_remove_handler(adev, ih_info); +interrupt: + amdgpu_ras_feature_enable(adev, ras_block, 0); + return r; +} + +/* helper function to remove the ras fs node and interrupt handler */ +void amdgpu_ras_late_fini(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_ih_if *ih_info) +{ + if (!ras_block || !ih_info) + return; + + amdgpu_ras_sysfs_remove(adev, ras_block); + amdgpu_ras_debugfs_remove(adev, ras_block); + if (ih_info->cb) + amdgpu_ras_interrupt_remove_handler(adev, ih_info); + amdgpu_ras_feature_enable(adev, ras_block, 0); +} + /* do some init work after IP late init, as a dependency. * and it runs in resume/gpu reset/booting up cases.
*/ @@ -1645,3 +1923,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) return 0; } + +void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) +{ + uint32_t hw_supported, supported; + + amdgpu_ras_check_supported(adev, &hw_supported, &supported); + if (!hw_supported) + return; + + if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { + DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); + + amdgpu_ras_reset_gpu(adev, false); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6c76bb2a6843..f80fd3428c98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -317,8 +317,6 @@ struct amdgpu_ras { struct list_head head; /* debugfs */ struct dentry *dir; - /* debugfs ctrl */ - struct dentry *ent; /* sysfs */ struct device_attribute features_attr; struct bin_attribute badpages_attr; @@ -334,7 +332,7 @@ struct amdgpu_ras { struct mutex recovery_lock; uint32_t flags; - + bool reboot; struct amdgpu_ras_eeprom_control eeprom_control; }; @@ -347,15 +345,14 @@ struct ras_err_data { unsigned long ue_count; unsigned long ce_count; unsigned long err_addr_cnt; - uint64_t *err_addr; + struct eeprom_table_record *err_addr; }; struct ras_err_handler_data { - /* point to bad pages array */ - struct { - unsigned long bp; - struct amdgpu_bo *bo; - } *bps; + /* point to bad page records array */ + struct eeprom_table_record *bps; + /* point to reserved bo array */ + struct amdgpu_bo **bps_bo; /* the count of entries */ int count; /* the space can place new entries */ @@ -365,7 +362,7 @@ struct ras_err_handler_data { }; typedef int (*ras_ih_cb)(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry); struct ras_ih_data { @@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, return ras && (ras->supported & (1 << block)); } +int amdgpu_ras_recovery_init(struct amdgpu_device *adev); int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, unsigned int block); @@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, - unsigned long *bps, int pages); + struct eeprom_table_record *bps, int pages); int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev); @@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev, { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + /* save bad page to eeprom before gpu reset, + * i2c may be unstable in gpu reset + */ + if (in_task()) + amdgpu_ras_reserve_bad_pages(adev); + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) schedule_work(&ras->recovery_work); return 0; @@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) { int amdgpu_ras_init(struct amdgpu_device *adev); int amdgpu_ras_fini(struct amdgpu_device *adev); int amdgpu_ras_pre_fini(struct amdgpu_device *adev); +int amdgpu_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_fs_if *fs_info, + struct ras_ih_if *ih_info); +void amdgpu_ras_late_fini(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_ih_if *ih_info); int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable); @@ -599,4 +610,14 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct 
ras_dispatch_if *info); + +extern atomic_t amdgpu_ras_in_intr; + +static inline bool amdgpu_ras_intr_triggered(void) +{ + return !!atomic_read(&amdgpu_ras_in_intr); +} + +void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 8a32b5c93778..7de16c0c2f20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -100,7 +100,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, return ret; } -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control); + + +static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) +{ + int i; + uint32_t tbl_sum = 0; + + /* Header checksum, skip checksum field in the calculation */ + for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) + tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); + + return tbl_sum; +} + +static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, + int num) +{ + int i, j; + uint32_t tbl_sum = 0; + + /* Records checksum */ + for (i = 0; i < num; i++) { + struct eeprom_table_record *record = &records[i]; + + for (j = 0; j < sizeof(*record); j++) { + tbl_sum += *(((unsigned char *)record) + j); + } + } + + return tbl_sum; +} + +static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num) +{ + return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); +} + +/* Checksum = 256 -((sum of all table entries) mod 256) */ +static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num, + uint32_t old_hdr_byte_sum) +{ + /* + * This will update the table sum with new records. + * + * TODO: What happens when the EEPROM table is to be wrapped around + * and old records from start will get overridden. 
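+ * For now the byte sum is maintained incrementally: the stale header bytes + * are subtracted below and the refreshed header plus the new records are + * added back. E.g. a table byte sum of 423 stores 256 - (423 % 256) = 89.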
+ */ + + /* need to recalculate updated header byte sum */ + control->tbl_byte_sum -= old_hdr_byte_sum; + control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); + + control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); +} + +/* table sum mod 256 + checksum must equal 256 */ +static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num) +{ + control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); + + if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { + DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); + return false; + } + + return true; +} + +int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) +{ + unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + int ret = 0; + + mutex_lock(&control->tbl_mutex); + + hdr->header = EEPROM_TABLE_HDR_VAL; + hdr->version = EEPROM_TABLE_VER; + hdr->first_rec_offset = EEPROM_RECORD_START; + hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; + + control->tbl_byte_sum = 0; + __update_tbl_checksum(control, NULL, 0, 0); + control->next_addr = EEPROM_RECORD_START; + + ret = __update_table_header(control, buff); + + mutex_unlock(&control->tbl_mutex); + + return ret; + +} int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) { @@ -122,6 +216,10 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); break; + case CHIP_ARCTURUS: + ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); + break; + default: return 0; } @@ -143,25 +241,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) if (hdr->header == EEPROM_TABLE_HDR_VAL) { control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE; + control->tbl_byte_sum = __calc_hdr_byte_sum(control); + control->next_addr = EEPROM_RECORD_START; + DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", control->num_recs); } else { DRM_INFO("Creating new EEPROM table"); - hdr->header = EEPROM_TABLE_HDR_VAL; - hdr->version = EEPROM_TABLE_VER; - hdr->first_rec_offset = EEPROM_RECORD_START; - hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; - - adev->psp.ras.ras->eeprom_control.tbl_byte_sum = - __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control); - ret = __update_table_header(control, buff); + ret = amdgpu_ras_eeprom_reset_table(control); } - /* Start inserting records from here */ - adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START; - return ret == 1 ?
0 : -EIO; } @@ -173,6 +264,9 @@ void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control) case CHIP_VEGA20: smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor); break; + case CHIP_ARCTURUS: + smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor); + break; default: return; @@ -226,8 +320,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); i += 6; - buff[i++] = record->mem_channel; - buff[i++] = record->mcumc_id; + record->mem_channel = buff[i++]; + record->mcumc_id = buff[i++]; memcpy(&tmp, buff + i, 6); record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff); @@ -266,87 +360,18 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address) return curr_address; } - -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) -{ - int i; - uint32_t tbl_sum = 0; - - /* Header checksum, skip checksum field in the calculation */ - for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) - tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); - - return tbl_sum; -} - -static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, - int num) -{ - int i, j; - uint32_t tbl_sum = 0; - - /* Records checksum */ - for (i = 0; i < num; i++) { - struct eeprom_table_record *record = &records[i]; - - for (j = 0; j < sizeof(*record); j++) { - tbl_sum += *(((unsigned char *)record) + j); - } - } - - return tbl_sum; -} - -static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) -{ - return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); -} - -/* Checksum = 256 -((sum of all table entries) mod 256) */ -static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num, - uint32_t old_hdr_byte_sum) -{ - /* - * This will update the table sum with new records. - * - * TODO: What happens when the EEPROM table is to be wrapped around - * and old records from start will get overridden. 
- */ - - /* need to recalculate updated header byte sum */ - control->tbl_byte_sum -= old_hdr_byte_sum; - control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); - - control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); -} - -/* table sum mod 256 + checksum must equals 256 */ -static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) -{ - control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); - - if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { - DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); - return false; - } - - return true; -} - int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, bool write, int num) { int i, ret = 0; - struct i2c_msg *msgs; - unsigned char *buffs; + struct i2c_msg *msgs, *msg; + unsigned char *buffs, *buff; + struct eeprom_table_record *record; struct amdgpu_device *adev = to_amdgpu_device(control); - if (adev->asic_type != CHIP_VEGA20) + if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS) return 0; buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE, @@ -373,9 +398,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, * 256b */ for (i = 0; i < num; i++) { - unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - struct eeprom_table_record *record = &records[i]; - struct i2c_msg *msg = &msgs[i]; + buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; + record = &records[i]; + msg = &msgs[i]; control->next_addr = __correct_eeprom_dest_address(control->next_addr); @@ -415,8 +440,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, if (!write) { for (i = 0; i < num; i++) { - unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - struct eeprom_table_record *record = &records[i]; + buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; + record = &records[i]; __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 41f3fcb9a29b..622269957c1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -79,6 +79,7 @@ struct eeprom_table_record { int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control); +int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 5c13c503e61f..6010999d9020 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -23,6 +23,7 @@ #include "amdgpu.h" #include "amdgpu_sdma.h" +#include "amdgpu_ras.h" #define AMDGPU_CSA_SDMA_SIZE 64 /* SDMA CSA reside in the 3rd page of CSA */ @@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, return csa_mc_addr; } + +int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, + void *ras_ih_info) +{ + int r, i; + struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; + struct ras_fs_if fs_info = { + .sysfs_name = 
"sdma_err_count", + .debugfs_name = "sdma_err_inject", + }; + + if (!ih_info) + return -EINVAL; + + if (!adev->sdma.ras_if) { + adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->sdma.ras_if) + return -ENOMEM; + adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA; + adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->sdma.ras_if->sub_block_index = 0; + strcpy(adev->sdma.ras_if->name, "sdma"); + } + fs_info.head = ih_info->head = *adev->sdma.ras_if; + + r = amdgpu_ras_late_init(adev, adev->sdma.ras_if, + &fs_info, ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + if (r) + goto late_fini; + } + } else { + r = 0; + goto free; + } + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info); +free: + kfree(adev->sdma.ras_if); + adev->sdma.ras_if = NULL; + return r; +} + +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && + adev->sdma.ras_if) { + struct ras_common_if *ras_if = adev->sdma.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + /* the cb member will not be used by + * amdgpu_ras_interrupt_remove_handler, init it only + * to cheat the check in ras_late_fini + */ + .cb = amdgpu_sdma_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + amdgpu_ras_reset_gpu(adev, 0); + + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->sdma.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index a9ae0d8a0589..761ff8be6314 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -104,4 +104,13 @@ struct amdgpu_sdma_instance * amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid); +int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, + void *ras_ih_info); +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev); +int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b66d29d5ffa2..b158230af8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; 
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 77674a7b9616..f940526c5889 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __field(unsigned int, context) __field(unsigned int, seqno) __field(struct dma_fence *, fence) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); TRACE_EVENT(amdgpu_sched_run_job, @@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __field(unsigned int, context) __field(unsigned int, seqno) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs, TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint64_t flags), - TP_ARGS(pe, addr, count, incr, flags), + uint32_t incr, uint64_t flags, bool direct), + TP_ARGS(pe, addr, count, incr, flags, direct), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) __field(u64, flags) + __field(bool, direct) ), TP_fast_assign( @@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes, __entry->count = count; __entry->incr = incr; __entry->flags = flags; + __entry->direct = direct; ), - TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u", - __entry->pe, __entry->addr, __entry->incr, - __entry->flags, __entry->count) + TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, " + "direct=%d", __entry->pe, __entry->addr, __entry->incr, + __entry->flags, __entry->count, __entry->direct) ); TRACE_EVENT(amdgpu_vm_copy_ptes, - TP_PROTO(uint64_t pe, uint64_t src, unsigned count), - TP_ARGS(pe, src, count), + TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct), + TP_ARGS(pe, src, count, direct), TP_STRUCT__entry( __field(u64, pe) __field(u64, src) __field(u32, count) + __field(bool, direct) ), TP_fast_assign( __entry->pe = pe; __entry->src = src; __entry->count = count; + __entry->direct = direct; ), - TP_printk("pe=%010Lx,
src=%010Lx, count=%u", - __entry->pe, __entry->src, __entry->count) + TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d", + __entry->pe, __entry->src, __entry->count, + __entry->direct) ); TRACE_EVENT(amdgpu_vm_flush, @@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence), TP_ARGS(sched_job, fence), TP_STRUCT__entry( - __field(const char *,name) + __string(ring, sched_job->base.sched->name); __field(uint64_t, id) __field(struct dma_fence *, fence) __field(uint64_t, ctx) @@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, ), TP_fast_assign( - __entry->name = sched_job->base.sched->name; + __assign_str(ring, sched_job->base.sched->name) __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", - __entry->name, __entry->id, + __get_str(ring), __entry->id, __entry->fence, __entry->ctx, __entry->seqno) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8e867b8b432f..61d9b7774d42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -39,6 +39,7 @@ #include <linux/slab.h> #include <linux/swap.h> #include <linux/swiotlb.h> +#include <linux/dma-buf.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> @@ -54,6 +55,7 @@ #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_sdma.h" +#include "amdgpu_ras.h" #include "bif/bif_4_1_d.h" static int amdgpu_map_buffer(struct ttm_buffer_object *bo, @@ -484,15 +486,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { - struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; - adev = amdgpu_ttm_adev(bo->bdev); - /* create space/pages for new_mem in GTT space */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; @@ -543,15 +542,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { - struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; - adev = amdgpu_ttm_adev(bo->bdev); - /* make space in GTT for old_mem buffer */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; @@ -763,6 +759,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, */ struct amdgpu_ttm_tt { struct ttm_dma_tt ttm; + struct drm_gem_object *gobj; u64 offset; uint64_t userptr; struct task_struct *usertask; @@ -1217,16 +1214,14 @@ static struct ttm_backend_func amdgpu_backend_func = { static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { - struct amdgpu_device *adev; struct amdgpu_ttm_tt *gtt; - adev = amdgpu_ttm_adev(bo->bdev); - gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; } gtt->ttm.ttm.func = &amdgpu_backend_func; + gtt->gobj = &bo->base; /* allocate space for the uninitialized page entries */ if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { @@ -1247,7 +1242,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); /* 
user pages are bound by amdgpu_ttm_tt_pin_userptr() */ if (gtt && gtt->userptr) { @@ -1260,7 +1254,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, return 0; } - if (slave && ttm->sg) { + if (ttm->page_flags & TTM_PAGE_FLAG_SG) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -1287,9 +1293,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, */ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) { - struct amdgpu_device *adev; struct amdgpu_ttm_tt *gtt = (void *)ttm; - bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + struct amdgpu_device *adev; if (gtt && gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); @@ -1298,7 +1303,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) return; } - if (slave) + if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + return; + } + + if (ttm->page_flags & TTM_PAGE_FLAG_SG) return; adev = amdgpu_ttm_adev(ttm->bdev); @@ -1634,81 +1648,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) */ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - u64 vram_size = adev->gmc.visible_vram_size; - u64 offset = adev->fw_vram_usage.start_offset; - u64 size = adev->fw_vram_usage.size; - struct amdgpu_bo *bo; - - memset(&bp, 0, sizeof(bp)); - bp.size = adev->fw_vram_usage.size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; + uint64_t vram_size = adev->gmc.visible_vram_size; + adev->fw_vram_usage.va = NULL; adev->fw_vram_usage.reserved_bo = NULL; - if (adev->fw_vram_usage.size > 0 && - adev->fw_vram_usage.size <= vram_size) { + if (adev->fw_vram_usage.size == 0 || + adev->fw_vram_usage.size > vram_size) + return 0; - r = amdgpu_bo_create(adev, &bp, - &adev->fw_vram_usage.reserved_bo); - if (r) - goto error_create; + return amdgpu_bo_create_kernel_at(adev, + adev->fw_vram_usage.start_offset, + adev->fw_vram_usage.size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->fw_vram_usage.reserved_bo, + &adev->fw_vram_usage.va); +} - r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); - if (r) - goto error_reserve; +/* + * Memory training reservation functions + */ - /* remove the original mem node and create a new one at the - * request position - */ - bo = adev->fw_vram_usage.reserved_bo; - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } +/** + * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram + * + * @adev: amdgpu_device pointer + * + * free memory training reserved vram if it has been reserved.
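+ * Both the c2p and p2c buffers are released and ctx->init falls back to + * PSP_MEM_TRAIN_NOT_SUPPORT.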
+ */ +static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) +{ + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, - &bo->tbo.mem, &ctx); - if (r) - goto error_pin; + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); + ctx->c2p_bo = NULL; - r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, - AMDGPU_GEM_DOMAIN_VRAM, - adev->fw_vram_usage.start_offset, - (adev->fw_vram_usage.start_offset + - adev->fw_vram_usage.size)); - if (r) - goto error_pin; - r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); - if (r) - goto error_kmap; + amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); + ctx->p2c_bo = NULL; - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); + return 0; +} + +/** + * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training + * + * @adev: amdgpu_device pointer + * + * create bo vram reservation from memory training. + */ +static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) +{ + int ret; + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; + + memset(ctx, 0, sizeof(*ctx)); + if (!adev->fw_vram_usage.mem_train_support) { + DRM_DEBUG("memory training is not supported!\n"); + return 0; } - return r; -error_kmap: - amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); -error_pin: - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); -error_reserve: - amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); -error_create: - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; - return r; + ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; + ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); + ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); + + ret = amdgpu_bo_create_kernel_at(adev, + ctx->p2c_train_data_offset, + ctx->train_data_size, + AMDGPU_GEM_DOMAIN_VRAM, + &ctx->p2c_bo, + NULL); + if (ret) { + DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); + goto Err_out; + } + + ret = amdgpu_bo_create_kernel_at(adev, + ctx->c2p_train_data_offset, + ctx->train_data_size, + AMDGPU_GEM_DOMAIN_VRAM, + &ctx->c2p_bo, + NULL); + if (ret) { + DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + goto Err_out; + } + + ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; + return 0; + +Err_out: + amdgpu_ttm_training_reserve_vram_fini(adev); + return ret; } + /** * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. @@ -1772,6 +1810,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* + * The reserved vram for memory training must be pinned to the specified + * place on the VRAM, so reserve it early. + */ + r = amdgpu_ttm_training_reserve_vram_init(adev); + if (r) + return r; + /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS @@ -1782,6 +1828,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) NULL, &stolen_vga_buf); if (r) return r; + + /* + * reserve one TMR (64K) memory at the top of VRAM which holds + * IP Discovery data and is protected by PSP.
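+ * The buffer is handed back to VRAM in amdgpu_ttm_fini() below.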
+ */ + r = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, + DISCOVERY_TMR_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->discovery_memory, + NULL); + if (r) + return r; + DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); @@ -1857,7 +1917,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) return; amdgpu_ttm_debugfs_fini(adev); + amdgpu_ttm_training_reserve_vram_fini(adev); + /* return the IP Discovery TMR memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); + if (adev->mman.aper_base_kaddr) iounmap(adev->mman.aper_base_kaddr); adev->mman.aper_base_kaddr = NULL; @@ -1953,10 +2017,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE; - num_dw = adev->mman.buffer_funcs->copy_num_dw; - while (num_dw & 0x7) - num_dw++; - + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = num_pages * 8; r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); @@ -2016,11 +2077,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, max_bytes = adev->mman.buffer_funcs->copy_max_bytes; num_loops = DIV_ROUND_UP(byte_count, max_bytes); - num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; - - /* for IB padding */ - while (num_dw & 0x7) - num_dw++; + num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 3a6115ad0196..833fc4b68940 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_RAVEN: case CHIP_VEGA12: case CHIP_VEGA20: + case CHIP_ARCTURUS: case CHIP_RENOIR: case CHIP_NAVI10: case CHIP_NAVI14: @@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; - case CHIP_ARCTURUS: - return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index b34f00d42049..410587b950f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 { uint32_t ta_ras_ucode_version; uint32_t ta_ras_offset_bytes; uint32_t ta_ras_size_bytes; + uint32_t ta_hdcp_ucode_version; + uint32_t ta_hdcp_offset_bytes; + uint32_t ta_hdcp_size_bytes; + uint32_t ta_dtm_ucode_version; + uint32_t ta_dtm_offset_bytes; + uint32_t ta_dtm_size_bytes; }; /* version_major=1, version_minor=0 */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c new file mode 100644 index 000000000000..d4fb9cf27e21 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -0,0 +1,158 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu_ras.h" + +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "umc_err_count", + .debugfs_name = "umc_err_inject", + }; + struct ras_ih_if ih_info = { + .cb = amdgpu_umc_process_ras_data_cb, + }; + + if (!adev->umc.ras_if) { + adev->umc.ras_if = + kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->umc.ras_if) + return -ENOMEM; + adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC; + adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->umc.ras_if->sub_block_index = 0; + strcpy(adev->umc.ras_if->name, "umc"); + } + ih_info.head = fs_info.head = *adev->umc.ras_if; + + r = amdgpu_ras_late_init(adev, adev->umc.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + /* ras init of specific umc version */ + if (adev->umc.funcs && adev->umc.funcs->err_cnt_init) + adev->umc.funcs->err_cnt_init(adev); + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info); +free: + kfree(adev->umc.ras_if); + adev->umc.ras_if = NULL; + return r; +} + +void amdgpu_umc_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && + adev->umc.ras_if) { + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_umc_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + /* When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return AMDGPU_RAS_SUCCESS; + + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->umc.funcs && + adev->umc.funcs->query_ras_error_count) + adev->umc.funcs->query_ras_error_count(adev, ras_error_status); + + if (adev->umc.funcs && + adev->umc.funcs->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, 
+ sizeof(struct eeprom_table_record), GFP_KERNEL); + /* still call query_ras_error_address to clear error status + * even if a NOMEM error is encountered + */ + if (!err_data->err_addr) + DRM_WARN("Failed to alloc memory for umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.funcs->query_ras_error_address(adev, ras_error_status); + } + + /* only uncorrectable error needs gpu reset */ + if (err_data->ue_count) { + if (err_data->err_addr_cnt && + amdgpu_ras_add_bad_pages(adev, err_data->err_addr, + err_data->err_addr_cnt)) + DRM_WARN("Failed to add ras bad page!\n"); + + amdgpu_ras_reset_gpu(adev, 0); + } + + kfree(err_data->err_addr); + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 975afa04df09..3283032a78e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -54,7 +54,8 @@ adev->umc.funcs->disable_umc_index_mode(adev); struct amdgpu_umc_funcs { - void (*ras_init)(struct amdgpu_device *adev); + void (*err_cnt_init)(struct amdgpu_device *adev); + int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, @@ -62,6 +63,7 @@ struct amdgpu_umc_funcs { void (*enable_umc_index_mode)(struct amdgpu_device *adev, uint32_t umc_instance); void (*disable_umc_index_mode)(struct amdgpu_device *adev); + void (*init_registers)(struct amdgpu_device *adev); }; struct amdgpu_umc { @@ -75,8 +77,17 @@ struct amdgpu_umc { uint32_t channel_offs; /* channel index table of interleaved memory */ const uint32_t *channel_idx_tbl; + struct ras_common_if *ras_if; const struct amdgpu_umc_funcs *funcs; }; +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev); +void amdgpu_umc_ras_fini(struct amdgpu_device *adev); +int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry); +int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b2c364b8695f..b4dd89af6f09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -39,6 +39,8 @@ #include "cikd.h" #include "uvd/uvd_4_2_d.h" +#include "amdgpu_ras.h" + /* 1 second timeout */ #define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) @@ -372,7 +374,13 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.inst[j].saved_bo) return -ENOMEM; - memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + /* re-write 0 since err_event_athub will corrupt VCPU buffer */ + if (amdgpu_ras_intr_triggered()) { + DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); + memset(adev->uvd.inst[j].saved_bo, 0, size); + } else { + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + } } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b70b3c45bb29..703677f2ff6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12); MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); +static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, + struct dma_fence **fence); +static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + bool direct, struct dma_fence **fence); /** * amdgpu_vce_init - allocate memory, load vce firmware @@ -428,14 +433,15 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * * Open up a stream for HW test */ -int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) +static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -444,7 +450,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); /* stitch together an VCE create msg */ ib->length_dw = 0; @@ -476,8 +482,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000014; /* len */ ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000001; for (i = ib->length_dw; i < ib_size_dw; ++i) @@ -507,8 +513,8 @@ err: * * Close up a stream for HW test or if userspace failed to do so */ -int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) +static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + bool direct, struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; @@ -1110,13 +1116,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; /* skip vce ring1/2 ib test for now, since it's not reliable */ if (ring != &ring->adev->vce.ring[0]) return 0; - r = amdgpu_vce_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -1132,5 +1145,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 30ea54dd9117..d6d83a3ec803 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -58,10 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_entity_init(struct amdgpu_device *adev); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); -int amdgpu_vce_get_create_msg(struct amdgpu_ring 
*ring, uint32_t handle, - struct dma_fence **fence); -int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 7a6beb2e7c4e..3199e4a5ff12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x0000000b; ib->ptr[ib->length_dw++] = 0x00000014; @@ -621,13 +622,14 @@ err: } static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x0000000b; ib->ptr[ib->length_dw++] = 0x00000014; @@ -675,13 +677,20 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; - r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence); + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence); if (r) goto error; @@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 
5251352f5922..598c24505c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, if (level == adev->vm_manager.root_level) /* For the root directory */ - return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift; + return round_up(adev->vm_manager.max_pfn, 1ULL << shift) + >> shift; else if (level != AMDGPU_VM_PTB) /* Everything in between */ return 512; @@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); } -/** +/* * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt */ struct amdgpu_vm_pt_cursor { @@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev, * * @adev: amdgpu_device structure * @vm: amdgpu_vm structure + * @start: optional cursor to start with * @cursor: state to initialize * * Starts a deep first traversal of the PD/PT tree. @@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev, amdgpu_vm_pt_ancestor(cursor); } -/** +/* * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs */ #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ @@ -566,6 +568,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } +/** + * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag + * + * @bo: BO which was removed from the LRU + * + * Make sure the bulk_moveable flag is updated when a BO is removed from the + * LRU. + */ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) { struct amdgpu_bo *abo; @@ -600,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct ttm_bo_global *glob = adev->mman.bdev.glob; struct amdgpu_vm_bo_base *bo_base; if (vm->bulk_moveable) { - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return; } memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(bo_base, &vm->idle, vm_status) { struct amdgpu_bo *bo = bo_base->bo; @@ -624,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, ttm_bo_move_to_lru_tail(&bo->shadow->tbo, &vm->lru_bulk_move); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); vm->bulk_moveable = true; } @@ -693,6 +702,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) * @adev: amdgpu_device pointer * @vm: VM to clear BO from * @bo: BO to clear + * @direct: use a direct update * * Root PD needs to be reserved when calling this. 
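 * With @direct set the clear is submitted on the paging queue instead of * the normal update path.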
* @@ -701,7 +711,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo) + struct amdgpu_bo *bo, + bool direct) { struct ttm_operation_ctx ctx = { true, false }; unsigned level = adev->vm_manager.root_level; @@ -760,6 +771,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL); if (r) @@ -813,10 +825,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requesting vm + * @level: the page table level + * @direct: use a direct update * @bp: resulting BO allocation parameters */ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int level, struct amdgpu_bo_param *bp) + int level, bool direct, + struct amdgpu_bo_param *bp) { memset(bp, 0, sizeof(*bp)); @@ -831,6 +846,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, else if (!vm->root.base.bo || vm->root.base.bo->shadow) bp->flags |= AMDGPU_GEM_CREATE_SHADOW; bp->type = ttm_bo_type_kernel; + bp->no_wait_gpu = direct; if (vm->root.base.bo) bp->resv = vm->root.base.bo->tbo.base.resv; } @@ -841,6 +857,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, * @adev: amdgpu_device pointer * @vm: VM to allocate page tables for * @cursor: Which page table to allocate + * @direct: use a direct update * * Make sure a specific page table or directory is allocated. * @@ -850,7 +867,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_vm_pt_cursor *cursor) + struct amdgpu_vm_pt_cursor *cursor, + bool direct) { struct amdgpu_vm_pt *entry = cursor->entry; struct amdgpu_bo_param bp; @@ -871,7 +889,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, if (entry->base.bo) return 0; - amdgpu_vm_bo_param(adev, vm, cursor->level, &bp); + amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); r = amdgpu_bo_create(adev, &bp, &pt); if (r) @@ -883,7 +901,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - r = amdgpu_vm_clear_bo(adev, vm, pt); + r = amdgpu_vm_clear_bo(adev, vm, pt, direct); if (r) goto error_free_pt; @@ -1020,7 +1038,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, * Returns: * 0 on success, errno otherwise.
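 * The PASID mapping is refreshed under the id_mgr lock, see below.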
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, + bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -1034,10 +1053,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ id->oa_base != job->oa_base || id->oa_size != job->oa_size); bool vm_flush_needed = job->vm_needs_flush; - bool pasid_mapping_needed = id->pasid != job->pasid || - !id->pasid_mapping || - !dma_fence_is_signaled(id->pasid_mapping); struct dma_fence *fence = NULL; + bool pasid_mapping_needed = false; unsigned patch_offset = 0; int r; @@ -1047,6 +1064,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ pasid_mapping_needed = true; } + mutex_lock(&id_mgr->lock); + if (id->pasid != job->pasid || !id->pasid_mapping || + !dma_fence_is_signaled(id->pasid_mapping)) + pasid_mapping_needed = true; + mutex_unlock(&id_mgr->lock); + gds_switch_needed &= !!ring->funcs->emit_gds_switch; vm_flush_needed &= !!ring->funcs->emit_vm_flush && job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; @@ -1086,9 +1109,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } if (pasid_mapping_needed) { + mutex_lock(&id_mgr->lock); id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); id->pasid_mapping = dma_fence_get(fence); + mutex_unlock(&id_mgr->lock); } dma_fence_put(fence); @@ -1172,10 +1197,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) return result; } -/* +/** * amdgpu_vm_update_pde - update a single level in the hierarchy * - * @param: parameters for the update + * @params: parameters for the update * @vm: requested vm * @entry: entry to update * @@ -1199,7 +1224,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); } -/* +/** * amdgpu_vm_invalidate_pds - mark all PDs as invalid * * @adev: amdgpu_device pointer @@ -1218,19 +1243,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, amdgpu_vm_bo_relocated(&entry->base); } -/* - * amdgpu_vm_update_directories - make sure that all directories are valid +/** + * amdgpu_vm_update_pdes - make sure that all directories are valid * * @adev: amdgpu_device pointer * @vm: requested vm + * @direct: submit directly to the paging queue * * Makes sure all directories are up to date. * * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_update_directories(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +int amdgpu_vm_update_pdes(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct) { struct amdgpu_vm_update_params params; int r; @@ -1241,6 +1267,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_VM, NULL); if (r) @@ -1268,7 +1295,7 @@ error: return r; } -/** +/* * amdgpu_vm_update_flags - figure out flags for PTE updates * * Make sure to set the right flags for the PTEs at the desired level. 
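[Editor's note] The amdgpu_vm_flush() hunk above moves both the `pasid_mapping_needed` check and the later publication of the new `id->pasid` / `id->pasid_mapping` under `id_mgr->lock`, so the check and the update cannot interleave with another thread. A standalone pthread model of that check-under-lock / publish-under-lock pattern; the `vmid` struct and its field names are placeholders, and a plain bool stands in for the signaled fence:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vmid {
    pthread_mutex_t lock;
    unsigned pasid;       /* currently mapped PASID */
    bool mapping_valid;   /* stands in for a signaled pasid_mapping fence */
};

static bool pasid_flush_needed(struct vmid *id, unsigned job_pasid)
{
    bool needed;

    pthread_mutex_lock(&id->lock);
    needed = id->pasid != job_pasid || !id->mapping_valid;
    pthread_mutex_unlock(&id->lock);
    return needed;
}

static void pasid_publish(struct vmid *id, unsigned job_pasid)
{
    pthread_mutex_lock(&id->lock);
    id->pasid = job_pasid;        /* both fields change atomically w.r.t. readers */
    id->mapping_valid = true;
    pthread_mutex_unlock(&id->lock);
}

int main(void)
{
    struct vmid id = { PTHREAD_MUTEX_INITIALIZER, 0, false };

    if (pasid_flush_needed(&id, 42))
        pasid_publish(&id, 42);
    printf("needed again: %d\n", pasid_flush_needed(&id, 42)); /* prints 0 */
    return 0;
}
```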
@@ -1391,7 +1418,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t incr, entry_end, pe_start; struct amdgpu_bo *pt; - r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor); + /* make sure that the page tables covering the address range are + * actually allocated + */ + r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, + params->direct); if (r) return r; @@ -1463,7 +1494,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, } while (frag_start < entry_end); if (amdgpu_vm_pt_descendant(adev, &cursor)) { - /* Free all child entries */ + /* Free all child entries. + * Update the tables with the flags and addresses and free up subsequent + * tables in the case of huge pages or freed up areas. + * This is the maximum you can free, because all other page tables are not + * completely covered by the range and so potentially still in use. + */ while (cursor.pfn < frag_start) { amdgpu_vm_free_pts(adev, params->vm, &cursor); amdgpu_vm_pt_next(adev, &cursor); @@ -1482,13 +1518,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer - * @exclusive: fence we need to sync to - * @pages_addr: DMA addresses to use for mapping * @vm: requested vm + * @direct: direct submission in a page fault + * @exclusive: fence we need to sync to * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries * @addr: addr to set the area to + * @pages_addr: DMA addresses to use for mapping * @fence: optional resulting fence * * Fill in the page table entries between @start and @last. @@ -1497,11 +1534,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * 0 for success, -EINVAL for failure. 
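[Editor's note] The amdgpu_vm_update_ptes() hunk above now calls amdgpu_vm_alloc_pts() while walking the range, so missing page tables are allocated on demand (with the `direct` flag passed through) rather than in a separate pre-allocation pass. A toy standalone model of allocate-on-walk for a two-level table; sizes and names are invented for the sketch:

```c
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct pt { int pte[ENTRIES]; };
struct pd { struct pt *pt[ENTRIES]; };

/* make sure the table covering this index exists before touching it */
static struct pt *alloc_pts(struct pd *pd, int idx)
{
    if (!pd->pt[idx]) {
        pd->pt[idx] = calloc(1, sizeof(struct pt));
        printf("allocated PT %d on demand\n", idx);
    }
    return pd->pt[idx];
}

static void update_ptes(struct pd *pd, int start, int end, int value)
{
    for (int pfn = start; pfn < end; pfn++) {
        struct pt *pt = alloc_pts(pd, pfn / ENTRIES);

        pt->pte[pfn % ENTRIES] = value;
    }
}

int main(void)
{
    struct pd pd = { 0 };

    update_ptes(&pd, 2, 6, 42);       /* spans PT 0 and PT 1 */
    printf("pte[5] = %d\n", pd.pt[1]->pte[1]);
    for (int i = 0; i < ENTRIES; i++)
        free(pd.pt[i]);
    return 0;
}
```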
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct, struct dma_fence *exclusive, - dma_addr_t *pages_addr, - struct amdgpu_vm *vm, uint64_t start, uint64_t last, uint64_t flags, uint64_t addr, + dma_addr_t *pages_addr, struct dma_fence **fence) { struct amdgpu_vm_update_params params; @@ -1511,6 +1548,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; params.pages_addr = pages_addr; /* sync to everything except eviction fences on unmapping */ @@ -1569,27 +1607,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) flags &= ~AMDGPU_PTE_WRITEABLE; - flags &= ~AMDGPU_PTE_EXECUTABLE; - flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; - - if (adev->asic_type >= CHIP_NAVI10) { - flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); - } else { - flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK); - } - - if ((mapping->flags & AMDGPU_PTE_PRT) && - (adev->asic_type >= CHIP_VEGA10)) { - flags |= AMDGPU_PTE_PRT; - if (adev->asic_type >= CHIP_NAVI10) { - flags |= AMDGPU_PTE_SNOOPED; - flags |= AMDGPU_PTE_LOG; - flags |= AMDGPU_PTE_SYSTEM; - } - flags &= ~AMDGPU_PTE_VALID; - } + /* Apply ASIC specific mapping flags */ + amdgpu_gmc_get_vm_pte(adev, mapping, &flags); trace_amdgpu_vm_bo_update(mapping); @@ -1633,7 +1652,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, dma_addr = pages_addr; } else { addr = pages_addr[pfn]; - max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE; + max_entries = count * + AMDGPU_GPU_PAGES_IN_CPU_PAGE; } } else if (flags & AMDGPU_PTE_VALID) { @@ -1642,9 +1662,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } last = min((uint64_t)mapping->last, start + max_entries - 1); - r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, start, last, flags, addr, - fence); + dma_addr, fence); if (r) return r; @@ -1672,8 +1692,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * Returns: * 0 for success, -EINVAL for failure. */ -int amdgpu_vm_bo_update(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, +int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; @@ -1700,7 +1719,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = dma_resv_get_excl(bo->tbo.base.resv); + exclusive = bo->tbo.moving; } if (bo) { @@ -1731,12 +1750,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, return r; } - if (vm->use_cpu_for_update) { - /* Flush HDP */ - mb(); - amdgpu_asic_flush_hdp(adev, NULL); - } - /* If the BO is not in its preferred location add it back to * the evicted list so that it gets validated again on the * next command submission. 
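[Editor's note] The open-coded CHIP_NAVI10 / CHIP_VEGA10 branches removed above are replaced by a single call to `amdgpu_gmc_get_vm_pte()` — a common refactor that moves per-ASIC PTE flag policy behind a function pointer so the generic mapping path stays chip-agnostic. A hypothetical standalone sketch of that dispatch shape; the flag values and the `gmc_funcs` layout here are made up, not the driver's real definitions:

```c
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID   (1u << 0)
#define PTE_SNOOPED (1u << 1)
#define PTE_PRT     (1u << 2)   /* invented flag bits for the sketch */

struct gmc_funcs {
    void (*get_vm_pte)(uint64_t mapping_flags, uint64_t *flags);
};

static void gmc_v9_get_vm_pte(uint64_t mapping_flags, uint64_t *flags)
{
    if (mapping_flags & PTE_PRT) {
        *flags |= PTE_PRT;
        *flags &= ~PTE_VALID;    /* PRT pages are not normally valid */
    }
}

static void gmc_v10_get_vm_pte(uint64_t mapping_flags, uint64_t *flags)
{
    if (mapping_flags & PTE_PRT) {
        *flags |= PTE_PRT | PTE_SNOOPED;
        *flags &= ~PTE_VALID;
    }
}

static const struct gmc_funcs gmc_v9  = { gmc_v9_get_vm_pte };
static const struct gmc_funcs gmc_v10 = { gmc_v10_get_vm_pte };

/* generic path: no chip checks, just one indirect call */
static uint64_t build_pte_flags(const struct gmc_funcs *f, uint64_t mapping_flags)
{
    uint64_t flags = PTE_VALID;

    f->get_vm_pte(mapping_flags, &flags);
    return flags;
}

int main(void)
{
    printf("v9:  %#llx\n", (unsigned long long)build_pte_flags(&gmc_v9, PTE_PRT));
    printf("v10: %#llx\n", (unsigned long long)build_pte_flags(&gmc_v10, PTE_PRT));
    return 0;
}
```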
@@ -1744,7 +1757,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { uint32_t mem_type = bo->tbo.mem.mem_type; - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + if (!(bo->preferred_domains & + amdgpu_mem_type_to_domain(mem_type))) amdgpu_vm_bo_evicted(&bo_va->base); else amdgpu_vm_bo_idle(&bo_va->base); @@ -1938,9 +1952,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, mapping->start, mapping->last, - init_pte_value, 0, &f); + init_pte_value, 0, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2682,12 +2696,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->freed); - /* create scheduler entity for page table updates */ - r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs, + /* create scheduler entities for page table updates */ + r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs, adev->vm_manager.vm_pte_num_rqs, NULL); if (r) return r; + r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs, + adev->vm_manager.vm_pte_num_rqs, NULL); + if (r) + goto error_free_direct; + vm->pte_support_ats = false; if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { @@ -2702,7 +2721,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, } DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && + !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->use_cpu_for_update) @@ -2711,12 +2731,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; - amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp); + amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; r = amdgpu_bo_create(adev, &bp, &root); if (r) - goto error_free_sched_entity; + goto error_free_delayed; r = amdgpu_bo_reserve(root, true); if (r) @@ -2728,7 +2748,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, amdgpu_vm_bo_base_init(&vm->root.base, vm, root); - r = amdgpu_vm_clear_bo(adev, vm, root); + r = amdgpu_vm_clear_bo(adev, vm, root, false); if (r) goto error_unreserve; @@ -2759,8 +2779,11 @@ error_free_root: amdgpu_bo_unref(&vm->root.base.bo); vm->root.base.bo = NULL; -error_free_sched_entity: - drm_sched_entity_destroy(&vm->entity); +error_free_delayed: + drm_sched_entity_destroy(&vm->delayed); + +error_free_direct: + drm_sched_entity_destroy(&vm->direct); return r; } @@ -2801,6 +2824,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requested vm + * @pasid: pasid to use * * This only works on GFX VMs that don't have any BOs added and no * page tables allocated yet. @@ -2816,7 +2840,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * Returns: * 0 for success, -errno for errors. 
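[Editor's note] amdgpu_vm_init() above now creates two scheduler entities and grows a second unwind label (`error_free_delayed` before `error_free_direct`), keeping the usual kernel rule that error labels tear resources down in the exact reverse of initialization order. A compact standalone illustration of that goto-unwind discipline:

```c
#include <stdio.h>

static int init_direct(void)  { puts("init direct");  return 0; }
static int init_delayed(void) { puts("init delayed"); return 0; }
static int init_root(void)    { puts("init root");    return -1; } /* force failure */

static void fini_direct(void)  { puts("fini direct"); }
static void fini_delayed(void) { puts("fini delayed"); }

static int vm_init(void)
{
    int r;

    r = init_direct();
    if (r)
        return r;

    r = init_delayed();
    if (r)
        goto error_free_direct;

    r = init_root();
    if (r)
        goto error_free_delayed;    /* newest resource is torn down first */

    return 0;

error_free_delayed:
    fini_delayed();
error_free_direct:
    fini_direct();
    return r;
}

int main(void)
{
    printf("vm_init -> %d\n", vm_init());
    return 0;
}
```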
*/ -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, + unsigned int pasid) { bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; @@ -2848,7 +2873,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns */ if (pte_support_ats != vm->pte_support_ats) { vm->pte_support_ats = pte_support_ats; - r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo); + r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false); if (r) goto free_idr; } @@ -2858,7 +2883,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns AMDGPU_VM_USE_CPU_FOR_COMPUTE); DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && + !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->use_cpu_for_update) @@ -2937,19 +2963,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; struct amdgpu_bo *root; - int i, r; + int i; amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); + root = amdgpu_bo_ref(vm->root.base.bo); + amdgpu_bo_reserve(root, true); if (vm->pasid) { unsigned long flags; spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + vm->pasid = 0; + } + + list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { + if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { + amdgpu_vm_prt_fini(adev, vm); + prt_fini_needed = false; + } + + list_del(&mapping->list); + amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - drm_sched_entity_destroy(&vm->entity); + amdgpu_vm_free_pts(adev, vm, NULL); + amdgpu_bo_unreserve(root); + amdgpu_bo_unref(&root); + WARN_ON(vm->root.base.bo); + + drm_sched_entity_destroy(&vm->direct); + drm_sched_entity_destroy(&vm->delayed); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); @@ -2962,26 +3007,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) list_del(&mapping->list); kfree(mapping); } - list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { - if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { - amdgpu_vm_prt_fini(adev, vm); - prt_fini_needed = false; - } - - list_del(&mapping->list); - amdgpu_vm_free_mapping(adev, vm, mapping, NULL); - } - root = amdgpu_bo_ref(vm->root.base.bo); - r = amdgpu_bo_reserve(root, true); - if (r) { - dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); - } else { - amdgpu_vm_free_pts(adev, vm, NULL); - amdgpu_bo_unreserve(root); - } - amdgpu_bo_unref(&root); - WARN_ON(vm->root.base.bo); dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vmid_free_reserved(adev, vm, i); @@ -3065,8 +3091,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: - /* current, we only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); + /* We only have requirement to reserve vmid from gfxhub */ + r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, + AMDGPU_GFXHUB_0); if (r) return r; break; @@ -3109,13 
+3136,88 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, */ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) { - if (!vm->task_info.pid) { - vm->task_info.pid = current->pid; - get_task_comm(vm->task_info.task_name, current); + if (vm->task_info.pid) + return; - if (current->group_leader->mm == current->mm) { - vm->task_info.tgid = current->group_leader->pid; - get_task_comm(vm->task_info.process_name, current->group_leader); - } + vm->task_info.pid = current->pid; + get_task_comm(vm->task_info.task_name, current); + + if (current->group_leader->mm != current->mm) + return; + + vm->task_info.tgid = current->group_leader->pid; + get_task_comm(vm->task_info.process_name, current->group_leader); +} + +/** + * amdgpu_vm_handle_fault - graceful handling of VM faults. + * @adev: amdgpu device pointer + * @pasid: PASID of the VM + * @addr: Address of the fault + * + * Try to gracefully handle a VM fault. Return true if the fault was handled and + * shouldn't be reported any more. + */ +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, + uint64_t addr) +{ + struct amdgpu_bo *root; + uint64_t value, flags; + struct amdgpu_vm *vm; + long r; + + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + if (vm) + root = amdgpu_bo_ref(vm->root.base.bo); + else + root = NULL; + spin_unlock(&adev->vm_manager.pasid_lock); + + if (!root) + return false; + + r = amdgpu_bo_reserve(root, true); + if (r) + goto error_unref; + + /* Double check that the VM still exists */ + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + if (vm && vm->root.base.bo != root) + vm = NULL; + spin_unlock(&adev->vm_manager.pasid_lock); + if (!vm) + goto error_unlock; + + addr /= AMDGPU_GPU_PAGE_SIZE; + flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | + AMDGPU_PTE_SYSTEM; + + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { + /* Redirect the access to the dummy page */ + value = adev->dummy_page_addr; + flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | + AMDGPU_PTE_WRITEABLE; + } else { + /* Let the hw retry silently on the PTE */ + value = 0; } + + r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, + flags, value, NULL, NULL); + if (r) + goto error_unlock; + + r = amdgpu_vm_update_pdes(adev, vm, true); + +error_unlock: + amdgpu_bo_unreserve(root); + if (r < 0) + DRM_ERROR("Can't handle page fault (%ld)\n", r); + +error_unref: + amdgpu_bo_unref(&root); + + return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2eda3a8c330d..4dbbe1b6b413 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 +/* Reserve 4MB VRAM for page tables */ +#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20) + /* max number of VMHUB */ #define AMDGPU_MAX_VMHUBS 3 #define AMDGPU_GFXHUB_0 0 @@ -199,6 +202,11 @@ struct amdgpu_vm_update_params { struct amdgpu_vm *vm; /** + * @direct: if changes should be made directly + */ + bool direct; + + /** * @pages_addr: * * DMA addresses to use for mapping @@ -254,8 +262,9 @@ struct amdgpu_vm { struct amdgpu_vm_pt root; struct dma_fence *last_update; - /* Scheduler entity for page table updates */ - struct drm_sched_entity entity; + /* Scheduler entities for page table updates */ + struct drm_sched_entity direct; + struct 
drm_sched_entity delayed; unsigned int pasid; /* dedicated to vm */ @@ -357,8 +366,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); -int amdgpu_vm_update_directories(struct amdgpu_device *adev, - struct amdgpu_vm *vm); +int amdgpu_vm_update_pdes(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); @@ -404,6 +413,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info); +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, + uint64_t addr); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 5222d165abfc..73fec7a0ced5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, { int r; - /* Wait for PT BOs to be idle. PTs share the same resv. object - * as the root PD BO - */ - r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); - if (unlikely(r)) - return r; - /* Wait for any BO move to be completed */ if (exclusive) { r = dma_fence_wait(exclusive, true); @@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, return r; } - return 0; + /* Don't wait for submissions during page fault */ + if (p->direct) + return 0; + + /* Wait for PT BOs to be idle. PTs share the same resv. object + * as the root PD BO + */ + return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); } /** @@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, pe += (unsigned long)amdgpu_bo_kptr(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); for (i = 0; i < count; i++) { value = p->pages_addr ? diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 61fc584cbb1a..832db59f441e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, if (r) return r; + p->num_dw_left = ndw; + + /* Wait for moves to be completed */ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false); if (r) return r; - r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, - owner, false); - if (r) - return r; + /* Don't wait for any submissions during page fault handling */ + if (p->direct) + return 0; - p->num_dw_left = ndw; - return 0; + return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, + owner, false); } /** @@ -95,22 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, { struct amdgpu_bo *root = p->vm->root.base.bo; struct amdgpu_ib *ib = p->job->ibs; + struct drm_sched_entity *entity; struct amdgpu_ring *ring; struct dma_fence *f; int r; - ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched); + entity = p->direct ? 
&p->vm->direct : &p->vm->delayed; + ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > p->num_dw_left); - r = amdgpu_job_submit(p->job, &p->vm->entity, - AMDGPU_FENCE_OWNER_VM, &f); + r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error; amdgpu_bo_fence(root, f, true); - if (fence) + if (fence && !p->direct) swap(*fence, f); dma_fence_put(f); return 0; @@ -120,7 +123,6 @@ error: return r; } - /** * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping * @@ -141,7 +143,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, src += p->num_dw_left * 4; pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_copy_ptes(pe, src, count); + trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); } @@ -168,7 +170,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, struct amdgpu_ib *ib = p->job->ibs; pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); if (count < 3) { amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, count, incr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 3a9d8c15fe9f..82a3299e53c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -23,6 +23,9 @@ */ #include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_atomfirmware.h" +#include "atom.h" struct amdgpu_vram_mgr { struct drm_mm mm; @@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM])); } +static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + switch (adev->gmc.vram_vendor) { + case SAMSUNG: + return snprintf(buf, PAGE_SIZE, "samsung\n"); + case INFINEON: + return snprintf(buf, PAGE_SIZE, "infineon\n"); + case ELPIDA: + return snprintf(buf, PAGE_SIZE, "elpida\n"); + case ETRON: + return snprintf(buf, PAGE_SIZE, "etron\n"); + case NANYA: + return snprintf(buf, PAGE_SIZE, "nanya\n"); + case HYNIX: + return snprintf(buf, PAGE_SIZE, "hynix\n"); + case MOSEL: + return snprintf(buf, PAGE_SIZE, "mosel\n"); + case WINBOND: + return snprintf(buf, PAGE_SIZE, "winbond\n"); + case ESMT: + return snprintf(buf, PAGE_SIZE, "esmt\n"); + case MICRON: + return snprintf(buf, PAGE_SIZE, "micron\n"); + default: + return snprintf(buf, PAGE_SIZE, "unknown\n"); + } +} + static DEVICE_ATTR(mem_info_vram_total, S_IRUGO, amdgpu_mem_info_vram_total_show, NULL); static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO, @@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO, amdgpu_mem_info_vram_used_show, NULL); static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO, amdgpu_mem_info_vis_vram_used_show, NULL); +static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO, + amdgpu_mem_info_vram_vendor, NULL); /** * amdgpu_vram_mgr_init - init VRAM manager and DRM MM @@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man, DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n"); return ret; } + ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor); + if (ret) { + DRM_ERROR("Failed to create device file 
mem_info_vram_vendor\n"); + return ret; + } return 0; } @@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total); device_remove_file(adev->dev, &dev_attr_mem_info_vram_used); device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used); + device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor); return 0; } @@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; unsigned long lpfn, num_nodes, pages_per_node, pages_left; - uint64_t vis_usage = 0, mem_bytes; + uint64_t vis_usage = 0, mem_bytes, max_bytes; unsigned i; int r; @@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (!lpfn) lpfn = man->size; + max_bytes = adev->gmc.mc_vram_size; + if (tbo->type != ttm_bo_type_kernel) + max_bytes -= AMDGPU_VM_RESERVED_VRAM; + /* bail out quickly if there's likely not enough VRAM for this BO */ mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; - if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) { + if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { atomic64_sub(mem_bytes, &mgr->usage); mem->mm_node = NULL; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 65aae75f80fd..61d13d8b7b20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" +#include "amdgpu_ras.h" #include "df/df_3_6_offset.h" static DEFINE_MUTEX(xgmi_mutex); @@ -273,22 +274,55 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) { int ret = 0; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); + struct amdgpu_device *tmp_adev; + bool update_hive_pstate = true; + bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20; if (!hive) return 0; - if (hive->pstate == pstate) - return 0; + mutex_lock(&hive->hive_lock); + + if (hive->pstate == pstate) { + adev->pstate = is_high_pstate ? 
pstate : adev->pstate; + goto out; + } dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); if (is_support_sw_smu_xgmi(adev)) ret = smu_set_xgmi_pstate(&adev->smu, pstate); - if (ret) + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_xgmi_pstate) + ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, + pstate); + + if (ret) { dev_err(adev->dev, "XGMI: Set pstate failure on device %llx, hive %llx, ret %d", adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id, ret); + goto out; + } + + /* Update device pstate */ + adev->pstate = pstate; + + /* + * Update the hive pstate only all devices of the hive + * are in the same pstate + */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + if (tmp_adev->pstate != adev->pstate) { + update_hive_pstate = false; + break; + } + } + if (update_hive_pstate || is_high_pstate) + hive->pstate = pstate; + +out: + mutex_unlock(&hive->hive_lock); return ret; } @@ -363,6 +397,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) goto exit; } + /* Set default device pstate */ + adev->pstate = -1; + top_info = &adev->psp.xgmi_context.top_info; list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); @@ -437,3 +474,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) mutex_unlock(&hive->hive_lock); } } + +int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "xgmi_wafl_err_count", + .debugfs_name = "xgmi_wafl_err_inject", + }; + + if (!adev->gmc.xgmi.supported || + adev->gmc.xgmi.num_physical_nodes == 0) + return 0; + + if (!adev->gmc.xgmi.ras_if) { + adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->gmc.xgmi.ras_if) + return -ENOMEM; + adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL; + adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gmc.xgmi.ras_if->sub_block_index = 0; + strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl"); + } + ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if; + r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) { + kfree(adev->gmc.xgmi.ras_if); + adev->gmc.xgmi.ras_if = NULL; + } + + return r; +} + +void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) && + adev->gmc.xgmi.ras_if) { + struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index fbcee31788c4..bbf504ff7051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); +void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c index 4853899b1824..fda99c958c3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c +++ 
b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "arct_ip_offset.h" int arct_reg_base_init(struct amdgpu_device *adev) @@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i])); adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i])); + adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i])); } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index b81bb414fcb3..2d64d270725d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS}, + {mmGRBM_STATUS2}, + {mmGRBM_STATUS_SE0}, + {mmGRBM_STATUS_SE1}, + {mmGRBM_STATUS_SE2}, + {mmGRBM_STATUS_SE3}, + {mmSRBM_STATUS}, + {mmSRBM_STATUS2}, + {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET}, + {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET}, + {mmCP_STAT}, + {mmCP_STALLED_STAT1}, + {mmCP_STALLED_STAT2}, + {mmCP_STALLED_STAT3}, + {mmCP_CPF_BUSY_STAT}, + {mmCP_CPF_STALLED_STAT1}, + {mmCP_CPF_STATUS}, + {mmCP_CPC_BUSY_STAT}, + {mmCP_CPC_STALLED_STAT1}, + {mmCP_CPC_STATUS}, {mmGB_ADDR_CONFIG}, {mmMC_ARB_RAMCFG}, {mmGB_TILE_MODE0}, @@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) } /** - * cik_asic_reset - soft reset GPU + * cik_asic_pci_config_reset - soft reset GPU * * @adev: amdgpu_device pointer * - * Look up which blocks are hung and attempt - * to reset them. + * Use PCI Config method to reset the GPU. + * * Returns 0 for success. */ -static int cik_asic_reset(struct amdgpu_device *adev) +static int cik_asic_pci_config_reset(struct amdgpu_device *adev) { int r; @@ -1294,7 +1313,45 @@ static int cik_asic_reset(struct amdgpu_device *adev) static enum amd_reset_method cik_asic_reset_method(struct amdgpu_device *adev) { - return AMD_RESET_METHOD_LEGACY; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + /* disable baco reset until it works */ + /* smu7_asic_get_baco_capability(adev, &baco_reset); */ + baco_reset = false; + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_LEGACY; +} + +/** + * cik_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. 
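[Editor's note] The cik.c hunks above split the old cik_asic_reset() into a method-selection step (cik_asic_reset_method(), which currently always reports legacy since BACO is left disabled) and a dispatcher that calls either smu7_asic_baco_reset() or the renamed cik_asic_pci_config_reset(). A standalone sketch of that select-then-dispatch structure, with placeholder reset routines:

```c
#include <stdio.h>

enum reset_method { RESET_LEGACY, RESET_BACO };

static int baco_supported;   /* kept disabled until BACO works, as in the hunk */

static enum reset_method asic_reset_method(void)
{
    return baco_supported ? RESET_BACO : RESET_LEGACY;
}

static int baco_reset(void)       { puts("BACO reset");       return 0; }
static int pci_config_reset(void) { puts("PCI config reset"); return 0; }

/* decide once, then dispatch to the matching reset routine */
static int asic_reset(void)
{
    if (asic_reset_method() == RESET_BACO)
        return baco_reset();
    return pci_config_reset();
}

int main(void)
{
    asic_reset();            /* legacy path */
    baco_supported = 1;
    asic_reset();            /* BACO path */
    return 0;
}
```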
+ */ +static int cik_asic_reset(struct amdgpu_device *adev) +{ + int r; + + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + r = smu7_asic_baco_reset(adev); + else + r = cik_asic_pci_config_reset(adev); + + return r; } static u32 cik_get_config_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 54c625a2e570..9870bf27870e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev, int cik_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); +int smu7_asic_baco_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 645550e7caf5..40d2ac723dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; int interlace = 0; @@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + 
drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; u8 *sadb = NULL; @@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d9f470632b2c..898ef72d423c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -385,6 +387,7 @@ static void 
dce_v11_0_hpd_init(struct amdgpu_device *adev) dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; int interlace = 0; @@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; u8 *sadb = NULL; @@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; 
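[Editor's note] The DCE hunks in this stretch all apply the same conversion: a raw walk of `mode_config.connector_list` becomes a drm_connector_list_iter_begin() / drm_for_each_connector_iter() / drm_connector_list_iter_end() triple, which is safe against connectors appearing or disappearing mid-walk. Below is a standalone model of an explicit begin/next/end iterator; it works over a snapshot for simplicity, whereas the real DRM iterator uses locking and per-connector references rather than a copy, so treat this purely as a shape illustration.

```c
#include <stdio.h>
#include <string.h>

#define MAX_CONN 8

struct connector { const char *name; };

static struct connector list[MAX_CONN] = { { "HDMI-A-1" }, { "DP-1" } };
static int list_len = 2;

struct conn_iter { struct connector snap[MAX_CONN]; int len, pos; };

/* begin: capture a stable view so concurrent add/remove can't bite us */
static void conn_iter_begin(struct conn_iter *it)
{
    memcpy(it->snap, list, sizeof(list));
    it->len = list_len;
    it->pos = 0;
}

static struct connector *conn_iter_next(struct conn_iter *it)
{
    return it->pos < it->len ? &it->snap[it->pos++] : NULL;
}

static void conn_iter_end(struct conn_iter *it)
{
    it->len = it->pos = 0;     /* real code drops references/locks here */
}

int main(void)
{
    struct conn_iter iter;
    struct connector *c;

    conn_iter_begin(&iter);
    while ((c = conn_iter_next(&iter)))
        printf("connector %s\n", c->name);
    conn_iter_end(&iter);
    return 0;
}
```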
+ struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 3eb2e7429269..db15a112becc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } - + drm_connector_list_iter_end(&iter); } /** @@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1124,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder) static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct 
drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; int interlace = 0; u32 tmp; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u8 *sadb = NULL; int sad_count; u32 tmp; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { u32 tmp = 0; @@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); int bpc = 8; @@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct 
drm_encoder *encoder, if (!dig || !dig->afmt) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a16c5e9e610e..f06c9022c1fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp = 0, offset; @@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = 
encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 offset, tmp; u8 *sadb = NULL; @@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 offset; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index 844c03868248..d6221298b477 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev) { } +static void df_v1_7_sw_fini(struct amdgpu_device *adev) +{ +} + static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev, bool enable) { @@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev, const struct amdgpu_df_funcs df_v1_7_funcs = { .sw_init = df_v1_7_sw_init, + .sw_fini = df_v1_7_sw_fini, .enable_broadcast_mode = df_v1_7_enable_broadcast_mode, .get_fb_channel_number = df_v1_7_get_fb_channel_number, .get_hbm_channel_number = df_v1_7_get_hbm_channel_number, diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 5850c8e34caa..16fbd2bc8ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device 
*adev, unsigned long flags, address, data; uint32_t ficadl_val, ficadh_val; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); @@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); @@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, lo_addr); @@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, lo_addr); @@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev) adev->df_perfmon_config_assign_mask[i] = 0; } +static void df_v3_6_sw_fini(struct amdgpu_device *adev) +{ + + device_remove_file(adev->dev, &dev_attr_df_cntr_avail); + +} + static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev, bool enable) { @@ -537,6 +544,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, const struct amdgpu_df_funcs df_v3_6_funcs = { .sw_init = df_v3_6_sw_init, + .sw_fini = df_v3_6_sw_fini, .enable_broadcast_mode = df_v3_6_enable_broadcast_mode, .get_fb_channel_number = df_v3_6_get_fb_channel_number, .get_hbm_channel_number = df_v3_6_get_hbm_channel_number, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 957811b73672..a93dd3dc0902 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), SOC15_REG_GOLDEN_VALUE(GC, 0, 
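/*
 * The df_v3_6 hunks above are part of a tree-wide rename from
 * adev->nbio_funcs-> to adev->nbio.funcs->: the NBIO callback table and the
 * NBIO data it was looked up alongside (e.g. the hdp_flush_reg table seen
 * later in this diff) are grouped into one embedded struct. A sketch of
 * what that container plausibly looks like; the exact field list is an
 * assumption, only the two members visible in this diff are shown:
 *
 *	struct amdgpu_nbio {
 *		const struct nbio_hdp_flush_reg *hdp_flush_reg;
 *		const struct amdgpu_nbio_funcs *funcs;
 *	};
 *
 *	address = adev->nbio.funcs->get_pcie_index_offset(adev);
 *	data    = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 * The new df sw_fini hooks likewise give the DF block a teardown symmetric
 * with sw_init; df_v3_6_sw_fini() removes the df_cntr_avail sysfs file that
 * sw_init presumably created.
 */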
mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000) }; static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = @@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000), }; static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = @@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev) kfree(adev->gfx.rlc.register_list_format); } +static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) +{ + adev->gfx.cp_fw_write_wait = false; + + switch (adev->asic_type) { + case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: + if ((adev->gfx.me_fw_version >= 0x00000046) && + (adev->gfx.me_feature_version >= 27) && + (adev->gfx.pfp_fw_version >= 0x00000068) && + (adev->gfx.pfp_feature_version >= 27) && + (adev->gfx.mec_fw_version >= 0x0000005b) && + (adev->gfx.mec_feature_version >= 27)) + adev->gfx.cp_fw_write_wait = true; + break; + default: + break; + } + + if (adev->gfx.cp_fw_write_wait == false) + DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ + GRBM requires 1-cycle delay in cp firmware\n"); +} + + static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev) { const struct rlc_firmware_header_v2_1 *rlc_hdr; @@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) } } + gfx_v10_0_check_fw_write_wait(adev); out: if (err) { dev_err(adev->dev, @@ -1442,7 +1469,7 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + 
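/*
 * The golden-register updates above are easier to read knowing the entry
 * layout: SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) is
 * applied as a read-modify-write, roughly (a hedged sketch of the usual
 * soc15 helper):
 *
 *	tmp = RREG32(reg);	// skipped when and_mask == 0xffffffff
 *	tmp &= ~and_mask;
 *	tmp |= or_mask;
 *	WREG32(reg, tmp);
 *
 * So the CGTT_SPI_CLK_CTRL change widens the cleared field from 0xc0000000
 * to 0xcd000000 and programs 0x0d000100 into it, and the UTCL1_CTRL change
 * now sets bit 22 (0x00400000) in addition to bit 23. Right here,
 * amdgpu_gfx_kiq_free_ring() also loses its unused irq argument, a change
 * repeated for gfx_v8_0 and gfx_v9_0 later in this diff.
 */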
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v10_0_pfp_fini(adev); @@ -2443,7 +2470,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -2513,7 +2540,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -2582,7 +2609,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -2903,7 +2930,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -3106,6 +3133,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; + adev->wb.wb[ring->wptr_offs] = 0; amdgpu_ring_clear_ring(ring); #ifdef BRING_UP_DEBUG mutex_lock(&adev->srbm_mutex); @@ -4357,7 +4385,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask, reg_mem_engine; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { switch (ring->me) { @@ -4377,8 +4405,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, - adev->nbio_funcs->get_hdp_flush_req_offset(adev), - adev->nbio_funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), ref_and_mask, ref_and_mask, 0x20); } @@ -4765,6 +4793,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); } +static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + struct amdgpu_device *adev = ring->adev; + bool fw_version_ok = false; + + fw_version_ok = adev->gfx.cp_fw_write_wait; + + if (fw_version_ok) + gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, + ref, mask, 0x20); + else + amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, + ref, mask); +} + static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -5155,6 +5201,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_tmz = gfx_v10_0_ring_emit_tmz, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, }; static 
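/*
 * Two details worth noting in the gfx_v10_0 hunks above. First, the gfx
 * queue reset now clears the ring's writeback slot as well as the
 * driver-side pointer, so a stale wptr is not read back after reset:
 *
 *	ring->wptr = 0;
 *	adev->wb.wb[ring->wptr_offs] = 0;	// writeback copy of wptr
 *	amdgpu_ring_clear_ring(ring);
 *
 * Second, emit_reg_write_reg_wait gains a fast path: with new enough CP
 * firmware (cp_fw_write_wait, set in gfx_v10_0_check_fw_write_wait()
 * above), a single fused WAIT_REG_MEM packet both writes reg0 and polls
 * reg1; otherwise it falls back to
 * amdgpu_ring_emit_reg_write_reg_wait_helper(), which emits a separate
 * write and wait. Note that the new DRM_WARN_ONCE strings, here and in the
 * gfx_v9_0 counterpart later, use a backslash line continuation inside the
 * string literal, which embeds the next line's leading whitespace in the
 * logged message.
 */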
const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { @@ -5188,6 +5235,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, }; static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { @@ -5218,6 +5266,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .emit_rreg = gfx_v10_0_ring_emit_rreg, .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, }; static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) @@ -5283,15 +5332,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) { - /* init asic gds info */ - switch (adev->asic_type) { - case CHIP_NAVI10: - default: - adev->gds.gds_size = 0x10000; - adev->gds.gds_compute_max_wave_id = 0x4ff; - break; - } + unsigned total_cu = adev->gfx.config.max_cu_per_sh * + adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines; + adev->gds.gds_size = 0x10000; + adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; adev->gds.gws_size = 64; adev->gds.oa_size = 16; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 87dd55e9d72b..ffbde9136372 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2103,7 +2103,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dcadc73bffd2..3ebd5c20dfd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -131,6 +131,18 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +struct ras_gfx_subblock_reg { + const char *name; + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; + uint32_t sec_count_mask; + uint32_t sec_count_shift; + uint32_t ded_count_mask; + uint32_t ded_count_shift; +}; + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -517,9 +529,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = @@ -582,9 +594,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, 
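/*
 * gfx_v10_0_set_gds_init() above replaces a hardcoded per-ASIC
 * gds_compute_max_wave_id with one derived from the CU count, at 32 waves
 * per CU:
 *
 *	total_cu = max_cu_per_sh * max_sh_per_se * max_shader_engines;
 *	gds_compute_max_wave_id = total_cu * 32 - 1;
 *
 * For a 40-CU configuration that gives 40 * 32 - 1 = 1279 = 0x4ff,
 * matching the value the removed Navi10 case used to hardcode.
 */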
mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = @@ -676,9 +688,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = @@ -691,6 +703,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000), }; static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = @@ -973,6 +986,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.me_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false; + if ((adev->gfx.mec_fw_version < 0x000001a5) || + (adev->gfx.mec_feature_version < 46) || + (adev->gfx.pfp_fw_version < 0x000000b7) || + (adev->gfx.pfp_feature_version < 46)) + DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ + GRBM requires 1-cycle delay in cp firmware\n"); + switch (adev->asic_type) { case CHIP_VEGA10: if ((adev->gfx.me_fw_version >= 0x0000009c) && @@ -1044,6 +1064,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; break; + case CHIP_RENOIR: + if (adev->pm.pp_feature & PP_GFXOFF_MASK) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; + break; default: break; } @@ -1324,7 +1350,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, /* TODO: Determine if MEC2 JT FW loading can be removed for all GFX V9 asic and above */ - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->asic_type != CHIP_ARCTURUS && + adev->asic_type != CHIP_RENOIR) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; info->fw = adev->gfx.mec2_fw; @@ -1956,190 +1983,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) return 0; } -static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, - struct amdgpu_ngg_buf *ngg_buf, - int size_se, 
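/*
 * The gc_9 golden-entry changes above (CP_MEC*_F32_INT_DIS, CP_DEBUG) do
 * not change the final register state. With the RMW semantics sketched
 * earlier, an and_mask of 0 ORs the bit in just as the new (0x800, 0x800)
 * form does after first clearing it:
 *
 *	// old: tmp |= 0x00008000;
 *	// new: tmp = (tmp & ~0x00008000) | 0x00008000;	same end state
 *
 * The new form simply makes each entry name the field it owns. The Renoir
 * hunks above gate the GFX powergating flags on PP_GFXOFF_MASK and skip the
 * MEC2 JT firmware entry, mirroring what was already done for Arcturus.
 */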
- int default_size_se) -{ - int r; - - if (size_se < 0) { - dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se); - return -EINVAL; - } - size_se = size_se ? size_se : default_size_se; - - ngg_buf->size = size_se * adev->gfx.config.max_shader_engines; - r = amdgpu_bo_create_kernel(adev, ngg_buf->size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &ngg_buf->bo, - &ngg_buf->gpu_addr, - NULL); - if (r) { - dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r); - return r; - } - ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo); - - return r; -} - -static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < NGG_BUF_MAX; i++) - amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo, - &adev->gfx.ngg.buf[i].gpu_addr, - NULL); - - memset(&adev->gfx.ngg.buf[0], 0, - sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX); - - adev->gfx.ngg.init = false; - - return 0; -} - -static int gfx_v9_0_ngg_init(struct amdgpu_device *adev) -{ - int r; - - if (!amdgpu_ngg || adev->gfx.ngg.init == true) - return 0; - - /* GDS reserve memory: 64 bytes alignment */ - adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40); - adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size; - adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE); - adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); - - /* Primitive Buffer */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM], - amdgpu_prim_buf_per_se, - 64 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Primitive Buffer\n"); - goto err; - } - - /* Position Buffer */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS], - amdgpu_pos_buf_per_se, - 256 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Position Buffer\n"); - goto err; - } - - /* Control Sideband */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL], - amdgpu_cntl_sb_buf_per_se, - 256); - if (r) { - dev_err(adev->dev, "Failed to create Control Sideband Buffer\n"); - goto err; - } - - /* Parameter Cache, not created by default */ - if (amdgpu_param_buf_per_se <= 0) - goto out; - - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM], - amdgpu_param_buf_per_se, - 512 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Parameter Cache\n"); - goto err; - } - -out: - adev->gfx.ngg.init = true; - return 0; -err: - gfx_v9_0_ngg_fini(adev); - return r; -} - -static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; - int r; - u32 data, base; - - if (!amdgpu_ngg) - return 0; - - /* Program buffer size */ - data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, - adev->gfx.ngg.buf[NGG_PRIM].size >> 8); - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, - adev->gfx.ngg.buf[NGG_POS].size >> 8); - WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data); - - data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, - adev->gfx.ngg.buf[NGG_CNTL].size >> 8); - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, - adev->gfx.ngg.buf[NGG_PARAM].size >> 10); - WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data); - - /* Program buffer base address */ - base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); - data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); - data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data); - - base = 
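/*
 * The large removal above and below drops the NGG (next-generation
 * geometry) buffer management from gfx_v9_0 entirely: create, init, enable
 * and fini, plus their call sites in sw_init/sw_fini/hw_init later in this
 * diff. The removed code is still a handy illustration of the kernel-buffer
 * helper pairing it was built on:
 *
 *	// create: pass NULL for the CPU pointer when no mapping is needed
 *	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &buf->bo, &buf->gpu_addr, NULL);
 *	...
 *	// free: the same three handles, cleared on return
 *	amdgpu_bo_free_kernel(&buf->bo, &buf->gpu_addr, NULL);
 */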
lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); - data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); - data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data); - - base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); - data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); - data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data); - - /* Clear GDS reserved memory */ - r = amdgpu_ring_alloc(ring, 17); - if (r) { - DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n", - ring->name, r); - return r; - } - - gfx_v9_0_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), - (adev->gds.gds_size + - adev->gfx.ngg.gds_reserve_size)); - - amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5)); - amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC | - PACKET3_DMA_DATA_DST_SEL(1) | - PACKET3_DMA_DATA_SRC_SEL(2))); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT | - adev->gfx.ngg.gds_reserve_size); - - gfx_v9_0_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0); - - amdgpu_ring_commit(ring); - - return 0; -} - static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) { @@ -2307,10 +2150,6 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; - r = gfx_v9_0_ngg_init(adev); - if (r) - return r; - return 0; } @@ -2320,19 +2159,7 @@ static int gfx_v9_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && - adev->gfx.ras_if) { - struct ras_common_if *ras_if = adev->gfx.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } + amdgpu_gfx_ras_fini(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -2340,11 +2167,10 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); - gfx_v9_0_ngg_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, @@ -2912,7 +2738,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. 
*/ if (adev->gfx.rlc.is_rlc_v2_1) { - gfx_v9_1_init_rlc_save_restore_list(adev); + if (adev->asic_type == CHIP_VEGA12 || + (adev->asic_type == CHIP_RAVEN && + adev->rev_id >= 8)) + gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); } @@ -3883,12 +3712,6 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - if (adev->asic_type != CHIP_ARCTURUS) { - r = gfx_v9_0_ngg_en(adev); - if (r) - return r; - } - return r; } @@ -3930,8 +3753,10 @@ static int gfx_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - /* disable KCQ to avoid CPC touch memory not valid anymore */ - gfx_v9_0_kcq_disable(adev); + /* DF freeze and kcq disable will fail */ + if (!amdgpu_ras_intr_triggered()) + /* disable KCQ to avoid CPC touch memory not valid anymore */ + gfx_v9_0_kcq_disable(adev); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); @@ -4067,9 +3892,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { + uint32_t tmp, lsb, msb, i = 0; + do { + if (i != 0) + udelay(1); + tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); + lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); + msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); + i++; + } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); + clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } mutex_unlock(&adev->gfx.gpu_clock_mutex); return clock; } @@ -4184,6 +4022,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16}, + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6}, { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16}, @@ -4203,6 +4042,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; int i, r; + /* only support when RAS is enabled */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return 0; + r = amdgpu_ring_alloc(ring, 7); if (r) { DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n", @@ -4393,33 +4236,14 @@ static int gfx_v9_0_early_init(void *handle) return 0; } -static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry); - static int gfx_v9_0_ecc_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = &adev->gfx.ras_if; - struct ras_ih_if ih_info = { - .cb = gfx_v9_0_process_ras_data_cb, - }; - struct ras_fs_if fs_info = { - .sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", - }; - struct ras_common_if ras_block = { - .block = AMDGPU_RAS_BLOCK__GFX, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - 
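/*
 * The gfx_v9_0_get_gpu_clock_counter() hunk above is the classic torn-read
 * guard for a 64-bit counter exposed as two 32-bit registers: read MSB,
 * then LSB, then MSB again, and retry if the two MSB reads differ, since
 * the low word must have wrapped in between. Shape of the loop, as taken
 * from the diff:
 *
 *	do {
 *		if (i != 0)
 *			udelay(1);
 *		tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
 *		lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
 *		msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
 *		i++;
 *	} while (unlikely(tmp != msb) && (i < adev->usec_timeout));
 *	clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
 *
 * This path is only taken for Vega10 under SR-IOV; bare metal keeps using
 * the RLC_CAPTURE_GPU_CLOCK_COUNT latch-and-read sequence.
 */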
.sub_block_index = 0, - .name = "gfx", - }; int r; - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { - amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); - return 0; - } + r = amdgpu_gfx_ras_late_init(adev); + if (r) + return r; r = gfx_v9_0_do_edc_gds_workarounds(adev); if (r) @@ -4430,72 +4254,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__GFX); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. */ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__GFX); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info.head = **ras_if; - - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - - amdgpu_ras_debugfs_create(adev, &fs_info); - - r = amdgpu_ras_sysfs_create(adev, &fs_info); - if (r) - goto sysfs; -resume: - r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); - if (r) - goto irq; - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; } static int gfx_v9_0_late_init(void *handle) @@ -4560,16 +4319,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, { amdgpu_gfx_rlc_enter_safe_mode(adev); - if (is_support_sw_smu(adev) && !enable) - smu_set_gfx_cgpg(&adev->smu, enable); - if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) gfx_v9_0_enable_gfx_pipeline_powergating(adev, true); } else { gfx_v9_0_enable_gfx_cg_power_gating(adev, false); - gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } amdgpu_gfx_rlc_exit_safe_mode(adev); @@ -4838,8 +4595,6 @@ static int gfx_v9_0_set_powergating_state(void *handle, gfx_v9_0_enable_cp_power_gating(adev, false); /* update gfx cgpg state */ - if (is_support_sw_smu(adev) && enable) - smu_set_gfx_cgpg(&adev->smu, enable); gfx_v9_0_update_gfx_cg_power_gating(adev, enable); /* update mgcg state */ @@ -4970,7 +4725,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask, reg_mem_engine; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { switch (ring->me) { @@ -4990,8 +4745,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, - adev->nbio_funcs->get_hdp_flush_req_offset(adev), - adev->nbio_funcs->get_hdp_flush_done_offset(adev), + 
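/*
 * Earlier in this hunk, gfx_v9_0_ecc_late_init() collapses roughly a
 * hundred lines of per-IP ras_if allocation, enable/resume handling and
 * error unwinding into the common amdgpu_gfx_ras_late_init() helper, paired
 * with amdgpu_gfx_ras_fini() in sw_fini above. Continuing
 * gfx_v9_0_ring_emit_hdp_flush() below: the req/done register pair is a
 * handshake with the NBIO block. The ring writes ref_and_mask into the
 * hdp_flush_req register and polls the hdp_flush_done register until the
 * same bits read back, all emitted as one packet. In pseudocode:
 *
 *	WREG32(req_offset, ref_and_mask);
 *	while ((RREG32(done_offset) & ref_and_mask) != ref_and_mask)
 *		;	// 0x20 is the poll-interval argument
 *
 * The per-ring ref_and_mask chosen above keeps concurrent rings from
 * acknowledging each other's flushes.
 */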
adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), ref_and_mask, ref_and_mask, 0x20); } @@ -5723,313 +5478,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } -static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry) -{ - /* TODO ue will trigger an interrupt. */ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->gfx.funcs->query_ras_error_count) - adev->gfx.funcs->query_ras_error_count(adev, err_data); - amdgpu_ras_reset_gpu(adev, 0); - return AMDGPU_RAS_SUCCESS; -} -static const struct { - const char *name; - uint32_t ip; - uint32_t inst; - uint32_t seg; - uint32_t reg_offset; - uint32_t per_se_instance; - int32_t num_instance; - uint32_t sec_count_mask; - uint32_t ded_count_mask; -} gfx_ras_edc_regs[] = { - { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, - REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT), - REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, - { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, - REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT), - REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) }, - { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 }, - { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 }, - { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT), - REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) }, - { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 }, - { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), - REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) }, - { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT), - REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) }, - { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 }, - { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 }, - { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 }, - { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) }, - { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 }, +static const struct ras_gfx_subblock_reg ras_subblock_regs[] = { + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) + }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) + }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1), + 0, 0 + }, + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2), + 0, 0 + }, + { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) + }, + { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, 
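/*
 * The ras_subblock_regs table being built here replaces the ad-hoc
 * REG_FIELD_MASK() pairs of the removed gfx_ras_edc_regs with the named
 * struct ras_gfx_subblock_reg declared near the top of this file, which
 * carries a mask and a shift per counter. SOC15_REG_FIELD(REG, FIELD)
 * expands to that mask/shift pair, presumably along the lines of the usual
 * soc15 register headers:
 *
 *	#define SOC15_REG_FIELD(reg, field) \
 *		reg##__##field##_MASK, reg##__##field##__SHIFT
 *
 * Entries without a DED counter end in "0, 0" so the walker can skip them.
 */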
mmCPG_EDC_DMA_CNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT), + 0, 0 + }, + { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT) + }, + { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), + SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT) + }, + { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1), + 0, 0 + }, + { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1), + 0, 0 + }, + { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1), + 0, 0 + }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) + }, + { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), + 0, 0 + }, { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), - 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) + }, { "GDS_OA_PHY_PHY_CMD_RAM_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) + }, { "GDS_OA_PHY_PHY_DATA_RAM_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), + 0, 0 + }, { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, - { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1, - REG_FIELD_MASK(SPI_EDC_CNT, 
SPI_SR_MEM_SED_COUNT), 0 }, - { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, - { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 }, - { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 }, - { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 }, - { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 }, - { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, - REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 }, - { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, - REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 }, - { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, - { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, - { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, - { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, - { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, - { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 }, - { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 }, - { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 }, - { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 }, - { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 }, - { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 }, - { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 }, - { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 }, - { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) + }, + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), + 0, 0 + }, + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) + }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, 
mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), + 0, 0 + }, + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), + 0, 0 + }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) + }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) + }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) + }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) + }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) + }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), + 0, 0 + }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), + 0, 0 + }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), + 0, 0 + }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), + 0, 0 + }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), + 0, 0 + }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), + 0, 0 + }, + { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), + 0, 0 + }, { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), - 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), - 0 }, - { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 }, + SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), + 0, 0 + }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), + 0, 0 + }, { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), - 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), - 0 }, - { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, 
REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 }, - { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72, - REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 }, - { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, - { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, - { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 }, - { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 }, - { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 }, - { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, - { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, - { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, - { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, - { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 }, - { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) }, - { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) }, - { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) }, - { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) }, - { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) }, - { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) }, - { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), + 0, 0 + }, + { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), + 0, 0 + }, + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), + 0, 0 + }, + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) + }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + 
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) + }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), + 0, 0 + }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + 0, 0 + }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), + 0, 0 + }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) + }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) + }, + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) + }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) + }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) + }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) + }, + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) + }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) + }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) + }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) + }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) + }, { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) + }, { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + 
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) + }, { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, - { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, - { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, - { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, - { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, - { "SQC_INST_BANKA_UTCL1_MISS_FIFO", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), - 0 }, - { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_DIRTY_BIT_RAM", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 }, - { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, - { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, - { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, - { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, - { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, - { "SQC_INST_BANKB_UTCL1_MISS_FIFO", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), - 0 }, - { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 
0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_DIRTY_BIT_RAM", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 }, - { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, - { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, - { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, - { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, - { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, - { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 }, - { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 }, - { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 }, - { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 }, - { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 }, - { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, - { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, - { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, - { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 }, - { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 }, - { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 }, - { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 }, - { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 }, - { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, 
DATA_CU2_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) + }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) + }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) + }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), + 0, 0 + }, + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) + }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) + }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) + }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, 
DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), + 0, 0 + }, + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) + }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) + }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) + }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) + }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) + }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0 + }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0 + }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0 + }, + { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) + }, + { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) + }, + { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) + }, + { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + 0, 0 + } }; static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, @@ -6078,14 +5966,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, return ret; } +static const char *vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + 
"UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", +}; + +static const char *vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_LOG_FIFO", +}; + +static const char *atc_l2_cache_2m_mems[] = { + "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char *atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); + + for (i = 0; i < 16; i++) { + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); + + sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < 7; i++) { + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); + + sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, + 
DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < 4; i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); + + sec_count = (data & 0x00006000L) >> 0xd; + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + } + + for (i = 0; i < 32; i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); + + sec_count = (data & 0x00006000L) >> 0xd; + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = (data & 0x00018000L) >> 0xf; + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + + return 0; +} + +static int __get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, uint32_t value, + uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) { + if(ras_subblock_regs[i].reg_offset != reg->reg_offset || + ras_subblock_regs[i].seg != reg->seg || + ras_subblock_regs[i].inst != reg->inst) + continue; + + sec_cnt = (value & + ras_subblock_regs[i].sec_count_mask) >> + ras_subblock_regs[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + ras_subblock_regs[i].name, + se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + ras_subblock_regs[i].ded_count_mask) >> + ras_subblock_regs[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + ras_subblock_regs[i].name, + se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t sec_count, ded_count; - uint32_t i; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; uint32_t reg_value; - uint32_t se_id, instance_id; if (adev->asic_type != CHIP_VEGA20) return -EINVAL; @@ -6094,71 +6185,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count = 0; mutex_lock(&adev->grbm_idx_mutex); - for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) { - for (instance_id = 0; instance_id < 256; instance_id++) { - for (i = 0; - i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]); - i++) { - if (se_id != 0 && - !gfx_ras_edc_regs[i].per_se_instance) - continue; - if (instance_id >= gfx_ras_edc_regs[i].num_instance) - continue; - - gfx_v9_0_select_se_sh(adev, se_id, 0, - instance_id); - - reg_value = RREG32( - adev->reg_offset[gfx_ras_edc_regs[i].ip] - [gfx_ras_edc_regs[i].inst] - [gfx_ras_edc_regs[i].seg] + - gfx_ras_edc_regs[i].reg_offset); - sec_count = reg_value & - gfx_ras_edc_regs[i].sec_count_mask; - ded_count = reg_value & - gfx_ras_edc_regs[i].ded_count_mask; - if (sec_count) { - DRM_INFO( - 
"Instance[%d][%d]: SubBlock %s, SEC %d\n", - se_id, instance_id, - gfx_ras_edc_regs[i].name, - sec_count); - err_data->ce_count++; - } - if (ded_count) { - DRM_INFO( - "Instance[%d][%d]: SubBlock %s, DED %d\n", - se_id, instance_id, - gfx_ras_edc_regs[i].name, - ded_count); - err_data->ue_count++; - } + for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { + for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { + for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { + gfx_v9_0_select_se_sh(adev, j, 0, k); + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); + if (reg_value) + __get_ras_error_count(&sec_ded_counter_registers[i], + j, k, reg_value, + &sec_count, &ded_count); } } } - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - return 0; -} + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; -static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->gfx.ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); - ih_data.head = *ras_if; + gfx_v9_0_query_utc_edc_status(adev, err_data); - DRM_ERROR("CP ECC ERROR IRQ\n"); - amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -6325,7 +6374,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = { .set = gfx_v9_0_set_cp_ecc_error_state, - .process = gfx_v9_0_cp_ecc_error_irq, + .process = amdgpu_gfx_cp_ecc_error_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 6ce37ce77d14..9ec4297e61e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 8b789f750b72..b4f32d853ca1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24; } -static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev) +void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */ + int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); - - WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(GC, 0, 
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - gfxhub_v2_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); @@ -151,6 +155,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp); tmp = mmGCVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp); tmp = mmGCVM_L2_CNTL4_DEFAULT; @@ -166,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h index 06807940748b..392b8cd94fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h @@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v2_0_init(struct amdgpu_device *adev); u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev); +void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 241a4e57cf4a..27f68d32bfec 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -278,7 +278,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, int r; /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); mutex_lock(&adev->mman.gtt_window_lock); @@ -309,6 +309,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); job->vm_needs_flush = true; + job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; amdgpu_ring_pad_ib(ring, &job->ibs[0]); r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); @@ -343,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), upper_32_bits(pd_addr)); - amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req); - - /* wait for the invalidate to complete */ - amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng, - 1 << vmid, 1 << vmid); + amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng, + hub->vm_inv_eng0_ack + eng, + req, 1 << vmid); return pd_addr; } @@ -397,43 +396,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid * 1 system * 0 valid */ -static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t 
pte_flag = 0; - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - - switch (flags & AMDGPU_VM_MTYPE_MASK) { +static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) +{ + switch (flags) { case AMDGPU_VM_MTYPE_DEFAULT: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); case AMDGPU_VM_MTYPE_NC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); case AMDGPU_VM_MTYPE_WC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); case AMDGPU_VM_MTYPE_CC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); case AMDGPU_VM_MTYPE_UC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); default: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); } - - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; } static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, @@ -460,12 +439,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, } } +static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; + *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags |= AMDGPU_PTE_SNOOPED; + *flags |= AMDGPU_PTE_LOG; + *flags |= AMDGPU_PTE_SYSTEM; + *flags &= ~AMDGPU_PTE_VALID; + } +} + static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, - .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags, - .get_vm_pde = gmc_v10_0_get_vm_pde + .map_mtype = gmc_v10_0_map_mtype, + .get_vm_pde = gmc_v10_0_get_vm_pde, + .get_vm_pte = gmc_v10_0_get_vm_pte }; static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -519,8 +518,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, { u64 base = 0; - if (!amdgpu_sriov_vf(adev)) - base = gfxhub_v2_0_get_fb_location(adev); + base = gfxhub_v2_0_get_fb_location(adev); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); @@ -540,24 +538,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) { - int chansize, numchan; - - if (!amdgpu_emu_mode) - adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); - else { - /* hard code vram_width for emulation */ - chansize = 128; - numchan = 1; - adev->gmc.vram_width = numchan * chansize; - } - /* Could aper size report 0 ? 
*/ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->gmc.mc_vram_size = - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; adev->gmc.visible_vram_size = adev->gmc.aper_size; @@ -636,7 +623,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) static int gmc_v10_0_sw_init(void *handle) { - int r; + int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v2_0_init(adev); @@ -644,7 +631,15 @@ static int gmc_v10_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (!amdgpu_emu_mode) + adev->gmc.vram_width = vram_width; + else + adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: @@ -794,7 +789,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); /* Flush HDP after it is initialized */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9fb1765e92d1..b205039350b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { @@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb, .set_prt = gmc_v6_0_set_prt, .get_vm_pde = gmc_v6_0_get_vm_pde, - .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags + .get_vm_pte = gmc_v6_0_get_vm_pte, }; static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0c3d9bc3a641..f08e5330642d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); } -static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= 
AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, .set_prt = gmc_v7_0_set_prt, - .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags, - .get_vm_pde = gmc_v7_0_get_vm_pde + .get_vm_pde = gmc_v7_0_get_vm_pde, + .get_vm_pte = gmc_v7_0_get_vm_pte }; static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index ea764dd9245d..6d96d40fbcb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, * 0 valid */ -static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, .set_prt = gmc_v8_0_set_prt, - .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags, - .get_vm_pde = gmc_v8_0_get_vm_pde + .get_vm_pde = gmc_v8_0_get_vm_pde, + .get_vm_pte = gmc_v8_0_get_vm_pte }; static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f91337030dc0..9f2a893871ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -51,10 +51,12 @@ #include "gfxhub_v1_1.h" #include "mmhub_v9_4.h" #include "umc_v6_1.h" +#include "umc_v6_0.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" /* add these here since we already include dce12 headers and these are for DCN */ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d @@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, return 0; } -static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry) -{ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if 
(adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, err_data); - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - if (adev->umc.funcs->query_ras_error_address) - adev->umc.funcs->query_ras_error_address(adev, err_data); - - /* only uncorrectable error needs gpu reset */ - if (err_data->ue_count) - amdgpu_ras_reset_gpu(adev, 0); - - return AMDGPU_RAS_SUCCESS; -} - -static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->gmc.umc_ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; - - ih_data.head = *ras_if; - - amdgpu_ras_interrupt_dispatch(adev, &ih_data); - return 0; -} - static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, } /* If it's the first fault for this address, process it normally */ + if (retry_fault && !in_interrupt() && + amdgpu_vm_handle_fault(adev, entry->pasid, addr)) + return 1; /* This also prevents sending it to KFD */ + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to @@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = { static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = { .set = gmc_v9_0_ecc_interrupt_state, - .process = gmc_v9_0_process_ecc_irq, + .process = amdgpu_umc_process_ecc_irq, }; static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) @@ -584,44 +552,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, * 0 valid */ -static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) +static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) { - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - - switch (flags & AMDGPU_VM_MTYPE_MASK) { + switch (flags) { case AMDGPU_VM_MTYPE_DEFAULT: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); case AMDGPU_VM_MTYPE_NC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); case AMDGPU_VM_MTYPE_WC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); + case AMDGPU_VM_MTYPE_RW: + return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW); case AMDGPU_VM_MTYPE_CC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); case AMDGPU_VM_MTYPE_UC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); default: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); } - - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; } static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, @@ -648,12 +597,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, } } +static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= 
~AMDGPU_PTE_MTYPE_VG10_MASK; + *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK; + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags &= ~AMDGPU_PTE_VALID; + } + + if (adev->asic_type == CHIP_ARCTURUS && + !(*flags & AMDGPU_PTE_SYSTEM) && + mapping->bo_va->is_xgmi) + *flags |= AMDGPU_PTE_SNOOPED; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, - .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, - .get_vm_pde = gmc_v9_0_get_vm_pde + .map_mtype = gmc_v9_0_map_mtype, + .get_vm_pde = gmc_v9_0_get_vm_pde, + .get_vm_pte = gmc_v9_0_get_vm_pte }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -664,6 +635,9 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_VEGA10: + adev->umc.funcs = &umc_v6_0_funcs; + break; case CHIP_VEGA20: adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; @@ -681,7 +655,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA20: - adev->mmhub_funcs = &mmhub_v1_0_funcs; + adev->mmhub.funcs = &mmhub_v1_0_funcs; break; default: break; @@ -762,140 +736,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) return 0; } -static int gmc_v9_0_ecc_ras_block_late_init(void *handle, - struct ras_fs_if *fs_info, struct ras_common_if *ras_block) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = NULL; - struct ras_ih_if ih_info = { - .cb = gmc_v9_0_process_ras_data_cb, - }; - int r; - - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) - ras_if = &adev->gmc.umc_ras_if; - else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB) - ras_if = &adev->gmc.mmhub_ras_if; - else - BUG(); - - if (!amdgpu_ras_is_supported(adev, ras_block->block)) { - amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); - return 0; - } - - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. 
*/ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = *ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info->head = **ras_if; - - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) { - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - } - - amdgpu_ras_debugfs_create(adev, fs_info); - - r = amdgpu_ras_sysfs_create(adev, fs_info); - if (r) - goto sysfs; -resume: - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) { - r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); - if (r) - goto irq; - } - - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; -} - -static int gmc_v9_0_ecc_late_init(void *handle) -{ - int r; - - struct ras_fs_if umc_fs_info = { - .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", - }; - struct ras_common_if umc_ras_block = { - .block = AMDGPU_RAS_BLOCK__UMC, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "umc", - }; - struct ras_fs_if mmhub_fs_info = { - .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", - }; - struct ras_common_if mmhub_ras_block = { - .block = AMDGPU_RAS_BLOCK__MMHUB, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "mmhub", - }; - - r = gmc_v9_0_ecc_ras_block_late_init(handle, - &umc_fs_info, &umc_ras_block); - if (r) - return r; - - r = gmc_v9_0_ecc_ras_block_late_init(handle, - &mmhub_fs_info, &mmhub_ras_block); - return r; -} - static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool r; + int r; if (!gmc_v9_0_keep_stolen_memory(adev)) amdgpu_bo_late_init(adev); @@ -929,7 +773,7 @@ static int gmc_v9_0_late_init(void *handle) } } - r = gmc_v9_0_ecc_late_init(handle); + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; @@ -970,33 +814,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) { - int chansize, numchan; int r; - if (amdgpu_sriov_vf(adev)) { - /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, - * and DF related registers is not readable, seems hardcord is the - * only way to set the correct vram_width - */ - adev->gmc.vram_width = 2048; - } else if (amdgpu_emu_mode != 1) { - adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); - } - - if (!adev->gmc.vram_width) { - /* hbm memory channel size */ - if (adev->flags & AMD_IS_APU) - chansize = 64; - else - chansize = 128; - - numchan = adev->df_funcs->get_hbm_channel_number(adev); - adev->gmc.vram_width = numchan * chansize; - } - /* size in MB on si */ adev->gmc.mc_vram_size = - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; if (!(adev->flags & AMD_IS_APU)) { @@ -1108,7 +930,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) static int gmc_v9_0_sw_init(void *handle) { - int r; + int r, vram_width = 0, vram_type = 0, vram_vendor = 0; 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v1_0_init(adev); @@ -1119,7 +941,32 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (amdgpu_sriov_vf(adev)) + /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN, + * and the DF related registers are not readable; hardcoding seems to be + * the only way to set the correct vram_width + */ + adev->gmc.vram_width = 2048; + else if (amdgpu_emu_mode != 1) + adev->gmc.vram_width = vram_width; + + if (!adev->gmc.vram_width) { + int chansize, numchan; + + /* hbm memory channel size */ + if (adev->flags & AMD_IS_APU) + chansize = 64; + else + chansize = 128; + + numchan = adev->df_funcs->get_hbm_channel_number(adev); + adev->gmc.vram_width = numchan * chansize; + } + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; switch (adev->asic_type) { case CHIP_RAVEN: adev->num_vmhubs = 2; @@ -1240,33 +1087,7 @@ static int gmc_v9_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; void *stolen_vga_buf; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && - adev->gmc.umc_ras_if) { - struct ras_common_if *ras_if = adev->gmc.umc_ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - /* remove fs first */ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - /* remove the IH */ - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } - - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && - adev->gmc.mmhub_ras_if) { - struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if; - - /* remove fs and disable ras feature */ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } - + amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1316,13 +1137,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) */ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { - int r, i; - bool value; - u32 tmp; - - amdgpu_device_program_register_sequence(adev, - golden_settings_vega10_hdp, - ARRAY_SIZE(golden_settings_vega10_hdp)); + int r; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -1332,15 +1147,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - switch (adev->asic_type) { - case CHIP_RAVEN: - /* TODO for renoir */ - mmhub_v1_0_update_power_gating(adev, true); - break; - default: - break; - } - r = gfxhub_v1_0_gart_enable(adev); if (r) return r; @@ -1352,6 +1158,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; + DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", + (unsigned)(adev->gmc.gart_size >> 20), + (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); + adev->gart.ready = true; + return 0; +} + +static int gmc_v9_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool value; + int r, i; + u32 tmp; + + /* The sequence of these two function calls matters. */ + gmc_v9_0_init_golden_registers(adev); + + if (adev->mode_info.num_crtc) { + if (adev->asic_type != CHIP_ARCTURUS) { + /* Lock out access through VGA aperture */ + WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL,
VGA_MEMORY_DISABLE, 1); + + /* disable VGA render */ + WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + } + } + + amdgpu_device_program_register_sequence(adev, + golden_settings_vega10_hdp, + ARRAY_SIZE(golden_settings_vega10_hdp)); + + switch (adev->asic_type) { + case CHIP_RAVEN: + /* TODO for renoir */ + mmhub_v1_0_update_power_gating(adev, true); + break; + case CHIP_ARCTURUS: + WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); + break; + default: + break; + } + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); @@ -1361,7 +1210,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); /* After HDP is initialized, flush HDP.*/ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; @@ -1377,28 +1226,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) for (i = 0; i < adev->num_vmhubs; ++i) gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); - adev->gart.ready = true; - return 0; -} - -static int gmc_v9_0_hw_init(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* The sequence of these two function calls matters.*/ - gmc_v9_0_init_golden_registers(adev); - - if (adev->mode_info.num_crtc) { - /* Lockout access through VGA aperture*/ - WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - - /* disable VGA render */ - WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - } + if (adev->umc.funcs && adev->umc.funcs->init_registers) + adev->umc.funcs->init_registers(adev); r = gmc_v9_0_gart_enable(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 04cd4b6f95d4..6965e1e6fa9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -206,6 +206,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp); } @@ -616,5 +618,6 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, } const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 3542c203c3c8..945533634711 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -31,20 +31,25 @@ #include "soc15_common.h" -static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev) +void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmMMVM_CONTEXT0_* to mmMMVM_CONTEXT1_* */ + int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - WREG32_SOC15(MMHUB, 0, 
mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - mmhub_v2_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); @@ -137,6 +142,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp); tmp = mmMMVM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp); tmp = mmMMVM_L2_CNTL4_DEFAULT; @@ -152,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h index db16f3ece218..3ea4344f0315 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h @@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev); int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); +void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 0cf7ef44b4b5..2c5adfe803a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid) hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT; + if (adev->gmc.translate_further) { + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 9); + } else { + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, + L2_CACHE_BIGK_FRAGMENT_SIZE, 6); + } WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); @@ -240,6 +249,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev, hubid * MMHUB_INSTANCE_REGISTER_OFFSET); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); 
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c new file mode 100644 index 000000000000..0d8767eb7a70 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -0,0 +1,380 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "nbio/nbio_2_3_offset.h" +#include "nbio/nbio_2_3_sh_mask.h" +#include "gc/gc_10_1_0_offset.h" +#include "gc/gc_10_1_0_sh_mask.h" +#include "soc15.h" +#include "navi10_ih.h" +#include "soc15_common.h" +#include "mxgpu_nv.h" +#include "mxgpu_ai.h" + +static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) +{ + WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2); +} + +static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val) +{ + WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0); +} + +/* + * this peek_msg may *only* be called from an IRQ routine, because in an IRQ + * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already + * been set to 1 by the host. + * + * if called outside an IRQ routine, this peek_msg is not guaranteed to return + * the correct value, since there is no guarantee that RCV_MSG_VALID has been + * set by the host by the time RCV_DW0 is read.
+ */ +static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) +{ + return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); +} + + +static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, + enum idh_event event) +{ + u32 reg; + + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + if (reg != event) + return -ENOENT; + + xgpu_nv_mailbox_send_ack(adev); + + return 0; +} + +static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) +{ + return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2; +} + +static int xgpu_nv_poll_ack(struct amdgpu_device *adev) +{ + int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT; + u8 reg; + + do { + reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE); + if (reg & 2) + return 0; + + mdelay(5); + timeout -= 5; + } while (timeout > 1); + + pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT); + + return -ETIME; +} + +static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) +{ + int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT; + + do { + r = xgpu_nv_mailbox_rcv_msg(adev, event); + if (!r) + return 0; + + msleep(10); + timeout -= 10; + } while (timeout > 1); + + pr_err("Didn't get msg:%d from pf, error=%d\n", event, r); + + return -ETIME; +} + +static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) +{ + u32 reg; + int r; + uint8_t trn; + + /* IMPORTANT: + * clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with the + * host's RCV_MSG_VALID cleared, hw automatically clears the host's + * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK; otherwise the + * xgpu_nv_poll_ack() below would return immediately + */ + do { + xgpu_nv_mailbox_set_valid(adev, false); + trn = xgpu_nv_peek_ack(adev); + if (trn) { + pr_err("trn=%x ACK should not assert!
wait again!\n", trn); + msleep(1); + } + } while (trn); + + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0)); + reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, req); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0), + reg); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1), + data1); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2), + data2); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3), + data3); + + xgpu_nv_mailbox_set_valid(adev, true); + + /* start to poll ack */ + r = xgpu_nv_poll_ack(adev); + if (r) + pr_err("Didn't get ack from pf, continuing\n"); + + xgpu_nv_mailbox_set_valid(adev, false); +} + +static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + int r; + + xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); + + /* start to check msg if the request is a GPU access request */ + if (req == IDH_REQ_GPU_INIT_ACCESS || + req == IDH_REQ_GPU_FINI_ACCESS || + req == IDH_REQ_GPU_RESET_ACCESS) { + r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + if (r) { + pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n"); + return r; + } + /* Retrieve checksum from mailbox2 */ + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { + adev->virt.fw_reserve.checksum_key = + RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2)); + } + } + + return 0; +} + +static int xgpu_nv_request_reset(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); +} + +static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + + req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS; + return xgpu_nv_send_access_requests(adev, req); +} + +static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + int r = 0; + + req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS; + r = xgpu_nv_send_access_requests(adev, req); + + return r; +} + +static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("get ack intr and do nothing.\n"); + return 0; +} + +static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static void xgpu_nv_mailbox_flr_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; + int locked; + + /* block amdgpu_gpu_recover till the FLR COMPLETE message is received, + * otherwise the mailbox msg will be ruined/reset by + * the VF FLR. + * + * we can unlock lock_reset to allow "amdgpu_job_timedout" + * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received, + * which means the host side has finished this VF's FLR.
+ +static void xgpu_nv_mailbox_flr_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; + int locked; + + /* block amdgpu_gpu_recover till msg FLR COMPLETE is received, + * otherwise the mailbox msg will be ruined/reset by + * the VF FLR. + * + * we can unlock lock_reset to allow "amdgpu_job_timedout" + * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received, + * which means the host side has finished this VF's FLR. + */ + locked = mutex_trylock(&adev->lock_reset); + if (locked) + adev->in_gpu_reset = 1; + + do { + if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) + goto flr_done; + + msleep(10); + timeout -= 10; + } while (timeout > 1); + +flr_done: + if (locked) { + adev->in_gpu_reset = 0; + mutex_unlock(&adev->lock_reset); + } + + /* Trigger recovery for world switch failure if no TDR */ + if (amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); +} + +static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + enum idh_event event = xgpu_nv_mailbox_peek_msg(adev); + + switch (event) { + case IDH_FLR_NOTIFICATION: + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.flr_work); + break; + /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can + * ignore it here since the polling thread will handle it; other msgs + * like flr complete are not handled here either. + */ + case IDH_CLR_MSG_BUF: + case IDH_FLR_NOTIFICATION_CMPL: + case IDH_READY_TO_ACCESS_GPU: + default: + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = { + .set = xgpu_nv_set_mailbox_ack_irq, + .process = xgpu_nv_mailbox_ack_irq, +}; + +static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = { + .set = xgpu_nv_set_mailbox_rcv_irq, + .process = xgpu_nv_mailbox_rcv_irq, +}; + +void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->virt.ack_irq.num_types = 1; + adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs; + adev->virt.rcv_irq.num_types = 1; + adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs; +} + +int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + return 0; +} + +int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work); + + return 0; +} + +void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) +{ + amdgpu_irq_put(adev, &adev->virt.ack_irq, 0); + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); +} + +const struct amdgpu_virt_ops xgpu_nv_virt_ops = { + .req_full_gpu = xgpu_nv_request_full_gpu_access, + .rel_full_gpu = xgpu_nv_release_full_gpu_access, + .reset_gpu = xgpu_nv_request_reset, + .wait_reset = NULL, + .trans_msg = xgpu_nv_mailbox_trans_msg, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h new file mode 100644 index 000000000000..99b15f6865cb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -0,0 +1,41 @@ +/* + * Copyright 2014 Advanced Micro
Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MXGPU_NV_H__ +#define __MXGPU_NV_H__ + +#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 +#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 + +extern const struct amdgpu_virt_ops xgpu_nv_virt_ops; + +void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev); +int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev); +int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev); +void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev); + +#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4) +#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 9fe08408db58..9af73567e716 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) /* disable irqs */ navi10_ih_disable_interrupts(adev); - adev->nbio_funcs->ih_control(adev); + adev->nbio.funcs->ih_control(adev); /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); @@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) } WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); - adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell, + adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, ih->doorbell_index); tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c index a56c93620e78..88efaecf9f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi10_ip_offset.h" int navi10_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c index cadc7603ca41..a786d159e5e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi12_ip_offset.h" int navi12_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c index 3b5f0f65e096..4ea1e8fbb601 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi14_ip_offset.h" int navi14_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index c05d78d4efc6..f3a3fe746222 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -27,11 +27,21 @@ #include "nbio/nbio_2_3_default.h" #include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> #define smnPCIE_CONFIG_CNTL 0x11180044 #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 + +static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} + static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); @@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0); + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev) @@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { - .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = 
nbio_v2_3_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset, @@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .ih_control = nbio_v2_3_ih_control, .init_registers = nbio_v2_3_init_registers, .detect_hw_virt = nbio_v2_3_detect_hw_virt, + .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h index 5ae52085f6b7..a43b60acf7f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 6590143c3f75..635d9e1fc0a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } -static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { +const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, @@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { - .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h index 0743a6f016f3..6dc743b73218 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 74eecb768a82..d6cbf26074bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { - .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h index 508d549c5029..e7aefb252550 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 910fffced43b..0db458f9fafc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -23,10 +23,12 @@ #include "amdgpu.h" #include "amdgpu_atombios.h" #include "nbio_v7_4.h" +#include "amdgpu_ras.h" #include "nbio/nbio_7_4_offset.h" #include 
"nbio/nbio_7_4_sh_mask.h" #include "nbio/nbio_7_4_0_smn.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include <uapi/linux/kfd_ioctl.h> #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c @@ -266,7 +268,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } -static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { +const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, @@ -306,17 +308,208 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - uint32_t def, data; - def = data = RREG32_PCIE(smnPCIE_CI_CNTL); - data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); +} - if (def != data) - WREG32_PCIE(smnPCIE_CI_CNTL, data); +static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_intr_cntl; + + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_CNTLR_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + amdgpu_ras_global_ras_isr(adev); + } +} + +static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_intr_cntl; + + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + amdgpu_ras_global_ras_isr(adev); + } +} + + +static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_controller_irq enablement should be done in psp bl when it + * tries to enable ras feature. Driver only need to set the correct interrupt + * vector for bare-metal and sriov use case respectively + */ + uint32_t bif_intr_cntl; + + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { + /* set interrupt vector select bit to 0 to select + * vetcor 1 for bare metal case */ + bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, + BIF_INTR_CNTL, + RAS_INTR_VEC_SEL, 0); + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } + + return 0; +} + +static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for ras_controller_irq should be written + * to BIFring instead of general iv ring. However, due to known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be involked. Just left it as a dummy one. 
+ +static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_err_event_athub_irq enablement should be done in psp bl when it + * tries to enable the ras feature. The driver only needs to set the correct + * interrupt vector for the bare-metal and sriov use cases respectively + */ + uint32_t bif_intr_cntl; + + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { + /* set interrupt vector select bit to 0 to select + * vector 1 for bare metal case */ + bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, + BIF_INTR_CNTL, + RAS_INTR_VEC_SEL, 0); + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } + + return 0; +} + +static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to the BIF ring instead of the general iv ring. However, due to a known + * bif ring hw bug, it has to be disabled. There is no chance the process + * function will be invoked. Just leave it as a dummy one. + */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = { + .set = nbio_v7_4_set_ras_controller_irq_state, + .process = nbio_v7_4_process_ras_controller_irq, +}; + +static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = { + .set = nbio_v7_4_set_ras_err_event_athub_irq_state, + .process = nbio_v7_4_process_err_event_athub_irq, +}; + +static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev) +{ + int r; + + /* init the irq funcs */ + adev->nbio.ras_controller_irq.funcs = + &nbio_v7_4_ras_controller_irq_funcs; + adev->nbio.ras_controller_irq.num_types = 1; + + /* register ras controller interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, + &adev->nbio.ras_controller_irq); + if (r) + return r; + + return 0; +} + +static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev) +{ + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v7_4_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 1; + + /* register ras err event athub interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + if (r) + return r; + + return 0; +} + +static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + uint32_t global_sts, central_sts, int_eoi; + uint32_t corr, fatal, non_fatal; + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr); + fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); + non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, + ParityErrNonFatal); + + if (corr) + err_data->ce_count++; + if (fatal) + err_data->ue_count++; + + if (corr || fatal || non_fatal) { + central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS); + /* clear error status register */ + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + + if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, + BIFL_RasContller_Intr_Recv)) { + /* clear interrupt status register */ + 
WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts); + int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI); + int_eoi = REG_SET_FIELD(int_eoi, + IOHC_INTERRUPT_EOI, SMI_EOI, 1); + WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi); + } + } +} + +static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1); } const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { - .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset, @@ -330,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture, .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture, .ih_doorbell_range = nbio_v7_4_ih_doorbell_range, + .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt, .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating, .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep, .get_clockgating_state = nbio_v7_4_get_clockgating_state, @@ -337,4 +531,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .init_registers = nbio_v7_4_init_registers, .detect_hw_virt = nbio_v7_4_detect_hw_virt, .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, + .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, + .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt, + .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt, + .query_ras_error_count = nbio_v7_4_query_ras_error_count, + .ras_late_init = amdgpu_nbio_ras_late_init, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index c442865bac4f..b1ac82872752 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index de9b995b65b1..af68f9815f28 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -46,6 +46,7 @@ #include "gmc_v10_0.h" #include "gfxhub_v2_0.h" #include "mmhub_v2_0.h" +#include "nbio_v2_3.h" #include "nv.h" #include "navi10_ih.h" #include "gfx_v10_0.h" @@ -53,6 +54,7 @@ #include "vcn_v2_0.h" #include "dce_virtual.h" #include "mes_v10_1.h" +#include "mxgpu_nv.h" static const struct amd_ip_funcs nv_common_ip_funcs; @@ -63,8 +65,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u32 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -78,8 +80,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = 
adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -119,7 +121,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static u32 nv_get_config_memsize(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_memsize(adev); + return adev->nbio.funcs->get_memsize(adev); } static u32 nv_get_xclk(struct amdgpu_device *adev) @@ -176,6 +178,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, @@ -279,7 +282,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = adev->nbio_funcs->get_memsize(adev); + u32 memsize = adev->nbio.funcs->get_memsize(adev); if (memsize != 0xffffffff) break; @@ -296,7 +299,7 @@ nv_asic_reset_method(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; - if (smu_baco_is_support(smu)) + if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu)) return AMD_RESET_METHOD_BACO; else return AMD_RESET_METHOD_MODE1; @@ -368,8 +371,8 @@ static void nv_program_aspm(struct amdgpu_device *adev) static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } static const struct amdgpu_ip_block_version nv_common_ip_block = @@ -423,9 +426,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; - adev->nbio_funcs = &nbio_v2_3_funcs; + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - adev->nbio_funcs->detect_hw_virt(adev); + adev->nbio.funcs->detect_hw_virt(adev); + + if (amdgpu_sriov_vf(adev)) + adev->virt.ops = &xgpu_nv_virt_ops; switch (adev->asic_type) { case CHIP_NAVI10: @@ -435,7 +442,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -446,7 +453,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); if (adev->enable_mes) @@ -458,7 +465,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - 
is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -469,7 +476,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); break; @@ -482,12 +489,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) static uint32_t nv_get_rev_id(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_rev_id(adev); + return adev->nbio.funcs->get_rev_id(adev); } static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - adev->nbio_funcs->hdp_flush(adev, ring); + adev->nbio.funcs->hdp_flush(adev, ring); } static void nv_invalidate_hdp(struct amdgpu_device *adev, @@ -532,6 +539,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev) return false; } +static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev) +{ + + /* TODO + * dummy implementation for the pcie_replay_count sysfs interface + */ + + return 0; +} + static void nv_init_doorbell_index(struct amdgpu_device *adev) { adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; @@ -579,12 +596,16 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .need_full_reset = &nv_need_full_reset, .get_pcie_usage = &nv_get_pcie_usage, .need_reset_on_init = &nv_need_reset_on_init, + .get_pcie_replay_count = &nv_get_pcie_replay_count, }; static int nv_common_early_init(void *handle) { +#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -667,16 +688,31 @@ static int nv_common_early_init(void *handle) return -EINVAL; } + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_setting(adev); + xgpu_nv_mailbox_set_irq_funcs(adev); + } + return 0; } static int nv_common_late_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_nv_mailbox_get_irq(adev); + return 0; } static int nv_common_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_nv_mailbox_add_irq_id(adev); + return 0; } @@ -694,7 +730,13 @@ static int nv_common_hw_init(void *handle) /* enable aspm */ nv_program_aspm(adev); /* setup nbio registers */ - adev->nbio_funcs->init_registers(adev); + adev->nbio.funcs->init_registers(adev); + /* remap HDP registers to a hole in mmio space, + * for the purpose of exposing those registers + * to process space + */ + if (adev->nbio.funcs->remap_hdp_registers) + adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); @@ -856,9 +898,9 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); nv_update_hdp_mem_power_gating(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -886,7 +928,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio_funcs->get_clockgating_state(adev, flags); + adev->nbio.funcs->get_clockgating_state(adev, flags); /* AMD_CG_SUPPORT_HDP_MGCG */ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 5d95e614369a..b345e69ba246 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -40,6 +40,9 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin"); MODULE_FIRMWARE("amdgpu/picasso_asd.bin"); MODULE_FIRMWARE("amdgpu/raven2_asd.bin"); +MODULE_FIRMWARE("amdgpu/picasso_ta.bin"); +MODULE_FIRMWARE("amdgpu/raven2_ta.bin"); +MODULE_FIRMWARE("amdgpu/raven_ta.bin"); static int psp_v10_0_init_microcode(struct psp_context *psp) { @@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) char fw_name[30]; int err = 0; const struct psp_firmware_header_v1_0 *hdr; - + const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); switch (adev->asic_type) { @@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) adev->psp.asd_start_addr = (uint8_t *)hdr + le32_to_cpu(hdr->header.ucode_array_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v10.0: Failed to load firmware \"%s\"\n", + fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *) + adev->psp.ta_fw->data; + adev->psp.ta_hdcp_ucode_version = + le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); + adev->psp.ta_hdcp_ucode_size = + le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); + adev->psp.ta_hdcp_start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.ta_dtm_ucode_version = + le32_to_cpu(ta_hdr->ta_dtm_ucode_version); + adev->psp.ta_dtm_ucode_size = + le32_to_cpu(ta_hdr->ta_dtm_size_bytes); + adev->psp.ta_dtm_start_addr = + (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + } + return 0; + +out2: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; out: if (err) { dev_err(adev->dev, @@ -228,6 +269,7 @@ static int psp_v10_0_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 10166104b8a3..ffeaa2f5588d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); 
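The TA microcode loading added to psp_v10_0_init_microcode() above carves two images, HDCP and DTM, out of a single ta.bin blob: the HDCP image starts at the header's ucode_array_offset_bytes, and the DTM image sits at ta_dtm_offset_bytes relative to the HDCP start. A simplified sketch of that offset arithmetic, with an illustrative header layout (the real ta_firmware_header_v1_0 has more fields than shown here):

/* Toy model of slicing per-TA images out of one firmware blob. */
#include <stdint.h>
#include <stdio.h>

struct toy_ta_header {
	uint32_t ucode_array_offset_bytes; /* start of the payload area */
	uint32_t hdcp_size_bytes;
	uint32_t dtm_offset_bytes;         /* relative to the HDCP image */
	uint32_t dtm_size_bytes;
};

int main(void)
{
	/* pretend fw[] is the raw file contents with the header at offset 0 */
	uint8_t fw[256] = { 0 };
	struct toy_ta_header hdr = {
		.ucode_array_offset_bytes = 64,
		.hdcp_size_bytes = 96,
		.dtm_offset_bytes = 96,
		.dtm_size_bytes = 32,
	};
	const uint8_t *hdcp = fw + hdr.ucode_array_offset_bytes;
	const uint8_t *dtm = hdcp + hdr.dtm_offset_bytes;

	printf("hdcp image: offset %td, %u bytes\n", hdcp - fw, hdr.hdcp_size_bytes);
	printf("dtm image:  offset %td, %u bytes\n", dtm - fw, hdr.dtm_size_bytes);
	return 0;
}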
+MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); #define mmRLC_GPM_UCODE_DATA_NV10 0x5b62 #define mmSDMA0_UCODE_ADDR_NV10 0x5880 #define mmSDMA0_UCODE_DATA_NV10 0x5881 +/* memory training timeout define */ +#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 static int psp_v11_0_init_microcode(struct psp_context *psp) { @@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) switch (adev->asic_type) { case CHIP_VEGA20: + case CHIP_ARCTURUS: snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { @@ -182,7 +186,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: - case CHIP_ARCTURUS: break; default: BUG(); @@ -205,18 +208,26 @@ out: return err; } +static bool psp_v11_0_is_sos_alive(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + return sol_reg != 0x0; +} + static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) { int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check tOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { + if (psp_v11_0_is_sos_alive(psp)) { psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; @@ -233,7 +244,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) /* Copy PSP KDB binary to memory */ memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size); - /* Provide the sys driver to bootloader */ + /* Provide the PSP KDB to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE; @@ -252,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { + if (psp_v11_0_is_sos_alive(psp)) { psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; @@ -296,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
*/ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) + if (psp_v11_0_is_sos_alive(psp)) return 0; /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ @@ -398,6 +405,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) return false; } +static int psp_v11_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + /* Write the ring destroy command*/ + if (psp_v11_0_support_vmr_ring(psp)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) */ + if (psp_v11_0_support_vmr_ring(psp)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + return ret; +} + static int psp_v11_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -407,6 +442,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; if (psp_v11_0_support_vmr_ring(psp)) { + ret = psp_v11_0_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -426,6 +467,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp, 0x80000000, 0x8000FFFF, false); } else { + /* Wait for sOS ready for ring creation */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + if (ret) { + DRM_ERROR("Failed to wait for sOS ready for ring creation\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_69 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); @@ -451,33 +500,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp, return ret; } -static int psp_v11_0_ring_stop(struct psp_context *psp, - enum psp_ring_type ring_type) -{ - int ret = 0; - struct amdgpu_device *adev = psp->adev; - - /* Write the ring destroy command*/ - if (psp_v11_0_support_vmr_ring(psp)) - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); - else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, - GFX_CTRL_CMD_ID_DESTROY_RINGS); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) */ - if (psp_v11_0_support_vmr_ring(psp)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); - else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); - - return ret; -} static int psp_v11_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) @@ -541,6 +563,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 
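The amdgpu_asic_flush_hdp() calls added to the psp_v*_cmd_submit() functions in this series enforce an ordering: the ring frame must be fully visible to the PSP before the write pointer is advanced, otherwise the PSP could consume a stale frame. A minimal userspace model of that publish pattern, with a full barrier standing in for the HDP flush (names here are illustrative, not driver API):

/* Toy model of the submit ordering: fill the frame, make it globally
 * visible, then publish the new write pointer. __sync_synchronize() is
 * only a stand-in for amdgpu_asic_flush_hdp(). */
#include <stdint.h>
#include <stdio.h>

struct frame { uint32_t fence_lo, fence_hi, fence_value; };

static struct frame ring[8];
static volatile uint32_t wptr;         /* the consumer polls this */

static void submit(uint32_t index, uint64_t fence_addr)
{
	struct frame *f = &ring[wptr % 8];

	f->fence_hi = (uint32_t)(fence_addr >> 32);
	f->fence_lo = (uint32_t)fence_addr;
	f->fence_value = index;

	__sync_synchronize();          /* frame must be visible before wptr moves */
	wptr = wptr + 1;               /* equivalent of the C2PMSG wptr write */
}

int main(void)
{
	submit(1, 0x100000ULL);
	printf("wptr=%u fence=%u\n", wptr, ring[0].fence_value);
	return 0;
}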
@@ -889,6 +912,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp) return psp_rlc_autoload_start(psp); } +static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) +{ + int ret; + int i; + uint32_t data_32; + int max_wait; + struct amdgpu_device *adev = psp->adev; + + data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg); + + max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; + for (i = 0; i < max_wait; i++) { + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret == 0) + break; + } + if (i < max_wait) + ret = 0; + else + ret = -ETIME; + + DRM_DEBUG("training %s %s, cost %d @ %d ms\n", + (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long", + (ret == 0) ? "succeeded" : "failed", + i, adev->usec_timeout/1000); + return ret; +} + +static void psp_v11_0_memory_training_fini(struct psp_context *psp) +{ + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + kfree(ctx->sys_cache); + ctx->sys_cache = NULL; +} + +static int psp_v11_0_memory_training_init(struct psp_context *psp) +{ + int ret; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { + DRM_DEBUG("memory training is not supported!\n"); + return 0; + } + + ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); + if (ctx->sys_cache == NULL) { + DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); + ret = -ENOMEM; + goto Err_out; + } + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); + ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; + return 0; + +Err_out: + psp_v11_0_memory_training_fini(psp); + return ret; +} + +/* + * save and restore process + */ +static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) +{ + int ret; + uint32_t p2c_header[4]; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + uint32_t *pcache = (uint32_t*)ctx->sys_cache; + + if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { + DRM_DEBUG("Memory training is not supported.\n"); + return 0; + } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) { + DRM_ERROR("Memory training initialization failure.\n"); + return -EINVAL; + } + + if (psp_v11_0_is_sos_alive(psp)) { + DRM_DEBUG("SOS is alive, skip memory training.\n"); + return 0; + } + + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", + pcache[0], pcache[1], pcache[2], pcache[3], + p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + DRM_DEBUG("Short training depends on restore.\n"); + ops |= PSP_MEM_TRAIN_RESTORE; + } + + if ((ops & PSP_MEM_TRAIN_RESTORE) && + pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + pcache[3] == p2c_header[3])) { + DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if ((ops & PSP_MEM_TRAIN_SAVE) && + p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + 
DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n"); + ops |= PSP_MEM_TRAIN_SEND_LONG_MSG; + } + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG; + ops |= PSP_MEM_TRAIN_SAVE; + } + + DRM_DEBUG("Memory training ops:%x.\n", ops); + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); + if (ret) { + DRM_ERROR("Send long training msg failed.\n"); + return ret; + } + } + + if (ops & PSP_MEM_TRAIN_SAVE) { + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false); + } + + if (ops & PSP_MEM_TRAIN_RESTORE) { + amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true); + } + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ? + PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN); + if (ret) { + DRM_ERROR("send training msg failed.\n"); + return ret; + } + } + ctx->training_cnt++; + return 0; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -909,6 +1088,9 @@ static const struct psp_funcs psp_v11_0_funcs = { .ras_trigger_error = psp_v11_0_ras_trigger_error, .ras_cure_posion = psp_v11_0_ras_cure_posion, .rlc_autoload_start = psp_v11_0_rlc_autoload_start, + .mem_training_init = psp_v11_0_memory_training_init, + .mem_training_fini = psp_v11_0_memory_training_fini, + .mem_training = psp_v11_0_memory_training, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index c72e43f8e0be..8f553f6f92d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -378,6 +378,7 @@ static int psp_v12_0_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index d2c727f6a8bd..fdc00938327b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -454,6 +454,7 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 78452cf0115d..4ef4d31f5231 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; static u32 
sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, @@ -746,13 +747,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask = 0; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; sdma_v4_0_wait_reg_mem(ring, 0, 1, - adev->nbio_funcs->get_hdp_flush_done_offset(adev), - adev->nbio_funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), ref_and_mask, ref_and_mask, 10); } @@ -1690,102 +1691,17 @@ static int sdma_v4_0_early_init(void *handle) } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry); static int sdma_v4_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = &adev->sdma.ras_if; struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; - struct ras_fs_if fs_info = { - .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", - }; - struct ras_common_if ras_block = { - .block = AMDGPU_RAS_BLOCK__SDMA, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "sdma", - }; - int r, i; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); - return 0; - } - - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__SDMA); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. 
*/ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__SDMA); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info.head = **ras_if; - - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - - amdgpu_ras_debugfs_create(adev, &fs_info); - r = amdgpu_ras_sysfs_create(adev, &fs_info); - if (r) - goto sysfs; -resume: - for (i = 0; i < adev->sdma.num_instances; i++) { - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); - if (r) - goto irq; - } - - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; + return amdgpu_sdma_ras_late_init(adev, &ih_info); } static int sdma_v4_0_sw_init(void *handle) @@ -1857,21 +1773,7 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && - adev->sdma.ras_if) { - struct ras_common_if *ras_if = adev->sdma.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - /*remove fs first*/ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - /*remove the IH*/ - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } + amdgpu_sdma_ras_fini(adev); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); @@ -1891,7 +1793,7 @@ static int sdma_v4_0_hw_init(void *handle) if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) || - adev->asic_type == CHIP_RENOIR) + (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset)) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); if (!amdgpu_sriov_vf(adev)) @@ -2024,52 +1926,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry) { - uint32_t err_source; int instance; + /* When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + goto out; + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); if (instance < 0) - return 0; - - switch (entry->src_id) { - case SDMA0_4_0__SRCID__SDMA_SRAM_ECC: - err_source = 0; - break; - case SDMA0_4_0__SRCID__SDMA_ECC: - err_source = 1; - break; - default: - return 0; - } - - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + goto out; - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_sdma_process_ras_data_cb(adev, err_data, entry); +out: return AMDGPU_RAS_SUCCESS; } -static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->sdma.ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; - - ih_data.head = *ras_if; - - amdgpu_ras_interrupt_dispatch(adev, &ih_data); - 
return 0; -} - static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -2417,7 +2295,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = { static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = { .set = sdma_v4_0_set_ecc_irq_state, - .process = sdma_v4_0_process_ecc_irq, + .process = amdgpu_sdma_process_ecc_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index fa2f70ce2e2b..f4ad2990f973 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask = 0; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->me == 0) ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0; @@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2); - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); amdgpu_ring_write(ring, ref_and_mask); /* reference */ amdgpu_ring_write(ring, ref_and_mask); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | @@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, 20); if (amdgpu_sriov_vf(adev)) @@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xfffffff); /* mask */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ } @@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); } +static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + amdgpu_ring_emit_wreg(ring, reg0, ref); + /* wait for a cycle to reset vm_inv_eng*_ack */ + amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0); + amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); +} + static int sdma_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { 6 + /* sdma_v5_0_ring_emit_pipeline_sync */ /* sdma_v5_0_ring_emit_vm_flush */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + - SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 
+ + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, @@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .pad_ib = sdma_v5_0_ring_pad_ib, .emit_wreg = sdma_v5_0_ring_emit_wreg, .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait, .init_cond_exec = sdma_v5_0_ring_init_cond_exec, .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec, .preempt_ib = sdma_v5_0_ring_preempt_ib, diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 493af42152f2..29024e64c886 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {GRBM_STATUS}, + {mmGRBM_STATUS2}, + {mmGRBM_STATUS_SE0}, + {mmGRBM_STATUS_SE1}, + {mmSRBM_STATUS}, + {mmSRBM_STATUS2}, + {DMA_STATUS_REG + DMA0_REGISTER_OFFSET}, + {DMA_STATUS_REG + DMA1_REGISTER_OFFSET}, + {mmCP_STAT}, + {mmCP_STALLED_STAT1}, + {mmCP_STALLED_STAT2}, + {mmCP_STALLED_STAT3}, {GB_ADDR_CONFIG}, {MC_ARB_RAMCFG}, {GB_TILE_MODE0}, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f8ab80c8801b..8e1640bc07af 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -58,6 +58,9 @@ #include "mmhub_v1_0.h" #include "df_v1_7.h" #include "df_v3_6.h" +#include "nbio_v6_1.h" +#include "nbio_v7_0.h" +#include "nbio_v7_4.h" #include "vega10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" @@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u32 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u64 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* read low 32 bit */ @@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* write low 32 bit */ @@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 
reg, u32 v) static u32 soc15_get_config_memsize(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_memsize(adev); + return adev->nbio.funcs->get_memsize(adev); } static u32 soc15_get_xclk(struct amdgpu_device *adev) @@ -336,6 +339,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, @@ -461,7 +465,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = adev->nbio_funcs->get_memsize(adev); + u32 memsize = adev->nbio.funcs->get_memsize(adev); if (memsize != 0xffffffff) break; @@ -475,42 +479,66 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { - *cap = false; - return -ENOENT; - } + *cap = smu_baco_is_support(smu); + return 0; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } - return pp_funcs->get_asic_baco_capability(pp_handle, cap); + return pp_funcs->get_asic_baco_capability(pp_handle, cap); + } } static int soc15_asic_baco_reset(struct amdgpu_device *adev) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; + /* avoid NBIF got stuck when do RAS recovery in BACO reset */ + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); + + dev_info(adev->dev, "GPU BACO reset\n"); - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; + if (smu_baco_reset(smu)) + return -EIO; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - dev_info(adev->dev, "GPU BACO reset\n"); + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; - adev->in_baco_reset = 1; + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + } + + /* re-enable doorbell interrupt after BACO exit */ + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); return 0; } static int soc15_mode2_reset(struct amdgpu_device *adev) { + if (is_support_sw_smu(adev)) + return smu_mode2_reset(&adev->smu); if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->asic_reset_mode_2) return -ENOENT; @@ -525,6 +553,7 @@ soc15_asic_reset_method(struct 
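soc15_asic_mode1_reset() (context above) detects recovery by polling get_memsize(): while the ASIC is still in reset, MMIO reads return all-ones, so any MEMSIZE value other than 0xffffffff means the chip is back. A toy version of that bounded poll, assuming a fake device that revives after 100 reads:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

static int fake_reads;

static uint32_t get_memsize(void)
{
	/* pretend the device comes back after 100 polls */
	return ++fake_reads < 100 ? 0xffffffff : 0x4000;
}

/* usec_timeout bounds the loop exactly as adev->usec_timeout does. */
static bool wait_for_asic(int usec_timeout)
{
	for (int i = 0; i < usec_timeout; i++) {
		if (get_memsize() != 0xffffffff)
			return true;
		usleep(1);
	}
	return false;
}

int main(void)
{
	return wait_for_asic(100000) ? 0 : 1;
}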
amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_RENOIR: return AMD_RESET_METHOD_MODE2; case CHIP_VEGA10: case CHIP_VEGA12: @@ -626,8 +655,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev) static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } static const struct amdgpu_ip_block_version vega10_common_ip_block = @@ -641,7 +670,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block = static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_rev_id(adev); + return adev->nbio.funcs->get_rev_id(adev); } int soc15_set_ip_blocks(struct amdgpu_device *adev) @@ -667,13 +696,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->gmc.xgmi.supported = true; - if (adev->flags & AMD_IS_APU) - adev->nbio_funcs = &nbio_v7_0_funcs; - else if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) - adev->nbio_funcs = &nbio_v7_4_funcs; - else - adev->nbio_funcs = &nbio_v6_1_funcs; + if (adev->flags & AMD_IS_APU) { + adev->nbio.funcs = &nbio_v7_0_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; + } else if (adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS) { + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; + } else { + adev->nbio.funcs = &nbio_v6_1_funcs; + adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; + } if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->df_funcs = &df_v3_6_funcs; @@ -681,7 +714,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->df_funcs = &df_v1_7_funcs; adev->rev_id = soc15_get_rev_id(adev); - adev->nbio_funcs->detect_hw_virt(adev); + adev->nbio.funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_ai_virt_ops; @@ -750,13 +783,26 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_ARCTURUS: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + + if (amdgpu_sriov_vf(adev)) { + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + } else { + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + } + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + + if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); break; case CHIP_RENOIR: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); @@ -785,7 +831,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) static void 
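This is where the series' central refactor is decided: soc15_set_ip_blocks() now fills adev->nbio.funcs and adev->nbio.hdp_flush_reg together, one per-ASIC selection point, instead of a bare nbio_funcs pointer with separate lookups at the call sites. A trimmed sketch of the container idea, with stand-in types rather than the real amdgpu structs:

#include <stdio.h>

struct nbio_hdp_flush_reg { unsigned int ref_and_mask_sdma0; };
struct amdgpu_nbio_funcs { unsigned int (*get_rev_id)(void); };

/* Callbacks and per-ASIC register tables travel together; the RAS
 * bookkeeping added later in this series lives here too. */
struct amdgpu_nbio {
	const struct amdgpu_nbio_funcs *funcs;
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
};

static unsigned int v7_0_rev(void) { return 1; }
static const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { v7_0_rev };
static const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { 1u << 10 };

int main(void)
{
	/* one selection point fills both fields, as in soc15_set_ip_blocks() */
	struct amdgpu_nbio nbio = { &nbio_v7_0_funcs, &nbio_v7_0_hdp_flush_reg };

	printf("rev %u mask 0x%x\n", nbio.funcs->get_rev_id(),
	       nbio.hdp_flush_reg->ref_and_mask_sdma0);
	return 0;
}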
soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - adev->nbio_funcs->hdp_flush(adev, ring); + adev->nbio.funcs->hdp_flush(adev, ring); } static void soc15_invalidate_hdp(struct amdgpu_device *adev, @@ -1099,7 +1145,9 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG; } else if (adev->pdev->device == 0x15d8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1142,7 +1190,9 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG; } break; case CHIP_ARCTURUS: @@ -1157,7 +1207,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_MC_MGCG | - AMD_CG_SUPPORT_MC_LS; + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_IH_CG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x32; break; @@ -1186,11 +1237,6 @@ static int soc15_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x91; - - if (adev->pm.pp_feature & PP_GFXOFF_MASK) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; break; default: /* FIXME: not supported yet */ @@ -1208,11 +1254,15 @@ static int soc15_common_early_init(void *handle) static int soc15_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r = 0; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - return 0; + if (adev->nbio.funcs->ras_late_init) + r = adev->nbio.funcs->ras_late_init(adev); + + return r; } static int soc15_common_sw_init(void *handle) @@ -1229,6 +1279,10 @@ static int soc15_common_sw_init(void *handle) static int soc15_common_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_nbio_ras_fini(adev); + adev->df_funcs->sw_fini(adev); return 0; } @@ -1241,12 +1295,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - adev->nbio_funcs->sdma_doorbell_range(adev, i, + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, adev->doorbell_index.sdma_doorbell_range); } - adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, + adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index); } } @@ -1260,13 +1314,13 @@ static int soc15_common_hw_init(void *handle) /* enable aspm */ soc15_program_aspm(adev); /* setup nbio registers */ - adev->nbio_funcs->init_registers(adev); + adev->nbio.funcs->init_registers(adev); /* remap HDP registers to a hole in mmio space, * for the purpose of expose those registers * to process space */ - if (adev->nbio_funcs->remap_hdp_registers) - adev->nbio_funcs->remap_hdp_registers(adev); + if (adev->nbio.funcs->remap_hdp_registers) + adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); @@ -1289,6 +1343,14 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); + if (adev->nbio.ras_if && + 
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { + if (adev->nbio.funcs->init_ras_controller_interrupt) + amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) + amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); + } + return 0; } @@ -1429,9 +1491,9 @@ static int soc15_common_set_clockgating_state(void *handle, case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); soc15_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -1446,9 +1508,9 @@ static int soc15_common_set_clockgating_state(void *handle, break; case CHIP_RAVEN: case CHIP_RENOIR: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); soc15_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -1477,7 +1539,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio_funcs->get_clockgating_state(adev, flags); + adev->nbio.funcs->get_clockgating_state(adev, flags); /* AMD_CG_SUPPORT_HDP_LS */ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index a3dde0c31f57..9af6c6ffbfa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -67,6 +67,8 @@ struct soc15_allowed_register_entry { #define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask } +#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT + void soc15_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int soc15_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c new file mode 100644 index 000000000000..0d6b50528d76 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
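The SOC15_REG_FIELD macro added to soc15.h above is a token-pasting trick: it expands to *two* comma-separated arguments, the <reg>__<field>_MASK and <reg>__<field>__SHIFT constants that the generated register headers already define pairwise. A compilable demonstration with made-up field constants:

#include <stdio.h>

/* Stand-ins for the generated SOC15 register-field definitions. */
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK   0x00000200
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 9

/* Verbatim from the hunk: pastes both names, yielding two arguments. */
#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT

static unsigned int get_field(unsigned int val, unsigned int mask,
			      unsigned int shift)
{
	return (val & mask) >> shift;
}

int main(void)
{
	unsigned int v = 0x200;

	/* expands to get_field(v, 0x00000200, 9) */
	printf("%u\n", get_field(v, SOC15_REG_FIELD(IH_CLK_CTRL,
						    REG_CLK_SOFT_OVERRIDE)));
	return 0;
}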
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "umc_v6_0.h" +#include "amdgpu.h" + +static void umc_v6_0_init_registers(struct amdgpu_device *adev) +{ + unsigned i,j; + + for (i = 0; i < 4; i++) + for (j = 0; j < 4; j++) + WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002); +} + +const struct amdgpu_umc_funcs umc_v6_0_funcs = { + .init_registers = umc_v6_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h new file mode 100644 index 000000000000..109f1a57a46e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
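The loop in umc_v6_0_init_registers() above encodes the UMC layout in its strides: four instances 0x100000 bytes apart, four channels 0x2000 bytes apart, base 0x5010c, all divided by 4 because WREG32() takes a dword offset. Expanding the arithmetic makes the addressing visible (strides read straight off the hunk):

#include <stdio.h>

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)         /* UMC instance */
		for (unsigned int j = 0; j < 4; j++) /* channel */
			printf("umc %u ch %u -> dword offset 0x%x\n",
			       i, j, (i * 0x100000 + 0x5010c + j * 0x2000) / 4);
	return 0;
}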
+ * + */ +#ifndef __UMC_V6_0_H__ +#define __UMC_V6_0_H__ + +#include "soc15_common.h" +#include "amdgpu.h" + +extern const struct amdgpu_umc_funcs umc_v6_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 8502e736f721..47c4b96b14d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -75,6 +75,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) RSMU_UMC_INDEX_MODE_EN, 0); } +static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev) +{ + uint32_t rsmu_umc_index; + + rsmu_umc_index = RREG32_SOC15(RSMU, 0, + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + return REG_GET_FIELD(rsmu_umc_index, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_INSTANCE); +} + static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset, unsigned long *error_count) @@ -165,7 +176,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, uint32_t umc_reg_offset, uint32_t channel_index) { uint32_t lsb, mc_umc_status_addr; - uint64_t mc_umc_status, err_addr; + uint64_t mc_umc_status, err_addr, retired_page; + struct eeprom_table_record *err_rec; mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); @@ -177,6 +189,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, return; } + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); /* calculate error address if ue/ce error is detected */ @@ -191,12 +204,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, err_addr &= ~((0x1ULL << lsb) - 1); /* translate umc channel address to soc pa, 3 parts are included */ - err_data->err_addr[err_data->err_addr_cnt] = - ADDR_OF_8KB_BLOCK(err_addr) | - ADDR_OF_256B_BLOCK(channel_index) | - OFFSET_IN_256B_BLOCK(err_addr); - - err_data->err_addr_cnt++; + retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev); + + err_data->err_addr_cnt++; + } } /* clear umc status */ @@ -209,7 +234,7 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); } -static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, +static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, struct ras_err_data *err_data, uint32_t umc_reg_offset, uint32_t channel_index) { @@ -239,15 +264,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); } -static void umc_v6_1_ras_init(struct amdgpu_device *adev) +static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) { void *ras_error_status = NULL; - amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel); + amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel); } const struct amdgpu_umc_funcs umc_v6_1_funcs = { - .ras_init = 
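The umc_v6_1 change above starts feeding retired pages into the bad-page EEPROM table: only uncorrectable (UECC) errors produce a record, and the translated SoC physical address is stored as a page frame number. A trimmed model of that bookkeeping, with the struct cut down to the fields the hunk uses and AMDGPU_GPU_PAGE_SHIFT taken as 12, its value in the driver:

#include <stdint.h>
#include <time.h>

#define GPU_PAGE_SHIFT 12 /* AMDGPU_GPU_PAGE_SHIFT */

struct eeprom_record {            /* trimmed eeprom_table_record */
	uint64_t address;         /* raw UMC error address */
	uint64_t retired_page;    /* PFN of the retired physical page */
	uint64_t ts;              /* wall-clock seconds at detection */
	int err_type;
	unsigned int mem_channel;
};

/* Only UE errors are recorded; CE errors are merely counted. */
static int record_if_ue(struct eeprom_record *rec, int is_ue,
			uint64_t err_addr, uint64_t soc_pa,
			unsigned int channel)
{
	if (!is_ue)
		return 0;

	rec->address = err_addr;
	rec->retired_page = soc_pa >> GPU_PAGE_SHIFT; /* frame, not byte */
	rec->ts = (uint64_t)time(NULL);
	rec->err_type = 1; /* stands in for the non-recoverable enum value */
	rec->mem_channel = channel;
	return 1; /* caller bumps err_addr_cnt */
}

int main(void)
{
	struct eeprom_record rec;

	return record_if_ue(&rec, 1, 0x1234000, 0xabcd000, 3) ? 0 : 1;
}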
umc_v6_1_ras_init, + .err_cnt_init = umc_v6_1_err_cnt_init, + .ras_late_init = amdgpu_umc_ras_late_init, .query_ras_error_count = umc_v6_1_query_ras_error_count, .query_ras_error_address = umc_v6_1_query_ras_error_address, .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 670784a78512..217084d56ab8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) * Open up a stream for HW test */ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00010000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ @@ -268,13 +269,14 @@ err: */ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00010000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ @@ -327,13 +329,20 @@ err: static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; - r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; - r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence); + r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence); if (r) goto error; @@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 01f658fa72c6..0995378d8263 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) * Open up a stream for HW test */ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00000000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ @@ -275,13 +276,14 @@ err: * Close up a stream for HW test or if userspace failed to do so */ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct dma_fence **fence) + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; - uint64_t dummy; + uint64_t addr; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; + addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; ib->ptr[ib->length_dw++] = handle; ib->ptr[ib->length_dw++] = 0x00000000; - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = addr; ib->ptr[ib->length_dw++] = 0x00000014; ib->ptr[ib->length_dw++] = 0x00000002; @@ -334,13 +336,20 @@ err: static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; + struct amdgpu_bo *bo = NULL; long r; - r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); + r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL, NULL); + if (r) + return r; + + r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; - r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence); + r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence); if (r) goto error; @@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 93b3500e522b..b4f84a820a44 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -202,7 +202,6 @@ static int vcn_v1_0_hw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = true; r = amdgpu_ring_test_helper(ring); if (r) goto done; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
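Both UVD enc ring tests (v6 and v7 above) drop the dummy address ib->gpu_addr + 1024 -- a location inside the IB itself -- in favour of a real 128 KiB VRAM buffer created reserved, and both thread cleanup through one error label so the BO is unreserved and unreferenced on every exit. A self-contained model of that lifecycle, with stub types and stub function bodies whose names mirror the hunk:

#include <stdio.h>

struct bo { int reserved; };

static int bo_create_reserved(struct bo **out)
{
	static struct bo b;

	b.reserved = 1;
	*out = &b;
	return 0;
}

static void bo_unreserve(struct bo *b) { b->reserved = 0; }
static void bo_unref(struct bo **b)    { *b = NULL; }
static int get_create_msg(struct bo *b)  { (void)b; return 0; }
static int get_destroy_msg(struct bo *b) { (void)b; return 0; }

static int enc_ring_test(void)
{
	struct bo *bo = NULL;
	int r = bo_create_reserved(&bo);

	if (r)
		return r;        /* nothing allocated yet: plain return */

	r = get_create_msg(bo);
	if (r)
		goto error;
	r = get_destroy_msg(bo);

error:                           /* single exit owns the cleanup */
	bo_unreserve(bo);
	bo_unref(&bo);
	return r;
}

int main(void) { return enc_ring_test(); }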
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 36ad0c0e8efb..38f787a560cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -244,33 +244,24 @@ static int vcn_v2_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } ring = &adev->vcn.inst->ring_jpeg; - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } done: if (!r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 395c2259f979..03083e5f731a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_vcn.h" +#include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" #include "vcn_v2_0.h" @@ -255,32 +256,24 @@ static int vcn_v2_5_hw_init(void *handle) continue; ring = &adev->vcn.inst[j].ring_dec; - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, j); - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst[j].ring_enc[i]; - ring->sched.ready = false; - continue; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } ring = &adev->vcn.inst[j].ring_jpeg; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } done: if (!r) @@ -423,7 +416,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) * vcn_v2_5_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Disable clock gating for VCN block */ @@ -542,7 +534,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) * vcn_v2_5_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Enable clock gating for VCN block */ @@ -716,6 +707,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) uint32_t rb_bufsz, tmp; int i, j, k, r; + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; @@ -946,6 +940,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 9eae3536ddad..5cb7e231de5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -226,7 +226,7 @@ static int 
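The vcn_v1_0/v2_0/v2_5 hw_init paths above all switch to amdgpu_ring_test_helper(), which folds the ring->sched.ready bookkeeping into the test itself instead of every caller setting ready = true beforehand and clearing it on failure. A sketch of the helper's contract -- reconstructed shape, not the exact driver body:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	bool ready;
	int (*test)(void);
};

/* Run the ring test and derive sched.ready from the result. */
static int ring_test_helper(struct ring *ring)
{
	int r = ring->test();

	ring->ready = !r; /* ready exactly when the test passed */
	return r;
}

static int good_test(void) { return 0; }

int main(void)
{
	struct ring ring = { .ready = false, .test = good_test };

	printf("r=%d ready=%d\n", ring_test_helper(&ring), ring.ready);
	return 0;
}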
vega10_ih_irq_init(struct amdgpu_device *adev) /* disable irqs */ vega10_ih_disable_interrupts(adev); - adev->nbio_funcs->ih_control(adev); + adev->nbio.funcs->ih_control(adev); ih = &adev->irq.ih; /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ @@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle) return 0; } +static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def, field_val; + + if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) { + def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL); + field_val = enable ? 0 : 1; + /** + * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE + * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. + */ + if (adev->asic_type > CHIP_VEGA10) { + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val); + } + + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DYN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + REG_CLK_SOFT_OVERRIDE, field_val); + if (def != data) + WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data); + } +} + static int vega10_ih_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vega10_ih_update_clockgating_state(adev, + state == AMD_CG_STATE_GATE ? true : false); return 0; + } static int vega10_ih_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index bd0580334f83..6b52a539d51b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "vega10_ip_offset.h" int vega10_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 587e33f5dcce..556f854e3551 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "vega20_ip_offset.h" int vega20_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 5f8c8786cac5..78e5cdc0c058 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) return -EINVAL; } +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } + + return pp_funcs->get_asic_baco_capability(pp_handle, cap); +} + +int smu7_asic_baco_reset(struct amdgpu_device *adev) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + 
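vega10_ih_update_clockgating_state() above is a textbook read-modify-write: read IH_CLK_CTRL once, flip a handful of *_SOFT_OVERRIDE fields with REG_SET_FIELD, and write back only if something changed. Note the inversion: enabling clock gating *clears* the override bits (field_val = enable ? 0 : 1). A compilable demo with a simplified macro and an invented field position:

#include <stdio.h>
#include <stdint.h>

/* Illustrative field; the real bit position comes from the headers. */
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK   0x80000000u
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 31

/* Simplified REG_SET_FIELD: clear the field, or in the new value. */
#define REG_SET_FIELD(orig, reg, field, val)                        \
	(((orig) & ~reg##__##field##_MASK) |                        \
	 (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t data = 0x80000001u; /* override currently set */
	int enable = 1;              /* enable clock gating */

	data = REG_SET_FIELD(data, IH_CLK_CTRL, DYN_CLK_SOFT_OVERRIDE,
			     enable ? 0 : 1);
	printf("0x%08x\n", data);    /* 0x00000001: override cleared */
	return 0;
}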
/* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + + dev_info(adev->dev, "GPU BACO reset\n"); + + return 0; +} + /** - * vi_asic_reset - soft reset GPU + * vi_asic_pci_config_reset - soft reset GPU * * @adev: amdgpu_device pointer * - * Look up which blocks are hung and attempt - * to reset them. + * Use PCI Config method to reset the GPU. + * * Returns 0 for success. */ -static int vi_asic_reset(struct amdgpu_device *adev) +static int vi_asic_pci_config_reset(struct amdgpu_device *adev) { int r; @@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev) static enum amd_reset_method vi_asic_reset_method(struct amdgpu_device *adev) { - return AMD_RESET_METHOD_LEGACY; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + smu7_asic_get_baco_capability(adev, &baco_reset); + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_LEGACY; +} + +/** + * vi_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. + */ +static int vi_asic_reset(struct amdgpu_device *adev) +{ + int r; + + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + r = smu7_asic_baco_reset(adev); + else + r = vi_asic_pci_config_reset(adev); + + return r; } static u32 vi_get_config_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 8de0772f986c..40d4174913a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev, int vi_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); +int smu7_asic_baco_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 177d1e5329a5..9f59ba93cfe0 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, const struct cik_ih_ring_entry *ihre = (const struct cik_ih_ring_entry *)ih_ring_entry; const struct kfd2kgd_calls *f2g = dev->kfd2kgd; - unsigned int vmid, pasid; + unsigned int vmid; + uint16_t pasid; + bool ret; /* This workaround is due to HW/FW limitation on Hawaii that * VMID and PASID are not written into ih_ring_entry @@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, *tmp_ihre = *ihre; vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd); - pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid); + ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid); tmp_ihre->ring_id &= 0x000000ff; tmp_ihre->ring_id |= vmid << 8; tmp_ihre->ring_id |= pasid << 16; - return (pasid != 0) && + return ret && (pasid != 0) && vmid >= dev->vm_info.first_vmid_kfd && vmid <= dev->vm_info.last_vmid_kfd; } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 901fe3590165..d3400da6ab64 100644 --- 
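The kfd2kgd interface change visible in cik_event_interrupt_isr() below replaces the get_atc_vmid_pasid_mapping_valid()/..._pasid() pair with a single get_atc_vmid_pasid_mapping_info() that returns validity and writes the PASID through an out parameter, so both facts come from one register read. A sketch of that shape; the bit layout shown (valid flag in bit 31, PASID in the low bits) matches gfx9 but is illustrative here:

#include <stdbool.h>
#include <stdint.h>

static uint32_t atc_vmid_pasid_mapping[16];

static bool get_atc_vmid_pasid_mapping_info(unsigned int vmid,
					    uint16_t *pasid)
{
	uint32_t reg = atc_vmid_pasid_mapping[vmid]; /* single read, no race */

	*pasid = reg & 0xffff;
	return reg & (1u << 31);
}

int main(void)
{
	uint16_t pasid;

	atc_vmid_pasid_mapping[8] = (1u << 31) | 0x8001;
	return get_atc_vmid_pasid_mapping_info(8, &pasid) &&
	       pasid == 0x8001 ? 0 : 1;
}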
a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, 0xbf85fff8, - 0xbf820141, 0xbef4037e, + 0xbf820142, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304080, 0x725d0100, 0xe0304100, 0x725d0200, 0xe0304180, - 0x725d0300, 0xbf820031, + 0x725d0300, 0xbf820032, 0xbef603ff, 0x01000000, 0xbef20378, 0x8078ff78, 0x00000400, 0xbefc0384, @@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304100, 0x725d0100, 0xe0304200, 0x725d0200, 0xe0304300, - 0x725d0300, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0x80f8a078, - 0xf42d003a, 0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0xbef603ff, - 0x01000000, 0xf4211bfa, + 0x725d0300, 0xbf8c3f70, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, + 0xbef603ff, 0x01000000, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0xbef603ff, 0x01000000, + 0xf4211bfa, 0xf0000000, + 0x80788478, 0xf4211b3a, 0xf0000000, 0x80788478, - 0xf4211b3a, 0xf0000000, - 0x80788478, 0xf4211b7a, + 0xf4211b7a, 0xf0000000, + 0x80788478, 0xf4211eba, 0xf0000000, 0x80788478, - 0xf4211eba, 0xf0000000, - 0x80788478, 0xf4211efa, + 0xf4211efa, 0xf0000000, + 0x80788478, 0xf4211c3a, 0xf0000000, 0x80788478, - 0xf4211c3a, 0xf0000000, - 0x80788478, 0xf4211c7a, + 0xf4211c7a, 0xf0000000, + 0x80788478, 0xf4211e7a, 0xf0000000, 0x80788478, - 0xf4211e7a, 0xf0000000, - 0x80788478, 0xf4211cfa, + 0xf4211cfa, 0xf0000000, + 0x80788478, 0xf4211bba, 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef814, 0xf4211bba, - 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef815, - 0xbef2036d, 0x876dff72, - 0x0000ffff, 0xbefc036f, - 0xbefe037a, 0xbeff037b, - 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f71ff, 0xfffff800, - 0x906f8b6f, 
0xb9efa2c3, - 0xb9f3f801, 0x876fff72, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbef30380, + 0xb9eef815, 0xbef2036d, + 0x876dff72, 0x0000ffff, + 0xbefc036f, 0xbefe037a, + 0xbeff037b, 0x876f71ff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f71ff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0x876fff72, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbef30380, 0x88736f73, + 0x876fff72, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x88736f73, 0x876fff72, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x88736f73, - 0x876fff72, 0x01000000, - 0x906f986f, 0x8f6f996f, - 0x88736f73, 0x876fff70, - 0x00800000, 0x906f976f, - 0xb9f3f807, 0x87fe7e7e, - 0x87ea6a6a, 0xb9f0f802, - 0xbf8a0000, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x88736f73, + 0x876fff70, 0x00800000, + 0x906f976f, 0xb9f3f807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f0f802, 0xbf8a0000, + 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index cdaa523ce6be..4433bda2ce25 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -758,6 +758,7 @@ L_RESTORE_V0: buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0) /* restore SGPRs */ //will be 2+8+16*6 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1d3cd5c50d5f..9af45d07515b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -282,7 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, goto err_bind_process; } - pr_debug("Creating queue for PASID %d on gpu 0x%x\n", + pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n", p->pasid, dev->id); @@ -332,7 +332,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, int retval; struct kfd_ioctl_destroy_queue_args *args = data; - pr_debug("Destroying queue id %d for pasid %d\n", + pr_debug("Destroying queue id %d for pasid 0x%x\n", args->queue_id, p->pasid); @@ -378,7 +378,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, properties.queue_percent = args->queue_percentage; properties.priority = args->queue_priority; - pr_debug("Updating queue id %d for pasid %d\n", + pr_debug("Updating queue id %d for pasid 0x%x\n", args->queue_id, p->pasid); mutex_lock(&p->mutex); @@ -855,7 +855,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, struct kfd_process_device_apertures *pAperture; struct kfd_process_device *pdd; - dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid); args->num_of_nodes = 0; @@ -913,7 +913,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, uint32_t nodes = 0; int ret; - dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid); if (args->num_of_nodes == 0) { /* Return number of nodes, so that user space can alloacate @@ -1128,7 +1128,7 @@ static int 
kfd_ioctl_set_scratch_backing_va(struct file *filep, mutex_unlock(&p->mutex); if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && - pdd->qpd.vmid != 0) + pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) dev->kfd2kgd->set_scratch_backing_va( dev->kgd, args->va_addr, pdd->qpd.vmid); @@ -1801,7 +1801,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) } else goto err_i1; - dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); + dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); process = kfd_get_process(current); if (IS_ERR(process)) { @@ -1856,7 +1856,8 @@ err_i1: kfree(kdata); if (retcode) - dev_dbg(kfd_device, "ret = %d\n", retcode); + dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n", + nr, arg, retcode); return retcode; } @@ -1877,7 +1878,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pr_debug("Process %d mapping mmio page\n" + pr_debug("pasid 0x%x mapping mmio page\n" " target user address == 0x%08llX\n" " physical address == 0x%08llX\n" " vm_flags == 0x%04lX\n" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 66387caf966e..de9f68d5c312 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { /* TODO - check & update Vega10 cache details */ #define vega10_cache_info carrizo_cache_info #define raven_cache_info carrizo_cache_info +#define renoir_cache_info carrizo_cache_info /* TODO - check & update Navi10 cache details */ #define navi10_cache_info carrizo_cache_info @@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = raven_cache_info; num_of_cache_types = ARRAY_SIZE(raven_cache_info); break; + case CHIP_RENOIR: + pcache_info = renoir_cache_info; + num_of_cache_types = ARRAY_SIZE(renoir_cache_info); + break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: pcache_info = navi10_cache_info; num_of_cache_types = ARRAY_SIZE(navi10_cache_info); break; @@ -703,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info, cu_info, mem_available, - cu_info->cu_bitmap[i][j], + cu_info->cu_bitmap[i % 4][j + i / 4], ct, cu_processor_id, k); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index a3441b0e385b..d59f2cd056c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; + uint16_t queried_pasid; union SQ_CMD_BITS reg_sq_cmd; union GRBM_GFX_INDEX_BITS reg_gfx_index; struct kfd_process_device *pdd; @@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) */ for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { - if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid - (dev->kgd, vmid)) { - if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid - (dev->kgd, vmid) == p->pasid) { - pr_debug("Killing wave fronts of vmid %d and pasid %d\n", - vmid, p->pasid); - break; - } + status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info + (dev->kgd, vmid, &queried_pasid); + + if (status && queried_pasid == p->pasid) { + pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", + vmid, p->pasid); + break; } } if (vmid > 
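The kfd_fill_gpu_cache_info() tweak above -- cu_bitmap[i % 4][j + i / 4] instead of cu_bitmap[i][j] -- accounts for cu_bitmap being a fixed 4x4 array: ASICs with more than four shader engines wrap the extra engines into the otherwise unused columns, so the flat (se, sh) pair has to be remapped. Printing the mapping makes it concrete (loop bounds chosen for illustration):

#include <stdio.h>

int main(void)
{
	for (int i = 0; i < 8; i++)         /* shader engine */
		for (int j = 0; j < 2; j++) /* shader array  */
			printf("se %d sh %d -> cu_bitmap[%d][%d]\n",
			       i, j, i % 4, j + i / 4);
	return 0;
}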
last_vmid_to_scan) { - pr_err("Didn't find vmid for pasid %d\n", p->pasid); + pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid); return -EFAULT; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c index 9d4af961c5d1..9bfa50633654 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c @@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p) { if (pmgr->pasid != 0) { - pr_debug("H/W debugger is already active using pasid %d\n", + pr_debug("H/W debugger is already active using pasid 0x%x\n", pmgr->pasid); return -EBUSY; } @@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p) { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != p->pasid) { - pr_debug("H/W debugger is not registered by calling pasid %d\n", + pr_debug("H/W debugger is not registered by calling pasid 0x%x\n", p->pasid); return -EINVAL; } @@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != wac_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid %d\n", + pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", wac_info->process->pasid); return -EINVAL; } @@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr, { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != adw_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid %d\n", + pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", adw_info->process->pasid); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0dc1084b5e82..4fa8834ce7cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -39,6 +39,41 @@ */ static atomic_t kfd_locked = ATOMIC_INIT(0); +#ifdef CONFIG_DRM_AMDGPU_CIK +extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; +#endif +extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; +extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; +extern const struct kfd2kgd_calls arcturus_kfd2kgd; +extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; + +static const struct kfd2kgd_calls *kfd2kgd_funcs[] = { +#ifdef KFD_SUPPORT_IOMMU_V2 +#ifdef CONFIG_DRM_AMDGPU_CIK + [CHIP_KAVERI] = &gfx_v7_kfd2kgd, +#endif + [CHIP_CARRIZO] = &gfx_v8_kfd2kgd, + [CHIP_RAVEN] = &gfx_v9_kfd2kgd, +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + [CHIP_HAWAII] = &gfx_v7_kfd2kgd, +#endif + [CHIP_TONGA] = &gfx_v8_kfd2kgd, + [CHIP_FIJI] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS10] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS11] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS12] = &gfx_v8_kfd2kgd, + [CHIP_VEGAM] = &gfx_v8_kfd2kgd, + [CHIP_VEGA10] = &gfx_v9_kfd2kgd, + [CHIP_VEGA12] = &gfx_v9_kfd2kgd, + [CHIP_VEGA20] = &gfx_v9_kfd2kgd, + [CHIP_RENOIR] = &gfx_v9_kfd2kgd, + [CHIP_ARCTURUS] = &arcturus_kfd2kgd, + [CHIP_NAVI10] = &gfx_v10_kfd2kgd, + [CHIP_NAVI12] = &gfx_v10_kfd2kgd, + [CHIP_NAVI14] = &gfx_v10_kfd2kgd, +}; + #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { .asic_family = CHIP_KAVERI, @@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = { .num_sdma_queues_per_engine = 8, }; +static const struct kfd_device_info 
renoir_device_info = { + .asic_family = CHIP_RENOIR, + .asic_name = "renoir", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, + .num_sdma_engines = 1, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 2, +}; + static const struct kfd_device_info navi10_device_info = { .asic_family = CHIP_NAVI10, .asic_name = "navi10", @@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = { .num_sdma_queues_per_engine = 8, }; -struct kfd_deviceid { - unsigned short did; - const struct kfd_device_info *device_info; +static const struct kfd_device_info navi12_device_info = { + .asic_family = CHIP_NAVI12, + .asic_name = "navi12", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .needs_iommu_device = false, + .supports_cwsr = true, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 8, +}; + +static const struct kfd_device_info navi14_device_info = { + .asic_family = CHIP_NAVI14, + .asic_name = "navi14", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .needs_iommu_device = false, + .supports_cwsr = true, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 8, }; -static const struct kfd_deviceid supported_devices[] = { +/* For each entry, [0] is regular and [1] is virtualisation device. 
*/ +static const struct kfd_device_info *kfd_supported_devices[][2] = { #ifdef KFD_SUPPORT_IOMMU_V2 - { 0x1304, &kaveri_device_info }, /* Kaveri */ - { 0x1305, &kaveri_device_info }, /* Kaveri */ - { 0x1306, &kaveri_device_info }, /* Kaveri */ - { 0x1307, &kaveri_device_info }, /* Kaveri */ - { 0x1309, &kaveri_device_info }, /* Kaveri */ - { 0x130A, &kaveri_device_info }, /* Kaveri */ - { 0x130B, &kaveri_device_info }, /* Kaveri */ - { 0x130C, &kaveri_device_info }, /* Kaveri */ - { 0x130D, &kaveri_device_info }, /* Kaveri */ - { 0x130E, &kaveri_device_info }, /* Kaveri */ - { 0x130F, &kaveri_device_info }, /* Kaveri */ - { 0x1310, &kaveri_device_info }, /* Kaveri */ - { 0x1311, &kaveri_device_info }, /* Kaveri */ - { 0x1312, &kaveri_device_info }, /* Kaveri */ - { 0x1313, &kaveri_device_info }, /* Kaveri */ - { 0x1315, &kaveri_device_info }, /* Kaveri */ - { 0x1316, &kaveri_device_info }, /* Kaveri */ - { 0x1317, &kaveri_device_info }, /* Kaveri */ - { 0x1318, &kaveri_device_info }, /* Kaveri */ - { 0x131B, &kaveri_device_info }, /* Kaveri */ - { 0x131C, &kaveri_device_info }, /* Kaveri */ - { 0x131D, &kaveri_device_info }, /* Kaveri */ - { 0x9870, &carrizo_device_info }, /* Carrizo */ - { 0x9874, &carrizo_device_info }, /* Carrizo */ - { 0x9875, &carrizo_device_info }, /* Carrizo */ - { 0x9876, &carrizo_device_info }, /* Carrizo */ - { 0x9877, &carrizo_device_info }, /* Carrizo */ - { 0x15DD, &raven_device_info }, /* Raven */ - { 0x15D8, &raven_device_info }, /* Raven */ + [CHIP_KAVERI] = {&kaveri_device_info, NULL}, + [CHIP_CARRIZO] = {&carrizo_device_info, NULL}, + [CHIP_RAVEN] = {&raven_device_info, NULL}, #endif - { 0x67A0, &hawaii_device_info }, /* Hawaii */ - { 0x67A1, &hawaii_device_info }, /* Hawaii */ - { 0x67A2, &hawaii_device_info }, /* Hawaii */ - { 0x67A8, &hawaii_device_info }, /* Hawaii */ - { 0x67A9, &hawaii_device_info }, /* Hawaii */ - { 0x67AA, &hawaii_device_info }, /* Hawaii */ - { 0x67B0, &hawaii_device_info }, /* Hawaii */ - { 0x67B1, &hawaii_device_info }, /* Hawaii */ - { 0x67B8, &hawaii_device_info }, /* Hawaii */ - { 0x67B9, &hawaii_device_info }, /* Hawaii */ - { 0x67BA, &hawaii_device_info }, /* Hawaii */ - { 0x67BE, &hawaii_device_info }, /* Hawaii */ - { 0x6920, &tonga_device_info }, /* Tonga */ - { 0x6921, &tonga_device_info }, /* Tonga */ - { 0x6928, &tonga_device_info }, /* Tonga */ - { 0x6929, &tonga_device_info }, /* Tonga */ - { 0x692B, &tonga_device_info }, /* Tonga */ - { 0x6938, &tonga_device_info }, /* Tonga */ - { 0x6939, &tonga_device_info }, /* Tonga */ - { 0x7300, &fiji_device_info }, /* Fiji */ - { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/ - { 0x67C0, &polaris10_device_info }, /* Polaris10 */ - { 0x67C1, &polaris10_device_info }, /* Polaris10 */ - { 0x67C2, &polaris10_device_info }, /* Polaris10 */ - { 0x67C4, &polaris10_device_info }, /* Polaris10 */ - { 0x67C7, &polaris10_device_info }, /* Polaris10 */ - { 0x67C8, &polaris10_device_info }, /* Polaris10 */ - { 0x67C9, &polaris10_device_info }, /* Polaris10 */ - { 0x67CA, &polaris10_device_info }, /* Polaris10 */ - { 0x67CC, &polaris10_device_info }, /* Polaris10 */ - { 0x67CF, &polaris10_device_info }, /* Polaris10 */ - { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/ - { 0x67DF, &polaris10_device_info }, /* Polaris10 */ - { 0x6FDF, &polaris10_device_info }, /* Polaris10 */ - { 0x67E0, &polaris11_device_info }, /* Polaris11 */ - { 0x67E1, &polaris11_device_info }, /* Polaris11 */ - { 0x67E3, &polaris11_device_info }, /* Polaris11 */ - { 0x67E7, &polaris11_device_info }, /* 
Polaris11 */ - { 0x67E8, &polaris11_device_info }, /* Polaris11 */ - { 0x67E9, &polaris11_device_info }, /* Polaris11 */ - { 0x67EB, &polaris11_device_info }, /* Polaris11 */ - { 0x67EF, &polaris11_device_info }, /* Polaris11 */ - { 0x67FF, &polaris11_device_info }, /* Polaris11 */ - { 0x6980, &polaris12_device_info }, /* Polaris12 */ - { 0x6981, &polaris12_device_info }, /* Polaris12 */ - { 0x6985, &polaris12_device_info }, /* Polaris12 */ - { 0x6986, &polaris12_device_info }, /* Polaris12 */ - { 0x6987, &polaris12_device_info }, /* Polaris12 */ - { 0x6995, &polaris12_device_info }, /* Polaris12 */ - { 0x6997, &polaris12_device_info }, /* Polaris12 */ - { 0x699F, &polaris12_device_info }, /* Polaris12 */ - { 0x694C, &vegam_device_info }, /* VegaM */ - { 0x694E, &vegam_device_info }, /* VegaM */ - { 0x694F, &vegam_device_info }, /* VegaM */ - { 0x6860, &vega10_device_info }, /* Vega10 */ - { 0x6861, &vega10_device_info }, /* Vega10 */ - { 0x6862, &vega10_device_info }, /* Vega10 */ - { 0x6863, &vega10_device_info }, /* Vega10 */ - { 0x6864, &vega10_device_info }, /* Vega10 */ - { 0x6867, &vega10_device_info }, /* Vega10 */ - { 0x6868, &vega10_device_info }, /* Vega10 */ - { 0x6869, &vega10_device_info }, /* Vega10 */ - { 0x686A, &vega10_device_info }, /* Vega10 */ - { 0x686B, &vega10_device_info }, /* Vega10 */ - { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ - { 0x686D, &vega10_device_info }, /* Vega10 */ - { 0x686E, &vega10_device_info }, /* Vega10 */ - { 0x686F, &vega10_device_info }, /* Vega10 */ - { 0x687F, &vega10_device_info }, /* Vega10 */ - { 0x69A0, &vega12_device_info }, /* Vega12 */ - { 0x69A1, &vega12_device_info }, /* Vega12 */ - { 0x69A2, &vega12_device_info }, /* Vega12 */ - { 0x69A3, &vega12_device_info }, /* Vega12 */ - { 0x69AF, &vega12_device_info }, /* Vega12 */ - { 0x66a0, &vega20_device_info }, /* Vega20 */ - { 0x66a1, &vega20_device_info }, /* Vega20 */ - { 0x66a2, &vega20_device_info }, /* Vega20 */ - { 0x66a3, &vega20_device_info }, /* Vega20 */ - { 0x66a4, &vega20_device_info }, /* Vega20 */ - { 0x66a7, &vega20_device_info }, /* Vega20 */ - { 0x66af, &vega20_device_info }, /* Vega20 */ - { 0x738C, &arcturus_device_info }, /* Arcturus */ - { 0x7388, &arcturus_device_info }, /* Arcturus */ - { 0x738E, &arcturus_device_info }, /* Arcturus */ - { 0x7390, &arcturus_device_info }, /* Arcturus vf */ - { 0x7310, &navi10_device_info }, /* Navi10 */ - { 0x7312, &navi10_device_info }, /* Navi10 */ - { 0x7318, &navi10_device_info }, /* Navi10 */ - { 0x731a, &navi10_device_info }, /* Navi10 */ - { 0x731f, &navi10_device_info }, /* Navi10 */ + [CHIP_HAWAII] = {&hawaii_device_info, NULL}, + [CHIP_TONGA] = {&tonga_device_info, NULL}, + [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info}, + [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info}, + [CHIP_POLARIS11] = {&polaris11_device_info, NULL}, + [CHIP_POLARIS12] = {&polaris12_device_info, NULL}, + [CHIP_VEGAM] = {&vegam_device_info, NULL}, + [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info}, + [CHIP_VEGA12] = {&vega12_device_info, NULL}, + [CHIP_VEGA20] = {&vega20_device_info, NULL}, + [CHIP_RENOIR] = {&renoir_device_info, NULL}, + [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info}, + [CHIP_NAVI10] = {&navi10_device_info, NULL}, + [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info}, + [CHIP_NAVI14] = {&navi14_device_info, NULL}, }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, @@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev 
*kfd); static int kfd_resume(struct kfd_dev *kfd); -static const struct kfd_device_info *lookup_device_info(unsigned short did) +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, + struct pci_dev *pdev, unsigned int asic_type, bool vf) { - size_t i; + struct kfd_dev *kfd; + const struct kfd_device_info *device_info; + const struct kfd2kgd_calls *f2g; - for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { - if (supported_devices[i].did == did) { - WARN_ON(!supported_devices[i].device_info); - return supported_devices[i].device_info; - } + if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2) + || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) { + dev_err(kfd_device, "asic_type %d out of range\n", asic_type); + return NULL; /* asic_type out of range */ } - dev_warn(kfd_device, "DID %04x is missing in supported_devices\n", - did); + device_info = kfd_supported_devices[asic_type][vf]; + f2g = kfd2kgd_funcs[asic_type]; - return NULL; -} - -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, - struct pci_dev *pdev, const struct kfd2kgd_calls *f2g) -{ - struct kfd_dev *kfd; - const struct kfd_device_info *device_info = - lookup_device_info(pdev->device); - - if (!device_info) { - dev_err(kfd_device, "kgd2kfd_probe failed\n"); + if (!device_info || !f2g) { + dev_err(kfd_device, "%s %s not supported in kfd\n", + amdgpu_asic_name[asic_type], vf ? "VF" : ""); return NULL; } @@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) } bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) { unsigned int size; + kfd->ddev = ddev; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_MEC1); kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, @@ -751,9 +730,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) return 0; kgd2kfd_suspend(kfd); - /* hold dqm->lock to prevent further execution*/ - dqm_lock(kfd->dqm); - kfd_signal_reset_event(kfd); return 0; } @@ -771,8 +747,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) if (!kfd->init_complete) return 0; - dqm_unlock(kfd->dqm); - ret = kfd_resume(kfd); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d985e31fcc1e..984c2f2b24b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { - int bit, allocated_vmid; + int allocated_vmid = -1, i; - if (dqm->vmid_bitmap == 0) - return -ENOMEM; + for (i = dqm->dev->vm_info.first_vmid_kfd; + i <= dqm->dev->vm_info.last_vmid_kfd; i++) { + if (!dqm->vmid_pasid[i]) { + allocated_vmid = i; + break; + } + } + + if (allocated_vmid < 0) { + pr_err("no more vmid to allocate\n"); + return -ENOSPC; + } + + pr_debug("vmid allocated: %d\n", allocated_vmid); + + dqm->vmid_pasid[allocated_vmid] = q->process->pasid; - bit = ffs(dqm->vmid_bitmap) - 1; - dqm->vmid_bitmap &= ~(1 << bit); + set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid); - allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd; - pr_debug("vmid allocation %d\n", allocated_vmid); qpd->vmid = allocated_vmid; q->properties.vmid = allocated_vmid; - set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); program_sh_mem_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -220,8 +230,9 @@ 
static int allocate_vmid(struct device_queue_manager *dqm, /* invalidate the VM context after pasid and vmid mapping is set up */ kfd_flush_tlb(qpd_to_pdd(qpd)); - dqm->dev->kfd2kgd->set_scratch_backing_va( - dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid); + if (dqm->dev->kfd2kgd->set_scratch_backing_va) + dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd, + qpd->sh_hidden_private_base, qpd->vmid); return 0; } @@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { - int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; - /* On GFX v7, CP doesn't flush TC at dequeue */ if (q->device->device_info->asic_family == CHIP_HAWAII) if (flush_texture_cache_nocpsch(q->device, qpd)) @@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm, /* Release the vmid mapping */ set_pasid_vmid_mapping(dqm, 0, qpd->vmid); + dqm->vmid_pasid[qpd->vmid] = 0; - dqm->vmid_bitmap |= (1 << bit); qpd->vmid = 0; q->properties.vmid = 0; } @@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (q->properties.is_active) { + if (!dqm->sched_running) { + WARN_ONCE(1, "Load non-HWS mqd while stopped\n"); + goto add_queue_to_list; + } if (WARN(q->process->mm != current->mm, "should only run in user thread")) @@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, goto out_free_mqd; } +add_queue_to_list: list_add(&q->list, &qpd->queues_list); qpd->queue_count++; if (q->properties.is_active) @@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, deallocate_doorbell(qpd, q); + if (!dqm->sched_running) { + WARN_ONCE(1, "Destroy non-HWS queue while stopped\n"); + return 0; + } + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_RESET, KFD_UNMAP_LATENCY_MS, @@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || q->properties.type == KFD_QUEUE_TYPE_SDMA || q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { + + if (!dqm->sched_running) { + WARN_ONCE(1, "Update non-HWS queue while stopped\n"); + goto out_unlock; + } + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); - pr_info_ratelimited("Evicting PASID %u queues\n", + pr_info_ratelimited("Evicting PASID 0x%x queues\n", pdd->process->pasid); /* Mark all queues as evicted. 
Deactivate all active queues on @@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = false; + dqm->queue_count--; + + if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) + continue; + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, * maintain a consistent eviction state */ ret = retval; - dqm->queue_count--; } out: @@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); - pr_info_ratelimited("Evicting PASID %u queues\n", + pr_info_ratelimited("Evicting PASID 0x%x queues\n", pdd->process->pasid); /* Mark all queues as evicted. Deactivate all active queues on @@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, goto out; } - pr_info_ratelimited("Restoring PASID %u queues\n", + pr_info_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ @@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = true; + dqm->queue_count++; + + if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) + continue; + retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, &q->properties, mm); if (retval && !ret) @@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, * maintain a consistent eviction state */ ret = retval; - dqm->queue_count++; } qpd->evicted = 0; out: @@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, goto out; } - pr_info_ratelimited("Restoring PASID %u queues\n", + pr_info_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ @@ -879,7 +912,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) dqm->allocated_queues[pipe] |= 1 << queue; } - dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1; + memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid)); + dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); @@ -902,12 +936,20 @@ static void uninitialize(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm) { init_interrupts(dqm); - return pm_init(&dqm->packets, dqm); + + if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + return pm_init(&dqm->packets, dqm); + dqm->sched_running = true; + + return 0; } static int stop_nocpsch(struct device_queue_manager *dqm) { - pm_uninit(&dqm->packets); + if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + pm_uninit(&dqm->packets); + dqm->sched_running = false; + return 0; } @@ -1058,6 +1100,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); /* clear hang status when driver try to start the hw scheduler */ dqm->is_hws_hang = false; + dqm->sched_running = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1073,6 +1116,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) { dqm_lock(dqm); unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + dqm->sched_running = false; dqm_unlock(dqm); 
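The device_queue_manager rework above replaces the old dqm->vmid_bitmap with a per-VMID PASID table: a zero slot in vmid_pasid[] means the VMID is free, and a non-zero slot records which PASID currently owns it, which is what later lets the v9 interrupt handler read the PASID with a plain array lookup instead of a KGD callback. Below is a minimal user-space sketch of that allocation scheme, not the driver code itself; VMID_NUM matches the new define in kfd_device_queue_manager.h, while the 8..15 range and the PASID value are invented stand-ins for dev->vm_info.first/last_vmid_kfd and q->process->pasid.

#include <stdint.h>
#include <stdio.h>

#define VMID_NUM 16

static uint16_t vmid_pasid[VMID_NUM];

/* A VMID is free iff its PASID slot is zero, so allocation is a
 * linear scan over the KFD-owned range and release is one store. */
static int allocate_vmid(int first, int last, uint16_t pasid)
{
        int i;

        for (i = first; i <= last; i++) {
                if (!vmid_pasid[i]) {
                        vmid_pasid[i] = pasid;
                        return i;
                }
        }
        return -1;      /* the driver returns -ENOSPC here */
}

static void deallocate_vmid(int vmid)
{
        vmid_pasid[vmid] = 0;
}

int main(void)
{
        int vmid = allocate_vmid(8, 15, 0x1234);

        printf("vmid %d -> pasid 0x%x\n", vmid, vmid_pasid[vmid]);
        deallocate_vmid(vmid);
        return 0;
}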
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); @@ -1259,9 +1303,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) { int retval; + if (!dqm->sched_running) + return 0; if (dqm->queue_count <= 0 || dqm->processes_count <= 0) return 0; - if (dqm->active_runlist) return 0; @@ -1283,6 +1328,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, { int retval = 0; + if (!dqm->sched_running) + return 0; if (dqm->is_hws_hang) return -EIO; if (!dqm->active_runlist) @@ -1676,7 +1723,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_dev *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * - dev->device_info->num_sdma_engines * + (dev->device_info->num_sdma_engines + + dev->device_info->num_xgmi_sdma_engines) * dev->device_info->num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; @@ -1786,10 +1834,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: device_queue_manager_init_v9(&dqm->asic_ops); break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: device_queue_manager_init_v10_navi10(&dqm->asic_ops); break; default: @@ -1883,6 +1934,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) int pipe, queue; int r = 0; + if (!dqm->sched_running) { + seq_printf(m, " Device is stopped\n"); + + return 0; + } + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); @@ -1917,7 +1974,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } } - for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) { + for (pipe = 0; pipe < get_num_sdma_engines(dqm) + + get_num_xgmi_sdma_engines(dqm); pipe++) { for (queue = 0; queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 90db2c9275f6..a8c37e6da027 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -32,6 +32,8 @@ #include "kfd_mqd_manager.h" +#define VMID_NUM 16 + struct device_process_node { struct qcm_process_device *qpd; struct list_head list; @@ -185,7 +187,8 @@ struct device_queue_manager { unsigned int *allocated_queues; uint64_t sdma_bitmap; uint64_t xgmi_sdma_bitmap; - unsigned int vmid_bitmap; + /* the pasid mapping for each kfd vmid */ + uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; struct kfd_mem_obj *pipeline_mem; uint64_t fence_gpu_addr; @@ -198,6 +201,7 @@ struct device_queue_manager { bool is_hws_hang; struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; + bool sched_running; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index d674d4b3340f..908081c85de1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -852,8 +852,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, if (type == KFD_EVENT_TYPE_MEMORY) { dev_warn(kfd_device, - "Sending SIGSEGV to HSA Process with PID %d ", - p->lead_thread->pid); + "Sending SIGSEGV to process %d (pasid 0x%x)", + p->lead_thread->pid, p->pasid); send_sig(SIGSEGV, p->lead_thread, 0); } @@ -861,13 +861,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, if 
(send_signal) { if (send_sigterm) { dev_warn(kfd_device, - "Sending SIGTERM to HSA Process with PID %d ", - p->lead_thread->pid); + "Sending SIGTERM to process %d (pasid 0x%x)", + p->lead_thread->pid, p->pasid); send_sig(SIGTERM, p->lead_thread, 0); } else { dev_err(kfd_device, - "HSA Process (PID %d) got unhandled exception", - p->lead_thread->pid); + "Process %d (pasid 0x%x) got unhandled exception", + p->lead_thread->pid, p->pasid); } } } @@ -936,7 +936,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, /* Workaround on Raven to not kill the process when memory is freed * before IOMMU is able to finish processing all the excessive PPRs */ - if (dev->device_info->asic_family != CHIP_RAVEN) { + if (dev->device_info->asic_family != CHIP_RAVEN && + dev->device_info->asic_family != CHIP_RENOIR) { mutex_lock(&p->event_mutex); /* Lookup events by type and signal them */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 9dc4bff8085e..bb77b8890e77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process) /*Iterating over all devices*/ while (kfd_topology_enum_kfd_devices(id, &dev) == 0) { - if (!dev) { - id++; /* Skip non GPU devices */ + if (!dev || kfd_devcgroup_check_permission(dev)) { + /* Skip non GPU devices and devices to which the + * current process has no access. Access can be + * limited by placing the process in a specific + * cgroup hierarchy + */ + id++; continue; } @@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: kfd_init_apertures_v9(pdd, id); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 3ef67d2e0d9f..e05d75ecda21 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, memcpy(patched_ihre, ih_ring_entry, dev->device_info->ih_ring_entry_size); - pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid( - dev->kgd, vmid); + pasid = dev->dqm->vmid_pasid[vmid]; /* Patch the pasid field */ patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3]) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index c56ac47cd318..bc47f6a44456 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd) } kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!kfd->ih_wq)) { + kfifo_free(&kfd->ih_fifo); + dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n"); + return -ENOMEM; + } spin_lock_init(&kfd->interrupt_lock); INIT_WORK(&kfd->interrupt_work, interrupt_wq); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 5f35df23fb18..193e2835bd4d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -160,7 +160,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) if (!p) return; - pr_debug("Unbinding process %d from IOMMU\n", pasid); + pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
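The kfd_interrupt.c hunk above closes a leak-on-failure hole: alloc_workqueue() can return NULL, and before this change kfd_interrupt_init() would carry on with a half-initialized interrupt path. Here is a small stand-alone model of the unwind ordering the new branch enforces; malloc/free are stand-ins for kfifo_alloc/kfifo_free and alloc_workqueue, and the forced-failure flag exists only for the demo.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ih_state {
        void *fifo;     /* models kfd->ih_fifo */
        void *wq;       /* models kfd->ih_wq */
};

static int ih_init(struct ih_state *ih, int fail_wq)
{
        ih->fifo = malloc(1024);
        if (!ih->fifo)
                return -ENOMEM;

        ih->wq = fail_wq ? NULL : malloc(64);
        if (!ih->wq) {
                /* the later allocation failed: release the earlier one
                 * before reporting the error, as the patch now does */
                free(ih->fifo);
                ih->fifo = NULL;
                fprintf(stderr, "Failed to allocate IH workqueue\n");
                return -ENOMEM;
        }
        return 0;
}

int main(void)
{
        struct ih_state ih;

        printf("forced failure -> %d\n", ih_init(&ih, 1));
        if (ih_init(&ih, 0) == 0) {
                printf("normal init   -> 0\n");
                free(ih.fifo);
                free(ih.wq);
        }
        return 0;
}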
mutex_lock(kfd_get_dbgmgr_mutex()); @@ -194,7 +194,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, struct kfd_dev *dev; dev_warn_ratelimited(kfd_device, - "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X", + "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", PCI_BUS_NUM(pdev->devfn), PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), @@ -235,7 +235,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) err = amd_iommu_bind_pasid(kfd->pdev, p->pasid, p->lead_thread); if (err < 0) { - pr_err("Unexpected pasid %d binding failure\n", + pr_err("Unexpected pasid 0x%x binding failure\n", p->pasid); mutex_unlock(&p->mutex); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 8b4564f71a7a..11d244891393 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -330,10 +330,13 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: kernel_queue_init_v9(&kq->ops_asic_specific); break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: kernel_queue_init_v10(&kq->ops_asic_specific); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 986ff52d5750..f4b7f7e6c40e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -82,7 +82,7 @@ static void kfd_exit(void) kfd_chardev_exit(); } -int kgd2kfd_init() +int kgd2kfd_init(void) { return kfd_init(); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 9cd3eb2d90bd..4a236b2c2354 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -69,35 +69,13 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, struct queue_properties *q) { - int retval; - struct kfd_mem_obj *mqd_mem_obj = NULL; + struct kfd_mem_obj *mqd_mem_obj; - /* From V9, for CWSR, the control stack is located on the next page - * boundary after the mqd, we will use the gtt allocation function - * instead of sub-allocation function. 
- */ - if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { - mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); - if (!mqd_mem_obj) - return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, - ALIGN(q->ctl_stack_size, PAGE_SIZE) + - ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE), - &(mqd_mem_obj->gtt_mem), - &(mqd_mem_obj->gpu_addr), - (void *)&(mqd_mem_obj->cpu_ptr), true); - } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd), - &mqd_mem_obj); - } - - if (retval) { - kfree(mqd_mem_obj); + if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd), + &mqd_mem_obj)) return NULL; - } return mqd_mem_obj; - } static void init_mqd(struct mqd_manager *mm, void **mqd, @@ -250,14 +228,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, static void free_mqd(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj) { - struct kfd_dev *kfd = mm->dev; - - if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); - kfree(mqd_mem_obj); - } else { - kfd_gtt_sa_free(mm->dev, mqd_mem_obj); - } + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); } static bool is_occupied(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 2c8624c5b42c..83ef4b3dd2fb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -239,10 +239,13 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: pm->pmf = &kfd_v9_pm_funcs; break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: pm->pmf = &kfd_v10_pm_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index c89326125d71..060a9e8b301e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -36,6 +36,10 @@ #include <linux/seq_file.h> #include <linux/kref.h> #include <linux/sysfs.h> +#include <linux/device_cgroup.h> +#include <drm/drm_file.h> +#include <drm/drm_drv.h> +#include <drm/drm_device.h> #include <kgd_kfd_interface.h> #include "amd_shared.h" @@ -179,10 +183,6 @@ enum cache_policy { cache_policy_noncoherent }; -#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11) -#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \ - (chip) <= CHIP_NAVI10) || \ - (chip) == CHIP_HAWAII) #define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) struct kfd_event_interrupt_class { @@ -230,6 +230,7 @@ struct kfd_dev { const struct kfd_device_info *device_info; struct pci_dev *pdev; + struct drm_device *ddev; unsigned int id; /* topology stub index */ @@ -687,7 +688,7 @@ struct kfd_process { /* We want to receive a notification when the mm_struct is destroyed */ struct mmu_notifier mmu_notifier; - unsigned int pasid; + uint16_t pasid; unsigned int doorbell_index; /* @@ -1040,6 +1041,21 @@ bool kfd_is_locked(void); void kfd_inc_compute_active(struct kfd_dev *dev); void kfd_dec_compute_active(struct kfd_dev *dev); +/* Cgroup Support */ +/* Check with device cgroup if @kfd device is accessible */ +static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) +{ +#if defined(CONFIG_CGROUP_DEVICE) + struct drm_device *ddev = kfd->ddev; + + return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, + ddev->render->index, + DEVCG_ACC_WRITE | DEVCG_ACC_READ); +#else + return 0; +#endif +} + 
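kfd_devcgroup_check_permission() just above is the single gate used throughout this series: kfd_init_apertures() skips GPUs the caller may not touch, and every kfd_topology sysfs show() callback patched further down now returns -EPERM instead of leaking properties across cgroup boundaries. The sketch below is a toy user-space model of that gating, not the kernel API; the allowed flag stands in for the verdict of devcgroup_check_permission() on the GPU's render node, and the property value is invented.

#include <errno.h>
#include <stdio.h>

static int check_permission(int allowed)
{
        return allowed ? 0 : -EPERM;    /* models kfd_devcgroup_check_permission() */
}

/* Models a topology sysfs show() callback: gate first, format second. */
static int mem_show(char *buf, size_t len, int allowed)
{
        if (check_permission(allowed))
                return -EPERM;          /* property stays hidden from this cgroup */
        return snprintf(buf, len, "size_in_bytes %llu\n", 8ULL << 30);
}

int main(void)
{
        char buf[64];

        printf("denied cgroup -> %d\n", mem_show(buf, sizeof(buf), 0));
        if (mem_show(buf, sizeof(buf), 1) > 0)
                fputs(buf, stdout);
        return 0;
}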
/* Debugfs */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 40e3fc0c6942..10f9af5784f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) list_for_each_entry_safe(pdd, temp, &p->per_device_data, per_device_list) { - pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", + pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", pdd->dev->id, p->pasid); if (pdd->drm_file) { @@ -687,6 +687,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, struct kfd_dev *dev) { unsigned int i; + int range_start = dev->shared_resources.non_cp_doorbells_start; + int range_end = dev->shared_resources.non_cp_doorbells_end; if (!KFD_IS_SOC15(dev->device_info->asic_family)) return 0; @@ -698,14 +700,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, return -ENOMEM; /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */ + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", + range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); + for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { - if (i >= dev->shared_resources.non_cp_doorbells_start - && i <= dev->shared_resources.non_cp_doorbells_end) { + if (i >= range_start && i <= range_end) { set_bit(i, qpd->doorbell_bitmap); set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, qpd->doorbell_bitmap); - pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i, - i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); } } @@ -1020,7 +1024,7 @@ static void evict_process_worker(struct work_struct *work) */ flush_delayed_work(&p->restore_work); - pr_debug("Started evicting pasid %d\n", p->pasid); + pr_debug("Started evicting pasid 0x%x\n", p->pasid); ret = kfd_process_evict_queues(p); if (!ret) { dma_fence_signal(p->ef); @@ -1029,9 +1033,9 @@ static void evict_process_worker(struct work_struct *work) queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); - pr_debug("Finished evicting pasid %d\n", p->pasid); + pr_debug("Finished evicting pasid 0x%x\n", p->pasid); } else - pr_err("Failed to evict queues of pasid %d\n", p->pasid); + pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); } static void restore_process_worker(struct work_struct *work) @@ -1046,7 +1050,7 @@ static void restore_process_worker(struct work_struct *work) * lifetime of this thread, kfd_process p will be valid */ p = container_of(dwork, struct kfd_process, restore_work); - pr_debug("Started restoring pasid %d\n", p->pasid); + pr_debug("Started restoring pasid 0x%x\n", p->pasid); /* Setting last_restore_timestamp before successful restoration. 
* Otherwise this would have to be set by KGD (restore_process_bos) @@ -1062,7 +1066,7 @@ static void restore_process_worker(struct work_struct *work) ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, &p->ef); if (ret) { - pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", + pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", p->pasid, PROCESS_BACK_OFF_TIME_MS); ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); @@ -1072,9 +1076,9 @@ static void restore_process_worker(struct work_struct *work) ret = kfd_process_restore_queues(p); if (!ret) - pr_debug("Finished restoring pasid %d\n", p->pasid); + pr_debug("Finished restoring pasid 0x%x\n", p->pasid); else - pr_err("Failed to restore queues of pasid %d\n", p->pasid); + pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); } void kfd_suspend_all_processes(void) @@ -1088,7 +1092,7 @@ void kfd_suspend_all_processes(void) cancel_delayed_work_sync(&p->restore_work); if (kfd_process_evict_queues(p)) - pr_err("Failed to suspend process %d\n", p->pasid); + pr_err("Failed to suspend process 0x%x\n", p->pasid); dma_fence_signal(p->ef); dma_fence_put(p->ef); p->ef = NULL; @@ -1171,7 +1175,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) int idx = srcu_read_lock(&kfd_processes_srcu); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - seq_printf(m, "Process %d PASID %d:\n", + seq_printf(m, "Process %d PASID 0x%x:\n", p->lead_thread->tgid, p->pasid); mutex_lock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 7e6c3ee82f5b..2659d226c056 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, pr_debug("The new slot id %lu\n", found); if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { - pr_info("Cannot open more queues for process with pasid %d\n", + pr_info("Cannot open more queues for process with pasid 0x%x\n", pqm->process->pasid); return -ENOMEM; } @@ -298,7 +298,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("Pasid %d DQM create queue %d failed. ret %d\n", + pr_err("Pasid 0x%x DQM create queue %d failed. 
ret %d\n", pqm->process->pasid, type, retval); goto err_create_queue; } @@ -377,7 +377,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { - pr_err("Pasid %d destroy queue %d failed, ret %d\n", + pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n", pqm->process->pasid, pqn->q->properties.queue_id, retval); if (retval != -ETIME) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 7551761f2aa9..69bd0628fdc6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; iolink = container_of(attr, struct kfd_iolink_properties, attr); + if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type); sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj); sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min); @@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; mem = container_of(attr, struct kfd_mem_properties, attr); + if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type); sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes); sysfs_show_32bit_prop(buffer, "flags", mem->flags); @@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; cache = container_of(attr, struct kfd_cache_properties, attr); + if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "processor_id_low", cache->processor_id_low); sysfs_show_32bit_prop(buffer, "level", cache->cache_level); @@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (strcmp(attr->name, "gpu_id") == 0) { dev = container_of(attr, struct kfd_topology_device, attr_gpuid); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; return sysfs_show_32bit_val(buffer, dev->gpu_id); } @@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev = container_of(attr, struct kfd_topology_device, attr_name); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; return sysfs_show_str_val(buffer, dev->node_props.name); } dev = container_of(attr, struct kfd_topology_device, attr_props); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "cpu_cores_count", dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, "simd_count", @@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) { struct kfd_topology_device *dev; struct kfd_topology_device *out_dev = NULL; + struct kfd_mem_properties *mem; + struct kfd_cache_properties *cache; + struct kfd_iolink_properties *iolink; down_write(&topology_lock); list_for_each_entry(dev, &topology_device_list, list) { @@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; + + list_for_each_entry(mem, &dev->mem_props, list) + mem->gpu = dev->gpu; + list_for_each_entry(cache, &dev->cache_props, list) + cache->gpu = dev->gpu; + 
list_for_each_entry(iolink, &dev->io_link_props, list) + iolink->gpu = dev->gpu; break; } } @@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index d4718d58d0f2..15843e0fc756 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -102,6 +102,7 @@ struct kfd_mem_properties { uint32_t flags; uint32_t width; uint32_t mem_clk_max; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; @@ -123,6 +124,7 @@ struct kfd_cache_properties { uint32_t cache_latency; uint32_t cache_type; uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE]; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; @@ -141,6 +143,7 @@ struct kfd_iolink_properties { uint32_t max_bandwidth; uint32_t rec_transfer_size; uint32_t flags; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 71991a28a775..313183b80032 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0 depends on DRM_AMD_DC && X86 depends on DRM_AMD_DC_DCN1_0 help - Choose this option if you want to have - Navi support for display engine + Choose this option if you want to have + Navi support for display engine config DRM_AMD_DC_DCN2_1 - bool "DCN 2.1 family" - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN2_0 - help - Choose this option if you want to have - Renoir support for display engine + bool "DCN 2.1 family" + depends on DRM_AMD_DC && X86 + depends on DRM_AMD_DC_DCN2_0 + help + Choose this option if you want to have + Renoir support for display engine config DRM_AMD_DC_DSC_SUPPORT bool "DSC support" @@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT depends on DRM_AMD_DC_DCN1_0 depends on DRM_AMD_DC_DCN2_0 help - Choose this option if you want to have - Dynamic Stream Compression support + Choose this option if you want to have + Dynamic Stream Compression support + +config DRM_AMD_DC_HDCP + bool "Enable HDCP support in DC" + depends on DRM_AMD_DC + help + Choose this option + if you want to support + HDCP authentication config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 496cee000f10..36b3d6a5d04d 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power +ifdef CONFIG_DRM_AMD_DC_HDCP +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp +endif #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power +ifdef CONFIG_DRM_AMD_DC_HDCP +DAL_LIBS += modules/hdcp +endif + AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) 
include $(AMD_DAL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 94911871eb9b..9a3b7bf8ab0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o endif +ifdef CONFIG_DRM_AMD_DC_HDCP +AMDGPUDM += amdgpu_dm_hdcp.o +endif + ifneq ($(CONFIG_DEBUG_FS),) AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c67d3c41db19..caba9ecac723 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -37,6 +37,9 @@ #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif #include "amdgpu_pm.h" #include "amd_shared.h" @@ -67,6 +70,7 @@ #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> +#include <drm/drm_hdcp.h> #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" @@ -143,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state); +static void amdgpu_dm_set_psr_caps(struct dc_link *link); +static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); + + /* * dm_vblank_get_counter * @@ -263,6 +273,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +/** + * dm_pflip_high_irq() - Handle pageflip interrupt + * @interrupt_params: ignored + * + * Handles the pageflip interrupt by notifying all interested parties + * that the pageflip has been completed. + */ static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; @@ -407,6 +424,13 @@ static void dm_vupdate_high_irq(void *interrupt_params) } } +/** + * dm_crtc_high_irq() - Handles CRTC interrupt + * @interrupt_params: ignored + * + * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK + * event handler.
+ */ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; @@ -646,11 +670,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct dc_callback_init init_params; +#endif + adev->dm.ddev = adev->ddev; adev->dm.adev = adev; /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&init_params, 0, sizeof(init_params)); +#endif mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); @@ -697,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) init_data.flags.multi_mon_pp_mclk_switch = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) + init_data.flags.disable_fractional_pwm = true; + init_data.flags.power_down_display_on_boot = true; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 @@ -713,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -723,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) { + adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); + + if (!adev->dm.hdcp_workqueue) + DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + + dc_init_callbacks(adev->dm.dc, &init_params); + } +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -764,6 +812,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->dm.hdcp_workqueue) { + hdcp_destroy(adev->dm.hdcp_workqueue); + adev->dm.hdcp_workqueue = NULL; + } + + if (adev->dm.dc) + dc_deinit_callbacks(adev->dm.dc); +#endif + /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); @@ -897,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; int ret = 0; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", - aconnector, aconnector->base.base.id); + aconnector, + aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to start MST\n"); - ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; - return ret; - } + aconnector->dc_link->type = + dc_connection_single; + break; } + } } + drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } @@ -940,6 +1000,11 @@ static int dm_late_init(void *handle) params.backlight_lut_array_size = 16; 
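detect_mst_link_for_all_connectors() and s3_handle_mst() above move from holding mode_config.connection_mutex across a bare list_for_each_entry() walk to the drm_connector_list_iter API, which pins one connector at a time so the loop body can block without freezing the whole connector list. The toy model below illustrates only that pin-one-element iteration style; the real iterator also synchronizes against concurrent connector hotplug, which this sketch deliberately ignores.

#include <stdio.h>

struct conn {
        int id;
        struct conn *next;
        int refs;       /* models the per-connector reference count */
};

/* Advance the cursor: drop the previous element, pin the next one. */
static struct conn *iter_next(struct conn *pos, struct conn *head)
{
        struct conn *n = pos ? pos->next : head;

        if (pos)
                pos->refs--;
        if (n)
                n->refs++;
        return n;
}

int main(void)
{
        struct conn c2 = { 2, NULL, 0 };
        struct conn c1 = { 1, &c2, 0 };
        struct conn *p;

        for (p = iter_next(NULL, &c1); p; p = iter_next(p, &c1))
                printf("visiting connector %d (pinned refs: %d)\n", p->id, p->refs);
        return 0;
}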
params.backlight_lut_array = linear_lut; + /* Min backlight level after ABM reduction; don't allow below 1% + * 0xFFFF x 0.01 = 0x28F + */ + params.min_abm_backlight = 0x28F; + /* todo will enable for navi10 */ if (adev->asic_type <= CHIP_RAVEN) { ret = dmcu_load_iram(dmcu, params); @@ -955,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; int ret; bool need_hotplug = false; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_port) @@ -973,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr); + ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { drm_dp_mst_topology_mgr_set_mst(mgr, false); need_hotplug = true; } } } - - drm_modeset_unlock(&dev->mode_config.connection_mutex); + drm_connector_list_iter_end(&iter); if (need_hotplug) drm_kms_helper_hotplug_event(dev); @@ -989,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -1019,7 +1082,7 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Teardown components within &struct amdgpu_display_manager that require * cleanup.
This involves cleaning up the DRM device, DC, and any modules that @@ -1163,6 +1226,7 @@ static int dm_resume(void *handle) struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state; @@ -1185,17 +1249,18 @@ static int dm_resume(void *handle) /* program HPD filter */ dc_resume(dm->dc); - /* On resume we need to rewrite the MSTM control bits to enamble MST*/ - s3_handle_mst(ddev, false); - /* * early enable HPD Rx IRQ, should be done before set mode as short * pulse interrupts are used for MST */ amdgpu_dm_irq_resume_early(adev); + /* On resume we need to rewrite the MSTM control bits to enable MST*/ + s3_handle_mst(ddev, false); + /* Do detection*/ - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); /* @@ -1223,6 +1288,7 @@ static int dm_resume(void *handle) amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); } + drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) @@ -1438,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; +#ifdef CONFIG_DRM_AMD_DC_HDCP + /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ + if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +#endif } mutex_unlock(&dev->mode_config.mutex); @@ -1452,6 +1523,9 @@ static void handle_hpd_irq(void *param) struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct amdgpu_device *adev = dev->dev_private; +#endif /* * In case of failure or MST no need to update connector status or notify the OS @@ -1459,6 +1533,10 @@ static void handle_hpd_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); +#endif if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1577,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param) struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + union hpd_irq_data hpd_irq_data; + struct amdgpu_device *adev = dev->dev_private; + + memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); +#endif /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio @@ -1586,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param) if (dc_link->type != dc_connection_mst_branch) mutex_lock(&aconnector->hpd_lock); + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) && +#else if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && +#endif !is_mst_root_connector) { /* Downstream Port status changed. 
*/ if (!dc_link_detect_sink(dc_link, &new_connection_type)) @@ -1621,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param) drm_kms_helper_hotplug_event(dev); } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); +#endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) dm_handle_hpd_rx_irq(aconnector); @@ -2334,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); + if (amdgpu_dc_feature_mask & DC_PSR_MASK) + amdgpu_dm_set_psr_caps(link); } @@ -3311,8 +3406,12 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct hdmi_vendor_infoframe hv_frame; + struct hdmi_avi_infoframe avi_frame; - memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + memset(&hv_frame, 0, sizeof(hv_frame)); + memset(&avi_frame, 0, sizeof(avi_frame)); timing_out->h_border_left = 0; timing_out->h_border_right = 0; @@ -3322,6 +3421,9 @@ static void fill_stream_properties_from_drm_display_mode( if (drm_mode_is_420_only(info, mode_in) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if (drm_mode_is_420_also(info, mode_in) + && aconnector->force_yuv420_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -3346,6 +3448,13 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; } + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + timing_out->vic = avi_frame.video_code; + drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + timing_out->hdmi_vic = hv_frame.vic; + } + timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; timing_out->h_sync_width = @@ -3566,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream->dm_stream_context = aconnector; + stream->timing.flags.LTE_340MCSC_SCRAMBLE = + drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { @@ -3621,8 +3733,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_link_get_link_cap(aconnector->dc_link)); if (dsc_caps.is_dsc_supported) - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], &dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) @@ -3639,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, 
false, false); + if (stream->link->psr_feature_enabled) { + struct dc *core_dc = stream->link->ctx->dc; + + if (dc_is_dmcu_initialized(core_dc)) { + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + stream->psr_version = dmcu->dmcu_version.psr_version; + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); + } + } finish: dc_sink_release(sink); @@ -4114,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec result = MODE_OK; else DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", - mode->vdisplay, mode->hdisplay, + mode->vdisplay, mode->clock, dc_result); @@ -4494,7 +4619,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, tv.num_shared = 1; list_add(&tv.head, &list); - r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true); + r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); if (r) { dev_err(adev->dev, "fail to reserve bo (%d)\n", r); return r; @@ -5088,6 +5213,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_vrr_capable_property( &aconnector->base); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + drm_connector_attach_content_protection_property(&aconnector->base, false); +#endif } } @@ -5330,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, return false; } +#ifdef CONFIG_DRM_AMD_DC_HDCP +static bool is_content_protection_different(struct drm_connector_state *state, + const struct drm_connector_state *old_state, + const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + /* CP is being re enabled, ignore this */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + return false; + } + + /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled + * hot-plug, headless s3, dpms + */ + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && + aconnector->dc_sink != NULL) + return true; + + if (old_state->content_protection == state->content_protection) + return false; + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + return true; + + return false; +} + +static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) + hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); + else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); + +} +#endif static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -5671,6 +5847,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 
uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; + bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -5716,6 +5893,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane = dm_new_plane_state->dc_state; + if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) + swizzle = false; + bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; @@ -5870,6 +6050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* Update the planes if changed or disable if we don't have any. */ if ((planes_count || acrtc_state->active_planes == 0) && acrtc_state->stream) { + bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; @@ -5905,14 +6086,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &acrtc_state->vrr_params.adjust); spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + dc_commit_updates_for_stream(dm->dc, bundle->surface_updates, planes_count, acrtc_state->stream, &bundle->stream_update, dc_state); + + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->psr_version && + !acrtc_state->stream->link->psr_feature_enabled) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_feature_enabled && + !acrtc_state->stream->link->psr_allow_active && + swizzle) { + amdgpu_dm_psr_enable(acrtc_state->stream); + } + mutex_unlock(&dm->dc_lock); } @@ -6221,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) crtc->hwmode = new_crtc_state->mode; } else if (modereset_required(new_crtc_state)) { DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); - /* i.e. 
reset mode */ - if (dm_old_crtc_state->stream) + if (dm_old_crtc_state->stream) { + if (dm_old_crtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(dm_old_crtc_state->stream); + remove_stream(adev, acrtc, dm_old_crtc_state->stream); + } } } /* for_each_crtc_in_state() */ @@ -6254,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + new_crtc_state = NULL; + + if (acrtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); + new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + continue; + } + + if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); + } +#endif /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -6293,9 +6516,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!scaling_changed && !abm_changed && !hdr_changed) continue; + stream_update.stream = dm_new_crtc_state->stream; if (scaling_changed) { update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, - dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); + dm_new_con_state, dm_new_crtc_state->stream); stream_update.src = dm_new_crtc_state->stream->src; stream_update.dst = dm_new_crtc_state->stream->dst; @@ -7164,7 +7388,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - + stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. @@ -7575,3 +7799,92 @@ update: freesync_capable); } +static void amdgpu_dm_set_psr_caps(struct dc_link *link) +{ + uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return; + if (link->type == dc_connection_none) + return; + if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + dpcd_data, sizeof(dpcd_data))) { + link->psr_feature_enabled = dpcd_data[0] ? 
true:false; + DRM_INFO("PSR support:%d\n", link->psr_feature_enabled); + } +} + +/* + * amdgpu_dm_link_setup_psr() - configure psr link + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) +{ + struct dc_link *link = NULL; + struct psr_config psr_config = {0}; + struct psr_context psr_context = {0}; + struct dc *dc = NULL; + bool ret = false; + + if (stream == NULL) + return false; + + link = stream->link; + dc = link->ctx->dc; + + psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version; + + if (psr_config.psr_version > 0) { + psr_config.psr_exit_link_training_required = 0x1; + psr_config.psr_frame_capture_indication_req = 0; + psr_config.psr_rfb_setup_time = 0x37; + psr_config.psr_sdp_transmit_line_num_deadline = 0x20; + psr_config.allow_smu_optimizations = 0x0; + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + + } + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled); + + return ret; +} + +/* + * amdgpu_dm_psr_enable() - enable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + struct dc_static_screen_events triggers = {0}; + + DRM_DEBUG_DRIVER("Enabling psr...\n"); + + triggers.cursor_update = true; + triggers.overlay_update = true; + triggers.surface_update = true; + + dc_stream_set_static_screen_events(link->ctx->dc, + &stream, 1, + &triggers); + + return dc_link_set_psr_allow_active(link, true, false); +} + +/* + * amdgpu_dm_psr_disable() - disable psr f/w + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +{ + + DRM_DEBUG_DRIVER("Disabling psr...\n"); + + return dc_link_set_psr_allow_active(stream->link, false, true); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c8c525a2b505..77c5166e6b08 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps { * @display_indexes_num: Max number of display streams supported * @irq_handler_list_table_lock: Synchronizes access to IRQ tables * @backlight_dev: Backlight control device + * @backlight_link: Link on which to control backlight + * @backlight_caps: Capabilities of the backlight device + * @freesync_module: Module handling freesync calculations + * @fw_dmcu: Reference to DMCU firmware + * @dmcu_fw_version: Version of the DMCU firmware + * @soc_bounding_box: SOC bounding box values provided by gpu_info FW * @cached_state: Caches device atomic state for suspend/resume * @compressor: Frame buffer compression buffer. 
See &struct dm_comressor_info */ @@ -128,7 +134,7 @@ struct amdgpu_display_manager { u16 display_indexes_num; /** - * @atomic_obj + * @atomic_obj: * * In combination with &dm_atomic_state it helps manage * global atomic state that doesn't map cleanly into existing @@ -225,6 +231,9 @@ struct amdgpu_display_manager { struct amdgpu_dm_backlight_caps backlight_caps; struct mod_freesync *freesync_module; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct hdcp_workqueue *hdcp_workqueue; +#endif struct drm_atomic_state *cached_state; @@ -234,6 +243,8 @@ struct amdgpu_display_manager { uint32_t dmcu_fw_version; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** + * @soc_bounding_box: + * * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ @@ -287,6 +298,7 @@ struct amdgpu_dm_connector { uint32_t debugfs_dpcd_address; uint32_t debugfs_dpcd_size; #endif + bool force_yuv420_output; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b43bb7f90e4e..2233d293a707 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func, res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL); + dc_gamma_release(&gamma); + return res ? 0 : -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index a549c7c717dd..eaad9099bc0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, } /* Configure dithering */ - if (!dm_need_crc_dither(source)) + if (!dm_need_crc_dither(source)) { dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); - else + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_DISABLE); + } else { dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_AUTO); + } unlock: mutex_unlock(&adev->dm.dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f3dfb2887ae0..bdb37e611015 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -942,6 +942,52 @@ static const struct { {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops} }; +/* + * Force YUV420 output if available from the given mode + */ +static int force_yuv420_output_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + + connector->force_yuv420_output = (bool)val; + + return 0; +} + +/* + * Check if YUV420 is forced when available from the given mode + */ +static int force_yuv420_output_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + + *val = connector->force_yuv420_output; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, + force_yuv420_output_set, "%llu\n"); + +/* + * Read PSR state + */ +static int psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + uint32_t psr_state = 0; + + dc_link_get_psr_state(link, &psr_state); + + 
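+	/* psr_state is the raw value reported by dc_link_get_psr_state(); 0 reads as PSR inactive (our reading of the DC interface, not spelled out in the patch). */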
*val = psr_state; + + return 0; +} + + +DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -955,6 +1001,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) dp_debugfs_entries[i].fops); } } + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); + + debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, + &force_yuv420_output_fops); + } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c new file mode 100644 index 000000000000..77181ddf6c8e --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -0,0 +1,346 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_hdcp.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dm_helpers.h" +#include <drm/drm_hdcp.h> + +static bool +lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + + struct dc_link *link = handle; + struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); +} + +static bool +lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); +} + +static void process_output(struct hdcp_workqueue *hdcp_work) +{ + struct mod_hdcp_output output = hdcp_work->output; + + if (output.callback_stop) + cancel_delayed_work(&hdcp_work->callback_dwork); + + if (output.callback_needed) + schedule_delayed_work(&hdcp_work->callback_dwork, + msecs_to_jiffies(output.callback_delay)); + + if (output.watchdog_timer_stop) + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + if (output.watchdog_timer_needed) + schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, + msecs_to_jiffies(output.watchdog_timer_delay)); + +} + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + 
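+	/* Fan out to the per-link workqueue: the actual CP_IRQ handling happens in event_cpirq() under hdcp_w->mutex. */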
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + schedule_work(&hdcp_w->cpirq_work); +} + + + + +static void event_callback(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + + +} +static void event_property_update(struct work_struct *work) +{ + + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct drm_device *dev = hdcp_work->aconnector->base.dev; + long ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + + if (aconnector->base.state->commit) { + ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + + if (ret == 0) { + DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); + hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + + if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + else + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + + + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void event_property_validate(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = + container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); + struct mod_hdcp_display_query query; + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + + mutex_lock(&hdcp_work->mutex); + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + + if (query.encryption_status != hdcp_work->encryption_status) { + hdcp_work->encryption_status = query.encryption_status; + schedule_work(&hdcp_work->property_update_work); + } + + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_watchdog_timer(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), + struct hdcp_workqueue, + watchdog_timer_dwork); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + +static void event_cpirq(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + + +void hdcp_destroy(struct hdcp_workqueue *hdcp_work) +{ + int i = 0; + + for (i = 0; i < hdcp_work->max_link; i++) { + cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); + cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + } + + kfree(hdcp_work); + +} + +static void update_config(void *handle, struct cp_psp_stream_config 
*config) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + + if (aconnector->dc_sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + + display->controller = CONTROLLER_ID_D0 + config->otg_inst; + display->dig_fe = config->stream_enc_inst; + link->dig_be = config->link_enc_inst; + link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; + link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->adjust.hdcp2.disable = 1; + +} + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) +{ + + int max_caps = dc->caps.max_links; + struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); + int i = 0; + + if (hdcp_work == NULL) + goto fail_alloc_context; + + hdcp_work->max_link = max_caps; + + for (i = 0; i < max_caps; i++) { + + mutex_init(&hdcp_work[i].mutex); + + INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); + INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); + INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); + INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); + INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); + + hdcp_work[i].hdcp.config.psp.handle = psp_context; + hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); + hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; + hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + } + + cp_psp->funcs.update_stream_config = update_config; + cp_psp->handle = hdcp_work; + + return hdcp_work; + +fail_alloc_context: + kfree(hdcp_work); + + return NULL; + + + +} + + + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h new file mode 100644 index 000000000000..d3ba505d0696 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_ +#define AMDGPU_DM_AMDGPU_DM_HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp.h" +#include "dc.h" +#include "dm_cp_psp.h" + +struct mod_hdcp; +struct mod_hdcp_link; +struct mod_hdcp_display; +struct cp_psp; + +struct hdcp_workqueue { + struct work_struct cpirq_work; + struct work_struct property_update_work; + struct delayed_work callback_dwork; + struct delayed_work watchdog_timer_dwork; + struct delayed_work property_validate_dwork; + struct amdgpu_dm_connector *aconnector; + struct mutex mutex; + + struct mod_hdcp hdcp; + struct mod_hdcp_output output; + struct mod_hdcp_display display; + struct mod_hdcp_link link; + + enum mod_hdcp_encryption_status encryption_status; + uint8_t max_link; +}; + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, + struct amdgpu_dm_connector *aconnector); +void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); +void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_destroy(struct hdcp_workqueue *work); + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc); + +#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ee1dc75f5ddc..11e5784aa62a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps( (struct edid *) edid->raw_edid); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); - if (sad_count <= 0) { - DRM_INFO("SADs count is: %d, don't need to read it\n", - sad_count); + if (sad_count < 0) + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return result; - } edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; for (i = 0; i < edid_caps->audio_mode_count; ++i) { @@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( * Polls for ACT (allocation change trigger) handled and sends * ALLOCATE_PAYLOAD message. 
*/ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream) { @@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) - return false; + return ACT_FAILED; mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) - return false; + return ACT_FAILED; ret = drm_dp_check_act_status(mst_mgr); if (ret) - return false; + return ACT_FAILED; - return true; + return ACT_SUCCESS; } bool dm_helpers_dp_mst_send_payload_allocation( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index fa5d503d379c..64445c4cc4c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); @@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) true); } } + drm_connector_list_iter_end(&iter); } /** @@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; @@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) false); } } + drm_connector_list_iter_end(&iter); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5ec14efd4d8c..49cf39711dfc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -113,6 +113,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = -EIO; break; case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: result = -EBUSY; break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: @@ -123,31 +124,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, return result; } -static enum drm_connector_status -dm_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - - enum drm_connector_status status = - drm_dp_mst_detect_port( - connector, - &master->mst_mgr, - aconnector->port); - - return status; -} - static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; - if (amdgpu_dm_connector->edid) { - kfree(amdgpu_dm_connector->edid); - 
amdgpu_dm_connector->edid = NULL; - } + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); @@ -177,7 +161,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - .detect = dm_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, @@ -245,17 +228,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +static struct drm_encoder * +dm_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *connector_state) { - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + return &to_amdgpu_dm_connector(connector)->mst_encoder->base; +} + +static int +dm_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *master = aconnector->mst_port; - return &amdgpu_dm_connector->mst_encoder->base; + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, - .best_encoder = dm_mst_best_encoder, + .atomic_best_encoder = dm_mst_atomic_best_encoder, + .detect_ctx = dm_dp_mst_detect, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -417,6 +412,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_dp_aux_register(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, &aconnector->base); + + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + return; + aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index f4cfa0caeba8..55a520a63712 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -345,7 +345,7 @@ bool dm_pp_get_clock_levels_by_type( /* Error in pplib. Provide default values. 
*/ return true; } - } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) { if (smu_get_clock_by_type(&adev->smu, dc_to_pp_clock_type(clk_type), &pp_clks)) { @@ -365,7 +365,7 @@ bool dm_pp_get_clock_levels_by_type( validation_clks.memory_max_clock = 80000; validation_clks.level = 0; } - } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) { if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) { DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); validation_clks.engine_max_clock = 72000; @@ -506,8 +506,8 @@ bool dm_pp_apply_clock_for_voltage_request( ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); - else if (adev->smu.funcs && - adev->smu.funcs->display_clock_voltage_request) + else if (adev->smu.ppt_funcs && + adev->smu.ppt_funcs->display_clock_voltage_request) ret = smu_display_clock_voltage_request(&adev->smu, &pp_clock_request); if (ret) @@ -527,7 +527,7 @@ bool dm_pp_get_static_clocks( ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); if (ret) return false; @@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); - else if (adev->smu.funcs && - adev->smu.funcs->set_watermarks_for_clock_ranges) + else smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges); + &wm_with_clock_ranges); } void pp_rv_set_pme_wa_enable(struct pp_smu *pp) @@ -604,7 +603,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp) if (pp_funcs && pp_funcs->notify_smu_enable_pwe) pp_funcs->notify_smu_enable_pwe(pp_handle); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) smu_notify_smu_enable_pwe(&adev->smu); } @@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - struct smu_context *smu = &adev->smu; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; @@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - if (!smu->funcs) - return PP_SMU_RESULT_UNSUPPORTED; - - /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL; - * 1: fail - */ - if (smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges)) - return PP_SMU_RESULT_UNSUPPORTED; + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); return PP_SMU_RESULT_OK; } @@ -727,10 +717,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */ if (smu_set_azalia_d3_pme(smu)) return PP_SMU_RESULT_FAIL; @@ -743,10 +733,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) struct 
amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ if (smu_set_display_count(smu, count)) return PP_SMU_RESULT_FAIL; @@ -759,10 +749,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */ + /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ if (smu_set_deep_sleep_dcefclk(smu, mhz)) return PP_SMU_RESULT_FAIL; @@ -777,13 +767,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -799,13 +789,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -835,7 +825,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; switch (clock_id) { @@ -853,7 +843,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, } clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -869,13 +859,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc) + if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks)) + if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; @@ -894,13 +884,97 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, if (!smu->ppt_funcs->get_uclk_dpm_states) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->ppt_funcs->get_uclk_dpm_states(smu, + if (!smu_get_uclk_dpm_states(smu, clock_values_in_khz, num_states)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; } +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 +enum pp_smu_status pp_rn_get_dpm_clock_table( + struct pp_smu *pp, struct 
dpm_clocks *clock_table) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_dpm_clock_table) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_dpm_clock_table(smu, clock_table)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = + wm_with_clock_ranges.wm_dmif_clocks_ranges; + struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = + wm_with_clock_ranges.wm_mcif_clocks_ranges; + int32_t i; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; + wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { + if (ranges->reader_wm_sets[i].wm_inst > 3) + wm_dce_clocks[i].wm_set_id = WM_SET_A; + else + wm_dce_clocks[i].wm_set_id = + ranges->reader_wm_sets[i].wm_inst; + + wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].min_drain_clk_mhz; + + wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].max_drain_clk_mhz; + + wm_dce_clocks[i].wm_min_mem_clk_in_khz = + ranges->reader_wm_sets[i].min_fill_clk_mhz; + + wm_dce_clocks[i].wm_max_mem_clk_in_khz = + ranges->reader_wm_sets[i].max_fill_clk_mhz; + } + + for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { + if (ranges->writer_wm_sets[i].wm_inst > 3) + wm_soc_clocks[i].wm_set_id = WM_SET_A; + else + wm_soc_clocks[i].wm_set_id = + ranges->writer_wm_sets[i].wm_inst; + wm_soc_clocks[i].wm_min_socclk_clk_in_khz = + ranges->writer_wm_sets[i].min_fill_clk_mhz; + + wm_soc_clocks[i].wm_max_socclk_clk_in_khz = + ranges->writer_wm_sets[i].max_fill_clk_mhz; + + wm_soc_clocks[i].wm_min_mem_clk_in_khz = + ranges->writer_wm_sets[i].min_drain_clk_mhz; + + wm_soc_clocks[i].wm_max_mem_clk_in_khz = + ranges->writer_wm_sets[i].max_drain_clk_mhz; + } + + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); + + return PP_SMU_RESULT_OK; +} +#endif + void dm_pp_get_funcs( struct dc_context *ctx, struct pp_smu_funcs *funcs) @@ -945,6 +1019,15 @@ void dm_pp_get_funcs( funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; #endif + +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 + case DCN_VERSION_2_1: + funcs->ctx.ver = PP_SMU_VER_RN; + funcs->rn_funcs.pp_smu.dm = ctx; + funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; + funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; + break; +#endif default: DRM_ERROR("smu version is not supported !\n"); break; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 627982cb15d2..a160512a2f04 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -48,6 +48,10 @@ DC_LIBS += dce110 DC_LIBS += dce100 DC_LIBS += dce80 +ifdef CONFIG_DRM_AMD_DC_HDCP +DC_LIBS += hdcp +endif + AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) include $(AMD_DC) diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 221e0f56389f..823843cd2613 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( /* Sort voltage table from low to high*/ if (result == BP_RESULT_OK) { - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index dff65c0fe82f..7873abea4112 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j-1].max_supported_clk ) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 985633c08a26..26c6d735cdc7 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -24,15 +24,20 @@ # It calculates Bandwidth and Watermarks values for HW programming # -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +calcs_ccflags := -mhard-float -msse -calcs_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). 
+calcs_ccflags += -mpreferred-stack-boundary=4 +else calcs_ccflags += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index c43797bea413..8828dd9c3783 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt( return display_count; } +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (dc->hwss.exit_optimized_pwr_state) + dc->hwss.exit_optimized_pwr_state(dc, dc->current_state); + + if (edp_link) { + clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active; + dc_link_set_psr_allow_active(edp_link, false, false); + } + +} + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (edp_link) + dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false); + + if (dc->hwss.optimize_pwr_state) + dc->hwss.optimize_pwr_state(dc, dc->current_state); + +} struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index c5c8c4901eed..26db1c5d4e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) /* Calculate the current DFS clock, in kHz.*/ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -239,7 +239,7 @@ int dce_set_clock( /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 64); + clk_mgr_dce->base.dentist_vco_freq_khz / 64); /* Prepare to program display clock*/ pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; @@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce) int i; if (bp->integrated_info) - clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) { - clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) - clk_mgr_dce->dentist_vco_freq_khz = 3600000; + clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) { + clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) + clk_mgr_dce->base.dentist_vco_freq_khz = 3600000; } /*update the maximum display clock for each power state*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 7c746ef1e32e..a6c46e903ff9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum 
threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 62); + clk_mgr_dce->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; @@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr->dentist_vco_freq_khz / 62); + clk_mgr->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 47f529ce280a..3fab9296918a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, ASSERT(clk_mgr->pp_smu); + if (dc->work_arounds.skip_clock_update) + return; + pp_smu = &clk_mgr->pp_smu->rv_funcs; display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_ clk_mgr->base.dprefclk_khz = 600000; if (bp->integrated_info) - clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) { - clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) { + clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; } if (!debug->disable_dfs_bypass && bp->integrated_info) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 3e8ac303bd52..25d7b7c6681c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, { int i; + clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { int dpp_inst, dppclk_khz; - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; + /* Loop index will match dpp->inst if resource exists, + * and we want to avoid dependency on dpp object + */ + dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + clk_mgr->dccg->funcs->update_dpp_dto( - clk_mgr->dccg, dpp_inst, dppclk_khz, false); + clk_mgr->dccg, dpp_inst, dppclk_khz); } } -static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) { int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; - - uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); - - REG_UPDATE(DENTIST_DISPCLK_CNTL, - DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); - 
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); -} - -static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) -{ + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz; int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider); REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); +// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100); + REG_UPDATE(DENTIST_DISPCLK_CNTL, + DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); } -static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dispclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = &dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dispclk_khz = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - - update_display_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); -} - -static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dppclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = &dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dppclk_khz = khz; - clk_mgr->dccg->ref_dppclk = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - - update_global_dpp_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); -} void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, @@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc *dc = clk_mgr_base->ctx->dc; struct pp_smu_funcs_nv *pp_smu = NULL; int display_count; + bool update_dppclk = false; bool update_dispclk = false; bool enter_display_off = false; + bool dpp_clock_lowered = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; - int i; if (dc->work_arounds.skip_clock_update) return; @@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; if (pp_smu && pp_smu->set_pstate_handshake_support) pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); } - 
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; @@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000); } - if (dc->config.forced_clocks == false) { - // First update display clock - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) - request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz); - - // Updating DPP clock requires some more logic - if (!safe_to_lower) { - // For pre-programming, we need to make sure any DPP clock that will go up has to go up + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + dpp_clock_lowered = true; + clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz; - // First raise the global reference if needed - if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - // Then raise any dividers that need raising - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; + update_dppclk = true; + } - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + update_dispclk = true; + } - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true); - } + if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { + if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dentist(clk_mgr); } else { - // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs - - if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); - - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; - - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false); - } + // if clock is being raised, increase refclk before lowering DTO + if (update_dppclk || update_dispclk) + dcn20_update_clocks_update_dentist(clk_mgr); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } } + if (update_dispclk && dmcu && 
dmcu->funcs->is_dmcu_initialized(dmcu)) { /*update dmcu for wait_loop count*/ @@ -320,6 +264,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */ int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000; @@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; } - /* Both fclk and dppclk ref are run on the same scemi clock so we - * need to keep the same value for both + /* Both fclk and ref_dppclk run on the same scemi clock. + * So take the higher value since the DPP DTO is typically programmed + * such that max dppclk is 1:1 with ref_dppclk. */ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz) clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz; if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz) clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz; + // Both fclk and ref_dppclk run on the same scemi clock. + clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; + dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks); } @@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, } } +static bool dcn2_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->socclk_khz != b->socclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + else if (a->phyclk_khz != b->phyclk_khz) + return false; + else if (a->dramclk_khz != b->dramclk_khz) + return false; + else if (a->p_state_change_support != b->p_state_change_support) + return false; + + return true; +} + static struct clk_mgr_funcs dcn2_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dcn2_update_clocks, .init_clocks = dcn2_init_clocks, .enable_pme_wa = dcn2_enable_pme_wa, .get_clock = dcn2_get_clock, + .are_clock_states_equal = dcn2_are_clock_states_equal, }; @@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3850000; + clk_mgr->base.dentist_vco_freq_khz = 3850000; } else { /* DFS Slice 2 should be used for DPREFCLK */ @@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct( pll_req = dc_fixpt_mul_int(pll_req, 100000); /* integer part is now VCO frequency in kHz */ - clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3850000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3850000; /* Calculate the DPREFCLK in kHz.*/ clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; } //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. 
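The dcn2_are_clock_states_equal() comparator registered above checks each clock field explicitly instead of memcmp()ing the whole struct. Below is a minimal standalone sketch of that pattern; struct example_clocks is a trimmed, hypothetical stand-in for struct dc_clocks, not the real definition. Comparing field by field means struct padding bytes and bookkeeping members such as prev_p_state_change_support can never produce a spurious mismatch.

#include <linux/types.h>

/* Hypothetical, trimmed stand-in for struct dc_clocks. */
struct example_clocks {
	int dispclk_khz;
	int dppclk_khz;
	int dramclk_khz;
	bool p_state_change_support;
	bool prev_p_state_change_support; /* bookkeeping, deliberately not compared */
};

static bool example_clock_states_equal(const struct example_clocks *a,
				       const struct example_clocks *b)
{
	/* Field-by-field comparison, mirroring dcn2_are_clock_states_equal(). */
	return a->dispclk_khz == b->dispclk_khz &&
	       a->dppclk_khz == b->dppclk_khz &&
	       a->dramclk_khz == b->dramclk_khz &&
	       a->p_state_change_support == b->p_state_change_support;
}

The memcmp()-with-offsetof() fallback kept in dc.c for clk_mgrs that do not provide this hook works only while the ignorable members stay at the tail of struct dc_clocks; a fieldwise comparator carries no such layout constraint.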
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index ac31a9787305..c9fd824f3c23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
 		enum dc_clock_type clock_type,
 		struct dc_clock_config *clock_cfg);
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
 #endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 787f94d815f4..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -52,6 +52,45 @@
 #define REG(reg_name) \
 	(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
+int rn_get_active_display_cnt_wa(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	int i, display_count;
+	bool hdmi_present = false;
+
+	display_count = 0;
+	for (i = 0; i < context->stream_count; i++) {
+		const struct dc_stream_state *stream = context->streams[i];
+
+		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+			hdmi_present = true;
+	}
+
+	for (i = 0; i < dc->link_count; i++) {
+		const struct dc_link *link = dc->links[i];
+
+		/*
+		 * Only notify active stream or virtual stream.
+		 * Need to notify virtual stream to work around
+		 * headless case. HPD does not fire when system is in
+		 * S0i2.
+		 */
+		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+		if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
+				link->link_enc->funcs->is_dig_enabled(link->link_enc))
+			display_count++;
+	}
+
+	/* WA for hang on HDMI after display off then back on */
+	if (display_count == 0 && hdmi_present)
+		display_count = 1;
+
+	return display_count;
+}
+
 void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 			struct dc_state *context,
 			bool safe_to_lower)
@@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 	int display_count;
 	bool update_dppclk = false;
 	bool update_dispclk = false;
-	bool enter_display_off = false;
 	bool dpp_clock_lowered = false;
-	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
-	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
-	if (display_count == 0)
-		enter_display_off = true;
+	if (dc->work_arounds.skip_clock_update)
+		return;
-	if (enter_display_off == safe_to_lower) {
-		rn_vbios_smu_set_display_count(clk_mgr, display_count);
+	/*
+	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
+	 * also if safe to lower is false, we just go in the higher state
+	 */
+	if (safe_to_lower) {
+		/* check that we're not already in lower */
+		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+			display_count = rn_get_active_display_cnt_wa(dc, context);
+			/* if we can go lower, go lower */
+			if (display_count == 0) {
+				rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+				/* update power state */
+				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+			}
+		}
+	} else {
+		/* check that we're not already in D0 */
+		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+			rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
+			/* update power state */
+			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+		}
 	}
 
 	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
@@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 		// if clock is being raised, increase refclk before lowering DTO
 		if (update_dppclk || update_dispclk)
 			rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
-		if (update_dppclk)
+		// always update dtos unless clock is lowered and not safe to lower
+		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
 			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
 	}
 
@@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
 
 	rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
 
-	s->dprefclk_khz = sb.dprefclk;
+	s->dprefclk_khz = sb.dprefclk * 1000;
 }
 
 void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -329,12 +388,96 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 	rn_vbios_smu_enable_pme_wa(clk_mgr);
 }
 
+void rn_init_clocks(struct clk_mgr *clk_mgr)
+{
+	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+	// Assumption is that boot state always supports pstate
+	clk_mgr->clks.p_state_change_support = true;
+	clk_mgr->clks.prev_p_state_change_support = true;
+	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+}
+
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+{
+	int i, num_valid_sets;
+
+	num_valid_sets = 0;
+
+	for (i = 0; i < WM_SET_COUNT; i++) {
+		/* skip empty entries, the smu array has no holes*/
+		if (!bw_params->wm_table.entries[i].valid)
+			continue;
+
+		ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
+		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
+		/* We will not select WM based on dcfclk, so leave it as unconstrained */
+		ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+		ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+		/* fclk will be used to select WM */
+
+		if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
+			if (i == 0)
+				ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+			else {
+				/* add 1 to make it non-overlapping with next lvl */
+				ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+			}
+			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+
+		} else {
+			/* unconstrained for memory retraining */
+			ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+			/* Modify previous watermark range to cover up to max */
+			ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+		}
+		num_valid_sets++;
+	}
+
+	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+	ranges->num_reader_wm_sets = num_valid_sets;
+
+	/* modify the min and max to make sure we cover the whole range*/
+	ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+	ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+	ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz =
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* This is for writeback only, does not matter currently as no writeback support*/ + ranges->num_writer_wm_sets = 1; + ranges->writer_wm_sets[0].wm_inst = WM_A; + ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + +} + +static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug; + struct pp_smu_wm_range_sets ranges = {0}; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu; + + if (!debug->disable_pplib_wm_range) { + build_watermark_ranges(clk_mgr_base->bw_params, &ranges); + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) + pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + } + +} + static struct clk_mgr_funcs dcn21_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = rn_update_clocks, - .init_clocks = dcn2_init_clocks, + .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, - /* .dump_clk_registers = rn_dump_clk_registers */ + /* .dump_clk_registers = rn_dump_clk_registers, */ + .notify_wm_ranges = rn_notify_wm_ranges }; struct clk_bw_params rn_bw_params = { @@ -405,80 +548,50 @@ struct clk_bw_params rn_bw_params = { } }; -void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) +static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { - int i, num_valid_sets; - - num_valid_sets = 0; - - for (i = 0; i < WM_SET_COUNT; i++) { - /* skip empty entries, the smu array has no holes*/ - if (!bw_params->wm_table.entries[i].valid) - continue; - - ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; - ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; - /* We will not select WM based on dcfclk, so leave it as unconstrained */ - ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - /* fclk wil be used to select WM*/ - - if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) { - if (i == 0) - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0; - else { - /* add 1 to make it non-overlapping with next lvl */ - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1; - } - ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - - } else { - /* unconstrained for memory retraining */ - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + int i; - /* Modify previous watermark range to cover up to max */ - ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - } - num_valid_sets++; + for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) { + if (clock_table->DcfClocks[i].Vol == voltage) + return clock_table->DcfClocks[i].Freq; } - ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ - ranges->num_reader_wm_sets = num_valid_sets; - - /* modify the min and max to make sure we cover the whole range*/ - ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - /* This is for writeback only, does not matter currently as no writeback support*/ - ranges->num_writer_wm_sets = 1; - ranges->writer_wm_sets[0].wm_inst = WM_A; - ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - + ASSERT(0); + return 0; } -void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) { - int i; + int i, j = 0; + + j = -1; ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); - for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) { - if (clock_table->FClocks[i].Freq == 0) + /* Find lowest DPM, FCLK is filled in reverse order*/ + + for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { + if (clock_table->FClocks[i].Freq != 0) { + j = i; break; + } + } + + if (j == -1) { + /* clock table is all 0s, just use our own hardcode */ + ASSERT(0); + return; + } + + bw_params->clk_table.num_entries = j + 1; - bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq; - bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq; - bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq; - bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq; - bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol; + for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq; + bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol; + bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); } - bw_params->clk_table.num_entries = i; bw_params->vram_type = asic_id->vram_type; bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; @@ -486,7 +599,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; - if (clock_table->FClocks[i].Freq == 0) { + if (i >= bw_params->clk_table.num_entries) { bw_params->wm_table.entries[i].valid = false; continue; } @@ -534,57 +647,42 @@ void rn_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { 
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = 3600000; clk_mgr->base.dprefclk_khz = 600000; } else { struct clk_log_info log_info = {0}; /* TODO: Check we get what we expect during bringup */ - clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); - clk_mgr->base.dprefclk_khz = s.dprefclk; - - if (clk_mgr->base.dprefclk_khz != 600000) { - clk_mgr->base.dprefclk_khz = 600000; - ASSERT(1); //TODO: Renoir follow up. - } + /* Convert dprefclk units from MHz to KHz */ + /* Value already divided by 10, some resolution lost */ + clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dprefclk_khz == 0) + if (clk_mgr->base.dprefclk_khz == 0) { + ASSERT(clk_mgr->base.dprefclk_khz == 600000); clk_mgr->base.dprefclk_khz = 600000; + } } dce_clock_read_ss_info(clk_mgr); clk_mgr->base.bw_params = &rn_bw_params; - if (pp_smu) { + if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); + rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); } - /* - * Notify SMU which set of WM should be selected for different ranges of fclk - * On Renoir there is a maximumum of 4 DF pstates supported, could be less - * depending on DDR speed and fused maximum fclk. 
- */ - if (!debug->disable_pplib_wm_range) { - struct pp_smu_wm_range_sets ranges = {0}; - - build_watermark_ranges(clk_mgr->base.bw_params, &ranges); - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) - pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { + /* enable powerfeatures when displaycount goes to 0 */ + rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); } - - /* enable powerfeatures when displaycount goes to 0 */ - if (!debug->disable_48mhz_pwrdwn) - rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h index aadec06fde10..e4322fa5475b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h @@ -26,11 +26,13 @@ #ifndef __RN_CLK_MGR_H__ #define __RN_CLK_MGR_H__ +#include "clk_mgr.h" +#include "dm_pp_smu.h" + struct rn_clk_registers { uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */ }; - void rn_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 50984c1811bb..cb7c0e8b7e1b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,7 +33,7 @@ #include "mp/mp_12_0_0_sh_mask.h" #define REG(reg_name) \ - (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) #define FN(reg_name, field) \ FD(reg_name##__##field) @@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis int actual_dispclk_set_mhz = -1; struct dc *core_dc = clk_mgr->base.ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - uint32_t clk = requested_dispclk_khz / 1000; - - if (clk <= 100) - clk = 101; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDispclkFreq, - clk); + requested_dispclk_khz / 1000); if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { @@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque { int actual_dcfclk_set_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_dcfclk_set_mhz; actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int { int actual_min_ds_dcfclk_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_min_ds_dcfclk_mhz; actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param( @@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_ { int actual_dppclk_set_mhz = -1; - uint32_t clk = requested_dpp_khz / 1000; - - if (clk <= 100) - clk = 101; - actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, - clk); + requested_dpp_khz / 1000); return actual_dppclk_set_mhz * 1000; } -void 
rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count)
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
 {
+	int disp_count;
+
+	if (state == DCN_PWR_STATE_LOW_POWER)
+		disp_count = 0;
+	else
+		disp_count = 1;
+
 	rn_vbios_smu_send_msg_with_param(
-			clk_mgr,
-			VBIOSSMC_MSG_SetDisplayCount,
-			display_count);
+			clk_mgr,
+			VBIOSSMC_MSG_SetDisplayCount,
+			disp_count);
 }
 
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr)
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
 {
 	rn_vbios_smu_send_msg_with_param(
 			clk_mgr,
 			VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
-			0);
+			enable);
 }
 
 void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index da3a49487c6d..ccc01879c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
 int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
 void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
 int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count);
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state);
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
 void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
 #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5d1adeda4d90..32f31bf91915 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -194,7 +194,7 @@ static bool create_links(
 		}
 	}
 
-	if (!should_destory_link) {
+	if (dc->config.force_enum_edp || !should_destory_link) {
 		dc->links[dc->link_count] = link;
 		link->dc = dc;
 		++dc->link_count;
@@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
 	return false;
 }
 
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+		enum dc_dynamic_expansion option)
+{
+	/* OPP FMT dyn expansion updates*/
+	int i = 0;
+	struct pipe_ctx *pipe_ctx;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (dc->current_state->res_ctx.pipe_ctx[i].stream
+				== stream) {
+			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+			pipe_ctx->stream_res.opp->dyn_expansion = option;
+			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+					pipe_ctx->stream_res.opp,
+					COLOR_SPACE_YCBCR601,
+					stream->timing.display_color_depth,
+					stream->signal);
+		}
+	}
+}
+
 void dc_stream_set_dither_option(struct dc_stream_state *stream,
 		enum dc_dither_option option)
 {
@@ -580,6 +601,10 @@ static bool construct(struct dc *dc,
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
 	// Allocate memory for the vm_helper
 	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+	if (!dc->vm_helper) {
+		dm_error("%s: failed to create dc->vm_helper\n", __func__);
+		goto fail;
+	}
 #endif
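The construct() hunk just above adds the missing NULL check after the vm_helper kzalloc() and unwinds through the function's fail path rather than letting a failed allocation be dereferenced later. A minimal sketch of that allocate-and-unwind pattern follows; the names here (struct example_dc, helper_a, helper_b, example_construct) are illustrative only, not DC types.

#include <linux/slab.h>

struct helper_a { int dummy; }; /* hypothetical payloads */
struct helper_b { int dummy; };

struct example_dc {
	struct helper_a *a;
	struct helper_b *b;
};

static bool example_construct(struct example_dc *dc)
{
	dc->a = kzalloc(sizeof(*dc->a), GFP_KERNEL);
	if (!dc->a)
		goto fail;

	dc->b = kzalloc(sizeof(*dc->b), GFP_KERNEL);
	if (!dc->b)
		goto fail;

	return true;

fail:
	/* kfree(NULL) is a no-op, so one label safely covers every exit path. */
	kfree(dc->b);
	kfree(dc->a);
	dc->a = NULL;
	dc->b = NULL;
	return false;
}

Centralizing cleanup this way keeps each new allocation a two-line addition (allocate, check-and-goto), which is why the dc.c hunk can simply report the failure with dm_error() and reuse the function's existing fail label.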
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); @@ -761,8 +786,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) #if defined(CONFIG_DRM_AMD_DC_DCN2_0) disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); #endif - dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, dangling_context); +#endif } current_ctx = dc->current_state; @@ -785,9 +815,6 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (false == construct(dc, init_params)) goto construct_fail; - /*TODO: separate HW and SW initialization*/ - dc->hwss.init_hw(dc); - full_pipe_count = dc->res_pool->pipe_count; if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) full_pipe_count--; @@ -820,9 +847,24 @@ alloc_fail: return NULL; } +void dc_hardware_init(struct dc *dc) +{ + dc->hwss.init_hw(dc); +} + void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params) { +#ifdef CONFIG_DRM_AMD_DC_HDCP + dc->ctx->cp_psp = init_params->cp_psp; +#endif +} + +void dc_deinit_callbacks(struct dc *dc) +{ +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); +#endif } void dc_destroy(struct dc **dc) @@ -901,15 +943,11 @@ static void program_timing_sync( /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { - struct pipe_ctx *temp; - if (pipe_set[j]->plane_state) { if (j == 0) break; - temp = pipe_set[0]; - pipe_set[0] = pipe_set[j]; - pipe_set[j] = temp; + swap(pipe_set[0], pipe_set[j]); break; } } @@ -966,40 +1004,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, struct dc_crtc_timing *crtc_timing) { struct timing_generator *tg; + struct stream_encoder *se = NULL; + + struct dc_crtc_timing hw_crtc_timing = {0}; + struct dc_link *link = sink->link; - unsigned int enc_inst, tg_inst; + unsigned int i, enc_inst, tg_inst = 0; + + // Seamless port only support single DP and EDP so far + if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && + sink->sink_signal != SIGNAL_TYPE_EDP) + return false; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. 
- */ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (enc_inst >= dc->res_pool->pipe_count) + if (enc_inst == ENGINE_ID_UNKNOWN) return false; - if (enc_inst >= dc->res_pool->stream_enc_count) - return false; + for (i = 0; i < dc->res_pool->stream_enc_count; i++) { + if (dc->res_pool->stream_enc[i]->id == enc_inst) { + + se = dc->res_pool->stream_enc[i]; + + tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( + dc->res_pool->stream_enc[i]); + break; + } + } - tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg( - dc->res_pool->stream_enc[enc_inst]); + // tg_inst not found + if (i == dc->res_pool->stream_enc_count) + return false; if (tg_inst >= dc->res_pool->timing_generator_count) return false; tg = dc->res_pool->timing_generators[tg_inst]; - if (!tg->funcs->is_matching_timing) + if (!tg->funcs->get_hw_timing) + return false; + + if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) + return false; + + if (crtc_timing->h_total != hw_crtc_timing.h_total) + return false; + + if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) + return false; + + if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) + return false; + + if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) + return false; + + if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) + return false; + + if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) + return false; + + if (crtc_timing->v_total != hw_crtc_timing.v_total) + return false; + + if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) return false; - if (!tg->funcs->is_matching_timing(tg, crtc_timing)) + if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) + return false; + + if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) + return false; + + if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) + return false; + + if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) return false; if (dc_is_dp_signal(link->connector_signal)) { @@ -1012,6 +1097,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; + if (!se->funcs->dp_get_pixel_format) + return false; + + if (!se->funcs->dp_get_pixel_format( + se, + &hw_crtc_timing.pixel_encoding, + &hw_crtc_timing.display_color_depth)) + return false; + + if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) + return false; + + if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) + return false; } return true; @@ -1073,15 +1172,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* re-program planes for existing stream, in case we need to * free up plane resource for later use */ - for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mode_changed) - continue; + if (dc->hwss.apply_ctx_for_surface) + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->mode_changed) + continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); /* use new pipe config in new context */ - } + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* use new pipe config in new context */ + } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif /* Program 
hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1100,16 +1204,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; if (!context->streams[i]->mode_changed) continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* * enable stereo @@ -1136,15 +1245,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); - if (!dc->optimize_seamless_boot) - /* pplib is notified if disp_num changed */ - dc->hwss.optimize_bandwidth(dc, context); - for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; - memset(&context->commit_hints, 0, sizeof(context->commit_hints)); - dc_release_state(dc->current_state); dc->current_state = context; @@ -1492,20 +1595,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc, enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; - update_flags->raw = 0; // Reset all flags - if (u->flip_addr) update_flags->bits.addr_update = 1; - if (!is_surface_in_context(context, u->surface)) { - update_flags->bits.new_plane = 1; + if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; } - if (u->surface->force_full_update) { - update_flags->bits.full_update = 1; - return UPDATE_TYPE_FULL; - } + update_flags->raw = 0; // Reset all flags type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1563,40 +1661,43 @@ static enum surface_update_type check_update_surfaces_for_stream( enum surface_update_type overall_type = UPDATE_TYPE_FAST; if (stream_status == NULL || stream_status->plane_count != surface_count) - return UPDATE_TYPE_FULL; + overall_type = UPDATE_TYPE_FULL; /* some stream updates require passive update */ if (stream_update) { - if ((stream_update->src.height != 0) && - (stream_update->src.width != 0)) - return UPDATE_TYPE_FULL; + union stream_update_flags *su_flags = &stream_update->stream->update_flags; - if ((stream_update->dst.height != 0) && - (stream_update->dst.width != 0)) - return UPDATE_TYPE_FULL; + if ((stream_update->src.height != 0 && stream_update->src.width != 0) || + (stream_update->dst.height != 0 && stream_update->dst.width != 0)) + su_flags->bits.scaling = 1; if (stream_update->out_transfer_func) - return UPDATE_TYPE_FULL; + su_flags->bits.out_tf = 1; if (stream_update->abm_level) - return UPDATE_TYPE_FULL; + su_flags->bits.abm_level = 1; if (stream_update->dpms_off) - return UPDATE_TYPE_FULL; + su_flags->bits.dpms_off = 1; + + if (stream_update->gamut_remap) + su_flags->bits.gamut_remap = 1; #if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (stream_update->wb_update) - return UPDATE_TYPE_FULL; + su_flags->bits.wb_update = 1; #endif + if (su_flags->raw != 0) + overall_type = UPDATE_TYPE_FULL; + + if (stream_update->output_csc_transform || stream_update->output_color_space) + 
su_flags->bits.out_csc = 1; } for (i = 0 ; i < surface_count; i++) { enum surface_update_type type = det_surface_update(dc, &updates[i]); - if (type == UPDATE_TYPE_FULL) - return type; - elevate_update_type(&overall_type, type); } @@ -1618,16 +1719,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream( int i; enum surface_update_type type; + if (stream_update) + stream_update->stream->update_flags.raw = 0; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) + if (type == UPDATE_TYPE_FULL) { + if (stream_update) + stream_update->stream->update_flags.raw = 0xFFFFFFFF; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } - if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) - dc->optimized_required = true; + if (type == UPDATE_TYPE_FAST) { + // If there's an available clock comparator, we use that. + if (dc->clk_mgr->funcs->are_clock_states_equal) { + if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) + dc->optimized_required = true; + // Else we fallback to mem compare. + } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { + dc->optimized_required = true; + } + } return type; } @@ -1868,6 +1982,7 @@ static void commit_planes_do_stream_update(struct dc *dc, struct dc_state *context) { int j; + bool should_program_abm; // Stream updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -1948,14 +2063,21 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { - if (pipe_ctx->stream_res.tg->funcs->is_blanked) { - // if otg funcs defined check if blanked before programming - if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = true; + + // if otg funcs defined check if blanked before programming + if (pipe_ctx->stream_res.tg->funcs->is_blanked) + if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = false; + + if (should_program_abm) { + if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { + pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm); + } else { pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, stream->abm_level); - } else - pipe_ctx->stream_res.abm->funcs->set_abm_level( - pipe_ctx->stream_res.abm, stream->abm_level); + } + } } } } @@ -2000,7 +2122,13 @@ static void commit_planes_for_stream(struct dc *dc, * In case of turning off screen, no need to program front end a second time. * just return after program blank. 
*/ - dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif + return; } @@ -2060,10 +2188,15 @@ static void commit_planes_for_stream(struct dc *dc, stream_status = stream_get_status(context, pipe_ctx->stream); - dc->hwss.apply_ctx_for_surface( + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); } } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ca20b150afcc..12ba6fdf89b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -79,7 +79,6 @@ static void destruct(struct dc_link *link) int i; if (link->hpd_gpio != NULL) { - dal_gpio_close(link->hpd_gpio); dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } @@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin } -static void read_edp_current_link_settings_on_detect(struct dc_link *link) +static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = { {0} }; uint8_t link_bw_set; @@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) &link_bw_set, sizeof(link_bw_set)); if (link_bw_set == 0) { - /* If standard link rates are not being used, - * Read DPCD 00115h to find the link rate set used - */ - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; - link->cur_link_settings.link_rate_set = link_rate_set; - link->cur_link_settings.use_link_rate_set = true; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the edp link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // edp_supported_link_rates_count = 0 for DP + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + // Link Rate not found. Seamless boot may not work. + ASSERT(false); } } else { link->cur_link_settings.link_rate = link_bw_set; @@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); } -bool wait_for_alt_mode(struct dc_link *link) +static bool wait_for_alt_mode(struct dc_link *link) { /** @@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link) * This does not create remote sinks but will trigger DM * to start MST detection if a branch is detected. 
*/ -bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +static bool dc_link_detect_helper(struct dc_link *link, + enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; struct display_sink_capability sink_caps = { 0 }; @@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) struct dpcd_caps prev_dpcd_caps; bool same_dpcd = true; enum dc_connection_type new_connection_type = dc_connection_none; + bool perform_dp_seamless_boot = false; + DC_LOGGER_INIT(link->ctx->logger); if (dc_is_virtual_signal(link->connector_signal)) @@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { - read_edp_current_link_settings_on_detect(link); + read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); - sink_caps.transaction_type = - DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; } case SIGNAL_TYPE_DISPLAY_PORT: { + /* wa HPD high coming too early*/ if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { @@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * empty which leads to allocate_mst_payload() has "0" * pbn_per_slot value leading to exception on dc_fixpt_div() */ - link->verified_link_cap = link->reported_link_cap; + dp_verify_mst_link_cap(link); + if (prev_sink != NULL) dc_sink_release(prev_sink); return false; } + // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. + if (reason == DETECT_REASON_BOOT && + dc_ctx->dc->config.power_down_display_on_boot == false && + link->link_status.link_active == true) + perform_dp_seamless_boot = true; + + if (perform_dp_seamless_boot) { + read_current_link_settings_on_detect(link); + link->verified_link_cap = link->reported_link_cap; + } + break; } @@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * two link trainings */ - /* deal with non-mst cases */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); + // verify link cap for SST non-seamless boot + if (!perform_dp_seamless_boot) + dp_verify_link_cap_with_retries(link, + &link->reported_link_cap, + LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { @@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) dc_sink_release(prev_sink); return true; + +} + +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + const struct dc *dc = link->dc; + bool ret; + + /* get out of low power state */ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + + ret = dc_link_detect_helper(link, reason); + + /* Go back to power optimized state */ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + + return ret; } bool dc_link_get_hpd_state(struct dc_link *dc_link) @@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (!apply_seamless_boot_optimization) + if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); dp_enable_link_phy( @@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_set_fec_ready(link, false); } #endif - } else - 
link->link_enc->funcs->disable_output(link->link_enc, signal); + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { /* MST disable link only when no stream use the link */ @@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing( break; } - if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || + if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; @@ -2381,17 +2421,206 @@ bool dc_link_set_abm_disable(const struct dc_link *link) return true; } -bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) +bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) { struct dc *core_dc = link->ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled) - dmcu->funcs->set_psr_enable(dmcu, enable, wait); + + + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); + + link->psr_allow_active = allow_active; return true; } +bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) +{ + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + if (dmcu != NULL && link->psr_feature_enabled) + dmcu->funcs->get_psr_state(dmcu, psr_state); + + return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(enum transmitter transmitter_value) +{ + switch (transmitter_value) { + case TRANSMITTER_UNIPHY_A: + return PHYLD_0; + case TRANSMITTER_UNIPHY_B: + return PHYLD_1; + case TRANSMITTER_UNIPHY_C: + return PHYLD_2; + case TRANSMITTER_UNIPHY_D: + return PHYLD_3; + case TRANSMITTER_UNIPHY_E: + return PHYLD_4; + case TRANSMITTER_UNIPHY_F: + return PHYLD_5; + case TRANSMITTER_NUTMEG_CRT: + return PHYLD_6; + case TRANSMITTER_TRAVIS_CRT: + return PHYLD_7; + case TRANSMITTER_TRAVIS_LCD: + return PHYLD_8; + case TRANSMITTER_UNIPHY_G: + return PHYLD_9; + case TRANSMITTER_COUNT: + return PHYLD_COUNT; + case TRANSMITTER_UNKNOWN: + return PHYLD_UNKNOWN; + default: + WARN_ONCE(1, "Unknown transmitter value %d\n", + transmitter_value); + return PHYLD_UNKNOWN; + } +} + +bool dc_link_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context) +{ + struct dc *core_dc; + struct dmcu *dmcu; + int i; + /* updateSinkPsrDpcdConfig*/ + union dpcd_psr_configuration psr_configuration; + + psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + core_dc = link->ctx->dc; + dmcu = core_dc->res_pool->dmcu; + + if (!dmcu) + return false; + + + memset(&psr_configuration, 0, sizeof(psr_configuration)); + + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = + psr_config->psr_frame_capture_indication_req; + + /* Check for PSR v2*/ + if (psr_config->psr_version == 0x2) { + /* For PSR v2 selective update. + * Indicates whether sink should start capturing + * immediately following active scan line, + * or starting with the 2nd active scan line. + */ + psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; + /*For PSR v2, determines whether Sink should generate + * IRQ_HPD when CRC mismatch is detected. 
+ */ + psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 368, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; + psr_context->transmitterId = link->link_enc->transmitter; + psr_context->engineId = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (core_dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + psr_context->controllerId = + core_dc->current_state->res_ctx. + pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/ + psr_context->phyType = PHY_TYPE_UNIPHY; + /*PhyId is associated with the transmitter id*/ + psr_context->smuPhyId = + transmitter_to_phy_id(link->link_enc->transmitter); + + psr_context->crtcTimingVerticalTotal = stream->timing.v_total; + psr_context->vsyncRateHz = div64_u64(div64_u64((stream-> + timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + psr_context->psrSupportedDisplayConfig = true; + psr_context->psrExitLinkTrainingRequired = + psr_config->psr_exit_link_training_required; + psr_context->sdpTransmitLineNumDeadline = + psr_config->psr_sdp_transmit_line_num_deadline; + psr_context->psrFrameCaptureIndicationReq = + psr_config->psr_frame_capture_indication_req; + + psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + + psr_context->numberOfControllers = + link->dc->res_pool->timing_generator_count; + + psr_context->rfb_update_auto_en = true; + + /* 2 frames before enter PSR. */ + psr_context->timehyst_frames = 2; + /* half a frame + * (units in 100 lines, i.e. a value of 1 represents 100 lines) + */ + psr_context->hyst_lines = stream->timing.v_total / 2 / 100; + psr_context->aux_repeats = 10; + + psr_context->psr_level.u32all = 0; + +#if defined(CONFIG_DRM_AMD_DC_DCN1_0) + /*skip power down the single pipe since it blocks the cstate*/ + if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +#endif + + /* SMU will perform additional powerdown sequence. + * For unsupported ASICs, set psr_level flag to skip PSR + * static screen notification to SMU. 
+ * (Always set for DAL2, did not check ASIC) + */ + psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; + + /* Complete PSR entry before aborting to prevent intermittent + * freezes on certain eDPs + */ + psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + + /* Controls additional delay after remote frame capture before + * continuing power down, default = 0 + */ + psr_context->frame_delay = 0; + + link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + + /* psr_enabled == 0 indicates setup_psr did not succeed, but this + * should not happen since firmware should be running at this point + */ + if (link->psr_feature_enabled == 0) + ASSERT(0); + + return true; + +} + const struct dc_link_status *dc_link_get_status(const struct dc_link *link) { return &link->link_status; @@ -2510,7 +2739,7 @@ static void update_mst_stream_alloc_table( /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm */ -static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; @@ -2521,6 +2750,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; uint8_t i; + enum act_return_status ret; DC_LOGGER_INIT(link->ctx->logger); /* enable_link_dp_mst already check link->enabled_stream_count @@ -2568,14 +2798,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) &link->mst_stream_alloc_table); /* send down message */ - dm_helpers_dp_mst_poll_for_allocation_change_trigger( + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); + if (ret != ACT_LINK_LOST) { + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -2667,6 +2899,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) return DC_OK; } +#if defined(CONFIG_DRM_AMD_DC_HDCP) +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + if (cp_psp && cp_psp->funcs.update_stream_config) { + struct cp_psp_stream_config config; + + memset(&config, 0, sizeof(config)); + + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id; + config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; + config.dpms_off = dpms_off; + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); + } +} +#endif void core_link_enable_stream( struct dc_state *state, @@ -2677,6 +2927,10 @@ void core_link_enable_stream( enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { stream->link->link_enc->funcs->setup( stream->link->link_enc, @@ -2727,6 +2981,9 @@ void core_link_enable_stream( /* Do not touch link on seamless boot optimization. 
*/ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2734,6 +2991,9 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2786,13 +3046,16 @@ void core_link_enable_stream( #endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); + dc_link_allocate_mst_payload(pipe_ctx); core_dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) @@ -2810,6 +3073,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, true); +#endif + core_dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 505967b48e14..7f904d55c1bc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( enum display_dongle_type *dongle = &sink_cap->dongle_type; uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; bool is_type2_dongle = false; + int retry_count = 2; struct dp_hdmi_dongle_signature_data *dongle_signature; /* Assume we have no valid DP passive dongle connected */ @@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor( DP_HDMI_DONGLE_ADDRESS, type2_dongle_buf, sizeof(type2_dongle_buf))) { - *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; - sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + /* Passive HDMI dongles can sometimes fail here without retrying*/ + while (retry_count > 0) { + if (i2c_read(ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) + break; + retry_count--; + } + if (retry_count == 0) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; - CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), - "DP-DVI passive dongle %dMhz: ", - DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); - return; + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + return; + } } /* Check if Type 2 dongle.*/ @@ -496,7 +508,7 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size) { - bool ret; + bool ret = false; uint32_t payload_size = dal_ddc_service_is_in_aux_transaction_mode(ddc) ? 
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; @@ -515,34 +527,32 @@ bool dal_ddc_service_query_ddc_data( /*TODO: len of payload data for i2c and aux is uint8!!!!,
 * but we want to read 256 over i2c!!!!*/
 if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
- struct aux_payload write_payload = {
- .i2c_over_aux = true,
- .write = true,
- .mot = true,
- .address = address,
- .length = write_size,
- .data = write_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- struct aux_payload read_payload = {
- .i2c_over_aux = true,
- .write = false,
- .mot = false,
- .address = address,
- .length = read_size,
- .data = read_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+ struct aux_payload payload;
+ bool read_available = true;
+
+ payload.i2c_over_aux = true;
+ payload.address = address;
+ payload.reply = NULL;
+ payload.defer_delay = get_defer_delay(ddc);
+
+ if (write_size != 0) {
+ payload.write = true;
+ payload.mot = false;
+ payload.length = write_size;
+ payload.data = write_buf;
+
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ read_available = ret;
+ }
- if (!ret)
- return false;
+ if (read_size != 0 && read_available) {
+ payload.write = false;
+ payload.mot = false;
+ payload.length = read_size;
+ payload.data = read_buf;
- ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ }
 } else {
 struct i2c_payloads *payloads = dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); @@ -573,6 +583,41 @@ bool dal_ddc_service_query_ddc_data( return ret;
 }
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
+{
+ uint8_t retrieved = 0;
+ bool ret = 0;
+
+ if (!ddc)
+ return false;
+
+ if (!payload)
+ return false;
+
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ payload->length ? true : false;
+
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+ current_payload.mot = !is_end_of_payload;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
+
+ ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+ retrieved += current_payload.length;
+ } while (retrieved < payload->length && ret == true);
+
+ return ret;
+}
+ /* dc_link_aux_transfer_raw() - Attempt to transfer * the given aux payload. This function does not perform * retries or handle error states. 
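dal_ddc_submit_aux_command, added above, splits a payload into DEFAULT_AUX_MAX_DATA_SIZE-sized I2C-over-AUX transactions and keeps MOT (middle-of-transaction) set on every chunk except the last. Assuming the DP-mandated 16-byte AUX data limit, a 40-byte write goes out as 16 + 16 + 8 bytes; a hypothetical call site (edid_buf and the 0x50 address are illustrative only):

struct aux_payload payload = {
	.i2c_over_aux = true,
	.write = true,
	.address = 0x50,	/* illustrative EDID slave address */
	.length = 40,
	.data = edid_buf,	/* caller-owned 40-byte buffer */
	.reply = NULL,
	.defer_delay = 0,
};

/* Chunks: bytes [0..15] mot=1, [16..31] mot=1, [32..39] mot=0 (ends
 * the transaction); the loop stops early if any chunk fails. */
ret = dal_ddc_submit_aux_command(ddc, &payload);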
The reply is returned @@ -601,6 +646,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, return dce_aux_transfer_with_retries(ddc, payload); } + +enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout) +{ + enum dc_status status = DC_OK; + struct ddc *ddc_pin = ddc->ddc_pin; + + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL) + return DC_ERROR_UNEXPECTED; + if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout)) + status = DC_ERROR_UNEXPECTED; + return status; +} + /*test only function*/ void dal_ddc_service_set_ddc_pin( struct ddc_service *ddc_service, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f5742719b5d9..0f59b68aa4c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; + if (link->link_enc->funcs->get_max_link_cap) + link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) max_link_cap.lane_count = @@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries( for (i = 0; i < attempts; i++) { int fail_count = 0; - enum dc_connection_type type; + enum dc_connection_type type = dc_connection_none; memset(&link->verified_link_cap, 0, sizeof(struct dc_link_settings)); - if (!dc_link_detect_sink(link, &type)) { + if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { + link->verified_link_cap.lane_count = LANE_COUNT_ONE; + link->verified_link_cap.link_rate = LINK_RATE_LOW; + link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; break; } else if (dp_verify_link_cap(link, &link->reported_link_cap, @@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries( return success; } +bool dp_verify_mst_link_cap( + struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + + max_link_cap = get_max_link_cap(link); + link->verified_link_cap = get_common_supported_link_settings( + link->reported_link_cap, + max_link_cap); + + return true; +} + static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b) @@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link) return false; } -static bool handle_hpd_irq_psr_sink(const struct dc_link *link) +static bool handle_hpd_irq_psr_sink(struct dc_link *link) { union dpcd_psr_configuration psr_configuration; - if (!link->psr_enabled) + if (!link->psr_feature_enabled) return false; dm_helpers_dp_read_dpcd( @@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - dc_link_set_psr_enable(link, false, true); - dc_link_set_psr_enable(link, true, true); + dc_link_set_psr_allow_active(link, false, true); + dc_link_set_psr_allow_active(link, true, true); return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd enum dc_status result; bool status = false; + struct pipe_ctx *pipe_ctx; + int i; if (out_link_loss) 
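dp_verify_link_cap_with_retries above no longer leaves verified_link_cap zeroed when the sink cannot be detected (or comes back as dc_connection_none); it parks the link at the DP minimum so later mode validation still has a sane floor:

/* Safe minimum when the sink cannot be queried: one lane at RBR,
 * spread disabled. */
link->verified_link_cap.lane_count = LANE_COUNT_ONE;
link->verified_link_cap.link_rate = LINK_RATE_LOW;
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;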
*out_link_loss = false; @@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd &link->cur_link_settings, true, LINK_TRAINING_ATTEMPTS); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_link_allocate_mst_payload(pipe_ctx); + } + } + status = false; if (out_link_loss) *out_link_loss = true; @@ -2545,6 +2575,7 @@ static void get_active_converter_info( uint8_t data, struct dc_link *link) { union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); /* decode converter info*/ if (!ds_port.fields.PORT_PRESENT) { @@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, * keep receiver powered all the time.*/ case DP_BRANCH_DEVICE_ID_0010FA: case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: link->wa_flags.dp_keep_receiver_powered = true; break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 79438c4f1e20..a519dbc5ecb6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, if (pipes[i].stream != NULL && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL) { + pipes[i].stream_res.stream_enc != NULL && + pipes[i].stream->link == link) { udelay(100); pipes[i].stream_res.stream_enc->funcs->dp_blank( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 8f70295179ff..37698305a2dc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable( if (stream1->view_format != stream2->view_format) return false; + if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param) + return false; + return true; } static bool is_dp_and_hdmi_sharable( @@ -948,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } -static bool are_rect_integer_multiples(struct rect src, struct rect dest) +static bool are_rects_integer_multiples(struct rect src, struct rect dest) { if (dest.width >= src.width && dest.width % src.width == 0 && dest.height >= src.height && dest.height % src.height == 0) @@ -956,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, struct rect dest) return false; } + +static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) +{ + if (!pipe_ctx->plane_state->scaling_quality.integer_scaling) + return; + + //for Centered Mode + if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width && + pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) { + // calculate maximum # of replication of src onto addressable + unsigned int integer_multiple = min( + pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, + pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); + + //scale dst + pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; + 
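The dc_link_handle_hpd_rx_irq hunk above re-establishes MST payloads after retraining by scanning every pipe; stripped of the '+' prefixes, the pattern is:

for (i = 0; i < MAX_PIPES; i++) {
	pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];

	/* Only active (dpms on) MST streams riding the recovered link */
	if (pipe_ctx && pipe_ctx->stream &&
	    pipe_ctx->stream->link == link &&
	    pipe_ctx->stream->dpms_off == false &&
	    pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		dc_link_allocate_mst_payload(pipe_ctx);
}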
pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; + + //center dst onto addressable + pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; + pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; + } + + //disable taps if src & dst are integer ratio + if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) { + pipe_ctx->plane_state->scaling_quality.v_taps = 1; + pipe_ctx->plane_state->scaling_quality.h_taps = 1; + pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; + pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; + } +} + bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -969,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); + calculate_integer_scaling(pipe_ctx); + calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); @@ -999,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); - if (res && - plane_state->scaling_quality.integer_scaling && - are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport, - pipe_ctx->plane_res.scl_data.recout)) { - pipe_ctx->plane_res.scl_data.taps.v_taps = 1; - pipe_ctx->plane_res.scl_data.taps.h_taps = 1; - } if (!res) { /* Try 24 bpp linebuffer */ @@ -1540,6 +1570,9 @@ bool dc_is_stream_unchanged( if (!are_stream_backends_same(old_stream, stream)) return false; + if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) + return false; + return true; } @@ -1629,7 +1662,8 @@ static int acquire_first_free_pipe( static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, - enum engine_id id) + enum engine_id id, + enum dce_version dc_version) { int i, available_audio_count; @@ -1848,28 +1882,28 @@ static int acquire_resource_from_hw_enabled_state( struct dc_stream_state *stream) { struct dc_link *link = stream->link; - unsigned int inst, tg_inst; + unsigned int i, inst, tg_inst = 0; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return -1; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. 
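A worked example for calculate_integer_scaling above, assuming a 1920x1080 source in centered mode on a 3840x2160 timing:

/* integer_multiple = min(3840 / 1920, 2160 / 1080) = min(2, 2) = 2
 * dst becomes 3840x2160; dst.x = (3840 - 3840) / 2 = 0, dst.y = 0.
 * dst is now an exact multiple of src, so are_rects_integer_multiples()
 * holds and every tap count collapses to 1: each source pixel is
 * replicated 2x2 with no polyphase filtering. A 720x480 source on the
 * same timing gets min(3840/720, 2160/480) = min(5, 4) = 4, i.e. a
 * 2880x1920 dst centered at (480, 120); since that is also an exact
 * 4x multiple, the taps are disabled there too. */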
- */ inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (inst >= pool->pipe_count) - return -1; + if (inst == ENGINE_ID_UNKNOWN) + return false; - if (inst >= pool->stream_enc_count) - return -1; + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i]->id == inst) { + tg_inst = pool->stream_enc[i]->funcs->dig_source_otg( + pool->stream_enc[i]); + break; + } + } - tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]); + // tg_inst not found + if (i == pool->stream_enc_count) + return false; if (tg_inst >= pool->timing_generator_count) return false; @@ -1965,7 +1999,7 @@ enum dc_status resource_map_pool_resources( dc_is_audio_capable_signal(pipe_ctx->stream->signal) && stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( - &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); /* * Audio assigned in order first come first get. diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bf1d7bb90e0f..bb09243758fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc, if (dwb->funcs->is_enabled(dwb)) { /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, stream_status, wb_info); + dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); } else { /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, stream_status, wb_info); + dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a82352a87808..0416a17b0897 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.48" +#define DC_VER "3.2.56" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -111,19 +111,20 @@ struct dc_caps { bool force_dp_tps4_for_cp2520; bool disable_dp_clk_share; bool psp_setup_panel_mode; + bool extended_aux_timeout_support; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hw_3d_lut; #endif struct dc_plane_cap planes[MAX_PLANES]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa { +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool no_connect_phy_config; bool dedcn20_305_wa; +#endif bool skip_clock_update; }; -#endif struct dc_dcc_surface_param { struct dc_size surface_size; @@ -219,7 +220,9 @@ struct dc_config { bool allow_seamless_boot_optimization; bool power_down_display_on_boot; bool edp_not_connected; + bool force_enum_edp; bool forced_clocks; + bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; }; @@ -227,6 +230,7 @@ enum visual_confirm { VISUAL_CONFIRM_DISABLE = 0, VISUAL_CONFIRM_SURFACE = 1, VISUAL_CONFIRM_HDR = 2, + VISUAL_CONFIRM_MPCTREE = 4, }; enum dcc_option { @@ -245,6 +249,19 @@ enum wm_report_mode { WM_REPORT_DEFAULT = 0, WM_REPORT_OVERRIDE = 1, }; +enum dtm_pstate{ + dtm_level_p0 = 0,/*highest voltage*/ + dtm_level_p1, + dtm_level_p2, + dtm_level_p3, + dtm_level_p4,/*when active_display_count = 0*/ +}; + +enum dcn_pwr_state { + DCN_PWR_STATE_UNKNOWN = -1, + 
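acquire_resource_from_hw_enabled_state above drops the assumption that DIG front end N drives pipe N: it searches the pool for the stream encoder whose id matches the front end and asks that encoder which OTG it sources. The lookup, restated without the diff noise:

inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (inst == ENGINE_ID_UNKNOWN)
	return false;

for (i = 0; i < pool->stream_enc_count; i++) {
	if (pool->stream_enc[i]->id == inst) {
		/* ask the encoder rather than assume a 1:1 FE->OTG map */
		tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
				pool->stream_enc[i]);
		break;
	}
}

if (i == pool->stream_enc_count)
	return false;	/* no stream encoder claims this front end */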
DCN_PWR_STATE_MISSION_MODE = 0, + DCN_PWR_STATE_LOW_POWER = 3, +}; /* * For any clocks that may differ per pipe @@ -252,11 +269,7 @@ enum wm_report_mode { */ struct dc_clocks { int dispclk_khz; - int max_supported_dppclk_khz; - int max_supported_dispclk_khz; int dppclk_khz; - int bw_dppclk_khz; /*a copy of dppclk_khz*/ - int bw_dispclk_khz; int dcfclk_khz; int socclk_khz; int dcfclk_deep_sleep_khz; @@ -264,12 +277,17 @@ struct dc_clocks { int phyclk_khz; int dramclk_khz; bool p_state_change_support; - + enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of * optimization required */ bool prev_p_state_change_support; + enum dtm_pstate dtm_level; + int max_supported_dppclk_khz; + int max_supported_dispclk_khz; + int bw_dppclk_khz; /*a copy of dppclk_khz*/ + int bw_dispclk_khz; }; struct dc_bw_validation_profile { @@ -347,6 +365,7 @@ struct dc_debug_options { bool disable_hubp_power_gate; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_dsc_power_gate; + int dsc_min_slice_height_override; #endif bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; @@ -462,9 +481,7 @@ struct dc { struct dc_config config; struct dc_debug_options debug; struct dc_bounding_box_overrides bb_overrides; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa work_arounds; -#endif struct dc_context *ctx; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config vm_pa_config; @@ -553,10 +570,16 @@ struct dc_init_data { }; struct dc_callback_init { +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#else uint8_t reserved; +#endif }; struct dc *dc_create(const struct dc_init_data *init_params); +void dc_hardware_init(struct dc *dc); + int dc_get_vmid_use_vector(struct dc *dc); #ifdef CONFIG_DRM_AMD_DC_DCN2_0 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); @@ -565,6 +588,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c #endif void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params); +void dc_deinit_callbacks(struct dc *dc); void dc_destroy(struct dc **dc); /******************************************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 4ef97f65e55d..4f8f576d5fcf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -49,7 +49,8 @@ enum aux_channel_operation_result { AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN, AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY, AUX_CHANNEL_OPERATION_FAILED_TIMEOUT, - AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON + AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON, + AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 6e42209f0e20..0ed2962add5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -30,6 +30,7 @@ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 #define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 +#include "dc_types.h" struct dc_dsc_bw_range { uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */ @@ -39,13 +40,21 @@ struct dc_dsc_bw_range { uint32_t stream_kbps; /* Uncompressed stream bandwidth */ }; +struct display_stream_compressor { + const struct dsc_funcs *funcs; +#ifndef AMD_EDID_UTILITY + struct dc_context *ctx; + int inst; +#endif +}; bool 
dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_kbps, const uint32_t max_kbps, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 0b8700a8a94a..e0856bb8511f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -26,6 +26,8 @@ #ifndef DC_HW_TYPES_H #define DC_HW_TYPES_H +#ifndef AMD_EDID_UTILITY + #include "os_types.h" #include "fixed31_32.h" #include "signal_types.h" @@ -124,20 +126,6 @@ struct plane_size { int chroma_pitch; struct rect surface_size; struct rect chroma_size; - - union { - struct { - struct rect surface_size; - int surface_pitch; - } grph; - - struct { - struct rect luma_size; - int luma_pitch; - struct rect chroma_size; - int chroma_pitch; - } video; - }; }; struct dc_plane_dcc_param { @@ -148,21 +136,6 @@ struct dc_plane_dcc_param { int meta_pitch_c; bool independent_64b_blks_c; - - union { - struct { - int meta_pitch; - bool independent_64b_blks; - } grph; - - struct { - int meta_pitch_l; - bool independent_64b_blks_l; - - int meta_pitch_c; - bool independent_64b_blks_c; - } video; - }; }; /*Displayable pixel format in fb*/ @@ -605,6 +578,11 @@ enum dc_quantization_range { QUANTIZATION_RANGE_LIMITED }; +enum dc_dynamic_expansion { + DYN_EXPANSION_AUTO, + DYN_EXPANSION_DISABLE +}; + /* XFM */ /* used in struct dc_plane_state */ @@ -616,6 +594,8 @@ struct scaling_taps { bool integer_scaling; }; +#endif /* AMD_EDID_UTILITY */ + enum dc_timing_standard { DC_TIMING_STANDARD_UNDEFINED, DC_TIMING_STANDARD_DMT, @@ -737,30 +717,6 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; -enum trigger_delay { - TRIGGER_DELAY_NEXT_PIXEL = 0, - TRIGGER_DELAY_NEXT_LINE, -}; - -enum crtc_event { - CRTC_EVENT_VSYNC_RISING = 0, - CRTC_EVENT_VSYNC_FALLING -}; - -struct crtc_trigger_info { - bool enabled; - struct dc_stream_state *event_source; - enum crtc_event event; - enum trigger_delay delay; -}; - -struct dc_crtc_timing_adjust { - uint32_t v_total_min; - uint32_t v_total_max; - uint32_t v_total_mid; - uint32_t v_total_mid_frame_num; -}; - #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ @@ -804,6 +760,33 @@ struct dc_crtc_timing { #endif }; +#ifndef AMD_EDID_UTILITY + +enum trigger_delay { + TRIGGER_DELAY_NEXT_PIXEL = 0, + TRIGGER_DELAY_NEXT_LINE, +}; + +enum crtc_event { + CRTC_EVENT_VSYNC_RISING = 0, + CRTC_EVENT_VSYNC_FALLING +}; + +struct crtc_trigger_info { + bool enabled; + struct dc_stream_state *event_source; + enum crtc_event event; + enum trigger_delay delay; +}; + +struct dc_crtc_timing_adjust { + uint32_t v_total_min; + uint32_t v_total_max; + uint32_t v_total_mid; + uint32_t v_total_mid_frame_num; +}; + + /* Passed on init */ enum vram_type { VIDEO_MEMORY_TYPE_GDDR5 = 2, @@ -874,5 +857,7 @@ struct tg_color { 
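The dc_dsc.h rework above removes the dc pointer from the DSC helpers: callers pass the display_stream_compressor instance and the new dsc_min_slice_height_override debug knob explicitly. A hypothetical call site (the locals dsc_sink_caps, target_bandwidth_kbps, and dsc_cfg are illustrative):

struct dc_dsc_config dsc_cfg;

if (!dc_dsc_compute_config(pipe_ctx->stream_res.dsc,
			   &dsc_sink_caps,
			   dc->debug.dsc_min_slice_height_override,
			   target_bandwidth_kbps,
			   &stream->timing,
			   &dsc_cfg))
	return false;	/* no DSC configuration fits this timing */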
uint16_t color_b_cb; }; +#endif /* AMD_EDID_UTILITY */ + #endif /* DC_HW_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 9ea75db3484e..f24fd19ed93d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -126,7 +126,8 @@ struct dc_link { unsigned short chip_caps; unsigned int dpcd_sink_count; enum edp_revision edp_revision; - bool psr_enabled; + bool psr_feature_enabled; + bool psr_allow_active; /* MST record stream using this link */ struct link_flags { @@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ return dc->links[link_index]; } +static inline struct dc_link *get_edp_link(const struct dc *dc) +{ + int i; + + // report any eDP links, even unconnected DDI's + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) + return dc->links[i]; + } + return NULL; +} + /* Set backlight level of an embedded panel (eDP, LVDS). * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer * and 16 bit fractional, where 1.0 is max backlight value. @@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link); bool dc_link_set_abm_disable(const struct dc_link *dc_link); -bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); +bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait); bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state); @@ -192,6 +205,7 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
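With get_edp_link now an inline in dc_link.h and the old psr_enabled flag split into psr_feature_enabled (capability) and psr_allow_active (current state), a suspend-style call site could look like this hypothetical fragment:

struct dc_link *edp_link = get_edp_link(dc);

/* feature was negotiated at link setup; allow_active toggles it */
if (edp_link && edp_link->psr_feature_enabled)
	dc_link_set_psr_allow_active(edp_link, false, true);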
* Return: diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 0fa1c26bc20d..fdb6adc37857 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -113,6 +113,21 @@ struct periodic_interrupt_config { int lines_offset; }; +union stream_update_flags { + struct { + uint32_t scaling:1; + uint32_t out_tf:1; + uint32_t out_csc:1; + uint32_t abm_level:1; + uint32_t dpms_off:1; + uint32_t gamut_remap:1; +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + uint32_t wb_update:1; +#endif + } bits; + + uint32_t raw; +}; struct dc_stream_state { // sink is deprecated, new code should not reference @@ -214,9 +229,14 @@ struct dc_stream_state { #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool is_dsc_enabled; #endif + union stream_update_flags update_flags; }; +#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF + struct dc_stream_update { + struct dc_stream_state *stream; + struct rect src; struct rect dst; struct dc_transfer_func *out_transfer_func; @@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc, int num_streams, const struct dc_static_screen_events *events); +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option); + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b273735b6a3e..d9be8fc3889f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,6 +25,11 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ +#ifndef AMD_EDID_UTILITY +/* AND EdidUtility only needs a portion + * of this file, including the rest only + * causes additional issues. 
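The stream_update_flags union above packs per-field dirty bits alongside a raw view for cheap "anything changed?" checks; a hypothetical producer/consumer pair:

/* producer: mark what a dc_stream_update touches */
stream->update_flags.bits.dpms_off = 1;
stream->update_flags.bits.abm_level = 1;

/* consumer: fast-path when nothing is dirty, then clear in one store */
if (stream->update_flags.raw) {
	program_stream_updates(stream);	/* hypothetical helper */
	stream->update_flags.raw = 0;
}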
+ */ #include "os_types.h" #include "fixed31_32.h" #include "irq_types.h" @@ -33,6 +38,10 @@ #include "dal_types.h" #include "grph_object_defs.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif + /* forward declarations */ struct dc_plane_state; struct dc_stream_state; @@ -100,6 +109,9 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#endif }; @@ -159,6 +171,12 @@ enum dc_edid_status { EDID_THE_SAME, }; +enum act_return_status { + ACT_SUCCESS, + ACT_LINK_LOST, + ACT_FAILED +}; + /* audio capability from EDID*/ struct dc_cea_audio_mode { uint8_t format_code; /* ucData[0] [6:3]*/ @@ -739,6 +757,9 @@ struct dc_clock_config { uint32_t current_clock_khz;/*current clock in use*/ }; +#endif /*AMD_EDID_UTILITY*/ +//AMD EDID UTILITY does not need any of the above structures + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { @@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps { uint32_t branch_max_line_width; }; #endif + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 58bd131d5b48..b8a3fc505c9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id) /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + 1, 80000); + return true; } @@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm) /* Enable the backlight output */ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); + /* Disable fractional pwm if configured */ + REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, + abm->ctx->dc->config.disable_fractional_pwm ? 
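dce_abm_set_pipe above now waits for the DMCU to acknowledge the message interrupt instead of returning immediately, which serializes ABM commands; the REG_WAIT polls every 1 us, up to 80000 times (~80 ms):

/* notifyDMCUMsg: raise the interrupt ... */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);

/* ... then block until the DMCU clears it, so the next command
 * cannot clobber one still in flight. */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
	 1, 80000);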
0 : 1); + /* Unlock group 2 backlight registers */ REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); @@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DCE_ABM(*abm); - if (abm_dce->base.dmcu_is_running == true) - abm_dce->base.funcs->set_abm_immediate_disable(*abm); - kfree(abm_dce); *abm = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index c3f9f4185ce8..e472608faf33 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,6 +42,10 @@ #include "reg_helper.h" +#undef FN +#define FN(reg_name, field_name) \ + aux110->shift->field_name, aux110->mask->field_name + #define FROM_AUX_ENGINE(ptr) \ container_of((ptr), struct aux_engine_dce110, base) @@ -55,6 +59,14 @@ enum { AUX_TIMED_OUT_RETRY_COUNTER = 2, AUX_DEFER_RETRY_COUNTER = 6 }; + +#define TIME_OUT_INCREMENT 1016 +#define TIME_OUT_MULTIPLIER_8 8 +#define TIME_OUT_MULTIPLIER_16 16 +#define TIME_OUT_MULTIPLIER_32 32 +#define TIME_OUT_MULTIPLIER_64 64 +#define MAX_TIMEOUT_LENGTH 127 + static void release_engine( struct dce_aux *engine) { @@ -198,7 +210,7 @@ static void submit_channel_request( REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); /* set the delay and the number of bytes to write */ @@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status( /* poll to make sure that SW_DONE is asserted */ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); value = REG_READ(AUX_SW_STATUS); /* in case HPD is LOW, exit AUX transaction */ @@ -414,20 +426,77 @@ void dce110_engine_destroy(struct dce_aux **engine) *engine = NULL; } + +static bool dce_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout_in_us) +{ + uint32_t multiplier = 0; + uint32_t length = 0; + struct ddc *ddc_pin = ddc->ddc_pin; + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + + /* 1-Update polling timeout period */ + aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; + + /* 2-Update aux timeout period length and multiplier */ + if (timeout_in_us <= TIME_OUT_INCREMENT) { + multiplier = 0; + length = timeout_in_us/TIME_OUT_MULTIPLIER_8; + if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) + length++; + } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) { + multiplier = 1; + length = timeout_in_us/TIME_OUT_MULTIPLIER_16; + if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0) + length++; + } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) { + multiplier = 2; + length = timeout_in_us/TIME_OUT_MULTIPLIER_32; + if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0) + length++; + } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) { + multiplier = 3; + length = timeout_in_us/TIME_OUT_MULTIPLIER_64; + if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0) + length++; + } + + length = (length < MAX_TIMEOUT_LENGTH) ? 
length : MAX_TIMEOUT_LENGTH; + + REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); + + return true; +} + +static struct dce_aux_funcs aux_functions = { + .configure_timeout = NULL, + .destroy = NULL, +}; + struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs) + const struct dce110_aux_registers *regs, + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable) { aux_engine110->base.ddc = NULL; aux_engine110->base.ctx = ctx; aux_engine110->base.delay = 0; aux_engine110->base.max_defer_write_retry = 0; aux_engine110->base.inst = inst; - aux_engine110->timeout_period = timeout_period; + aux_engine110->polling_timeout_period = timeout_period; aux_engine110->regs = regs; + aux_engine110->mask = mask; + aux_engine110->shift = shift; + aux_engine110->base.funcs = &aux_functions; + if (is_ext_aux_timeout_configurable) + aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout; + return &aux_engine110->base; } @@ -464,8 +533,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, memset(&aux_rep, 0, sizeof(aux_rep)); aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - if (!acquire(aux_engine, ddc_pin)) + if (!acquire(aux_engine, ddc_pin)) { + *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE; return -1; + } if (payload->i2c_over_aux) aux_req.type = AUX_TRANSACTION_TYPE_I2C; @@ -475,7 +546,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, aux_req.action = i2caux_action_from_payload(payload); aux_req.address = payload->address; - aux_req.delay = payload->defer_delay * 10; + aux_req.delay = 0; aux_req.length = payload->length; aux_req.data = payload->data; @@ -544,8 +615,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, case AUX_TRANSACTION_REPLY_AUX_DEFER: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; + } else { + if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || + (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { + if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } + } break; case AUX_TRANSACTION_REPLY_I2C_DEFER: @@ -582,6 +660,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: default: goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index ed7fec8fe253..b4b2c79a8073 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -29,6 +29,7 @@ #include "i2caux_interface.h" #include "inc/hw/aux_engine.h" + #ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -36,6 +37,7 @@ SRI(AUX_SW_DATA, DP_AUX, id), \ SRI(AUX_SW_CONTROL, DP_AUX, id), \ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ SRI(AUX_SW_STATUS, DP_AUX, id) #endif @@ -55,6 +57,7 @@ struct dce110_aux_registers { uint32_t AUX_SW_DATA; uint32_t AUX_SW_CONTROL; uint32_t AUX_INTERRUPT_CONTROL; + uint32_t AUX_DPHY_RX_CONTROL1; uint32_t AUX_SW_STATUS; 
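A worked example for the dce_aux_configure_timeout bucketing above, taking timeout_in_us = 3000:

/* 2 * TIME_OUT_INCREMENT (2032) < 3000 <= 4 * TIME_OUT_INCREMENT (4064)
 *   -> multiplier = 2 (TIME_OUT_MULTIPLIER_32, 32 us units)
 * length = 3000 / 32 = 93 with remainder 24, rounded up to 94;
 * 94 <= MAX_TIMEOUT_LENGTH (127), so REG_UPDATE_SEQ_2 programs
 * (AUX_RX_TIMEOUT_LEN = 94, AUX_RX_TIMEOUT_LEN_MUL = 2), a hardware
 * timeout of about 94 * 32 = 3008 us, while the software polling
 * window becomes 3000 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER. */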
uint32_t AUXN_IMPCAL; uint32_t AUXP_IMPCAL; @@ -62,6 +65,156 @@ struct dce110_aux_registers { uint32_t AUX_RESET_MASK; }; +#define DCE_AUX_REG_FIELD_LIST(type)\ + type AUX_EN;\ + type AUX_RESET;\ + type AUX_RESET_DONE;\ + type AUX_REG_RW_CNTL_STATUS;\ + type AUX_SW_USE_AUX_REG_REQ;\ + type AUX_SW_DONE_USING_AUX_REG;\ + type AUX_SW_AUTOINCREMENT_DISABLE;\ + type AUX_SW_DATA_RW;\ + type AUX_SW_INDEX;\ + type AUX_SW_GO;\ + type AUX_SW_DATA;\ + type AUX_SW_REPLY_BYTE_COUNT;\ + type AUX_SW_DONE;\ + type AUX_SW_DONE_ACK;\ + type AUXN_IMPCAL_ENABLE;\ + type AUXP_IMPCAL_ENABLE;\ + type AUXN_IMPCAL_OVERRIDE_ENABLE;\ + type AUXP_IMPCAL_OVERRIDE_ENABLE;\ + type AUX_RX_TIMEOUT_LEN;\ + type AUX_RX_TIMEOUT_LEN_MUL;\ + type AUXN_CALOUT_ERROR_AK;\ + type AUXP_CALOUT_ERROR_AK;\ + type AUX_SW_START_DELAY;\ + type AUX_SW_WR_BYTES + +#define DCE10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE12_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, 
AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* DCN10 MASK */ +#define DCN10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* for all other DCN */ +#define DCN_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, 
mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh) + +#define AUX_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + enum { /* This is the timeout as defined in DP 1.2a, * 2.3.4 "Detailed uPacket TX AUX CH State Description". */ @@ -97,20 +250,34 @@ struct dce_aux { uint32_t max_defer_write_retry; bool acquire_reset; + struct dce_aux_funcs *funcs; +}; + +struct dce110_aux_registers_mask { + DCE_AUX_REG_FIELD_LIST(uint32_t); +}; + +struct dce110_aux_registers_shift { + DCE_AUX_REG_FIELD_LIST(uint8_t); }; + struct aux_engine_dce110 { struct dce_aux base; const struct dce110_aux_registers *regs; + const struct dce110_aux_registers_mask *mask; + const struct dce110_aux_registers_shift *shift; struct { uint32_t aux_control; uint32_t aux_arb_control; uint32_t aux_sw_data; uint32_t aux_sw_control; uint32_t aux_interrupt_control; + uint32_t aux_dphy_rx_control1; + uint32_t aux_dphy_rx_control0; uint32_t aux_sw_status; } addr; - uint32_t timeout_period; + uint32_t polling_timeout_period; }; struct aux_engine_dce110_init_data { @@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data { const struct dce110_aux_registers *regs; }; -struct dce_aux *dce110_aux_engine_construct( - struct aux_engine_dce110 *aux_engine110, +struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs); + const struct dce110_aux_registers *regs, + + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable); void dce110_engine_destroy(struct dce_aux **engine); @@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); + +struct dce_aux_funcs { + bool (*configure_timeout) + (struct ddc_service *ddc, + uint32_t timeout); + void (*destroy) + (struct aux_engine **ptr); +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 0b86cee4876f..ba995d3f2318 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu); - if (dmcu_dce->base.dmcu_state == DMCU_RUNNING) - dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true); - kfree(dmcu_dce); *dmcu = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index ac04d77058f0..32d145a0d6fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -679,6 +679,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 31b698bf9cfc..8aa937f496c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif( } if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } @@ -636,11 +636,11 @@ static void dce_mi_free_dmif( 10, 3500); if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 76d54885374a..a5e122c721ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = { .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) @@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -997,6 +1043,8 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; + dc->caps.extended_aux_timeout_support = false; + for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 01a924bf477a..f0e837d14000 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ struct dc *core_dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; @@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; - if (core_dc->res_pool->pp_smu) - pp_smu = core_dc->res_pool->pp_smu; - if (pipe_ctx->stream_res.audio) { for (i = 0; i < MAX_PIPES; i++) { /*current_state not updated yet*/ @@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) { struct dc *dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; if (!pipe_ctx || !pipe_ctx->stream) @@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; - if (dc->res_pool->pp_smu) - pp_smu = dc->res_pool->pp_smu; - if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1169,8 +1161,9 @@ static void build_audio_output( } } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (state->clk_mgr && + (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { audio_output->pll_info.dp_dto_source_clock_in_khz = state->clk_mgr->funcs->get_dp_ref_clk_frequency( state->clk_mgr); @@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - pipe_ctx->stream->link->psr_enabled = false; + pipe_ctx->stream->link->psr_feature_enabled = false; return DC_OK; } @@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { int i; - enum connector_id connector_id; - enum signal_type signal = SIGNAL_TYPE_NONE; /* do not know BIOS back-front mapping, simply blank all. 
It will not * hurt for non-DP @@ -1440,15 +1431,12 @@ static void power_down_encoders(struct dc *dc) } for (i = 0; i < dc->link_count; i++) { - connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id); - if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) || - (connector_id == CONNECTOR_ID_EDP)) { + enum signal_type signal = dc->links[i]->connector_signal; + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); - if (connector_id == CONNECTOR_ID_EDP) - signal = SIGNAL_TYPE_EDP; - } dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); @@ -1529,18 +1517,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context) return NULL; } -static struct dc_link *get_edp_link(struct dc *dc) -{ - int i; - - // report any eDP links, even unconnected DDI's - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) - return dc->links[i]; - } - return NULL; -} - static struct dc_link *get_edp_link_with_sink( struct dc *dc, struct dc_state *context) @@ -1834,7 +1810,7 @@ static bool should_enable_fbc(struct dc *dc, return false; /* PSR should not be enabled */ - if (pipe_ctx->stream->link->psr_enabled) + if (pipe_ctx->stream->link->psr_feature_enabled) return false; /* Nothing to compress */ @@ -2464,7 +2440,6 @@ static void dce110_program_front_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx) { struct mem_input *mi = pipe_ctx->plane_res.mi; - struct pipe_ctx *old_pipe = NULL; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; @@ -2472,9 +2447,6 @@ static void dce110_program_front_end_for_pipe( DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); - if (dc->current_state) - old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; - memset(&adjust, 0, sizeof(adjust)); adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 89620adc81d8..83a4dbf6d76e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ @@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create( { struct 
dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1293,6 +1339,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 21a657e79306..97dcc5d0862b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ @@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1163,7 +1209,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; - + dc->caps.extended_aux_timeout_support = false; 
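map_transmitter_id_to_phy_instance, duplicated into each DCE resource file above, makes the index into link_enc_regs[] explicit instead of trusting the numeric layout of enum transmitter (which also carries non-UNIPHY values). Every link_encoder_create now follows the same pattern:

int link_regs_id =
	map_transmitter_id_to_phy_instance(enc_init_data->transmitter);

/* index the register bank by PHY instance, not by raw enum value */
dce110_link_encoder_construct(enc110, enc_init_data,
			      &link_enc_feature,
			      &link_enc_regs[link_regs_id],
			      &link_enc_aux_regs[enc_init_data->channel - 1],
			      &link_enc_hpd_regs[enc_init_data->hpd_source]);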
/************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 7c52f7f9196c..63543f6918ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE12_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE12_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ @@ -356,6 +364,37 @@ static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ @@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); @@ -1006,7 +1052,7 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.psp_setup_panel_mode = true; - + dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 643ccb0ade00..3e8d4b49f279 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ @@ -431,6 +439,37 @@ static const struct dce_abm_mask abm_mask = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case 
TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -895,6 +941,7 @@ static bool dce80_construct( dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 01c7e30b9ce1..bbd6e01b3eca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + // All 3 color channels have same x corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); @@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format( i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); @@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; + rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red; + rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green; + rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue; + corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), dc_fixpt_from_int(region_start)); corner_points[0].green.x = corner_points[0].red.x; @@ -624,13 +625,6 @@ bool 
cm_helper_translate_curve_to_degamma_hw_format( i = 1; while (i != hw_points + 1) { - if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) - rgb_plus_1->red = rgb->red; - if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) - rgb_plus_1->green = rgb->green; - if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) - rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index d8b2da18db39..997e9582edc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) -static bool dpp_get_optimal_number_of_taps( +bool dpp1_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) @@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_read_state = dpp_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, .dpp_set_csc_default = dpp1_cm_set_output_csc_default, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index e2c613611ac9..1d4a7d640334 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + void dpp1_construct(struct dcn10_dpp *dpp1, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 001db49e4bb2..14d1be6c66e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -841,6 +841,14 @@ void min_set_viewport( REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, PRI_VIEWPORT_X_START_C, viewport_c->x, PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); } void hubp1_read_state_common(struct hubp *hubp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index cb20d10288c0..ae70d9c0aa1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -47,6 +47,8 @@ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\ 
SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ @@ -57,8 +59,12 @@ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\ @@ -150,6 +156,8 @@ uint32_t DCSURF_SEC_VIEWPORT_START; \ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \ uint32_t DCSURF_PRI_VIEWPORT_START_C; \ + uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \ + uint32_t DCSURF_SEC_VIEWPORT_START_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \ @@ -160,8 +168,12 @@ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \ uint32_t DCSURF_SURFACE_INUSE; \ uint32_t DCSURF_SURFACE_INUSE_HIGH; \ uint32_t DCSURF_SURFACE_INUSE_C; \ @@ -279,6 +291,10 @@ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\ @@ -289,8 +305,12 @@ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, 
SURFACE_INUSE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\ @@ -469,6 +489,10 @@ type PRI_VIEWPORT_HEIGHT_C; \ type PRI_VIEWPORT_X_START_C; \ type PRI_VIEWPORT_Y_START_C; \ + type SEC_VIEWPORT_WIDTH_C; \ + type SEC_VIEWPORT_HEIGHT_C; \ + type SEC_VIEWPORT_X_START_C; \ + type SEC_VIEWPORT_Y_START_C; \ type PRIMARY_SURFACE_ADDRESS_HIGH;\ type PRIMARY_SURFACE_ADDRESS;\ type SECONDARY_SURFACE_ADDRESS_HIGH;\ @@ -479,8 +503,12 @@ type SECONDARY_META_SURFACE_ADDRESS;\ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_SURFACE_ADDRESS_C;\ + type SECONDARY_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_SURFACE_ADDRESS_C;\ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_META_SURFACE_ADDRESS_C;\ + type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_META_SURFACE_ADDRESS_C;\ type SURFACE_INUSE_ADDRESS;\ type SURFACE_INUSE_ADDRESS_HIGH;\ type SURFACE_INUSE_ADDRESS_C;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 60123db7ba02..eb91432621ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc) int i; bool allow_self_fresh_force_enable = true; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) + return; +#endif if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -1300,6 +1304,10 @@ static void dcn10_init_hw(struct dc *dc) } dc->hwss.enable_power_gating_plane(dc->hwseq, true); + + if (dc->clk_mgr->funcs->notify_wm_ranges) + dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); + } static void dcn10_reset_hw_ctx_wrap( @@ -1452,15 +1460,15 @@ static void log_tf(struct dc_context *ctx, DC_LOG_ALL_TF_CHANNELS("Logging all channels..."); for (i = 0; i < hw_points_num; i++) { - DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) { - DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } } @@ -2304,8 +2312,7 @@ void update_dchubp_dpp( dc->res_pool->dccg->funcs->update_dpp_dto( dc->res_pool->dccg, dpp->inst, - pipe_ctx->plane_res.bw.dppclk_khz, - false); + pipe_ctx->plane_res.bw.dppclk_khz); else dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? 
dc->clk_mgr->clks.dispclk_khz / 2 : @@ -2512,8 +2519,10 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 8bf5f0f2301d..88fcc395adf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -113,6 +113,20 @@ struct dcn10_link_enc_registers { uint32_t DIG_LANE_ENABLE; /* UNIPHY */ uint32_t CHANNEL_XBAR_CNTL; + /* DPCS */ + uint32_t RDPCSTX_PHY_CNTL3; + uint32_t RDPCSTX_PHY_CNTL4; + uint32_t RDPCSTX_PHY_CNTL5; + uint32_t RDPCSTX_PHY_CNTL6; + uint32_t RDPCSTX_PHY_CNTL7; + uint32_t RDPCSTX_PHY_CNTL8; + uint32_t RDPCSTX_PHY_CNTL9; + uint32_t RDPCSTX_PHY_CNTL10; + uint32_t RDPCSTX_PHY_CNTL11; + uint32_t RDPCSTX_PHY_CNTL12; + uint32_t RDPCSTX_PHY_CNTL13; + uint32_t RDPCSTX_PHY_CNTL14; + uint32_t RDPCSTX_PHY_CNTL15; /* indirect registers */ uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3; @@ -250,6 +264,10 @@ struct dcn10_link_enc_registers { type RDPCS_EXT_REFCLK_EN;\ type RDPCS_TX_FIFO_EN;\ type UNIPHY_LINK_ENABLE;\ + type UNIPHY_CHANNEL0_XBAR_SOURCE;\ + type UNIPHY_CHANNEL1_XBAR_SOURCE;\ + type UNIPHY_CHANNEL2_XBAR_SOURCE;\ + type UNIPHY_CHANNEL3_XBAR_SOURCE;\ type UNIPHY_CHANNEL0_INVERT;\ type UNIPHY_CHANNEL1_INVERT;\ type UNIPHY_CHANNEL2_INVERT;\ @@ -337,16 +355,46 @@ struct dcn10_link_enc_registers { type RDPCS_TX_FIFO_ERROR_MASK;\ type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\ type RDPCS_DPALT_4LANE_TOGGLE_MASK;\ + type RDPCS_PHY_DPALT_DP4;\ type RDPCS_PHY_DPALT_DISABLE;\ type RDPCS_PHY_DPALT_DISABLE_ACK;\ type RDPCS_PHY_DP_MPLLB_V2I;\ type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\ + type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\ + type RDPCS_PHY_RX_VREF_CTRL;\ type RDPCS_PHY_DP_MPLLB_CP_INT;\ type RDPCS_PHY_DP_MPLLB_CP_PROP;\ type RDPCS_PHY_RX_REF_LD_VAL;\ type RDPCS_PHY_RX_VCO_LD_VAL;\ type DPCSTX_DEBUG_CONFIG; \ - type RDPCSTX_DEBUG_CONFIG + type RDPCSTX_DEBUG_CONFIG; \ + type RDPCS_PHY_DP_TX0_EQ_MAIN;\ + type RDPCS_PHY_DP_TX0_EQ_PRE;\ + type RDPCS_PHY_DP_TX0_EQ_POST;\ + type RDPCS_PHY_DP_TX1_EQ_MAIN;\ + type RDPCS_PHY_DP_TX1_EQ_PRE;\ + type RDPCS_PHY_DP_TX1_EQ_POST;\ + type RDPCS_PHY_DP_TX2_EQ_MAIN;\ + type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\ + type RDPCS_PHY_DP_TX2_EQ_PRE;\ + type RDPCS_PHY_DP_TX2_EQ_POST;\ + type RDPCS_PHY_DP_TX3_EQ_MAIN;\ + type RDPCS_PHY_DCO_RANGE;\ + type RDPCS_PHY_DCO_FINETUNE;\ + type RDPCS_PHY_DP_TX3_EQ_PRE;\ + type RDPCS_PHY_DP_TX3_EQ_POST;\ + type RDPCS_PHY_SUP_PRE_HP;\ + type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\ + type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\ + type UNIPHYA_SOFT_RESET;\ + type UNIPHYB_SOFT_RESET;\ + type UNIPHYC_SOFT_RESET;\ + type UNIPHYD_SOFT_RESET;\ + type UNIPHYE_SOFT_RESET;\ + type UNIPHYF_SOFT_RESET #define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_LANE0EN;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index e9ebbbe256b4..0a9ad692f541 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding( REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 1, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); @@ -237,6 +240,9 @@ void opp1_set_dyn_expansion( FMT_DYNAMIC_EXP_EN, 0, FMT_DYNAMIC_EXP_MODE, 0); + if (opp->dyn_expansion == DYN_EXPANSION_DISABLE) + return; + /*00 - 10-bit -> 12-bit dynamic expansion*/ /*01 - 8-bit -> 12-bit dynamic expansion*/ if (signal == SIGNAL_TYPE_HDMI_TYPE_A || diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 0f10adea000c..2c0ecfa5a643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -116,6 +116,8 @@ type FMT_RAND_G_SEED; \ type FMT_RAND_B_SEED; \ type FMT_PIXEL_ENCODING; \ + type FMT_SUBSAMPLING_MODE; \ + type FMT_CBCR_BIT_REDUCTION_BYPASS; \ type FMT_CLAMP_DATA_EN; \ type FMT_CLAMP_COLOR_FORMAT; \ type FMT_DYNAMIC_EXP_EN; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index e74a07d03fde..dabccbd49ad4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc) return ret; } -bool optc1_is_matching_timing(struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing) +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing) { - struct dc_crtc_timing hw_crtc_timing = {0}; struct dcn_otg_state s = {0}; - if (tg == NULL || otg_timing == NULL) + if (tg == NULL || hw_crtc_timing == NULL) return false; optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); - hw_crtc_timing.h_total = s.h_total + 1; - hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); - hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start; - hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start; + hw_crtc_timing->h_total = s.h_total + 1; + hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); + hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start; + hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start; - hw_crtc_timing.v_total = s.v_total + 1; - hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); - hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start; - hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start; - - if (otg_timing->h_total != hw_crtc_timing.h_total) - return false; - - if (otg_timing->h_border_left != hw_crtc_timing.h_border_left) - return false; - - if (otg_timing->h_addressable != hw_crtc_timing.h_addressable) - return false; - - if (otg_timing->h_border_right != hw_crtc_timing.h_border_right) - return false; - - if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch) - return false; - - if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width) - return false; - - if (otg_timing->v_total != hw_crtc_timing.v_total) - return false; - - if (otg_timing->v_border_top != hw_crtc_timing.v_border_top) - return false; - - if (otg_timing->v_addressable != hw_crtc_timing.v_addressable) - 
return false; - - if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) - return false; - - if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width) - return false; + hw_crtc_timing->v_total = s.v_total + 1; + hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); + hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start; + hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start; return true; } @@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, - .is_matching_timing = optc1_is_matching_timing, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. Not need for FPGA */ .wait_for_state = optc1_wait_for_state, @@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .configure_crc = optc1_configure_crc, .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc1_program_manual_trigger, - .setup_manual_trigger = optc1_setup_manual_trigger + .setup_manual_trigger = optc1_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, }; void dcn10_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 83575599672e..c8d795b335ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -547,9 +547,8 @@ struct dcn_otg_state { void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); -bool optc1_is_matching_timing( - struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing); +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing); bool optc1_validate_timing( struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1599bb971111..15640aedd664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN10_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ @@ -471,6 +479,28 @@ static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ @@ -642,7 +672,10 @@ struct dce_aux *dcn10_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -751,14 +784,18 @@ struct link_encoder 
*dcn10_link_encoder_create( { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc10) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1308,6 +1345,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + dc->caps.extended_aux_timeout_support = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9aa258f3550b..06e5bbb4545c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg( return tg_inst; } +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth) +{ + uint32_t hw_encoding = 0; + uint32_t hw_depth = 0; + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (enc == NULL || + encoding == NULL || + depth == NULL) + return false; + + REG_GET_2(DP_PIXEL_FORMAT, + DP_PIXEL_ENCODING, &hw_encoding, + DP_COMPONENT_DEPTH, &hw_depth); + + switch (hw_depth) { + case DP_COMPONENT_PIXEL_DEPTH_6BPC: + *depth = COLOR_DEPTH_666; + break; + case DP_COMPONENT_PIXEL_DEPTH_8BPC: + *depth = COLOR_DEPTH_888; + break; + case DP_COMPONENT_PIXEL_DEPTH_10BPC: + *depth = COLOR_DEPTH_101010; + break; + case DP_COMPONENT_PIXEL_DEPTH_12BPC: + *depth = COLOR_DEPTH_121212; + break; + case DP_COMPONENT_PIXEL_DEPTH_16BPC: + *depth = COLOR_DEPTH_161616; + break; + default: + *depth = COLOR_DEPTH_UNDEFINED; + break; + } + + switch (hw_encoding) { + case DP_PIXEL_ENCODING_TYPE_RGB444: + *encoding = PIXEL_ENCODING_RGB; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR422: + *encoding = PIXEL_ENCODING_YCBCR422; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR444: + case DP_PIXEL_ENCODING_TYPE_Y_ONLY: + *encoding = PIXEL_ENCODING_YCBCR444; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR420: + *encoding = PIXEL_ENCODING_YCBCR420; + break; + default: + *encoding = PIXEL_ENCODING_UNDEFINED; + break; + } + return true; +} + static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dp_set_stream_attribute = enc1_stream_encoder_dp_set_stream_attribute, @@ -1589,6 +1649,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dig_connect_to_otg = enc1_dig_connect_to_otg, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, }; void dcn10_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a512cbea00d1..c9cbc21d121e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -621,4 +621,9 @@ void get_audio_clock_info( void enc1_reset_hdmi_stream_attribute( struct stream_encoder *enc); +bool enc1_stream_encoder_dp_get_pixel_format( + 
struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index ddb8d5649e79..63f3bddba7da 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -10,15 +10,20 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT DCN20 += dcn20_dsc.o endif -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse -CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4 +else CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 16476ed25536..1e1151356e60 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -44,16 +44,12 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg2_update_dpp_dto(struct dccg *dccg, - int dpp_inst, - int req_dppclk, - bool reduce_divider_only) +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; - int current_phase, current_modulo; ASSERT(req_dppclk <= ref_dppclk); /* need to clamp to 8 bits */ @@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg, if (req_dppclk > ref_dppclk) req_dppclk = ref_dppclk; } - - REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst], - DPPCLK0_DTO_PHASE, ¤t_phase, - DPPCLK0_DTO_MODULO, ¤t_modulo); - - if (reduce_divider_only) { - // requested phase/modulo greater than current - if (req_dppclk * current_modulo >= current_phase * ref_dppclk) { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, current_phase, - DPPCLK0_DTO_MODULO, current_modulo); - } - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } - + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, req_dppclk, + DPPCLK0_DTO_MODULO, ref_dppclk); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else { @@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg, void dccg2_init(struct dccg *dccg) { - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - - // Fallthrough intentional to program all available dpp_dto's - switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { - case 6: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); - /* Fall through */ - case 5: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); - /* Fall through */ - case 4: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); - /* Fall through */ - case 3: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); - 
/* Fall through */ - case 2: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); - /* Fall through */ - case 1: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); - break; - default: - ASSERT(false); - break; - } } static const struct dccg_funcs dccg2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 74a074a873cd..2205cb0204e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -97,7 +97,7 @@ struct dcn_dccg { const struct dccg_mask *dccg_mask; }; -void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only); +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); void dccg2_get_dccg_ref_freq(struct dccg *dccg, unsigned int xtalin_freq_inKhz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 2f5aade1e882..4d7e45892f08 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps( struct scaler_data *scl_data, const struct scaling_taps *in_taps) { - uint32_t pixel_width; - - if (scl_data->viewport.width > scl_data->recout.width) - pixel_width = scl_data->recout.width; - else - pixel_width = scl_data->viewport.width; - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ if (scl_data->viewport.width != scl_data->h_active && scl_data->viewport.height != scl_data->v_active && @@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .dpp_read_state = dpp20_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 290b2854bd2c..5b03b737b1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -30,16 +30,20 @@ #define TO_DCN20_DPP(dpp)\ container_of(dpp, struct dcn20_dpp, base) -#define TF_REG_LIST_DCN20(id) \ - TF_REG_LIST_DCN(id), \ +#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) + +#define TF_REG_LIST_DCN20_COMMON(id) \ SRI(CM_BLNDGAM_CONTROL, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ @@ -66,9 +70,6 @@ SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ 
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ @@ -147,7 +148,12 @@ SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ - SRI(CM_SHAPER_LUT_INDEX, CM, id), \ + SRI(CM_SHAPER_LUT_INDEX, CM, id) + +#define TF_REG_LIST_DCN20(id) \ + TF_REG_LIST_DCN(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ SRI(CURSOR_CONTROL, CURSOR0_, id), \ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ @@ -166,27 +172,41 @@ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ SRI(DSCL_MEM_PWR_CTRL, DSCL, id) -#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + +#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, 
CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -261,18 +281,9 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -341,9 +352,6 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ @@ 
-356,7 +364,6 @@ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ @@ -521,9 +528,14 @@ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ @@ -560,6 +572,7 @@ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) + #define TF_REG_FIELD_LIST_DCN2_0(type) \ TF_REG_FIELD_LIST(type) \ type CM_BLNDGAM_LUT_DATA; \ @@ -593,6 +606,7 @@ type OBUF_MEM_PWR_FORCE;\ type LUT_MEM_PWR_FORCE + struct dcn2_dpp_shift { TF_REG_FIELD_LIST_DCN2_0(uint8_t); }; @@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - bool dpp2_construct(struct dcn20_dpp *dpp2, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 1b419407af94..63eb377ed9c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock dsc_enc_caps->color_formats.bits.RGB = 1; dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; - dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0; + dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index cd8bc92ce3ba..880954ac0b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, struct scaling_taps num_taps) { uint32_t h_ratio_luma = 1; - uint32_t h_ratio_chroma = 1; uint32_t h_taps_luma = num_taps.h_taps; uint32_t h_taps_chroma = num_taps.h_taps_c; int32_t h_init_phase_luma = 0; @@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, h_ratio_luma = -1; else h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5; - h_ratio_chroma = h_ratio_luma * 2; /*Program ratio*/ 
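/* Illustrative note, not from the patch: dc_fixpt_u3d19() packs the
 * luma scale ratio as unsigned 3.19 fixed point, so a 1:1 ratio is
 * 1 << 19 = 0x80000, and the << 5 above re-aligns that to
 * 1 << 24 = 0x1000000 (presumably the fractional alignment the WBSCL
 * scale-ratio field expects; the exact field width is an assumption
 * here). A 2:1 downscale would likewise program 0x2000000. The deleted
 * h_ratio_chroma/v_ratio_chroma locals were computed but never written
 * to any register, which is presumably why this patch drops them.
 */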
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma); @@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, enum dwb_subsample_position subsample_position) { uint32_t v_ratio_luma = 1; - uint32_t v_ratio_chroma = 1; uint32_t v_taps_luma = num_taps.v_taps; uint32_t v_taps_chroma = num_taps.v_taps_c; int32_t v_init_phase_luma = 0; @@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, v_ratio_luma = -1; else v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5; - v_ratio_chroma = v_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index b83c022e2c6f..8b8438566101 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl } static void hubbub2_det_request_size( + unsigned int detile_buf_size, unsigned int height, unsigned int width, unsigned int bpe, bool *req128_horz_wc, bool *req128_vert_wc) { - unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ - unsigned int blk256_height = 0; unsigned int blk256_width = 0; unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; @@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, &segment_order_horz, &segment_order_vert)) return false; - hubbub2_det_request_size(input->surface_size.height, input->surface_size.width, + hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, bpe, &req128_horz_wc, &req128_vert_wc); if (!req128_horz_wc && !req128_vert_wc) { @@ -588,7 +588,7 @@ static void hubbub2_program_watermarks( DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); } static const struct hubbub_funcs hubbub2_funcs = { @@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = { .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub2_program_watermarks + .program_watermarks = hubbub2_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control }; void hubbub2_construct(struct dcn20_hubbub *hubbub, @@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 626117d3b4e9..501532dd523a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -81,6 +81,7 @@ struct dcn20_hubbub { unsigned int debug_test_index_pstate; struct dcn_watermark_set watermarks; struct dcn20_vmid vmid[16]; + unsigned int detile_buf_size; }; void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 1212da12c414..921a36668ced 
100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static bool dcn20_set_blend_lut( +bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut( return result; } -static bool dcn20_set_shaper_3dlut( +bool dcn20_set_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -999,72 +999,6 @@ void dcn20_enable_plane( } -static void dcn20_program_pipe( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - pipe_ctx->plane_state->update_flags.bits.full_update = - context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update; - - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dcn20_enable_plane(dc, pipe_ctx, context); - - update_dchubp_dpp(dc, pipe_ctx, context); - - set_hdr_multiplier(pipe_ctx); - - if (pipe_ctx->plane_state->update_flags.bits.full_update || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); - - /* dcn10_translate_regamma_to_hw_format takes 750us to finish - * only do gamma programming for full update. - * TODO: This can be further optimized/cleaned up - * Always call this for now since it does memcmp inside before - * doing heavy calculation and programming - */ - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); -} - -static void dcn20_program_all_pipe_in_tree( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) { - bool blank = !is_pipe_tree_visible(pipe_ctx); - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); - - pipe_ctx->stream_res.tg->funcs->set_vtg_params( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); - - if (dc->hwss.update_odm) - dc->hwss.update_odm(dc, context, pipe_ctx); - } - - if (pipe_ctx->plane_state != NULL) - dcn20_program_pipe(dc, pipe_ctx, context); - - if (pipe_ctx->bottom_pipe != NULL) { - ASSERT(pipe_ctx->bottom_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); - } else if (pipe_ctx->next_odm_pipe != NULL) { - ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context); - } -} - void dcn20_pipe_control_lock_global( struct dc *dc, struct pipe_ctx *pipe, @@ -1124,114 +1058,456 @@ void dcn20_pipe_control_lock( } } -static void dcn20_apply_ctx_for_surface( - struct dc *dc, - const struct dc_stream_state *stream, - int num_planes, - struct dc_state *context) +static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) { - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; - int i; - struct timing_generator *tg; - bool removed_pipe[6] = { false }; - bool interdependent_update = false; - struct pipe_ctx *top_pipe_to_program = 
- find_top_pipe_for_stream(dc, context, stream); - struct pipe_ctx *prev_top_pipe_to_program = - find_top_pipe_for_stream(dc, dc->current_state, stream); - DC_LOGGER_INIT(dc->ctx->logger); + new_pipe->update_flags.raw = 0; - if (!top_pipe_to_program) + /* Exit on unchanged, unused pipe */ + if (!old_pipe->plane_state && !new_pipe->plane_state) return; + /* Detect pipe enable/disable */ + if (!old_pipe->plane_state && new_pipe->plane_state) { + new_pipe->update_flags.bits.enable = 1; + new_pipe->update_flags.bits.mpcc = 1; + new_pipe->update_flags.bits.dppclk = 1; + new_pipe->update_flags.bits.hubp_interdependent = 1; + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + new_pipe->update_flags.bits.gamut_remap = 1; + new_pipe->update_flags.bits.scaler = 1; + new_pipe->update_flags.bits.viewport = 1; + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + new_pipe->update_flags.bits.odm = 1; + new_pipe->update_flags.bits.global_sync = 1; + } + return; + } + if (old_pipe->plane_state && !new_pipe->plane_state) { + new_pipe->update_flags.bits.disable = 1; + return; + } - /* Carry over GSL groups in case the context is changing. */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == stream && - pipe_ctx->stream == old_pipe_ctx->stream) - pipe_ctx->stream_res.gsl_group = - old_pipe_ctx->stream_res.gsl_group; + /* Detect top pipe only changes */ + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + /* Detect odm changes */ + if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx) + || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe) + || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe) + || old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.odm = 1; + + /* Detect global sync changes */ + if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset + || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start + || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset + || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width) + new_pipe->update_flags.bits.global_sync = 1; } - tg = top_pipe_to_program->stream_res.tg; + /* + * Detect opp / tg change, only set on change, not on enable + * Assume mpcc inst = pipe index, if not this code needs to be updated + * since mpcc is what is affected by these. In fact all of our sequence + * makes this assumption at the moment with how hubp reset is matched to + * same index mpcc reset. 
+ */ + if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.opp_changed = 1; + if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) + new_pipe->update_flags.bits.tg_changed = 1; + + /* Detect mpcc blending changes, only dpp inst and bot matter here */ + if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp + || old_pipe->stream_res.opp != new_pipe->stream_res.opp + || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && new_pipe->bottom_pipe + && old_pipe->bottom_pipe->plane_res.mpcc_inst + != new_pipe->bottom_pipe->plane_res.mpcc_inst)) + new_pipe->update_flags.bits.mpcc = 1; + + /* Detect dppclk change */ + if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) + new_pipe->update_flags.bits.dppclk = 1; + + /* Check for scl update */ + if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) + new_pipe->update_flags.bits.scaler = 1; + /* Check for vp update */ + if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) + || memcmp(&old_pipe->plane_res.scl_data.viewport_c, + &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) + new_pipe->update_flags.bits.viewport = 1; + + /* Detect dlg/ttu/rq updates */ + { + struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs; + struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs; + + /* Detect pipe interdependent updates */ + if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch || + old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch || + old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c || + old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank || + old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank || + old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip || + old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip || + old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l || + old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c || + old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l || + old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l || + old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l || + old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 || + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 || + old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank || + old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) { + old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch; + old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch; + old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c; + old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank; + old_dlg_attr.dst_y_per_row_vblank = 
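/*
 * The copy-then-compare trick used in this block: each field already
 * reported as "interdependent" is copied from the new registers into
 * the old snapshot, so the final memcmp() of snapshot against new only
 * flags the remaining differences as hubp_rq_dlg_ttu. Reduced to a
 * hypothetical two-field struct with hypothetical flag names:
 *
 *   struct regs { uint32_t a; uint32_t b; };
 *
 *   static void detect(struct regs old, const struct regs *new,
 *                      union pipe_update_flags *f)
 *   {
 *       if (old.a != new->a) {
 *           f->bits.interdependent = 1;
 *           old.a = new->a;        // mask out what was just reported
 *       }
 *       if (memcmp(&old, new, sizeof(old)))
 *           f->bits.other = 1;     // anything else that moved
 *   }
 *
 * Taking `old` by value keeps the caller's snapshot intact, exactly as
 * the local old_dlg_attr/old_ttu_attr copies do here.
 */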
new_dlg_attr->dst_y_per_row_vblank; + old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip; + old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip; + old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l; + old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c; + old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l; + old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l; + old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c; + old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l; + old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c; + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0; + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1; + old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank; + old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip; + new_pipe->update_flags.bits.hubp_interdependent = 1; + } + /* Detect any other updates to ttu/rq/dlg */ + if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) || + memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) || + memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + } +} - interdependent_update = top_pipe_to_program->plane_state && - top_pipe_to_program->plane_state->update_flags.bits.full_update; +static void dcn20_update_dchubp_dpp( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; - if (interdependent_update) - lock_all_pipes(dc, context, true); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, true); + if (pipe_ctx->update_flags.bits.dppclk) + dpp->funcs->dpp_dppclk_control(dpp, false, true); - if (num_planes == 0) { - /* OTG blank before remove all front end */ - dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); + /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG + * VTG is within DCHUBBUB which is commond block share by each pipe HUBP. + * VTG is 1:1 mapping with OTG. 
Each pipe HUBP will select which VTG + */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); + + hubp->funcs->hubp_setup( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, + &pipe_ctx->rq_regs, + &pipe_ctx->pipe_dlg_param); + } + if (pipe_ctx->update_flags.bits.hubp_interdependent) + hubp->funcs->hubp_setup_interdependent( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs); + + if (pipe_ctx->update_flags.bits.enable || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + struct dc_bias_and_scale bns_params = {0}; + + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + + if (dpp->funcs->dpp_program_bias_and_scale) { + //TODO :for CNVC set scale and bias registers if necessary + dcn10_build_prescale_params(&bns_params, plane_state); + dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); + } } - /* Disconnect unused mpcc */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - /* - * Powergate reused pipes that are not powergated - * fairly hacky right now, using opp_id as indicator - * TODO: After move dc_post to dc_update, this will - * be removed. - */ - if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { - if (old_pipe_ctx->stream_res.tg == tg && - old_pipe_ctx->plane_res.hubp && - old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID) - dc->hwss.disable_plane(dc, old_pipe_ctx); + if (pipe_ctx->update_flags.bits.mpcc + || plane_state->update_flags.bits.global_alpha_change + || plane_state->update_flags.bits.per_pixel_alpha_change) { + /* Need mpcc to be idle if changing opp */ + if (pipe_ctx->update_flags.bits.opp_changed) { + struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; + int mpcc_inst; + + for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { + if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) + continue; + dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); + old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + } } + dc->hwss.update_mpcc(dc, pipe_ctx); + } - if ((!pipe_ctx->plane_state || - pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); + /* scaler configuration */ + pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); - removed_pipe[i] = true; + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) + 
hubp->funcs->mem_program_viewport( + hubp, + &pipe_ctx->plane_res.scl_data.viewport, + &pipe_ctx->plane_res.scl_data.viewport_c); + + /* Any updates are handled in dc interface, just need to apply existing for plane enable */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) + && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + } - DC_LOG_DC("Reset mpcc for pipe %d\n", - old_pipe_ctx->pipe_idx); - } + /* Any updates are handled in dc interface, just need + * to apply existing for plane enable / opp change */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->stream->update_flags.bits.gamut_remap + || pipe_ctx->stream->update_flags.bits.out_csc) { + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); + + /*call the dcn2 method which uses mpc csc*/ + dc->hwss.program_output_csc(dc, + pipe_ctx, + pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, + hubp->opp_id); } - if (num_planes > 0) - dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context); + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hubp->funcs->hubp_program_surface_config( + hubp, + plane_state->format, + &plane_state->tiling_info, + &size, + plane_state->rotation, + &plane_state->dcc, + plane_state->horizontal_mirror, + 0); + hubp->power_gated = false; + } - /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); + if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) + dc->hwss.update_plane_addr(dc, pipe_ctx); - if (interdependent_update) - for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (pipe_ctx->update_flags.bits.enable) + hubp->funcs->set_blank(hubp, false); +} + + +static void dcn20_program_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + /* Only need to unblank on top pipe */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) + && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) + dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + + if (pipe_ctx->update_flags.bits.global_sync) { + pipe_ctx->stream_res.tg->funcs->program_global_sync( + pipe_ctx->stream_res.tg, + pipe_ctx->pipe_dlg_param.vready_offset, + pipe_ctx->pipe_dlg_param.vstartup_start, + pipe_ctx->pipe_dlg_param.vupdate_offset, + pipe_ctx->pipe_dlg_param.vupdate_width); + + pipe_ctx->stream_res.tg->funcs->set_vtg_params( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + } + + if 
(pipe_ctx->update_flags.bits.odm) + dc->hwss.update_odm(dc, context, pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable) + dcn20_enable_plane(dc, pipe_ctx, context); + + if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) + dcn20_update_dchubp_dpp(dc, pipe_ctx, context); + + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->plane_state->update_flags.bits.sdr_white_level) + set_hdr_multiplier(pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change) + dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + + /* dcn10_translate_regamma_to_hw_format takes 750us to finish + * only do gamma programming for powering on, internal memcmp to avoid + * updating on slave planes + */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) + dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + + /* If the pipe has been enabled or has a different opp, we + * should reprogram the fmt. This deals with cases where + * interation between mpc and odm combine on different streams + * causes a different pipe to be chosen to odm combine with. + */ + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->update_flags.bits.opp_changed) { + + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->signal); + + pipe_ctx->stream_res.opp->funcs->opp_program_fmt( + pipe_ctx->stream_res.opp, + &pipe_ctx->stream->bit_depth_params, + &pipe_ctx->stream->clamping); + } +} + +static bool does_pipe_need_lock(struct pipe_ctx *pipe) +{ + if ((pipe->plane_state && pipe->plane_state->update_flags.raw) + || pipe->update_flags.raw) + return true; + if (pipe->bottom_pipe) + return does_pipe_need_lock(pipe->bottom_pipe); + + return false; +} + +static void dcn20_program_front_end_for_ctx( + struct dc *dc, + struct dc_state *context) +{ + const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; + int i; + bool pipe_locked[MAX_PIPES] = {false}; + DC_LOGGER_INIT(dc->ctx->logger); + + /* Carry over GSL groups in case the context is changing. 
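GSL (global swap lock) groups keep flips synchronized across pipes, so
 * they must survive the swap to the new context. On the related lock
 * decision above: does_pipe_need_lock() walks the MPC chain
 * recursively; an equivalent iterative form, shown only as a sketch,
 * makes the question it answers explicit, namely "does this pipe, or
 * any pipe blended below it, have pending updates?":
 *
 *   static bool does_pipe_need_lock(struct pipe_ctx *pipe)
 *   {
 *       for (; pipe; pipe = pipe->bottom_pipe)
 *           if (pipe->update_flags.raw ||
 *               (pipe->plane_state && pipe->plane_state->update_flags.raw))
 *               return true;
 *       return false;
 *   }
 *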
*/ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream) + context->res_ctx.pipe_ctx[i].stream_res.gsl_group = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group; + + /* Set pipe update flags and lock pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + &context->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (!context->res_ctx.pipe_ctx[i].top_pipe && + does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream || - !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg)) - continue; + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, true); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true); + pipe_locked[i] = true; + } - pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( - pipe_ctx->plane_res.hubp, - &pipe_ctx->dlg_regs, - &pipe_ctx->ttu_regs); + /* OTG blank before disabling all front ends */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + && !context->res_ctx.pipe_ctx[i].top_pipe + && !context->res_ctx.pipe_ctx[i].prev_odm_pipe + && context->res_ctx.pipe_ctx[i].stream) + dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); + + /* Disconnect mpcc */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { + dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } - if (interdependent_update) - lock_all_pipes(dc, context, false); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, false); + /* + * Program all updated pipes, order matters for mpcc setup. 
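 *
 * For orientation, the whole of dcn20_program_front_end_for_ctx is a
 * fixed phase sequence; the helper names below are hypothetical labels
 * for the loops in this function, not real symbols:
 *
 *   detect_pipe_changes();            // set per-pipe update_flags
 *   lock_pipes_needing_updates();
 *   blank_otg_for_disabled_trees();   // OTG blank before teardown
 *   disconnect_unused_mpcc();
 *   program_pipes_top_to_bottom();    // this loop
 *   unlock_pipes();
 *   disable_planes_marked_disable();
 *   wait_for_no_pending_flips();      // plus the DCN2.1 WM workaround
 *
 * Within the programming loop itself: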
Start with + * top pipe and program all pipes that follow in order + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe) { + while (pipe) { + dcn20_program_pipe(dc, pipe, context); + pipe = pipe->bottom_pipe; + } + /* Program secondary blending tree and writeback pipes */ + pipe = &context->res_ctx.pipe_ctx[i]; + if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 + && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) + && dc->hwss.program_all_writeback_pipes_in_tree) + dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + } + } + /* Unlock all locked pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) - if (removed_pipe[i]) - dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + if (pipe_locked[i]) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, false); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) + dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -1239,15 +1515,22 @@ static void dcn20_apply_ctx_for_surface( * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which * is unsupported on DCN. */ - i = 0; - if (num_planes > 0 && top_pipe_to_program && - (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) { - while (i < TIMEOUT_FOR_PIPE_ENABLE_MS && - top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) { - i += 1; - msleep(1); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { + struct hubp *hubp = pipe->plane_res.hubp; + int j = 0; + + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + msleep(1); } } + + /* WA to apply WM setting*/ + if (dc->hwseq->wa.DEGVIDCN21) + dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); } @@ -1319,8 +1602,12 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + if (pipe_ctx->prev_odm_pipe == NULL) dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1337,7 +1624,8 @@ bool dcn20_update_bandwidth( static void dcn20_enable_writeback( struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info) + struct dc_writeback_info *wb_info, + struct dc_state *context) { struct dwbc *dwb; struct mcif_wb *mcif_wb; @@ -1354,7 +1642,7 @@ static void dcn20_enable_writeback( optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); - mcif_wb->funcs->config_mcif_arb(mcif_wb, 
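/*
 * The pending-flip wait a few lines up is a bounded sleeping poll: at
 * most TIMEOUT_FOR_PIPE_ENABLE_MS iterations of msleep(1) while the
 * HUBP still reports a flip in flight, and a timeout is deliberately
 * not treated as fatal. The same pattern as a generic sketch, with a
 * hypothetical predicate callback:
 *
 *   static bool poll_done(bool (*done)(void *ctx), void *ctx,
 *                         unsigned int timeout_ms)
 *   {
 *       unsigned int i;
 *
 *       for (i = 0; i < timeout_ms && !done(ctx); i++)
 *           msleep(1);       // sleeps, so never call in atomic context
 *       return done(ctx);    // final check after the last sleep
 *   }
 */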
&dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); /* Enable MCIF_WB */ mcif_wb->funcs->enable_mcif(mcif_wb); /* Enable DWB */ @@ -1702,6 +1990,28 @@ static void dcn20_reset_hw_ctx_wrap( } } +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + const struct tg_color pipe_colors[6] = { + {MAX_TG_COLOR_VALUE, 0, 0}, // red + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow + {0, MAX_TG_COLOR_VALUE, 0}, // blue + {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple + {0, 0, MAX_TG_COLOR_VALUE}, // green + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange + }; + + struct pipe_ctx *top_pipe = pipe_ctx; + + while (top_pipe->top_pipe) { + top_pipe = top_pipe->top_pipe; + } + + *color = pipe_colors[top_pipe->pipe_idx]; +} + static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1719,6 +2029,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { dcn10_get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); + } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { + dcn20_get_mpctree_visual_confirm_color( + pipe_ctx, &blnd_cfg.black_color); } if (per_pixel_alpha) @@ -1919,8 +2232,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - if (link->dc->hwss.program_dmdata_engine) - link->dc->hwss.program_dmdata_engine(pipe_ctx); + if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { + if (link->dc->hwss.program_dmdata_engine) + link->dc->hwss.program_dmdata_engine(pipe_ctx); + } link->dc->hwss.update_info_frame(pipe_ctx); @@ -2095,7 +2410,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; - dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface; + dc->hwss.apply_ctx_for_surface = NULL; + dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 92ab3dd91814..3098f1049ed7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -96,4 +96,20 @@ void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); void dcn20_display_init(struct dc *dc); +void dcn20_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_plane( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +bool dcn20_set_blend_lut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color 
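/*
 * dcn20_get_mpctree_visual_confirm_color above keys the debug border
 * color off the top-most pipe of the blend tree, so every plane that
 * blends into one MPC tree gets the same color and tree membership is
 * visible on screen. The core of it is just the upward walk:
 *
 *   struct pipe_ctx *top = pipe_ctx;
 *
 *   while (top->top_pipe)
 *       top = top->top_pipe;          // climb to the tree root
 *   *color = pipe_colors[top->pipe_idx];
 *
 * This assumes pipe_idx stays within the six-entry table, which holds
 * for these pools' pipe counts.
 */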
*color); #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 3736b5548a25..0c98a0bbbd14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -91,6 +91,13 @@ struct mpll_cfg { uint32_t ref_range; uint32_t ref_clk; bool hdmimode_enable; + bool sup_pre_hp; + bool dp_tx0_vergdrv_byp; + bool dp_tx1_vergdrv_byp; + bool dp_tx2_vergdrv_byp; + bool dp_tx3_vergdrv_byp; + + }; struct dpcssys_phy_seq_cfg { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 2137e2be2140..3b613fb93ef8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 2; else *num_of_src_opp = 1; + + /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ + if (*src_opp_id_1 == 0xf) + *num_of_src_opp = 1; } void optc2_set_dwb_source(struct timing_generator *optc, @@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, - .is_matching_timing = optc1_is_matching_timing + .get_hw_timing = optc1_get_hw_timing, }; void dcn20_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 5a2763daff4d..bbd1c98564be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = { }; static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT) + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK) + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN10 }; #define dwbc_regs_dcn2(id)\ @@ -732,6 +734,42 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + default: + ASSERT(0); + return 0; + } +} + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ @@ -814,7 +852,7 @@ static const struct resource_caps res_cap_nv14 = { .num_audio = 6, .num_stream_encoder = 5, .num_pll = 5, - .num_dwb = 0, + .num_dwb = 1, .num_ddc = 5, }; @@ -825,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, 
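/*
 * map_transmitter_id_to_phy_instance, added above, decouples the
 * logical transmitter id from the PHY register-bank index used to pick
 * link_enc_regs[]; note the break after each return is unreachable.
 * If (and only if) the TRANSMITTER_UNIPHY_A..F enum values are
 * contiguous, an equivalent arithmetic form would be:
 *
 *   static int map_transmitter_id_to_phy_instance(enum transmitter t)
 *   {
 *       if (t >= TRANSMITTER_UNIPHY_A && t <= TRANSMITTER_UNIPHY_F)
 *           return t - TRANSMITTER_UNIPHY_A;
 *       ASSERT(0);
 *       return 0;
 *   }
 *
 * The explicit switch trades that assumption for a few more lines.
 */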
.performance_trace = false, @@ -922,7 +960,10 @@ struct dce_aux *dcn20_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1042,14 +1083,18 @@ struct link_encoder *dcn20_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc20) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn20_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1107,6 +1152,11 @@ struct stream_encoder *dcn20_stream_encoder_create( if (!enc1) return NULL; + if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { + if (eng_id >= ENGINE_ID_DIGD) + eng_id++; + } + dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -1154,6 +1204,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn20_hwseq_create, }; +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + void dcn20_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); @@ -1596,7 +1648,7 @@ static void swizzle_to_dml_params( } } -static bool dcn20_split_stream_for_odm( +bool dcn20_split_stream_for_odm( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, @@ -1617,7 +1669,6 @@ static bool dcn20_split_stream_for_odm( next_odm_pipe->stream_res.dsc = NULL; #endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { - ASSERT(!next_odm_pipe->next_odm_pipe); next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; } @@ -1674,7 +1725,7 @@ static bool dcn20_split_stream_for_odm( return true; } -static void dcn20_split_stream_for_mpc( +void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *primary_pipe, @@ -1760,7 +1811,7 @@ int dcn20_populate_dml_pipes_from_context( pipe_cnt = i; continue; } - if (!resource_are_streams_timing_synchronizable( + if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, res_ctx->pipe_ctx[i].stream)) { synchronized_vblank = false; @@ -1892,7 +1943,7 @@ int dcn20_populate_dml_pipes_from_context( break; case PIXEL_ENCODING_YCBCR420: pipes[pipe_cnt].dout.output_format = dm_420; - pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2; + pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; break; case PIXEL_ENCODING_YCBCR422: if (true) /* todo */ @@ -1906,6 +1957,11 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; +#endif + /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* @@ -2127,7 +2183,7 @@ void dcn20_set_mcif_arb_params( } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static bool 
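/*
 * Two of the hunks above move pixel-rate math onto the FPU on purpose.
 * output_bpp is a floating-point field in the DML structs, and
 * (output_bpc * 3.0) / 2 keeps any fractional bits-per-pixel value
 * that the old integer division would have truncated. The DSC path is
 * a unit conversion: dsc_cfg.bits_per_pixel is stored in 1/16-bpp
 * fixed point, so dividing by 16.0 recovers the real target rate, e.g.:
 *
 *   uint32_t bits_per_pixel = 128;       // fixed point, 1/16 bpp units
 *   double bpp = bits_per_pixel / 16.0;  // = 8.0 bpp
 *
 * The .0 literals are what force floating-point division here.
 */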
dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; @@ -2162,7 +2218,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } #endif -static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) @@ -2202,7 +2258,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { - if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) { + if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { @@ -2238,29 +2295,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, return secondary_pipe; } -bool dcn20_fast_validate_bw( +void dcn20_merge_pipes_for_validate( struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out) + struct dc_state *context) { - bool out = false; - - int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; - bool odm_capable = context->bw_ctx.dml.ip.odm_capable; - bool force_split = false; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - bool failed_non_odm_dsc = false; -#endif - int split_threshold = dc->res_pool->pipe_count / 2; - bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - - - ASSERT(pipes); - if (!pipes) - return false; + int i; /* merge previously split odm pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2315,51 +2354,19 @@ bool dcn20_fast_validate_bw( if (pipe->plane_state) resource_build_scaling_params(pipe); } +} - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); - else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); - - *pipe_cnt_out = pipe_cnt; - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - context->bw_ctx.dml.ip.odm_capable = 0; - - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.ip.odm_capable = odm_capable; - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - /* 1 dsc per stream dsc validation */ - if (vlevel <= context->bw_ctx.dml.soc.num_states) - if (!dcn20_validate_dsc(dc, context)) { - failed_non_odm_dsc = true; - vlevel = context->bw_ctx.dml.soc.num_states + 1; - } -#endif - - if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable) - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - if (vlevel > context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold) - || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold)) - context->commit_hints.full_update_needed = true; - - /*initialize pipe_just_split_from to invalid idx*/ - for (i = 0; i < MAX_PIPES; i++) - pipe_split_from[i] = -1; +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + bool *split) +{ + int i, pipe_idx, vlevel_split; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - 
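/*
 * The avoid_split scan below searches, per pipe, for the lowest
 * voltage level at or above the current one where DML reports a single
 * DPP is enough (NoOfDPP == 1); if no such level exists the pipe has
 * to split regardless. Sketch of the search, with dml_no_of_dpp()
 * standing in for the DML table lookup:
 *
 *   int v;
 *
 *   for (v = vlevel; v <= num_states; v++)
 *       if (dml_no_of_dpp(v, pipe_idx) == 1)
 *           break;              // unsplit works from this level up
 *   if (v > num_states)
 *       v = vlevel;             // impossible: keep the original level
 *
 * i.e. the policy trades a higher voltage level for fewer active pipes.
 */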
/* Single display only conditionals get set here */ + /* Single display loop, exits if there is more than one display */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; bool exit_loop = false; @@ -2386,38 +2393,107 @@ bool dcn20_fast_validate_bw( if (exit_loop) break; } - - if (context->stream_count > split_threshold) + /* TODO: fix dc bugs and remove this split threshold thing */ + if (context->stream_count > dc->res_pool->pipe_count / 2) avoid_split = true; - vlevel_unsplit = vlevel; + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + pipe_idx++; + } + context->bw_ctx.dml.vba.maxMpcComb = 0; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!context->res_ctx.pipe_ctx[i].stream) continue; - for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++) - if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1) - break; + + if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1) + split[i] = true; + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = true; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = true; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; + } + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; pipe_idx++; } + return vlevel; +} + +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out) +{ + bool out = false; + bool split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + + ASSERT(pipes); + if (!pipes) + return false; + + dcn20_merge_pipes_for_validate(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); + + *pipe_cnt_out = pipe_cnt; + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (vlevel > context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split); + + /*initialize pipe_just_split_from to invalid idx*/ + for (i = 0; i < MAX_PIPES; i++) + pipe_split_from[i] = -1; + for (i = 0, pipe_idx = -1; i < 
dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - bool need_split = true; - bool need_split3d; if (!pipe->stream || pipe_split_from[i] >= 0) continue; pipe_idx++; - if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { - force_split = true; - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; - } - if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); @@ -2435,40 +2511,26 @@ bool dcn20_fast_validate_bw( if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) continue; - need_split3d = ((pipe->stream->view_format == - VIEW_3D_FORMAT_SIDE_BY_SIDE || - pipe->stream->view_format == - VIEW_3D_FORMAT_TOP_AND_BOTTOM) && - (pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_TOP_AND_BOTTOM || - pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_SIDE_BY_SIDE)); - - if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) { - need_split = false; - vlevel = vlevel_unsplit; - context->bw_ctx.dml.vba.maxMpcComb = 0; - } else - need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2; - /* We do not support mpo + odm at the moment */ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) goto validate_fail; - if (need_split3d || need_split || force_split) { + if (split[i]) { if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { /* pipe not split previously needs split */ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe || force_split); - if (!hsplit_pipe) + ASSERT(hsplit_pipe); + if (!hsplit_pipe) { + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2; continue; - + } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); } else dcn20_split_stream_for_mpc( &context->res_ctx, dc->res_pool, @@ -2482,7 +2544,7 @@ bool dcn20_fast_validate_bw( } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ - if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) { + if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; @@ -2501,7 +2563,7 @@ validate_out: return out; } -void dcn20_calculate_wm( +static void dcn20_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, @@ -2522,7 +2584,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + 
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipe_idx++; @@ -2531,7 +2593,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; } @@ -2574,6 +2636,11 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; @@ -2585,6 +2652,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; @@ -2596,6 +2667,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2605,6 +2680,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 
get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif } void dcn20_calculate_dlg_params( @@ -2624,7 +2703,7 @@ void dcn20_calculate_dlg_params( context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; - context->bw_ctx.bw.dcn.clk.fclk_khz = 0; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; @@ -2640,8 +2719,8 @@ void dcn20_calculate_dlg_params( continue; if (!visited[pipe_idx]) { - display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src; - display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest; + display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src; + display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest; dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; @@ -2801,7 +2880,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, ASSERT(false); restore_dml_state: - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; return voltage_supported; @@ -2887,6 +2965,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; @@ -2895,8 +2974,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) int i; uint32_t pipe_count = pool->res_cap->num_dwb; - ASSERT(pipe_count > 0); - for (i = 0; i < pipe_count; i++) { struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), GFP_KERNEL); @@ -2942,7 +3019,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); @@ -2957,7 +3034,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) return pp_smu; } -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -2965,7 +3042,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) } } -static void cap_soc_clocks( +void dcn20_cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks) { @@ 
-3032,10 +3109,10 @@ static void cap_soc_clocks( } } -static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; + struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; int i; int num_calculated_states = 0; int min_dcfclk = 0; @@ -3043,12 +3120,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (num_states == 0) return; + memset(calculated_states, 0, sizeof(calculated_states)); + if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; - else - // Accounting for SOC/DCF relationship, we can go as high as - // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. - min_dcfclk = 507; + else { + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) + min_dcfclk = 310; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. + min_dcfclk = 506; + } for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; @@ -3088,7 +3171,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ bb->clock_limits[num_calculated_states].state = bb->num_states; } -static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns @@ -3287,14 +3370,14 @@ static bool init_soc_bounding_box(struct dc *dc, } if (clock_limits_available && uclk_states_available && num_states) - update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); + dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); else if (clock_limits_available) - cap_soc_clocks(loaded_bb, max_clocks); + dcn20_cap_soc_clocks(loaded_bb, max_clocks); } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; - patch_bounding_box(dc, loaded_bb); + dcn20_patch_bounding_box(dc, loaded_bb); return true; } @@ -3340,6 +3423,7 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.hw_3d_lut = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 44f95aa0d61e..fef473d68a4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst); void dcn20_dsc_destroy(struct display_stream_compressor **dsc); -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx); -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); - +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); +void dcn20_cap_soc_clocks( + struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table max_clocks); +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table 
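/*
 * dcn20_patch_bounding_box above brackets its work with
 * kernel_fpu_begin()/kernel_fpu_end(): the kernel does not preserve
 * FPU/SIMD state for kernel code by default, so anything built with
 * -mhard-float/-msse (as these DML files are) must claim the FPU
 * explicitly. The shape of the pattern:
 *
 *   kernel_fpu_begin();   // save user FPU state; FP/SIMD now allowed
 *   bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
 *   kernel_fpu_end();     // restore; no FP use past this point
 *
 * Preemption is disabled in between and sleeping is not allowed, which
 * is why these sections are kept short.
 */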
*max_clocks, unsigned int *uclk_states, unsigned int num_states); struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst); @@ -116,6 +119,31 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +void dcn20_merge_pipes_for_validate( + struct dc *dc, + struct dc_state *context); +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + bool *split); +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); +#endif +void dcn20_split_stream_for_mpc( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe); +bool dcn20_split_stream_for_odm( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *prev_odm_pipe, + struct pipe_ctx *next_odm_pipe); +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, + struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *primary_pipe); bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 5ab9d6240498..4b3401616434 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .set_avmute = enc1_stream_encoder_set_avmute, .dig_connect_to_otg = enc1_dig_connect_to_otg, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = + enc1_stream_encoder_dp_get_pixel_format, + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .enc_read_state = enc2_read_state, .dp_set_dsc_config = enc2_dp_set_dsc_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ef673bffc241..14113ccf498d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,17 +1,22 @@ # # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o +DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). 
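# For context: the kernel builds x86 with 8-byte stack alignment
# (-mpreferred-stack-boundary=3), while SSE code wants 16 bytes.
# GCC 7.1+ can realign the stack locally, so plain -msse2 is enough
# there; older GCC instead gets this whole object compiled with
# 16-byte alignment, i.e. conceptually:
#
#   old GCC:  CFLAGS += -mpreferred-stack-boundary=4   # 2^4 = 16 bytes
#   newer:    CFLAGS += -msse2
#
# matching the conditional that follows.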
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4 +else CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index d1266741763b..f546260c15b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include <linux/delay.h> #include "dm_services.h" #include "dcn20/dcn20_hubbub.h" #include "dcn21_hubbub.h" @@ -51,7 +52,7 @@ #ifdef NUM_VMID #undef NUM_VMID #endif -#define NUM_VMID 1 +#define NUM_VMID 16 static uint32_t convert_and_clamp( uint32_t wm_ns, @@ -71,56 +72,76 @@ static uint32_t convert_and_clamp( void dcn21_dchvm_init(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; //Init DCHVM block REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); //Poll until RIOMMU_ACTIVE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100); + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); - //Reflect the power status of DCHUBBUB - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + if (riommu_active) + break; + else + udelay(5); + } + + if (riommu_active) { + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); - //Start rIOMMU prefetching - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); - // Enable dynamic clock gating - REG_UPDATE_4(DCHVM_CLK_CTRL, - HVM_DISPCLK_R_GATE_DIS, 0, - HVM_DISPCLK_G_GATE_DIS, 0, - HVM_DCFCLK_R_GATE_DIS, 0, - HVM_DCFCLK_G_GATE_DIS, 0); + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); - //Poll until HOSTVM_PREFETCH_DONE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + } } -static int hubbub21_init_dchub(struct hubbub *hubbub, +int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + 
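/*
 * The shifts in this function encode CPU physical addresses into the
 * registers' native units: the frame-buffer/AGP aperture registers
 * take the address in 16 MiB units (hence >> 24), while the GART page
 * table bounds are programmed as 4 KiB page-frame numbers (>> 12).
 * Worked example, assuming suitably aligned addresses:
 *
 *   uint64_t fb_base = 0x400000000ULL;     // 16 GiB
 *   uint32_t fb_field = fb_base >> 24;     // 0x400, in 16 MiB units
 *
 *   uint64_t pt_start = 0x100000000ULL;
 *   uint32_t pfn = pt_start >> 12;         // 4 KiB page-frame number
 *
 * Low bits dropped by the shift must already be zero, i.e. the
 * apertures are 16 MiB aligned and the tables page aligned.
 */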
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } dcn21_dchvm_init(hubbub); return NUM_VMID; } -static void hubbub21_program_urgent_watermarks( +void hubbub21_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { + hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { @@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { + hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } + /* clock state C */ if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; @@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { + hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } + /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; @@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); } + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { + hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } } -static void hubbub21_program_stutter_watermarks( +void hubbub21_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks( } } -static void hubbub21_program_pstate_watermarks( +void hubbub21_program_pstate_watermarks( struct hubbub *hubbub, struct 
dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); } +void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); +} static const struct hubbub_funcs hubbub21_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub21_init_dchub, - .init_vm_ctx = NULL, + .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub2_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub21_program_watermarks, + .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa, }; void hubbub21_construct(struct dcn20_hubbub *hubbub, @@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index 6ff3cdb89178..c4840dfb1fa5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -36,6 +36,10 @@ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ SR(DCHVM_CTRL0), \ SR(DCHVM_MEM_CTRL), \ @@ -44,16 +48,9 @@ SR(DCHVM_RIOMMU_STAT0) #define HUBBUB_REG_LIST_DCN21()\ - HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_REG_LIST_DCN20_COMMON(), \ HUBBUB_SR_WATERMARK_REG_LIST(), \ - HUBBUB_HVM_REG_LIST(), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE),\ - SR(DCN_VM_FB_LOCATION_TOP),\ - SR(DCN_VM_FB_OFFSET),\ - SR(DCN_VM_AGP_BOT),\ - SR(DCN_VM_AGP_TOP),\ - SR(DCN_VM_AGP_BASE) + HUBBUB_HVM_REG_LIST() #define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \ @@ -102,7 +99,7 @@ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh) #define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\ - HUBBUB_MASK_SH_LIST_HVM(mask_sh),\ + HUBBUB_MASK_SH_LIST_HVM(mask_sh), \ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -114,11 +111,28 @@ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh) void dcn21_dchvm_init(struct hubbub *hubbub); +int hubbub21_init_dchub(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); void hubbub21_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); +void hubbub21_program_urgent_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_stutter_watermarks( + struct hubbub *hubbub, + struct 
dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_pstate_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); void hubbub21_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index a00af513aa2b..2f5a5867e674 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -22,6 +22,8 @@ * Authors: AMD * */ + +#include "dcn10/dcn10_hubp.h" #include "dcn21_hubp.h" #include "dm_services.h" @@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, - .hubp_program_surface_config = hubp2_program_surface_config, + .hubp_program_surface_config = hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp21_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c new file mode 100644 index 000000000000..b25215cadf85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -0,0 +1,122 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dm_helpers.h" +#include "core_types.h" +#include "resource.h" +#include "dce/dce_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "vmid.h" +#include "reg_helper.h" +#include "hw/clk_mgr.h" + + +#define DC_LOGGER_INIT(logger) + +#define CTX \ + hws->ctx +#define REG(reg)\ + hws->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hws->shifts->field_name, hws->masks->field_name + +/* Temporary read settings, future will get values from kmd directly */ +static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config, + struct dce_hwseq *hws) +{ + uint32_t page_table_base_hi; + uint32_t page_table_base_lo; + + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi); + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo); + + config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo; + +} + +static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +{ + struct dcn_hubbub_phys_addr_config config; + + config.system_aperture.fb_top = pa_config->system_aperture.fb_top; + config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; + config.system_aperture.fb_base = pa_config->system_aperture.fb_base; + config.system_aperture.agp_top = pa_config->system_aperture.agp_top; + config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; + config.system_aperture.agp_base = pa_config->system_aperture.agp_base; + config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; + config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; + config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + + mmhub_update_page_table_config(&config, hws); + + return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); +} + +// work around for Renoir s0i3, if register is programmed, bypass golden init. + +static bool dcn21_s0i3_golden_init_wa(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t value = 0; + + value = REG_READ(MICROSECOND_TIME_BASE_DIV); + + return value != 0x00120464; +} + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); +} + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); +} + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ + dcn20_hw_sequencer_construct(dc); + dc->hwss.init_sys_ctx = dcn21_init_sys_ctx; + dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa; + dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state; + dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h new file mode 100644 index 000000000000..be67b62e6fb1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -0,0 +1,33 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN21_H__ +#define __DC_HWSS_DCN21_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c new file mode 100644 index 000000000000..e8a504ca5890 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -0,0 +1,470 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include <linux/delay.h> +#include "core_types.h" +#include "link_encoder.h" +#include "dcn21_link_encoder.h" +#include "stream_encoder.h" + +#include "i2caux_interface.h" +#include "dc_bios_types.h" + +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + +#define IND_REG(index) \ + (enc10->link_regs->index) + +static struct mpll_cfg dcn21_mpll_cfg_ref[] = { + // RBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 238, + .mpllb_fracn_en = 0, + .mpllb_fracn_quot = 0, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 44237, + .mpllb_ssc_stepsize = 59454, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 2, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 2, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + // HBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 1, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR2 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR3 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 304, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 49152, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 55296, + .mpllb_ssc_stepsize = 74318, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 1, + .mpllb_ana_cp_int = 7, + .mpllb_ana_cp_prop = 16, + .hdmi_pixel_clk_div = 0, + }, +}; + + +static bool update_cfg_data( + struct dcn10_link_encoder *enc10, + const struct dc_link_settings *link_settings, + struct dpcssys_phy_seq_cfg *cfg) +{ + int i; + + cfg->load_sram_fw = false; + cfg->use_calibration_setting = true; + + //TODO: need to implement a proper lane mapping for Renoir. 
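update_cfg_data() below selects one of the four reference MPLL settings by link rate. The same mapping can be expressed table-driven; in this sketch the enum and struct are simplified stand-ins for the driver's LINK_RATE_* values and struct mpll_cfg, with a few representative field values copied from the table above:

```c
/* Sketch: table-driven selection of the per-rate MPLL config. */
#include <stdio.h>

enum example_link_rate { RATE_RBR, RATE_HBR, RATE_HBR2, RATE_HBR3, RATE_COUNT };

struct example_mpll_cfg { unsigned multiplier, fracn_quot, tx_clk_div; };

static const struct example_mpll_cfg mpll_by_rate[RATE_COUNT] = {
        [RATE_RBR]  = { .multiplier = 238, .fracn_quot = 0,     .tx_clk_div = 2 },
        [RATE_HBR]  = { .multiplier = 192, .fracn_quot = 32768, .tx_clk_div = 1 },
        [RATE_HBR2] = { .multiplier = 192, .fracn_quot = 32768, .tx_clk_div = 0 },
        [RATE_HBR3] = { .multiplier = 304, .fracn_quot = 49152, .tx_clk_div = 0 },
};

static int pick_mpll_cfg(int rate, struct example_mpll_cfg *out)
{
        if (rate < 0 || rate >= RATE_COUNT)
                return -1;              /* unsupported rate, as in the driver */
        *out = mpll_by_rate[rate];
        return 0;
}

int main(void)
{
        struct example_mpll_cfg cfg;

        if (pick_mpll_cfg(RATE_HBR2, &cfg) == 0)
                printf("HBR2: mult=%u quot=%u div=%u\n",
                       cfg.multiplier, cfg.fracn_quot, cfg.tx_clk_div);
        return 0;
}
```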
+ for (i = 0; i < 4; i++) + cfg->lane_en[i] = true; + + switch (link_settings->link_rate) { + case LINK_RATE_LOW: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[0]; + break; + case LINK_RATE_HIGH: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[1]; + break; + case LINK_RATE_HIGH2: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[2]; + break; + case LINK_RATE_HIGH3: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[3]; + break; + default: + DC_LOG_ERROR("%s: No supported link rate found %X!\n", + __func__, link_settings->link_rate); + return false; + } + + return true; +} + +void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value); + + if (!value && link_settings->lane_count > LANE_COUNT_TWO) + link_settings->lane_count = LANE_COUNT_TWO; +} + +bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value); + + // if value == 1 alt mode is disabled, otherwise it is enabled + return !value; +} + +bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + int value; + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + + if (value == 1) { + ASSERT(0); + return false; + } + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 0); + + udelay(40); + + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + if (value == 1) { + ASSERT(0); + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + return false; + } + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1); + + return true; +} + + + +static void dcn21_link_encoder_release_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0); + +} + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10; + struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg; + + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { + dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source); + return; + } + + if (!update_cfg_data(enc10, link_settings, cfg)) + return; + + enc1_configure_encoder(enc10, link_settings); + + dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT); + +} + +void dcn21_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); +} + +void dcn21_link_encoder_disable_output( + struct link_encoder *enc, + enum signal_type signal) +{ + dcn10_link_encoder_disable_output(enc, signal); + + if (dc_is_dp_signal(signal)) + dcn21_link_encoder_release_phy(enc); +} + + +static const struct link_encoder_funcs dcn21_link_enc_funcs = { +#ifdef 
CONFIG_DRM_AMD_DC_DSC_SUPPORT + .read_state = link_enc2_read_state, +#endif + .validate_output_with_stream = + dcn10_link_encoder_validate_output_with_stream, + .hw_init = enc2_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn21_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output, + .disable_output = dcn21_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + .is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy, + .fec_set_enable = enc2_fec_set_enable, + .fec_set_ready = enc2_fec_set_ready, + .fec_is_active = enc2_fec_is_active, + .get_dig_frontend = dcn10_get_dig_frontend, + .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn21_link_encoder_get_max_link_cap, +}; + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + struct dcn10_link_encoder *enc10 = &enc21->enc10; + + enc10->base.funcs = &dcn21_link_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc10->base.features.flags.bits. + DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. + * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. 
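The acquire/release pair above guards USB-C DP alt mode: bail out if the PHY already reports DPALT_DISABLE, drop the driver's acknowledge bit, wait 40 us, and re-check before enabling the DP reference clock. A behavioural sketch, assuming a plain struct can stand in for the RDPCSTX_PHY_CNTL6 register and usleep() for udelay():

```c
/* Behavioural sketch of the alt-mode acquire handshake above. */
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

struct fake_phy_cntl6 {
        unsigned dpalt_disable;      /* 1 = PHY no longer in DP alt mode */
        unsigned dpalt_disable_ack;  /* driver's acknowledgement bit */
        unsigned dp_ref_clk_en;
};

static bool acquire_phy(struct fake_phy_cntl6 *reg, bool is_usb_c)
{
        if (is_usb_c) {
                if (reg->dpalt_disable)
                        return false;           /* alt mode already gone */

                reg->dpalt_disable_ack = 0;     /* withdraw our ack */
                usleep(40);                     /* the driver uses udelay(40) */

                if (reg->dpalt_disable) {       /* lost alt mode meanwhile */
                        reg->dpalt_disable_ack = 1;
                        return false;
                }
        }
        reg->dp_ref_clk_en = 1;                 /* safe to enable ref clock */
        return true;
}

int main(void)
{
        struct fake_phy_cntl6 reg = { .dpalt_disable = 0 };

        printf("acquired: %s\n", acquire_phy(&reg, true) ? "yes" : "no");
        return 0;
}
```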
+ * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc10->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc10->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc10->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc10->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc10->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.DP_IS_USB_C = + bp_cap_info.DP_IS_USB_C; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc10->base.ctx->dc->debug.hdmi20_disable) { + enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h new file mode 100644 index 000000000000..1d7a1a51f13d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -0,0 +1,61 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN21_H__ +#define __DC_LINK_ENCODER__DCN21_H__ + +#include "dcn20/dcn20_link_encoder.h" + +struct dcn21_link_encoder { + struct dcn10_link_encoder enc10; + struct dpcssys_phy_seq_cfg phy_seq_cfg; +}; + +#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \ + SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SR(RDPCSTX0_RDPCSTX_SCRATCH) + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source); + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index de182185fe1f..459bd9a5caed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "dc.h" @@ -42,11 +40,11 @@ #include "irq/dcn21/irq_service_dcn21.h" #include "dcn20/dcn20_dpp.h" #include "dcn20/dcn20_optc.h" -#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn20/dcn20_opp.h" #include "dcn20/dcn20_dsc.h" -#include "dcn20/dcn20_link_encoder.h" +#include "dcn21/dcn21_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" @@ -84,8 +82,9 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { - .gpuvm_enable = 0, - .hostvm_enable = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 1, .gpuvm_max_page_table_levels = 1, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 2, @@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 4, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, /*Extra state, no dispclk ramping*/ @@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 5, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, }, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -350,6 +349,30 @@ static const 
struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_6) }; +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN10_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN20_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN20(_MASK) +}; + #ifdef CONFIG_DRM_AMD_DC_DMUB static const struct dcn21_dmcub_registers dmcub_regs = { DMCUB_REG_LIST_DCN() @@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(4), }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) }; @@ -636,6 +667,11 @@ static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN20(_MASK) }; +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -683,7 +719,10 @@ static struct dce_aux *dcn21_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -726,11 +765,12 @@ static const struct resource_caps res_cap_rn = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, - .num_audio = 6, // 6 audio endpoints. 4 audio streams + .num_audio = 4, // 4 audio endpoints. 
4 audio streams .num_stream_encoder = 5, .num_pll = 5, // maybe 3 because the last two used for USB-c .num_dwb = 1, .num_ddc = 5, + .num_vmid = 1, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, #endif @@ -796,15 +836,15 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 5120,/*upto 5K*/ + .max_downscale_src_width = 3840, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, - .disable_48mhz_pwrdwn = true, + .disable_48mhz_pwrdwn = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -939,7 +979,7 @@ static void destruct(struct dcn21_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.pp_smu != NULL) - dcn20_pp_smu_destroy(&pool->base.pp_smu); + dcn21_pp_smu_destroy(&pool->base.pp_smu); } @@ -969,11 +1009,35 @@ static void calculate_wm_set_for_vlevel( #if defined(CONFIG_DRM_AMD_DC_DCN2_1) wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; #endif dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; } +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +{ + kernel_fpu_begin(); + if (dc->bb_overrides.sr_exit_time_ns) { + bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { + bb->sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + + if (dc->bb_overrides.urgent_latency_ns) { + bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if (dc->bb_overrides.dram_clock_change_latency_ns) { + bb->dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + kernel_fpu_end(); +} + void dcn21_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -988,6 +1052,8 @@ void dcn21_calculate_wm( ASSERT(bw_params); + patch_bounding_box(dc, &context->bw_ctx.dml.soc); + for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -1021,7 +1087,7 @@ void dcn21_calculate_wm( pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, + pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); } @@ -1271,6 +1337,12 @@ struct display_stream_compressor *dcn21_dsc_create( static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + /* + TODO: Fix this function to calcualte correct values. + There are known issues with this function currently + that will need to be investigated. Use hardcoded known good values for now. 
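patch_bounding_box() above converts nanosecond debug overrides into the microsecond doubles the bounding box stores, wrapped in kernel_fpu_begin()/kernel_fpu_end() because the kernel does not otherwise preserve FPU state. The same arithmetic as a user-space sketch (no FPU guard needed there); the structs are trimmed to two representative overrides:

```c
/* Sketch of the ns -> us override conversion guarded by kernel_fpu_*() above. */
#include <stdio.h>

struct example_bb { double sr_exit_time_us, urgent_latency_us; };
struct example_overrides { int sr_exit_time_ns, urgent_latency_ns; };

static void patch_bb(const struct example_overrides *ov, struct example_bb *bb)
{
        /* A zero override means "keep the per-ASIC default". */
        if (ov->sr_exit_time_ns)
                bb->sr_exit_time_us = ov->sr_exit_time_ns / 1000.0;
        if (ov->urgent_latency_ns)
                bb->urgent_latency_us = ov->urgent_latency_ns / 1000.0;
}

int main(void)
{
        /* Defaults taken from the dcn2_1 bounding box above. */
        struct example_bb bb = { .sr_exit_time_us = 12.5, .urgent_latency_us = 4.0 };
        struct example_overrides ov = { .sr_exit_time_ns = 14000 };

        patch_bb(&ov, &bb);
        printf("sr_exit=%.1fus urgent=%.1fus\n",
               bb.sr_exit_time_us, bb.urgent_latency_us);   /* 14.0, 4.0 */
        return 0;
}
```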
+ + struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; int i; @@ -1278,7 +1350,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_soc.num_chans = bw_params->num_channels; - dcn2_1_soc.num_states = 0; for (i = 0; i < clk_table->num_entries; i++) { @@ -1286,10 +1357,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - /* This is probably wrong, TODO: find correct calculation */ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; - dcn2_1_soc.num_states++; } + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; + dcn2_1_soc.num_states = i; + */ } /* Temporary Place holder until we can get them from fuse */ @@ -1317,32 +1389,42 @@ static struct dpm_clocks dummy_clocks = { }; -enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, +static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { return PP_SMU_RESULT_OK; } -enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, +static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, struct dpm_clocks *clock_table) { *clock_table = dummy_clocks; return PP_SMU_RESULT_OK; } -struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); - pp_smu->ctx.ver = PP_SMU_VER_RN; + if (!pp_smu) + return pp_smu; + + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) { + pp_smu->ctx.ver = PP_SMU_VER_RN; + pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; + pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + } else { - pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; - pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + dm_pp_get_funcs(ctx, pp_smu); + + if (pp_smu->ctx.ver != PP_SMU_VER_RN) + pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); + } return pp_smu; } -void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -1400,6 +1482,7 @@ static struct dce_hwseq *dcn21_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + hws->wa.DEGVIDCN21 = true; } return hws; } @@ -1418,10 +1501,152 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn21_hwseq_create, }; +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN10_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, 
B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E), +}; + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + default: + ASSERT(0); + return 0; + } +} + +static struct link_encoder *dcn21_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn21_link_encoder *enc21 = + kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc21) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn21_link_encoder_construct(enc21, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc21->enc10.base; +} +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* RV1 support max 4 pipes */ + value = value & 0xf; + return value; +} + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) +{ + uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes); + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + pipes[i].pipe.src.hostvm = 1; + pipes[i].pipe.src.gpuvm = 1; + } + + return pipe_cnt; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, - .link_enc_create = dcn20_link_encoder_create, + .link_enc_create = dcn21_link_encoder_create, .validate_bandwidth = dcn21_validate_bandwidth, + .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, @@ -1437,9 +1662,11 @@ static bool construct( struct dc *dc, struct dcn21_resource_pool *pool) { - int i; + int i, j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes; ctx->dc_bios->regs = &bios_regs; @@ -1457,7 +1684,9 @@ static bool construct( *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = 4; + /* max pipe num for ASIC before check pipe fuses */ + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 200; 
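read_pipe_fuses() above returns a bitmask of disabled pipes (the low four bits of CC_DC_PIPE_DIS), and construct() later subtracts one pipe per set bit and skips the fused HW instances while packing the survivors into consecutive logical slots. A compact sketch of that harvesting logic, with an illustrative fuse value:

```c
/* Sketch of pipe-fuse harvesting: count usable pipes, then map logical
 * pipe indices onto the surviving hardware instances. */
#include <stdio.h>

#define MAX_PIPES 4

int main(void)
{
        unsigned int pipe_fuses = 0x4 & 0xf;    /* example: pipe 2 fused off */
        int hw, logical = 0, num_pipes = MAX_PIPES;

        for (hw = 0; hw < MAX_PIPES; hw++)
                if (pipe_fuses & (1u << hw))
                        num_pipes--;

        printf("usable pipes: %d\n", num_pipes);

        for (hw = 0; hw < MAX_PIPES; hw++) {
                if (pipe_fuses & (1u << hw))
                        continue;               /* skip the fused ASIC instance */
                printf("logical pipe %d -> HW instance %d\n", logical, hw);
                logical++;
        }
        return 0;
}
```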
dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; @@ -1467,6 +1696,7 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1516,6 +1746,26 @@ static bool construct( goto create_fail; } + pool->base.dmcu = dcn20_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + #ifdef CONFIG_DRM_AMD_DC_DMUB pool->base.dmcub = dcn21_dmcub_create(ctx, &dmcub_regs, @@ -1530,6 +1780,14 @@ static bool construct( pool->base.pp_smu = dcn21_pp_smu_create(ctx); + num_pipes = dcn2_1_ip.max_num_dpp; + + for (i = 0; i < dcn2_1_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + dcn2_1_ip.max_num_dpp = num_pipes; + dcn2_1_ip.max_num_otg = num_pipes; + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); init_data.ctx = dc->ctx; @@ -1537,8 +1795,15 @@ static bool construct( if (!pool->base.irqs) goto create_fail; + j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) + continue; + pool->base.hubps[i] = dcn21_hubp_create(ctx, i); if (pool->base.hubps[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1562,6 +1827,23 @@ static bool construct( "DC: failed to create dpps!\n"); goto create_fail; } + + pool->base.opps[i] = dcn21_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + pool->base.timing_generators[i] = dcn21_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { @@ -1582,27 +1864,9 @@ static bool construct( pool->base.sw_i2cs[i] = NULL; } - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn21_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn21_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - - pool->base.timing_generator_count = i; + pool->base.timing_generator_count = j; + pool->base.pipe_count = j; + pool->base.mpcc_count = j; pool->base.mpc = dcn21_mpc_create(ctx); if (pool->base.mpc == NULL) { @@ -1645,7 +1909,7 @@ static bool construct( &res_create_funcs : &res_create_maximus_funcs))) goto create_fail; - dcn20_hw_sequencer_construct(dc); + dcn21_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h new file mode 100644 index 000000000000..626d22d437f4 
--- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -0,0 +1,49 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DM_CP_PSP_IF__H +#define DM_CP_PSP_IF__H + +struct dc_link; + +struct cp_psp_stream_config { + uint8_t otg_inst; + uint8_t link_enc_inst; + uint8_t stream_enc_inst; + void *dm_stream_ctx; + bool dpms_off; +}; + +struct cp_psp_funcs { + void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config); +}; + +struct cp_psp { + void *handle; + struct cp_psp_funcs funcs; +}; + + +#endif /* DM_CP_PSP_IF__H */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index b6b4333737f2..94b75e942607 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( /* * Polls for ACT (allocation change trigger) handled and */ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream); /* diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index c03a441ee638..ef7df9ef6d7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -249,10 +249,8 @@ struct pp_smu_funcs_nv { }; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) - #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 -#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4 +#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8 #define PP_SMU_NUM_FCLK_DPM_LEVELS 4 #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 @@ -288,7 +286,6 @@ struct pp_smu_funcs_rn { enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp, struct dpm_clocks *clock_table); }; -#endif struct pp_smu_funcs { struct pp_smu ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 5b2a65b42403..8df251626e22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -24,15 +24,20 @@ # It provides the general basic services required by other DAL # subcomponents. 
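The new dm_cp_psp.h interface above lets the display manager register an opaque handle plus an update_stream_config() callback that DC invokes with a snapshot of the stream topology. A sketch of that calling convention; the funcs wrapper struct is flattened here for brevity and the DM-side callback is illustrative:

```c
/* Sketch of the cp_psp hook: DM provides handle + callback, DC calls it. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct cp_psp_stream_config {
        uint8_t otg_inst, link_enc_inst, stream_enc_inst;
        void *dm_stream_ctx;
        bool dpms_off;
};

struct cp_psp {
        void *handle;
        void (*update_stream_config)(void *handle,
                                     struct cp_psp_stream_config *config);
};

static void dm_update_stream_config(void *handle,
                                    struct cp_psp_stream_config *config)
{
        printf("psp update: otg=%u dpms_off=%d (dm handle %p)\n",
               (unsigned)config->otg_inst, (int)config->dpms_off, handle);
}

int main(void)
{
        struct cp_psp cp = { .handle = (void *)0x1234,
                             .update_stream_config = dm_update_stream_config };
        struct cp_psp_stream_config cfg = { .otg_inst = 0, .dpms_off = false };

        if (cp.update_stream_config)    /* the DM may leave the hook unset */
                cp.update_stream_config(cp.handle, &cfg);
        return 0;
}
```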
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +dml_ccflags := -mhard-float -msse -dml_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +dml_ccflags += -mpreferred-stack-boundary=4 +else dml_ccflags += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 649883777f62..6c6c486b774a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 0fafd693ffb4..3c70dd577292 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_DSC_IMAGE_WIDTH 5184 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -2610,7 +2611,8 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { @@ -3901,6 +3903,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3925,7 +3931,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + 
(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 878bf4782ce6..2c7455e22a65 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index ed8bf5f723c9..1e6aeb1bd2bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 3b6ed60dcd35..ba77957aefe3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -65,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN21_MAX_DSC_IMAGE_WIDTH 5184 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP( return 30; else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; + else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) + return 18; else return BPP_INVALID; } @@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + 
(locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index f4c1ef9046bf..cfacd6027467 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -269,7 +269,7 @@ struct writeback_st { struct _vcs_dpi_display_output_params_st { int dp_lanes; - int output_bpp; + double output_bpp; int dsc_enable; int wb_enable; int num_active_wb; @@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_width; unsigned int vready_offset; unsigned char interlaced; + unsigned char embedded; double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 65cf4edddaff..7f9a5621922f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; + mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = (enum scan_direction_class) (src->source_scan); @@ -432,8 +433,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode? mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine; + mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = + dst->odm_combine; mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] = (enum output_format_class) (dout->output_format); + mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = + dout->output_bpp; mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] = (enum output_encoder_class) (dout->output_type); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 91decac50557..1540ffbe3979 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -387,6 +387,7 @@ struct vba_vars_st { /* vba mode support */ /*inputs*/ + bool EmbeddedPanel[DC__NUM_DPP__MAX]; bool SupportGFX7CompatibleTilingIn32bppAnd64bpp; double MaxHSCLRatio; double MaxVSCLRatio; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index ad8571f5a142..4c3e9cc30167 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -243,7 +243,7 @@ void dml1_extract_rq_regs( rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - /* FIXME: take the max between luma, chroma chunk size? + /* TODO: take the max between luma, chroma chunk size? 
* okay for now, as we are setting chunk_bytes to 8kb anyways */ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ @@ -602,7 +602,7 @@ static void get_surf_rq_param( unsigned int log2_dpte_group_length; unsigned int func_meta_row_height, func_dpte_row_height; - /* FIXME check if ppe apply for both luma and chroma in 422 case */ + /* TODO check if ppe apply for both luma and chroma in 422 case */ if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params( ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */ - prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */ + prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz; min_ttu_vblank = dlg_sys_param.t_urg_wm_us; @@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params( dcc_en = e2e_pipe_param.pipe.src.dcc; dual_plane = is_dual_plane( (enum source_format_class) e2e_pipe_param.pipe.src.source_format); - mode_422 = 0; /* FIXME */ + mode_422 = 0; /* TODO */ access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ bytes_per_element_l = get_bytes_per_element( (enum source_format_class) e2e_pipe_param.pipe.src.source_format, @@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params( cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1) * (double) cur0_req_width; cur0_req_per_width = cur0_width_ub / (double) cur0_req_width; - hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */ + hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */ if (vratio_pre_l <= 1.0) { refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index b456cd23c6fa..970737217e53 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,15 +1,20 @@ # # Makefile for the 'dsc' sub-component of DAL. -ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) - cc_stack_align := -mpreferred-stack-boundary=4 -else ifneq ($(call cc-option, -mstack-alignment=16),) - cc_stack_align := -mstack-alignment=16 -endif +dsc_ccflags := -mhard-float -msse -dsc_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +endif -ifdef CONFIG_CC_IS_CLANG +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). 
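Both DAL float Makefiles in this series (dml earlier, dsc here) replace the cc-option probe with an explicit compiler-version gate: cc-ifversion evaluates to y when GCC is older than 7.1 ("0701"), and only those compilers get -mpreferred-stack-boundary=4 (16-byte stack frames); newer GCC and clang get -msse2 instead. Per the hunk's own comment, GCC < 7.1 cannot compile double-using code against the kernel's default -mpreferred-stack-boundary=3 (8-byte frames). Illustrative only, not from the patch, this is the kind of code that forces the hard-float/SSE flags in the first place:

	/* DML/DSC are full of double-precision locals like this; the SSE
	 * spill slots they need are why the stack-alignment choice matters. */
	static double example_dml_math(double bytes, double bw_bytes_per_us)
	{
		return bytes / bw_bytes_per_us; /* latency in microseconds */
	}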
+dsc_ccflags += -mpreferred-stack-boundary=4 +else dsc_ccflags += -msse2 endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 5995bcdfed54..e60f760585e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -23,8 +23,7 @@ */ #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dc.h" -#include "core_types.h" +#include "dc_hw_types.h" #include "dsc.h" #include <drm/drm_dp_helper.h> @@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = { /* This module's internal functions */ +static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing) +{ + uint32_t bits_per_channel = 0; + uint32_t kbps; + + if (timing->flags.DSC) { + kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); + kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); + return kbps; + } + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: + bits_per_channel = 12; + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; + } + + ASSERT(bits_per_channel != 0); + + kbps = timing->pix_clk_100hz / 10; + kbps *= bits_per_channel; + + if (timing->flags.Y_ONLY != 1) { + /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ + kbps *= 3; + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + kbps /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + kbps = kbps * 2 / 3; + } + + return kbps; + +} + static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp } static void get_dsc_enc_caps( - const struct dc *dc, + const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { // This is a static HW query, so we can use any DSC - struct display_stream_compressor *dsc = dc->res_pool->dscs[0]; memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); if (dsc) @@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range( struct dc_dsc_bw_range *range) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing); /* max dsc target bpp */ range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz); @@ -512,6 +563,7 @@ static bool setup_dsc_config( const struct dsc_enc_caps *dsc_enc_caps, int target_bandwidth_kbps, const struct dc_crtc_timing *timing, + int min_slice_height_override, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -680,7 +732,10 @@ static bool setup_dsc_config( // Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by. // For 4:2:0 make sure the slice height is divisible by 2 as well. 
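The new dc_dsc_bandwidth_in_kbps_from_timing() above inlines the stream-bandwidth math so dc_dsc.c can drop its dc.h/core_types.h dependencies: pix_clk_100hz divided by 10 gives kbit/s at one bit per pixel, which is then scaled by bits per channel and by the pixel encoding (x3 for RGB/4:4:4, halved again for 4:2:0, x2/3 for 4:2:2, and left at a single component for Y-only). A hedged arithmetic check with my own numbers, not the patch's:

	#include <stdint.h>

	static uint32_t example_stream_kbps_4k60_rgb8(void)
	{
		uint32_t kbps = 5940000 / 10; /* 594 MHz in 100 Hz units */
		kbps *= 8;                    /* bits per channel */
		kbps *= 3;                    /* R, G, B components */
		return kbps; /* 14,256,000 kbit/s, the familiar 14.256 Gb/s */
	}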
- slice_height = min(dsc_policy.min_sice_height, pic_height); + if (min_slice_height_override == 0) + slice_height = min(dsc_policy.min_sice_height, pic_height); + else + slice_height = min(min_slice_height_override, pic_height); while (slice_height < pic_height && (pic_height % slice_height != 0 || (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0))) @@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp * If DSC is not possible, leave '*range' untouched. */ bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_bpp, const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range( struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) - is_dsc_possible = setup_dsc_config(dsc_sink_caps, - &dsc_enc_caps, - 0, - timing, &config); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, + dsc_min_slice_height_override, &config); if (is_dsc_possible) get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range); @@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range( } bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg) @@ -841,11 +896,11 @@ bool dc_dsc_compute_config( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, dsc_cfg); + timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } #endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index ca51e83f8764..76c4b12d6824 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com { float bpp_group; float initial_xmit_delay_factor; - int source_bpp; int padding_pixels; int i; @@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->initial_xmit_delay++; } - source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5); - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 
1 : 0); rc->flatness_det_thresh = 2 << (bpc - 8); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c index f8f85490e77e..f67c18375bfd 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c @@ -321,8 +321,6 @@ void dal_gpio_destroy( return; } - dal_gpio_close(*gpio); - switch ((*gpio)->id) { case GPIO_ID_DDC_DATA: kfree((*gpio)->hw_container.ddc); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index d03165e71dc6..92280cc05e2d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux( return; } - dal_gpio_close(*mux); dal_gpio_destroy(mux); kfree(*mux); @@ -460,7 +459,6 @@ void dal_gpio_destroy_irq( return; } - dal_gpio_close(*irq); dal_gpio_destroy(irq); kfree(*irq); diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile new file mode 100644 index 000000000000..4170b6eb9ec0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile @@ -0,0 +1,28 @@ +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'hdcp' sub-component of DAL. +# + +HDCP_MSG = hdcp_msg.o + +AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG) diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c new file mode 100644 index 000000000000..6f730b5bfe42 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -0,0 +1,324 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" +#include "dm_helpers.h" +#include "include/hdcp_types.h" +#include "include/i2caux_interface.h" +#include "include/signal_types.h" +#include "core_types.h" +#include "dc_link_ddc.h" +#include "link_hwss.h" + +#define DC_LOGGER \ + link->ctx->logger +#define HDCP14_KSV_SIZE 5 +#define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE + +static const bool hdcp_cmd_is_read[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = true, + [HDCP_MESSAGE_ID_READ_RI_R0] = true, + [HDCP_MESSAGE_ID_READ_PJ] = true, + [HDCP_MESSAGE_ID_WRITE_AKSV] = false, + [HDCP_MESSAGE_ID_WRITE_AINFO] = false, + [HDCP_MESSAGE_ID_WRITE_AN] = false, + [HDCP_MESSAGE_ID_READ_VH_X] = true, + [HDCP_MESSAGE_ID_READ_VH_0] = true, + [HDCP_MESSAGE_ID_READ_VH_1] = true, + [HDCP_MESSAGE_ID_READ_VH_2] = true, + [HDCP_MESSAGE_ID_READ_VH_3] = true, + [HDCP_MESSAGE_ID_READ_VH_4] = true, + [HDCP_MESSAGE_ID_READ_BCAPS] = true, + [HDCP_MESSAGE_ID_READ_BSTATUS] = true, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true, + [HDCP_MESSAGE_ID_READ_BINFO] = true, + [HDCP_MESSAGE_ID_HDCP2VERSION] = true, + [HDCP_MESSAGE_ID_RX_CAPS] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = true, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [HDCP_MESSAGE_ID_READ_PJ] = 0xA, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, + [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, + 
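The define near the top of this new file, HDCP14_MAX_KSV_FIFO_SIZE, is just the protocol ceiling spelled out: an HDCP 1.4 repeater reports at most 127 downstream devices, each identified by a 5-byte KSV. A trivial hedged check:

	/* 127 devices x 5-byte KSVs: the largest KSV FIFO a repeater can send */
	_Static_assert(127 * 5 == 635, "HDCP14_MAX_KSV_FIFO_SIZE is 635 bytes");

hdcp_cmd_is_read[] then classifies every HDCP 1.4/2.2 message by direction, and hdcp_i2c_offsets[] maps each one onto its register offset in the HDMI HDCP address space.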
[HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70 +}; + +struct protection_properties { + bool supported; + bool (*process_transaction)( + struct dc_link *link, + struct hdcp_protection_message *message_info); +}; + +static const struct protection_properties non_supported_protection = { + .supported = false +}; + +static bool hdmi_14_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + uint8_t *buff = NULL; + bool result; + const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/ + const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/ + struct i2c_command i2c_command; + uint8_t offset = hdcp_i2c_offsets[message_info->msg_id]; + struct i2c_payload i2c_payloads[] = { + { true, 0, 1, &offset }, + /* actual hdcp payload, will be filled later, zeroed for now*/ + { 0 } + }; + + switch (message_info->link) { + case HDCP_LINK_SECONDARY: + i2c_payloads[0].address = hdcp_i2c_addr_link_secondary; + i2c_payloads[1].address = hdcp_i2c_addr_link_secondary; + break; + case HDCP_LINK_PRIMARY: + default: + i2c_payloads[0].address = hdcp_i2c_addr_link_primary; + i2c_payloads[1].address = hdcp_i2c_addr_link_primary; + break; + } + + if (hdcp_cmd_is_read[message_info->msg_id]) { + i2c_payloads[1].write = false; + i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads); + i2c_payloads[1].length = message_info->length; + i2c_payloads[1].data = message_info->data; + } else { + i2c_command.number_of_payloads = 1; + buff = kzalloc(message_info->length + 1, GFP_KERNEL); + + if (!buff) + return false; + + buff[0] = offset; + memmove(&buff[1], message_info->data, message_info->length); + i2c_payloads[0].length = message_info->length + 1; + i2c_payloads[0].data = buff; + } + + i2c_command.payloads = i2c_payloads; + i2c_command.engine = I2C_COMMAND_ENGINE_HW;//only HW + i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz; + + result = dm_helpers_submit_i2c( + link->ctx, + link, + &i2c_command); + kfree(buff); + + return result; +} + +static const struct protection_properties hdmi_14_protection = { + .supported = true, + .process_transaction = hdmi_14_process_transaction +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, + [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, + 
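hdmi_14_process_transaction() above frames everything as plain DDC traffic to slave 0x3a (0x74 >> 1), or 0x3b on the secondary link: a read is a one-byte register-offset write followed by a read of message_info->length bytes, while a write is a single payload with the offset byte prepended to the data in a freshly allocated buffer. A stripped-down sketch of the read shape (struct restated locally for illustration; the field names match the assignments above):

	#include <stdbool.h>
	#include <stdint.h>

	struct i2c_payload { bool write; uint8_t address; uint32_t length; uint8_t *data; };

	static void example_hdcp14_read_shape(uint8_t offset, uint8_t *buf, uint32_t len)
	{
		/* [W 0x3a: offset] then [R 0x3a: len bytes] in one transaction */
		struct i2c_payload txn[2] = {
			{ .write = true,  .address = 0x3a, .length = 1,   .data = &offset },
			{ .write = false, .address = 0x3a, .length = len, .data = buf },
		};
		(void)txn; /* would be handed to dm_helpers_submit_i2c() */
	}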
[HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494 +}; + +static bool dpcd_access_helper( + struct dc_link *link, + uint32_t length, + uint8_t *data, + uint32_t dpcd_addr, + bool is_read) +{ + enum dc_status status; + uint32_t cur_length = 0; + uint32_t offset = 0; + uint32_t ksv_read_size = 0x6803b - 0x6802c; + + /* Read KSV, need repeatedly handle */ + if (dpcd_addr == 0x6802c) { + if (length % HDCP14_KSV_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n", + __func__, + length, + HDCP14_KSV_SIZE); + } + if (length > HDCP14_MAX_KSV_FIFO_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n", + __func__, + length, + HDCP14_MAX_KSV_FIFO_SIZE); + } + + DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n", + __func__, + length / HDCP14_KSV_SIZE); + + while (length > 0) { + if (length > ksv_read_size) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + ksv_read_size); + + data += ksv_read_size; + length -= ksv_read_size; + } else { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + length); + + data += length; + length = 0; + } + + if (status != DC_OK) + return false; + } + } else { + while (length > 0) { + if (length > DEFAULT_AUX_MAX_DATA_SIZE) + cur_length = DEFAULT_AUX_MAX_DATA_SIZE; + else + cur_length = length; + + if (is_read) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } else { + status = core_link_write_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } + + if (status != DC_OK) + return false; + + length -= cur_length; + offset += cur_length; + } + } + return true; +} + +static bool dp_11_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + return dpcd_access_helper( + link, + message_info->length, + message_info->data, + hdcp_dpcd_addrs[message_info->msg_id], + hdcp_cmd_is_read[message_info->msg_id]); +} + +static const struct protection_properties dp_11_protection = { + .supported = true, + .process_transaction = dp_11_process_transaction +}; + diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f189307750ab..a831079607cd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, #include "clock_source.h" #include "audio.h" #include "dm_pp_smu.h" - +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif /************ link *****************/ struct link_init_data { @@ -231,6 +233,7 @@ struct resource_pool { struct dcn_fe_bandwidth { int dppclk_khz; + }; struct stream_resource { @@ -395,10 +398,6 @@ struct dc_state { struct clk_mgr *clk_mgr; - struct { - bool full_update_needed : 1; - } commit_hints; - struct kref refcount; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index b1fab251c09b..14716ba35662 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); +bool dal_ddc_submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload); + int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_channel_operation_result *operation_result); @@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); +enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout); + void dal_ddc_service_write_scdc_data( struct ddc_service *ddc_service, uint32_t pix_clk, diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 08a4df2c61a8..045138dbdccb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,6 +28,8 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ struct dc_link; struct dc_stream_state; @@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries( struct dc_link_settings *known_limit_link_setting, int attempts); +bool dp_verify_mst_link_cap( + struct dc_link *link); + bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index e79cd4e92919..e77b3a76766d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -140,6 +140,9 @@ struct write_command_context { struct aux_engine_funcs { + bool (*configure_timeout)( + struct ddc_service *ddc, + uint32_t timeout); void (*destroy)( struct aux_engine **ptr); bool (*acquire_engine)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 76f9ad1b23df..4e18e77dcf42 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -47,7 +47,7 @@ #ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Will these bw structures be ASIC specific? 
*/ -#define MAX_NUM_DPM_LVL 4 +#define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 @@ -180,13 +180,19 @@ struct clk_mgr_funcs { struct dc_state *context, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); + + bool (*are_clock_states_equal) (struct dc_clocks *a, + struct dc_clocks *b); + void (*notify_wm_ranges)(struct clk_mgr *clk_mgr); }; struct clk_mgr { struct dc_context *ctx; struct clk_mgr_funcs *funcs; struct dc_clocks clks; + bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes + int dentist_vco_freq_khz; #ifdef CONFIG_DRM_AMD_DC_DCN2_1 struct clk_bw_params *bw_params; #endif @@ -199,4 +205,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr); +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + #endif /* __DAL_CLK_MGR_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 7dd46eb96d67..a17a77192690 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -184,6 +184,21 @@ struct clk_mgr_registers { uint32_t MP1_SMN_C2PMSG_91; }; +enum clock_type { + clock_type_dispclk = 1, + clock_type_dcfclk, + clock_type_socclk, + clock_type_pixelclk, + clock_type_phyclk, + clock_type_dppclk, + clock_type_fclk, + clock_type_dcfdsclk, + clock_type_dscclk, + clock_type_uclk, + clock_type_dramclk, +}; + + struct state_dependent_clocks { int display_clk_khz; int pixel_clk_khz; @@ -210,8 +225,6 @@ struct clk_mgr_internal { struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; /*TODO: figure out which of the below fields should be here vs in asic specific portion */ - int dentist_vco_freq_khz; - /* Cache the status of DFS-bypass feature*/ bool dfs_bypass_enabled; /* True if the DFS-bypass feature is enabled and active. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index d8e744f366e5..05ee5295d2c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -38,8 +38,7 @@ struct dccg { struct dccg_funcs { void (*update_dpp_dto)(struct dccg *dccg, int dpp_inst, - int req_dppclk, - bool reduce_divider_only); + int req_dppclk); void (*get_dccg_ref_freq)(struct dccg *dccg, unsigned int xtalin_freq_inKhz, unsigned int *dccg_ref_freq_inKhz); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index a6297219d7fc..c81a17aeaa25 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -147,6 +147,7 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 1ddb1c6fa149..c6ff3d78b435 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -28,7 +28,11 @@ #include "dc_dsc.h" #include "dc_hw_types.h" -#include "dc_dp_types.h" +#include "dc_types.h" +/* do not include any other headers + * or else it might break Edid Utility functionality. + */ + /* Input parameters for configuring DSC from the outside of DSC */ struct dsc_config { @@ -81,12 +85,6 @@ struct dsc_enc_caps { uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ }; -struct display_stream_compressor { - const struct dsc_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index abb4e4237fb6..b21909216fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -184,6 +184,10 @@ struct link_encoder_funcs { bool (*fec_is_active)(struct link_encoder *enc); #endif bool (*is_in_alt_mode) (struct link_encoder *enc); + + void (*get_max_link_cap)(struct link_encoder *enc, + struct dc_link_settings *link_settings); + enum signal_type (*get_dig_mode)( struct link_encoder *enc); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index e8668388581b..67b610d6d91f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -43,6 +43,7 @@ struct dcn_watermarks { #if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_flip; + int32_t urgent_latency_ns; #endif struct cstate_pstate_watermarks_st cstate_pstate; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 957e9047381a..18def2b6fafe 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -208,6 +208,7 @@ struct output_pixel_processor { struct mpc_tree mpc_tree_params; bool mpcc_disconnect_pending[MAX_PIPES]; const struct opp_funcs *funcs; + uint32_t dyn_expansion; }; enum 
fmt_stereo_action { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe9b7a10a1c3..6305e388612a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -214,6 +214,11 @@ struct stream_encoder_funcs { unsigned int (*dig_source_otg)( struct stream_encoder *enc); + bool (*dp_get_pixel_format)( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 6196cc32356e..27c73caf74ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -261,6 +261,8 @@ struct timing_generator_funcs { void (*program_manual_trigger)(struct timing_generator *optc); void (*setup_manual_trigger)(struct timing_generator *optc); + bool (*get_hw_timing)(struct timing_generator *optc, + struct dc_crtc_timing *hw_crtc_timing); void (*set_vtg_params)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3a938cd414ea..d39c1e11def5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -114,6 +114,9 @@ struct hw_sequencer_funcs { int opp_id); #if defined(CONFIG_DRM_AMD_DC_DCN2_0) + void (*program_front_end_for_ctx)( + struct dc *dc, + struct dc_state *context); void (*program_triplebuffer)( const struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -229,6 +232,13 @@ struct hw_sequencer_funcs { struct dc *dc, struct dc_state *context); + void (*exit_optimized_pwr_state)( + const struct dc *dc, + struct dc_state *context); + void (*optimize_pwr_state)( + const struct dc *dc, + struct dc_state *context); + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*update_bandwidth)( struct dc *dc, @@ -321,10 +331,12 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*update_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*enable_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); #endif @@ -337,6 +349,9 @@ struct hw_sequencer_funcs { enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + bool (*s0i3_golden_init_wa)(struct dc *dc); +#endif }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 18961707db23..9ad49da50a17 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -31,6 +31,8 @@ #define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9 #define DP_BRANCH_DEVICE_ID_00001A 0x00001A #define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1 +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 +#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C enum 
ddc_result { DDC_RESULT_UNKNOWN = 0, diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h new file mode 100644 index 000000000000..f31e6befc8d6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h @@ -0,0 +1,96 @@ +/* +* Copyright 2019 Advanced Micro Devices, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +* OTHER DEALINGS IN THE SOFTWARE. +* +* Authors: AMD +* +*/ + +#ifndef __DC_HDCP_TYPES_H__ +#define __DC_HDCP_TYPES_H__ + +enum hdcp_message_id { + HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + HDCP_MESSAGE_ID_READ_BKSV = 0, + /* HDMI is called Ri', DP is called R0' */ + HDCP_MESSAGE_ID_READ_RI_R0, + HDCP_MESSAGE_ID_READ_PJ, + HDCP_MESSAGE_ID_WRITE_AKSV, + HDCP_MESSAGE_ID_WRITE_AINFO, + HDCP_MESSAGE_ID_WRITE_AN, + HDCP_MESSAGE_ID_READ_VH_X, + HDCP_MESSAGE_ID_READ_VH_0, + HDCP_MESSAGE_ID_READ_VH_1, + HDCP_MESSAGE_ID_READ_VH_2, + HDCP_MESSAGE_ID_READ_VH_3, + HDCP_MESSAGE_ID_READ_VH_4, + HDCP_MESSAGE_ID_READ_BCAPS, + HDCP_MESSAGE_ID_READ_BSTATUS, + HDCP_MESSAGE_ID_READ_KSV_FIFO, + HDCP_MESSAGE_ID_READ_BINFO, + + /* HDCP 2.2 */ + + HDCP_MESSAGE_ID_HDCP2VERSION, + HDCP_MESSAGE_ID_RX_CAPS, + HDCP_MESSAGE_ID_WRITE_AKE_INIT, + HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + HDCP_MESSAGE_ID_WRITE_LC_INIT, + HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + HDCP_MESSAGE_ID_READ_RXSTATUS, + HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + + HDCP_MESSAGE_ID_MAX +}; + +enum hdcp_version { + HDCP_Unknown = 0, + HDCP_VERSION_14, + HDCP_VERSION_22, +}; + +enum hdcp_link { + HDCP_LINK_PRIMARY, + HDCP_LINK_SECONDARY +}; + +struct hdcp_protection_message { + enum hdcp_version version; + /* relevant only for DVI */ + enum hdcp_link link; + enum hdcp_message_id msg_id; + uint32_t length; + uint8_t max_retries; + uint8_t *data; +}; + +#endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 2d8f14b69117..1de4805cb8c7 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -373,7 +373,42 @@ static 
struct fixed31_32 translate_from_linear_space( return dc_fixpt_mul(args->arg, args->a1); } -static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) + +static struct fixed31_32 translate_from_linear_space_long( + struct translate_from_linear_space_args *args) +{ + const struct fixed31_32 one = dc_fixpt_from_int(1); + + if (dc_fixpt_lt(one, args->arg)) + return one; + + if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) + return dc_fixpt_sub( + args->a2, + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + dc_fixpt_neg(args->arg), + dc_fixpt_recip(args->gamma)))); + else if (dc_fixpt_le(args->a0, args->arg)) + return dc_fixpt_sub( + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + args->arg, + dc_fixpt_recip(args->gamma))), + args->a2); + else + return dc_fixpt_mul( + args->arg, + args->a1); +} + +static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf) { struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); @@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) scratch_gamma_args.a3 = dc_fixpt_zero; scratch_gamma_args.gamma = gamma; + if (use_eetf) + return translate_from_linear_space_long(&scratch_gamma_args); + return translate_from_linear_space(&scratch_gamma_args); } + static struct fixed31_32 translate_to_linear_space( struct fixed31_32 arg, struct fixed31_32 a0, @@ -920,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (fs_params->max_display < 100) // cap at 100 at the top max_display = dc_fixpt_from_int(100); - if (fs_params->min_content < fs_params->min_display) - use_eetf = true; - else - min_content = min_display; - + // only max used, we don't adjust min luminance if (fs_params->max_content > fs_params->max_display) use_eetf = true; else @@ -950,7 +985,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) output = dc_fixpt_zero; else - output = calculate_gamma22(scaledX); + output = calculate_gamma22(scaledX, use_eetf); rgb->r = output; rgb->g = output; @@ -2173,5 +2208,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, rgb_degamma_alloc_fail: return ret; } - - diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ec70c9b12e1a..16e69bbc69aa 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,8 +37,8 @@ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) /* Number of elements in the render times cache array */ #define RENDER_TIMES_MAX_COUNT 10 -/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ -#define BTR_EXIT_MARGIN 2000 +/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */ +#define BTR_MAX_MARGIN 2500 /* Threshold to change BTR multiplier (to avoid frequent changes) */ #define BTR_DRIFT_MARGIN 2000 /*Threshold to exit fixed refresh rate*/ @@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp( current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000); + /* v_total cannot be less than nominal */ + if (v_total < stream->timing.v_total) + v_total = stream->timing.v_total; + in_out_vrr->adjust.v_total_min = v_total; in_out_vrr->adjust.v_total_max = v_total; } @@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync, unsigned 
int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; unsigned int frames_to_insert = 0; - unsigned int min_frame_duration_in_ns = 0; - unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; unsigned int delta_from_mid_point_delta_in_us; - - min_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - in_out_vrr->max_refresh_in_uhz))); + unsigned int max_render_time_in_us = + in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us; /* Program BTR */ - if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { + if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) { /* Exit Below the Range */ if (in_out_vrr->btr.btr_active) { in_out_vrr->btr.frame_counter = 0; in_out_vrr->btr.btr_active = false; } - } else if (last_render_time_in_us > max_render_time_in_us) { + } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { /* Enter Below the Range */ - in_out_vrr->btr.btr_active = true; + if (!in_out_vrr->btr.btr_active) { + in_out_vrr->btr.btr_active = true; + } } /* BTR set to "not active" so disengage */ @@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. */ - if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && + (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || + mid_point_frames_floor < 2)) { frames_to_insert = mid_point_frames_ceil; delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - delta_from_mid_point_in_us_1; @@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < - in_out_vrr->max_duration_in_us) && + max_render_time_in_us) && ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > in_out_vrr->min_duration_in_us)) frames_to_insert = in_out_vrr->btr.frames_to_insert; @@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); + /* Rounded to the nearest Hz */ + nominal_field_rate_in_uhz = 1000000ULL * + div_u64(nominal_field_rate_in_uhz + 500000, 1000000); + min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - + 2 * in_out_vrr->min_duration_in_us; + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; + in_out_vrr->supported = true; } @@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; + in_out_vrr->btr.mid_point_in_us = (in_out_vrr->min_duration_in_us + in_out_vrr->max_duration_in_us) / 2; @@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, unsigned int *inserted_frames, unsigned int *inserted_duration_in_us) 
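Two numeric changes in the freesync hunks deserve spelling out. BTR hysteresis is now stream-derived instead of the fixed BTR_EXIT_MARGIN: margin_in_us = max_duration - 2 * min_duration, capped at BTR_MAX_MARGIN (2500 us), with enter/exit compared against the max render time offset by half the margin. And the nominal field rate is rounded to the nearest whole Hz before the refresh range is built, so a panel reporting 59.94 Hz is treated as 60 Hz. Hedged worked numbers (my example range, not from the patch):

	static void example_freesync_numbers(void)
	{
		/* a 40-144 Hz panel: frame durations span ~6,944..25,000 us */
		unsigned int margin_in_us = 25000 - 2 * 6944; /* 11,112 us */
		if (margin_in_us > 2500) /* BTR_MAX_MARGIN */
			margin_in_us = 2500;

		/* 59.94 Hz = 59,940,000 uHz; round to the nearest 1,000,000 */
		unsigned long long uhz = 59940000ULL;
		uhz = 1000000ULL * ((uhz + 500000ULL) / 1000000ULL);
		/* uhz == 60,000,000, i.e. exactly 60 Hz */
		(void)margin_in_us;
		(void)uhz;
	}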
{ - struct core_freesync *core_freesync = NULL; - if (mod_freesync == NULL) return; - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - if (vrr->supported) { *v_total_min = vrr->adjust.v_total_min; *v_total_max = vrr->adjust.v_total_max; @@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate( const struct dc_stream_state *stream) { unsigned long long nominal_field_rate_in_uhz = 0; + unsigned int total = stream->timing.h_total * stream->timing.v_total; - /* Calculate nominal field rate for stream */ + /* Calculate nominal field rate for stream, rounded up to nearest integer */ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); + + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); return nominal_field_rate_in_uhz; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile new file mode 100644 index 000000000000..1c3c6d47973a --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile @@ -0,0 +1,32 @@ +# +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'hdcp' sub-module of DAL. +# + +HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \ + hdcp1_execution.o hdcp1_transition.o + +AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP)) +#$(info ************ DAL-HDCP_MAKEFILE ************) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c new file mode 100644 index 000000000000..d7ac445dec6f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -0,0 +1,426 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static void push_error_status(struct mod_hdcp *hdcp, + enum mod_hdcp_status status) +{ + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + + if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) { + trace->errors[trace->error_count].status = status; + trace->errors[trace->error_count].state_id = hdcp->state.id; + trace->error_count++; + HDCP_ERROR_TRACE(hdcp, status); + } + + hdcp->connection.hdcp1_retry_count++; +} + +static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) +{ + int i, display_enabled = 0; + + /* if all displays on the link are disabled, hdcp is not desired */ + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->connection.displays[i].adjust.disable) { + display_enabled = 1; + break; + } + } + + return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && + display_enabled && !hdcp->connection.link.adjust.hdcp1.disable; +} + +static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_in_initialized_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* initialize transition input */ + memset(input, 0, sizeof(union mod_hdcp_transition_input)); + } else if (is_in_cp_not_desired_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* update topology event if hdcp is not desired */ + status = mod_hdcp_add_display_topology(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_execution(hdcp, + event_ctx, &input->hdcp1); + } +out: + return status; +} + +static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->unexpected_event) + goto out; + + if (is_in_initialized_state(hdcp)) { + if (is_dp_hdcp(hdcp)) + if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE); + } else { + 
callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + } else if (is_in_cp_not_desired_state(hdcp)) { + increment_stay_counter(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else { + status = MOD_HDCP_STATUS_INVALID_STATE; + } +out: + return status; +} + +static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_hdcp1(hdcp)) { + if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) + mod_hdcp_hdcp1_destroy_session(hdcp); + + if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else if (is_in_cp_not_desired_state(hdcp)) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } + +out: + /* stop callback and watchdog requests from previous authentication*/ + output->watchdog_timer_stop = 1; + output->callback_stop = 1; + return status; +} + +static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(output, 0, sizeof(struct mod_hdcp_output)); + + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (current_state(hdcp) != HDCP_UNINITIALIZED) { + HDCP_TOP_RESET_CONN_TRACE(hdcp); + set_state_id(hdcp, output, HDCP_UNINITIALIZED); + } + memset(&hdcp->connection, 0, sizeof(hdcp->connection)); +out: + return status; +} + +/* + * Implementation of functions in mod_hdcp.h + */ +size_t mod_hdcp_get_memory_size(void) +{ + return sizeof(struct mod_hdcp); +} + +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config) +{ + struct mod_hdcp_output output; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(hdcp, 0, sizeof(struct mod_hdcp)); + memset(&output, 0, sizeof(output)); + hdcp->config = *config; + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, &output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_output output; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + 
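/* tear down by resetting the connection; the full hdcp context is zeroed only on success */ +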
memset(&output, 0, sizeof(output)); + status = reset_connection(hdcp, &output); + if (status == MOD_HDCP_STATUS_SUCCESS) + memset(hdcp, 0, sizeof(struct mod_hdcp)); + else + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display_container = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* skip inactive display */ + if (display->state != MOD_HDCP_DISPLAY_ACTIVE) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* check existing display container */ + if (get_active_display_at_index(hdcp, display->index)) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* find an empty display container */ + display_container = get_empty_display_container(hdcp); + if (!display_container) { + status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND; + goto out; + } + + /* reset existing authentication status */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* add display to connection */ + hdcp->connection.link = *link; + *display_container = *display; + + /* reset retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication */ + if (current_state(hdcp) != HDCP_INITIALIZED) + set_state_id(hdcp, output, HDCP_INITIALIZED); + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* stop current authentication */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* remove display */ + display->state = MOD_HDCP_DISPLAY_INACTIVE; + + /* clear retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication for remaining displays*/ + if (get_active_display_count(hdcp) > 0) + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, + output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_display_query *query) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + goto out; + } + + /* populate query */ + query->link = &hdcp->connection.link; + query->display = display; + query->trace = &hdcp->connection.trace; + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + 
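/* report encryption as off unless the hdcp1 status query below says otherwise */ +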
mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); + +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, + enum mod_hdcp_event event, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status exec_status, trans_status, reset_status, status; + struct mod_hdcp_event_context event_ctx; + + HDCP_EVENT_TRACE(hdcp, event); + memset(output, 0, sizeof(struct mod_hdcp_output)); + memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context)); + event_ctx.event = event; + + /* execute and transition */ + exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input); + trans_status = transition( + hdcp, &event_ctx, &hdcp->auth.trans_input, output); + if (trans_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_SUCCESS; + } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE; + push_error_status(hdcp, status); + } else { + status = exec_status; + push_error_status(hdcp, status); + } + + /* reset authentication if needed */ + if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) { + HDCP_FULL_DDC_TRACE(hdcp); + reset_status = reset_authentication(hdcp, output); + if (reset_status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, reset_status); + } + return status; +} + +enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( + enum signal_type signal) +{ + enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF; + + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + mode = MOD_HDCP_MODE_DEFAULT; + break; + case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_DISPLAY_PORT: + mode = MOD_HDCP_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + mode = MOD_HDCP_MODE_DP_MST; + break; + default: + break; + }; + + return mode; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h new file mode 100644 index 000000000000..5664bc0b5bd0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -0,0 +1,442 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef HDCP_H_ +#define HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp_log.h" + +#define BCAPS_READY_MASK 0x20 +#define BCAPS_REPEATER_MASK 0x40 +#define BSTATUS_DEVICE_COUNT_MASK 0X007F +#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080 +#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800 +#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01 +#define BCAPS_REPEATER_MASK_DP 0x02 +#define BSTATUS_READY_MASK_DP 0x01 +#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02 +#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04 +#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08 +#define BINFO_DEVICE_COUNT_MASK_DP 0X007F +#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080 +#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800 + +#define RXSTATUS_MSG_SIZE_MASK 0x03FF +#define RXSTATUS_READY_MASK 0x0400 +#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 +#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0 +#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01 +#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02 +#define RXSTATUS_READY_MASK_DP 0x0001 +#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002 +#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004 +#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008 +#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010 + +enum mod_hdcp_trans_input_result { + UNKNOWN = 0, + PASS, + FAIL +}; + +struct mod_hdcp_transition_input_hdcp1 { + uint8_t bksv_read; + uint8_t bksv_validation; + uint8_t add_topology; + uint8_t create_session; + uint8_t an_write; + uint8_t aksv_write; + uint8_t ainfo_write; + uint8_t bcaps_read; + uint8_t r0p_read; + uint8_t rx_validation; + uint8_t encryption; + uint8_t link_maintenance; + uint8_t ready_check; + uint8_t bstatus_read; + uint8_t max_cascade_check; + uint8_t max_devs_check; + uint8_t device_count_check; + uint8_t ksvlist_read; + uint8_t vp_read; + uint8_t ksvlist_vp_validation; + + uint8_t hdcp_capable_dp; + uint8_t binfo_read_dp; + uint8_t r0p_available_dp; + uint8_t link_integiry_check; + uint8_t reauth_request_check; + uint8_t stream_encryption_dp; +}; + +union mod_hdcp_transition_input { + struct mod_hdcp_transition_input_hdcp1 hdcp1; +}; + +struct mod_hdcp_message_hdcp1 { + uint8_t an[8]; + uint8_t aksv[5]; + uint8_t ainfo; + uint8_t bksv[5]; + uint16_t r0p; + uint8_t bcaps; + uint16_t bstatus; + uint8_t ksvlist[635]; + uint16_t ksvlist_size; + uint8_t vp[20]; + + uint16_t binfo_dp; +}; + +union mod_hdcp_message { + struct mod_hdcp_message_hdcp1 hdcp1; +}; + +struct mod_hdcp_auth_counters { + uint8_t stream_management_retry_count; +}; + +/* contains values per connection */ +struct mod_hdcp_connection { + struct mod_hdcp_link link; + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; + uint8_t is_repeater; + uint8_t is_km_stored; + struct mod_hdcp_trace trace; + uint8_t hdcp1_retry_count; +}; + +/* contains values per authentication cycle */ +struct mod_hdcp_authentication { + uint32_t id; + union mod_hdcp_message msg; + union mod_hdcp_transition_input trans_input; + struct mod_hdcp_auth_counters count; +}; + +/* contains values per state change */ +struct mod_hdcp_state { + uint8_t id; + uint32_t stay_count; +}; + +/* per event in a state */ +struct mod_hdcp_event_context { + enum mod_hdcp_event event; + uint8_t rx_id_list_ready; + uint8_t unexpected_event; +}; + +struct mod_hdcp { + /* 
per link */ + struct mod_hdcp_config config; + /* per connection */ + struct mod_hdcp_connection connection; + /* per authentication attempt */ + struct mod_hdcp_authentication auth; + /* per state in an authentication */ + struct mod_hdcp_state state; + /* reserved memory buffer */ + uint8_t buf[2025]; +}; + +enum mod_hdcp_initial_state_id { + HDCP_UNINITIALIZED = 0x0, + HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED, + HDCP_INITIALIZED, + HDCP_CP_NOT_DESIRED, + HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED +}; + +enum mod_hdcp_hdcp1_state_id { + HDCP1_STATE_START = HDCP_INITIAL_STATE_END, + H1_A0_WAIT_FOR_ACTIVE_RX, + H1_A1_EXCHANGE_KSVS, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER, + H1_A45_AUTHENTICATED, + H1_A8_WAIT_FOR_READY, + H1_A9_READ_KSV_LIST, + HDCP1_STATE_END = H1_A9_READ_KSV_LIST +}; + +enum mod_hdcp_hdcp1_dp_state_id { + HDCP1_DP_STATE_START = HDCP1_STATE_END, + D1_A0_DETERMINE_RX_HDCP_CAPABLE, + D1_A1_EXCHANGE_KSVS, + D1_A23_WAIT_FOR_R0_PRIME, + D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER, + D1_A4_AUTHENTICATED, + D1_A6_WAIT_FOR_READY, + D1_A7_READ_KSV_LIST, + HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST, +}; + +/* hdcp1 executions and transitions */ +typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp); +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str); +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); + +/* log functions */ +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size); +/* TODO: add adjustment log */ + +/* psp functions */ +enum mod_hdcp_status mod_hdcp_add_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_remove_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status); +/* ddc functions */ +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp); +enum 
mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); + +/* hdcp version helpers */ +static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || + hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT); +} + +/* hdcp state helpers */ +static inline uint8_t current_state(struct mod_hdcp *hdcp) +{ + return hdcp->state.id; +} + +static inline void set_state_id(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output, uint8_t id) +{ + memset(&hdcp->state, 0, sizeof(hdcp->state)); + hdcp->state.id = id; + /* callback timer should be reset per state */ + output->callback_stop = 1; + output->watchdog_timer_stop = 1; + HDCP_NEXT_STATE_TRACE(hdcp, id, output); +} + +static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_STATE_START && + current_state(hdcp) <= HDCP1_STATE_END); +} + +static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_DP_STATE_START && + current_state(hdcp) <= HDCP1_DP_STATE_END); +} + +static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp) +{ + return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp)); +} + +static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_CP_NOT_DESIRED; +} + +static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_INITIALIZED; +} + +/* transition operation helpers */ +static inline void increment_stay_counter(struct mod_hdcp *hdcp) +{ + hdcp->state.stay_count++; +} + +static inline void fail_and_restart_in_ms(uint16_t time, + enum mod_hdcp_status *status, + struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; + output->watchdog_timer_needed = 0; + output->watchdog_timer_delay = 0; + *status = 
MOD_HDCP_STATUS_RESET_NEEDED; +} + +static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; +} + +static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time, + struct mod_hdcp_output *output) +{ + output->watchdog_timer_needed = 1; + output->watchdog_timer_delay = time; +} + +/* connection topology helpers */ +static inline uint8_t is_display_active(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE; +} + +static inline uint8_t is_display_added(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; +} + +static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; +} + +static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_active(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline struct mod_hdcp_display *get_first_added_display( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_active_display_at_index( + struct mod_hdcp *hdcp, uint8_t index) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (hdcp->connection.displays[i].index == index && + is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_empty_display_container( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (!is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline void reset_retry_counts(struct mod_hdcp *hdcp) +{ + hdcp->connection.hdcp1_retry_count = 0; +} + +#endif /* HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c new file mode 100644 index 000000000000..3db4a7da414f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -0,0 +1,531 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) +{ + uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv; + uint8_t count = 0; + + while (n) { + count++; + n &= (n - 1); + } + return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; +} + +static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) +{ + if (is_dp_hdcp(hdcp)) + return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; + return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE; +} + +static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_R0_P_AVAILABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING; + } else { + status = MOD_HDCP_STATUS_INVALID_OPERATION; + } + return status; +} + +static inline enum mod_hdcp_status check_link_integrity_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_reauthentication_request_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp1.binfo_dp & + BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ? + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp1.binfo_dp & + BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_MAX_DEVS_EXCEEDED_MASK) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ + return is_dp_hdcp(hdcp) ? 
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) : + (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK); +} + +static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ + /* device count must be greater than or equal to tracked hdcp displays */ + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, + &input->add_topology, &status, + hdcp, "add_topology")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session, + &input->create_session, &status, + hdcp, "create_session")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_an, + &input->an_write, &status, + hdcp, "an_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv, + &input->aksv_write, &status, + hdcp, "aksv_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(validate_bksv, + &input->bksv_validation, &status, + hdcp, "bksv_validation")) + goto out; + if (hdcp->auth.msg.hdcp1.ainfo) { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo, + &input->ainfo_write, &status, + hdcp, "ainfo_write")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status computations_validate_rx_test_for_repeater( + struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p, + &input->r0p_read, &status, + hdcp, "r0p_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx, + &input->rx_validation, &status, + hdcp, "rx_validation")) + goto out; + if (hdcp->connection.is_repeater) { + if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status 
authenticated(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance, + &input->link_maintenance, &status, + hdcp, "link_maintenance")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_ksv_ready, + &input->ready_check, &status, + hdcp, "ready_check")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + uint8_t device_count; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo, + &input->binfo_read_dp, &status, + hdcp, "binfo_read_dp")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_no_max_cascade, + &input->max_cascade_check, &status, + hdcp, "max_cascade_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_max_devs, + &input->max_devs_check, &status, + hdcp, "max_devs_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_device_count, + &input->device_count_check, &status, + hdcp, "device_count_check")) + goto out; + device_count = get_device_count(hdcp); + hdcp->auth.msg.hdcp1.ksvlist_size = device_count*5; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist, + &input->ksvlist_read, &status, + hdcp, "ksvlist_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp, + &input->vp_read, &status, + hdcp, "vp_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp, + &input->ksvlist_vp_validation, &status, + hdcp, "ksvlist_vp_validation")) + goto out; + if (input->encryption != PASS) + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; +out: + return status; +} + +static enum 
mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp, + &input->hdcp_capable_dp, &status, + hdcp, "hdcp_capable_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_r0p_available_dp, + &input->r0p_available_dp, &status, + hdcp, "r0p_available_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; +out: + return status; +} + +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str) +{ + *status = func(hdcp); + if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) { + HDCP_INPUT_PASS_TRACE(hdcp, str); + *flag = PASS; + } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) { + HDCP_INPUT_FAIL_TRACE(hdcp, str); + *flag = FAIL; + } + return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + status = wait_for_active_rx(hdcp, event_ctx, input); + break; + case H1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater(hdcp, + event_ctx, input); + break; + case H1_A45_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case H1_A8_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case H1_A9_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} + +extern enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct 
mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); + break; + case D1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + status = wait_for_r0_prime_dp(hdcp, event_ctx, input); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater( + hdcp, event_ctx, input); + break; + case D1_A4_AUTHENTICATED: + status = authenticated_dp(hdcp, event_ctx, input); + break; + case D1_A6_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case D1_A7_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c new file mode 100644 index 000000000000..136b8011ff3f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -0,0 +1,307 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + if (input->bksv_read != PASS || input->bcaps_read != PASS) { + /* 1A-04: repeatedly attempts on port access failure */ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS); + break; + case H1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(300, output); + set_state_id(hdcp, output, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + if (input->bcaps_read != PASS || + input->r0p_read != PASS || + input->rx_validation != PASS || + (!conn->is_repeater && input->encryption != PASS)) { + /* 1A-06: consider invalid r0' a failure */ + /* 1A-08: consider bksv listed in SRM a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + callback_in_ms(0, output); + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case H1_A45_AUTHENTICATED: + if (input->link_maintenance != PASS) { + /* 1A-07: consider invalid ri' a failure */ + /* 1A-07a: consider read ri' not returned a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + case H1_A8_WAIT_FOR_READY: + if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-03: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue ksv list READY polling*/ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A9_READ_KSV_LIST); + break; + case H1_A9_READ_KSV_LIST: + if (input->bstatus_read != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS || + input->device_count_check != PASS || + input->ksvlist_read != PASS || + input->vp_read != PASS || + input->ksvlist_vp_validation != PASS || + input->encryption != PASS) { + /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */ + /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-04: consider invalid v' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + 
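/* an unrecognized state is flagged invalid and authentication is restarted */ +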
default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + if (input->bcaps_read != PASS) { + /* 1A-04: no authentication on bcaps read failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->hdcp_capable_dp != PASS) { + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS); + break; + case D1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + if (input->bstatus_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->r0p_available_dp != PASS) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + if (input->r0p_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->rx_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1A-06: consider invalid r0' a failure + * after 3 attempts. 
+ * 1A-08: consider bksv listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if ((!conn->is_repeater && input->encryption != PASS) || + (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY); + } else { + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case D1_A4_AUTHENTICATED: + if (input->link_integiry_check != PASS || + input->reauth_request_check != PASS) { + /* 1A-07: restart hdcp on a link integrity failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + break; + case D1_A6_WAIT_FOR_READY: + if (input->link_integiry_check == FAIL || + input->reauth_request_check == FAIL) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-04: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A7_READ_KSV_LIST); + break; + case D1_A7_READ_KSV_LIST: + if (input->binfo_read_dp != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS) { + /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->device_count_check != PASS) { + /* + * some slow dongle doesn't update + * device count as soon as downstream is connected. + * give it more time to react. + */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(1000, &status, output); + break; + } else if (input->ksvlist_read != PASS || + input->vp_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ksvlist_vp_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1B-05: consider invalid v' a failure + * after 3 attempts. + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if (input->encryption != PASS || + (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + default: + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c new file mode 100644 index 000000000000..e7baae059b85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -0,0 +1,305 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/ +#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */ +#define HDCP_MAX_AUX_TRANSACTION_SIZE 16 + +enum mod_hdcp_ddc_message_id { + MOD_HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + MOD_HDCP_MESSAGE_ID_READ_BKSV = 0, + MOD_HDCP_MESSAGE_ID_READ_RI_R0, + MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + MOD_HDCP_MESSAGE_ID_WRITE_AN, + MOD_HDCP_MESSAGE_ID_READ_VH_X, + MOD_HDCP_MESSAGE_ID_READ_VH_0, + MOD_HDCP_MESSAGE_ID_READ_VH_1, + MOD_HDCP_MESSAGE_ID_READ_VH_2, + MOD_HDCP_MESSAGE_ID_READ_VH_3, + MOD_HDCP_MESSAGE_ID_READ_VH_4, + MOD_HDCP_MESSAGE_ID_READ_BCAPS, + MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + MOD_HDCP_MESSAGE_ID_READ_BINFO, + + MOD_HDCP_MESSAGE_ID_MAX +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, +}; + +static enum mod_hdcp_status read(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + 
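/* dp aux transactions carry at most HDCP_MAX_AUX_TRANSACTION_SIZE (16) bytes, so longer reads are split into chunks */ +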
uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + success = hdcp->config.ddc.funcs.read_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp_i2c_offsets[msg_id], + buf, + (uint32_t)buf_len); + } + + return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len, + uint8_t read_size) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + while (buf_len > 0) { + cur_size = MIN(buf_len, read_size); + status = read(hdcp, msg_id, buf + data_offset, cur_size); + + if (status != MOD_HDCP_STATUS_SUCCESS) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + + return status; +} + +static enum mod_hdcp_status write(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.write_dpcd( + hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + hdcp->buf[0] = hdcp_i2c_offsets[msg_id]; + memmove(&hdcp->buf[1], buf, buf_len); + success = hdcp->config.ddc.funcs.write_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp->buf, + (uint32_t)(buf_len+1)); + } + + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV, + hdcp->auth.msg.hdcp1.bksv, + sizeof(hdcp->auth.msg.hdcp1.bksv)); +} + +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS, + &hdcp->auth.msg.hdcp1.bcaps, + sizeof(hdcp->auth.msg.hdcp1.bcaps)); +} + +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + 1); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + sizeof(hdcp->auth.msg.hdcp1.bstatus)); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0, + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, + sizeof(hdcp->auth.msg.hdcp1.r0p)); +} + +/* special case, reading repeatedly at the same address, don't use read() */ +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size, + KSV_READ_SIZE); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0, + &hdcp->auth.msg.hdcp1.vp[0], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1, + &hdcp->auth.msg.hdcp1.vp[4], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2, + &hdcp->auth.msg.hdcp1.vp[8], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3, + &hdcp->auth.msg.hdcp1.vp[12], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4, + &hdcp->auth.msg.hdcp1.vp[16], 4); +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); + else + status = MOD_HDCP_STATUS_INVALID_OPERATION; + + return status; +} + +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + hdcp->auth.msg.hdcp1.aksv, + sizeof(hdcp->auth.msg.hdcp1.aksv)); +} + +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + &hdcp->auth.msg.hdcp1.ainfo, + sizeof(hdcp->auth.msg.hdcp1.ainfo)); +} + +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN, + hdcp->auth.msg.hdcp1.an, + sizeof(hdcp->auth.msg.hdcp1.an)); +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c new file mode 100644 index 000000000000..3982ced5f969 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -0,0 +1,163 @@ +/* + * 
Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "hdcp.h" + +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size) +{ + const uint8_t bytes_per_line = 16, + byte_size = 3, + newline_size = 1, + terminator_size = 1; + uint32_t line_count = msg_size / bytes_per_line, + trailing_bytes = msg_size % bytes_per_line; + uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count + + byte_size * trailing_bytes + newline_size + terminator_size; + uint32_t buf_pos = 0; + uint32_t i = 0; + + if (buf_size >= target_size) { + for (i = 0; i < msg_size; i++) { + if (i % bytes_per_line == 0) + buf[buf_pos++] = '\n'; + sprintf(&buf[buf_pos], "%02X ", msg[i]); + buf_pos += byte_size; + } + buf[buf_pos++] = '\0'; + } +} + +char *mod_hdcp_status_to_str(int32_t status) +{ + switch (status) { + case MOD_HDCP_STATUS_SUCCESS: + return "MOD_HDCP_STATUS_SUCCESS"; + case MOD_HDCP_STATUS_FAILURE: + return "MOD_HDCP_STATUS_FAILURE"; + case MOD_HDCP_STATUS_RESET_NEEDED: + return "MOD_HDCP_STATUS_RESET_NEEDED"; + case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND: + return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND"; + case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND: + return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND"; + case MOD_HDCP_STATUS_INVALID_STATE: + return "MOD_HDCP_STATUS_INVALID_STATE"; + case MOD_HDCP_STATUS_NOT_IMPLEMENTED: + return "MOD_HDCP_STATUS_NOT_IMPLEMENTED"; + case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE: + return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE"; + case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE: + return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE"; + case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER: + return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER"; + case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE: + return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE"; + case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING: + return 
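/*
 * Editorial note, a worked example for mod_hdcp_dump_binary_message() above:
 * each byte costs byte_size = 3 chars ("XX "), with bytes_per_line = 16 and a
 * leading '\n' per line. For msg_size = 20: line_count = 1, trailing_bytes = 4,
 * so target_size = (3*16 + 1)*1 + 3*4 + 1 + 1 = 63 bytes. The dump is written
 * only when the caller's buffer holds at least target_size bytes; otherwise
 * buf is left untouched.
 */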
"MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: + return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED: + return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED"; + case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV: + return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV"; + case MOD_HDCP_STATUS_DDC_FAILURE: + return "MOD_HDCP_STATUS_DDC_FAILURE"; + case MOD_HDCP_STATUS_INVALID_OPERATION: + return "MOD_HDCP_STATUS_INVALID_OPERATION"; + default: + return "MOD_HDCP_STATUS_UNKNOWN"; + } +} + +char *mod_hdcp_state_id_to_str(int32_t id) +{ + switch (id) { + case HDCP_UNINITIALIZED: + return "HDCP_UNINITIALIZED"; + case HDCP_INITIALIZED: + return "HDCP_INITIALIZED"; + case HDCP_CP_NOT_DESIRED: + return "HDCP_CP_NOT_DESIRED"; + case H1_A0_WAIT_FOR_ACTIVE_RX: + return "H1_A0_WAIT_FOR_ACTIVE_RX"; + case H1_A1_EXCHANGE_KSVS: + return "H1_A1_EXCHANGE_KSVS"; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER"; + case H1_A45_AUTHENTICATED: + return "H1_A45_AUTHENTICATED"; + case H1_A8_WAIT_FOR_READY: + return "H1_A8_WAIT_FOR_READY"; + case H1_A9_READ_KSV_LIST: + return "H1_A9_READ_KSV_LIST"; + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + return "D1_A0_DETERMINE_RX_HDCP_CAPABLE"; + case D1_A1_EXCHANGE_KSVS: + return "D1_A1_EXCHANGE_KSVS"; + case D1_A23_WAIT_FOR_R0_PRIME: + return "D1_A23_WAIT_FOR_R0_PRIME"; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER"; + case D1_A4_AUTHENTICATED: + return "D1_A4_AUTHENTICATED"; + case D1_A6_WAIT_FOR_READY: + return "D1_A6_WAIT_FOR_READY"; + case D1_A7_READ_KSV_LIST: + return "D1_A7_READ_KSV_LIST"; + default: + return "UNKNOWN_STATE_ID"; + }; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h new file mode 100644 index 000000000000..2fd0e0a893ef --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -0,0 +1,139 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_LOG_H_ +#define MOD_HDCP_LOG_H_ + +#ifdef CONFIG_DRM_AMD_DC_HDCP +#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__) +#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) +#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#endif + +/* default logs */ +#define HDCP_ERROR_TRACE(hdcp, status) \ + HDCP_LOG_ERR(hdcp, \ + "[Link %d] ERROR %s IN STATE %s", \ + hdcp->config.index, \ + mod_hdcp_status_to_str(status), \ + mod_hdcp_state_id_to_str(hdcp->state.id)) +#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 1.4 enabled on display %d", \ + hdcp->config.index, displayIndex) +/* state machine logs */ +#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] HDCP_REMOVE_DISPLAY index %d", \ + hdcp->config.index, displayIndex) +#define HDCP_INPUT_PASS_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tPASS %s", \ + hdcp->config.index, str) +#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tFAIL %s", \ + hdcp->config.index, str) +#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \ + if (output->watchdog_timer_needed) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s with %d ms watchdog", \ + hdcp->config.index, \ + mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \ + else \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s", hdcp->config.index, \ + mod_hdcp_state_id_to_str(id)); \ +} while (0) +#define HDCP_TIMEOUT_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index) +#define HDCP_CPIRQ_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) +#define HDCP_EVENT_TRACE(hdcp, event) \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp) +/* TODO: find some way to tell if logging is off to save time */ +#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \ + msg_name, hdcp->buf); \ +} while (0) +#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, 
hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \ + hdcp->config.index, msg_name,\ + hdcp->buf); \ +} while (0) +#define HDCP_FULL_DDC_TRACE(hdcp) do { \ + HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ + sizeof(hdcp->auth.msg.hdcp1.bksv)); \ + HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ + sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ + sizeof(hdcp->auth.msg.hdcp1.an)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ + sizeof(hdcp->auth.msg.hdcp1.aksv)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ + sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ + HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ + sizeof(hdcp->auth.msg.hdcp1.r0p)); \ + HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ + HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ + hdcp->auth.msg.hdcp1.ksvlist_size); \ + HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ + sizeof(hdcp->auth.msg.hdcp1.vp)); \ +} while (0) +#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \ + hdcp->config.index) +#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index) +#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index) +#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \ +} while (0) +#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ +} while (0) + +#endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c new file mode 100644 index 000000000000..646d909bbc37 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -0,0 +1,328 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#define MAX_NUM_DISPLAYS 24 + + +#include "hdcp.h" + +#include "amdgpu.h" +#include "hdcp_psp.h" + +enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + uint8_t i; + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) { + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + display = &hdcp->connection.displays[i]; + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE; + HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + struct mod_hdcp_link *link = &hdcp->connection.link; + uint8_t i; + + if (!psp->dtm_context.dtm_initialized) { + DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) { + display = &hdcp->connection.displays[i]; + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1; + dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller; + dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line; + dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be; + dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; + dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; + dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct ta_hdcp_shared_memory *hdcp_cmd; + + if 
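/*
 * Editorial note: the mod_hdcp_hdcp1_* helpers in this file all follow the
 * same PSP mailbox pattern -- zero the TA shared buffer, fill the in_msg
 * member for the command, set cmd_id, call psp_hdcp_invoke(), and treat any
 * hdcp_status other than TA_HDCP_STATUS__SUCCESS as failure before touching
 * out_msg.
 */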
(!psp->hdcp_context.hdcp_initialized) { + DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; + + hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; + hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; + memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, + sizeof(hdcp->auth.msg.hdcp1.aksv)); + memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary, + sizeof(hdcp->auth.msg.hdcp1.an)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; + + HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id; + + memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv, + TA_HDCP__HDCP1_KSV_SIZE); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p; + hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) { + /* needs second part of authentication */ + hdcp->connection.is_repeater = 1; + } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) { + hdcp->connection.is_repeater = 0; + } else + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct 
ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION; + + if (!is_dp_mst_hdcp(hdcp)) { + display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index); + } + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id; + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size; + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp, + sizeof(hdcp->auth.msg.hdcp1.vp)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo = + is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + int i = 0; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->connection.displays[i].adjust.disable) + continue; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; + + hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id; + + hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return 
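/*
 * Editorial note: link maintenance passes only when the TA reports
 * protection_level == 1, i.e. HDCP 1.x encryption is still active on this
 * session; mod_hdcp_hdcp1_get_link_encryption_status() below maps the same
 * check onto MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON.
 */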
MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; + + return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status) +{ + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS) + return MOD_HDCP_STATUS_FAILURE; + + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; + + return MOD_HDCP_STATUS_SUCCESS; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h new file mode 100644 index 000000000000..986fc07ea9ea --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -0,0 +1,272 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MODULES_HDCP_HDCP_PSP_H_ +#define MODULES_HDCP_HDCP_PSP_H_ + +/* + * NOTE: These parameters are a one-to-one copy of the + * parameters required by PSP + */ +enum bgd_security_hdcp_encryption_level { + HDCP_ENCRYPTION_LEVEL__INVALID = 0, + HDCP_ENCRYPTION_LEVEL__OFF, + HDCP_ENCRYPTION_LEVEL__ON +}; + +enum ta_dtm_command { + TA_DTM_COMMAND__UNUSED_1 = 1, + TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2, + TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE +}; + +/* DTM related enumerations */ +/**********************************************************/ + +enum ta_dtm_status { + TA_DTM_STATUS__SUCCESS = 0x00, + TA_DTM_STATUS__GENERIC_FAILURE = 0x01, + TA_DTM_STATUS__INVALID_PARAMETER = 0x02, + TA_DTM_STATUS__NULL_POINTER = 0x3 +}; + +/* input/output structures for DTM commands */ +/**********************************************************/ +/** + * Input structures + */ +enum ta_dtm_hdcp_version_max_supported { + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23 +}; + +struct ta_dtm_topology_update_input_v2 { + /* display handle is unique across the driver and is used to identify a display */ + /* for all security interfaces which reference displays such as HDCP */ + uint32_t display_handle; + uint32_t is_active; + uint32_t is_miracast; + uint32_t controller; + uint32_t ddc_line; + uint32_t dig_be; + uint32_t dig_fe; + uint32_t dp_mst_vcid; + uint32_t is_assr; + uint32_t max_hdcp_supported_version; +}; + +struct ta_dtm_topology_assr_enable { + uint32_t display_topology_dig_be_index; +}; + +/** + * Output structures + */ + +/* No output structures yet */ + +union ta_dtm_cmd_input { + struct ta_dtm_topology_update_input_v2 topology_update_v2; + struct ta_dtm_topology_assr_enable topology_assr_enable; +}; + +union ta_dtm_cmd_output { + uint32_t reserved; +}; + +struct ta_dtm_shared_memory { + uint32_t cmd_id; + uint32_t resp_id; + enum ta_dtm_status dtm_status; + uint32_t reserved; + union ta_dtm_cmd_input dtm_in_message; + union ta_dtm_cmd_output dtm_out_message; +}; + +int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd, + uint64_t fence_mc_addr); + +enum ta_hdcp_command { + TA_HDCP_COMMAND__INITIALIZE, + TA_HDCP_COMMAND__HDCP1_CREATE_SESSION, + TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION, + TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS, +}; + + +/* HDCP related enumerations */ +/**********************************************************/ +#define TA_HDCP__INVALID_SESSION 0xFFFF +#define TA_HDCP__HDCP1_AN_SIZE 8 +#define TA_HDCP__HDCP1_KSV_SIZE 5 +#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 +#define TA_HDCP__HDCP1_V_PRIME_SIZE 20 + +enum ta_hdcp_status { + TA_HDCP_STATUS__SUCCESS = 0x00, + TA_HDCP_STATUS__GENERIC_FAILURE = 0x01, + TA_HDCP_STATUS__NULL_POINTER = 0x02, + TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03, + TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04, + TA_HDCP_STATUS__INVALID_PARAMETER = 0x05, + TA_HDCP_STATUS__VHX_ERROR = 0x06, + TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07, + TA_HDCP_STATUS__SRM_FAILURE = 0x08, + TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09, + 
TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A, + TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B, + TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C, + TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D, + TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E, + TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F, + TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10, + TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11, + TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12, + TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13, + TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14, + TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15, + TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16, + TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17 +}; + +enum ta_hdcp_authentication_status { + TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09 +}; + + +/* input/output structures for HDCP commands */ +/**********************************************************/ +struct ta_hdcp_cmd_hdcp1_create_session_input { + uint8_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_create_session_output { + uint32_t session_handle; + uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_primary; + uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_destroy_session_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_input { + uint32_t session_handle; + uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bcaps; + uint16_t r0_prime_primary; + uint16_t r0_prime_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_input { + uint32_t session_handle; + uint16_t bstatus_binfo; + uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE]; + uint32_t ksv_list_size; + uint8_t pj_prime; + uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_enable_encryption_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input { + uint32_t session_handle; + uint32_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_output { + uint32_t protection_level; +}; + +/**********************************************************/ +/* Common input structure for HDCP callbacks */ +union ta_hdcp_cmd_input { + struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption; + struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input 
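/*
 * Editorial note (an inference from the layout, not stated in the diff):
 * ta_hdcp_shared_memory at the end of this header is the single mailbox
 * shared with the TA -- the driver fills the one in_msg member matching
 * cmd_id, and only the corresponding out_msg member, where one exists, is
 * meaningful once psp_hdcp_invoke() returns TA_HDCP_STATUS__SUCCESS.
 */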
hdcp1_enable_dp_stream_encryption; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status; +}; + +/* Common output structure for HDCP callbacks */ +union ta_hdcp_cmd_output { + struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status; +}; +/**********************************************************/ + +struct ta_hdcp_shared_memory { + uint32_t cmd_id; + enum ta_hdcp_status hdcp_status; + uint32_t reserved; + union ta_hdcp_cmd_input in_msg; + union ta_hdcp_cmd_output out_msg; +}; + +enum psp_status { + PSP_STATUS__SUCCESS = 0, + PSP_STATUS__ERROR_INVALID_PARAMS, + PSP_STATUS__ERROR_GENERIC, + PSP_STATUS__ERROR_OUT_OF_MEMORY, + PSP_STATUS__ERROR_UNSUPPORTED_FEATURE +}; + +#endif /* MODULES_HDCP_HDCP_PSP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dc187844d10b..dbe7835aabcf 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -92,6 +92,7 @@ struct mod_vrr_params_btr { uint32_t inserted_duration_in_us; uint32_t frames_to_insert; uint32_t frame_counter; + uint32_t margin_in_us; }; struct mod_vrr_params_fixed_refresh { diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h new file mode 100644 index 000000000000..dea21702edff --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -0,0 +1,289 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_H_ +#define MOD_HDCP_H_ + +#include "os_types.h" +#include "signal_types.h" + +/* Forward Declarations */ +struct mod_hdcp; + +#define MAX_NUM_OF_DISPLAYS 6 +#define MAX_NUM_OF_ATTEMPTS 4 +#define MAX_NUM_OF_ERROR_TRACE 10 + +/* detailed return status */ +enum mod_hdcp_status { + MOD_HDCP_STATUS_SUCCESS = 0, + MOD_HDCP_STATUS_FAILURE, + MOD_HDCP_STATUS_RESET_NEEDED, + MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND, + MOD_HDCP_STATUS_DISPLAY_NOT_FOUND, + MOD_HDCP_STATUS_INVALID_STATE, + MOD_HDCP_STATUS_NOT_IMPLEMENTED, + MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE, + MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE, + MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER, + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION, + MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE, + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED, + MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE, + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV, + MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */ + MOD_HDCP_STATUS_INVALID_OPERATION, + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING, + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION, + MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST, + MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE, +}; + +struct mod_hdcp_displayport { + uint8_t rev; + uint8_t assr_supported; +}; + +struct mod_hdcp_hdmi { + uint8_t reserved; +}; +enum mod_hdcp_operation_mode { + MOD_HDCP_MODE_OFF, + MOD_HDCP_MODE_DEFAULT, + MOD_HDCP_MODE_DP, + MOD_HDCP_MODE_DP_MST +}; + +enum mod_hdcp_display_state { + MOD_HDCP_DISPLAY_INACTIVE = 0, + MOD_HDCP_DISPLAY_ACTIVE, + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, + MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED +}; + +struct mod_hdcp_ddc { + void *handle; + struct { + bool (*read_i2c)(void *handle, + uint32_t address, + uint8_t offset, + uint8_t *data, + uint32_t size); + bool (*write_i2c)(void *handle, + uint32_t address, + const uint8_t *data, + uint32_t size); + bool 
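/*
 * Editorial note: mod_hdcp never touches hardware itself; the DM supplies
 * these four DDC accessors (i2c for HDMI, dpcd for DP) through
 * mod_hdcp_config at mod_hdcp_setup() time, which keeps the module
 * independent of the OS and of the display driver's DDC implementation.
 */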
(*read_dpcd)(void *handle, + uint32_t address, + uint8_t *data, + uint32_t size); + bool (*write_dpcd)(void *handle, + uint32_t address, + const uint8_t *data, + uint32_t size); + } funcs; +}; + +struct mod_hdcp_psp { + void *handle; + void *funcs; +}; + +struct mod_hdcp_display_adjustment { + uint8_t disable : 1; + uint8_t reserved : 7; +}; + +struct mod_hdcp_link_adjustment_hdcp1 { + uint8_t disable : 1; + uint8_t postpone_encryption : 1; + uint8_t reserved : 6; +}; + +struct mod_hdcp_link_adjustment_hdcp2 { + uint8_t disable : 1; + uint8_t disable_type1 : 1; + uint8_t force_no_stored_km : 1; + uint8_t increase_h_prime_timeout: 1; + uint8_t reserved : 4; +}; + +struct mod_hdcp_link_adjustment { + uint8_t auth_delay; + struct mod_hdcp_link_adjustment_hdcp1 hdcp1; + struct mod_hdcp_link_adjustment_hdcp2 hdcp2; +}; + +struct mod_hdcp_error { + enum mod_hdcp_status status; + uint8_t state_id; +}; + +struct mod_hdcp_trace { + struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE]; + uint8_t error_count; +}; + +enum mod_hdcp_encryption_status { + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0, + MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON +}; + +/* per-link events the dm must notify the hdcp module of */ +enum mod_hdcp_event { + MOD_HDCP_EVENT_CALLBACK = 0, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + MOD_HDCP_EVENT_CPIRQ +}; + +/* output flags from module requesting timer operations */ +struct mod_hdcp_output { + uint8_t callback_needed; + uint8_t callback_stop; + uint8_t watchdog_timer_needed; + uint8_t watchdog_timer_stop; + uint16_t callback_delay; + uint16_t watchdog_timer_delay; +}; + +/* used to represent per-display info */ +struct mod_hdcp_display { + enum mod_hdcp_display_state state; + uint8_t index; + uint8_t controller; + uint8_t dig_fe; + union { + uint8_t vc_id; + }; + struct mod_hdcp_display_adjustment adjust; +}; + +/* used to represent per-link info */ +/* in case a link has multiple displays, they share the same link info */ +struct mod_hdcp_link { + enum mod_hdcp_operation_mode mode; + uint8_t dig_be; + uint8_t ddc_line; + union { + struct mod_hdcp_displayport dp; + struct mod_hdcp_hdmi hdmi; + }; + struct mod_hdcp_link_adjustment adjust; +}; + +/* a query structure for a display's hdcp information */ +struct mod_hdcp_display_query { + const struct mod_hdcp_display *display; + const struct mod_hdcp_link *link; + const struct mod_hdcp_trace *trace; + enum mod_hdcp_encryption_status encryption_status; +}; + +/* contains values that are set on each external display configuration change */ +struct mod_hdcp_config { + struct mod_hdcp_psp psp; + struct mod_hdcp_ddc ddc; + uint8_t index; +}; + +struct mod_hdcp; + +/* dm allocates memory of mod_hdcp per dc_link on dm init based on this memory size */ +size_t mod_hdcp_get_memory_size(void); + +/* called per link on link creation */ +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config); + +/* called per link on link destroy */ +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp); + +/* called per display when cp_desired is set to true */ +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct mod_hdcp_output *output); + +/* called per display when cp_desired is set to false */ +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output); + +/* called to query hdcp information on a specific index */ +enum
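/*
 * Editorial sketch (hypothetical DM-side caller, not part of the diff): the
 * intended lifecycle of the interface declared in this header, assuming the
 * DM owns the timers requested through mod_hdcp_output. arm_watchdog(),
 * schedule_callback() and next_event() are stand-ins for DM infrastructure,
 * and config, link and display are filled-in locals.
 *
 *     struct mod_hdcp *hdcp = kzalloc(mod_hdcp_get_memory_size(), GFP_KERNEL);
 *     struct mod_hdcp_output output = { 0 };
 *
 *     // per link, on link creation
 *     if (!hdcp || mod_hdcp_setup(hdcp, &config) != MOD_HDCP_STATUS_SUCCESS)
 *         goto fail;
 *     mod_hdcp_add_display(hdcp, &link, &display, &output);
 *     for (;;) {
 *         // honor the timer requests from the previous call
 *         if (output.watchdog_timer_needed)
 *             arm_watchdog(output.watchdog_timer_delay);
 *         if (output.callback_needed)
 *             schedule_callback(output.callback_delay);
 *         // feed callback / watchdog / CP_IRQ events back into the module
 *         mod_hdcp_process_event(hdcp, next_event(), &output);
 *     }
 *     mod_hdcp_teardown(hdcp);  // per link, on link destroy
 */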
mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_display_query *query); + +/* called per link on connectivity change */ +enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output); + +/* called per link on events (i.e. callback, watchdog, CP_IRQ) */ +enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, + enum mod_hdcp_event event, struct mod_hdcp_output *output); + +/* called to convert enum mod_hdcp_status to c string */ +char *mod_hdcp_status_to_str(int32_t status); + +/* called to convert state id to c string */ +char *mod_hdcp_state_id_to_str(int32_t id); + +/* called to convert signal type to operation mode */ +enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( + enum signal_type signal); +#endif /* MOD_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index d930bdecb117..ca8ce3c55337 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -35,4 +35,7 @@ struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet); +void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); + #endif diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index d885d642ed7f..db6b08f6d093 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -31,6 +31,7 @@ #include "dc.h" #define HDMI_INFOFRAME_TYPE_VENDOR 0x81 +#define HF_VSIF_VERSION 1 // VTEM Byte Offset #define VTEM_PB0 0 @@ -395,3 +396,100 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } +/** + ***************************************************************************** + * Function: mod_build_hf_vsif_infopacket + * + * @brief + * Prepare HDMI Vendor Specific info frame. + * Follows HDMI Spec to build up Vendor Specific info frame + * + * @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.) 
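 * @param [in] ALLMEnabled: editorial addition to the original comment --
 *             nonzero when HDMI Forum Auto Low Latency Mode signalling
 *             should be carried in the HF-VSIF
 * @param [in] ALLMValue: the ALLM bit value written into PB5 bit 1 when
 *             ALLMEnabled is set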
+ * @param [out] info_packet: output structure where to store VSIF + ***************************************************************************** + */ +void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue) +{ + unsigned int length = 5; + bool hdmi_vic_mode = false; + uint8_t checksum = 0; + uint32_t i = 0; + enum dc_timing_3d_format format; + bool bALLM = (bool)ALLMEnabled; + bool bALLMVal = (bool)ALLMValue; + + info_packet->valid = false; + format = stream->timing.timing_3d_format; + if (stream->view_format == VIEW_3D_FORMAT_NONE) + format = TIMING_3D_FORMAT_NONE; + + if (stream->timing.hdmi_vic != 0 + && stream->timing.h_total >= 3840 + && stream->timing.v_total >= 2160 + && format == TIMING_3D_FORMAT_NONE) + hdmi_vic_mode = true; + + if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM) + return; + + info_packet->sb[1] = 0x03; + info_packet->sb[2] = 0x0C; + info_packet->sb[3] = 0x00; + + if (bALLM) { + info_packet->sb[1] = 0xD8; + info_packet->sb[2] = 0x5D; + info_packet->sb[3] = 0xC4; + info_packet->sb[4] = HF_VSIF_VERSION; + } + + if (format != TIMING_3D_FORMAT_NONE) + info_packet->sb[4] = (2 << 5); + + else if (hdmi_vic_mode) + info_packet->sb[4] = (1 << 5); + + switch (format) { + case TIMING_3D_FORMAT_HW_FRAME_PACKING: + case TIMING_3D_FORMAT_SW_FRAME_PACKING: + info_packet->sb[5] = (0x0 << 4); + break; + + case TIMING_3D_FORMAT_SIDE_BY_SIDE: + case TIMING_3D_FORMAT_SBS_SW_PACKED: + info_packet->sb[5] = (0x8 << 4); + length = 6; + break; + + case TIMING_3D_FORMAT_TOP_AND_BOTTOM: + case TIMING_3D_FORMAT_TB_SW_PACKED: + info_packet->sb[5] = (0x6 << 4); + break; + + default: + break; + } + + if (hdmi_vic_mode) + info_packet->sb[5] = stream->timing.hdmi_vic; + + info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; + info_packet->hb1 = 0x01; + info_packet->hb2 = (uint8_t) (length); + + if (bALLM) + info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1); + + checksum += info_packet->hb0; + checksum += info_packet->hb1; + checksum += info_packet->hb2; + + for (i = 1; i <= length; i++) + checksum += info_packet->sb[i]; + + info_packet->sb[0] = (uint8_t) (0x100 - checksum); + + info_packet->valid = true; +} + diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 05e2be856037..4e2f615c3566 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -80,18 +80,18 @@ struct abm_parameters { static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0}, - {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0}, - {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70}, + {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0}, + {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf}, + {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0}, + {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0x99, 0x65, 0x20, 
0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, + {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters * const abm_settings[] = { @@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = { /* NOTE: iRAM is 256B in size */ struct iram_table_v_2 { /* flags */ - uint16_t flags; /* 0x00 U16 */ + uint16_t min_abm_backlight; /* 0x00 U16 */ /* parameters for ABM2.0 algorithm */ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ @@ -140,10 +140,10 @@ struct iram_table_v_2 { /* For reading PSR State directly from IRAM */ uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ uint8_t dmcu_state; /* 0xf6 */ uint16_t blRampReduction; /* 0xf7 */ @@ -164,42 +164,43 @@ struct iram_table_v_2_2 { uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ - uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ - uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ - uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ - uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ - uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ - uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ - uint8_t pad[21]; /* 0x6b U0.8 */ + uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ + uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ + uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ + uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ + uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ + uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ + uint16_t min_abm_backlight; /* 0x6b U16 */ + uint8_t pad[19]; /* 0x6d U0.8 */ /* parameters for crgb conversion */ - uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ - uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ - uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ + uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ /* parameters for custom curve */ /* thresholds for brightness --> backlight */ - uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ /* offsets for brightness --> backlight */ - uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ /* For reading PSR State directly from IRAM */ - uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ - uint8_t dmcu_state; /* 0xf6 */ - - uint8_t dummy1; /* 0xf7 */ - uint8_t dummy2; /* 0xf8 */ - uint8_t dummy3; /* 0xf9 */ - uint8_t dummy4; /* 0xfa */ - uint8_t dummy5; /* 0xfb */ - uint8_t 
dummy6; /* 0xfc */ - uint8_t dummy7; /* 0xfd */ - uint8_t dummy8; /* 0xfe */ - uint8_t dummy9; /* 0xff */ + uint8_t psr_state; /* 0xf0 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_state; /* 0xf6 */ + + uint8_t dummy1; /* 0xf7 */ + uint8_t dummy2; /* 0xf8 */ + uint8_t dummy3; /* 0xf9 */ + uint8_t dummy4; /* 0xfa */ + uint8_t dummy5; /* 0xfb */ + uint8_t dummy6; /* 0xfc */ + uint8_t dummy7; /* 0xfd */ + uint8_t dummy8; /* 0xfe */ + uint8_t dummy9; /* 0xff */ }; #pragma pack(pop) @@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters { unsigned int set = params.set; - ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); ram_table->deviation_gain = 0xb3; ram_table->blRampReduction = @@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + ram_table->deviation_gain[0] = 0xb3; ram_table->deviation_gain[1] = 0xa8; ram_table->deviation_gain[2] = 0x98; @@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame unsigned int set = params.set; ram_table->flags = 0x0; + + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + for (i = 0; i < NUM_AGGR_LEVEL; i++) { ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain; ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index da5df00fedce..e54157026330 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -38,6 +38,7 @@ struct dmcu_iram_parameters { unsigned int backlight_lut_array_size; unsigned int backlight_ramping_reduction; unsigned int backlight_ramping_start; + unsigned int min_abm_backlight; unsigned int set; }; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 8889aaceec60..dc7eb28f0296 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -143,6 +143,8 @@ enum PP_FEATURE_MASK { enum DC_FEATURE_MASK { DC_FBC_MASK = 0x1, DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2, + DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4, + DC_PSR_MASK = 0x8, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h index a761ba07f937..fce965984e76 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_FUSESTRAP0 0x14D7 #define mmBUS_CNTL 0x1508 #define mmCONFIG_CNTL 0x1509 #define mmCONFIG_MEMSIZE 0x150a diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h index 8fbfd0261d27..39cc4880beb4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h @@ -32,6 +32,8 @@ #define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 #define MM_DATA__MM_DATA_MASK 0xffffffff #define 
MM_DATA__MM_DATA__SHIFT 0x0 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1 #define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1 #define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0 #define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 809759f7bb81..8d05d6ca1c8d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_FUSESTRAP0 0x14D7 #define mmCC_BIF_BX_STRAP2 0x152A #define mmBIF_MM_INDACCESS_CNTL 0x1500 #define mmBIF_DOORBELL_APER_EN 0x1501 diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h index adc71b01f793..73435687d049 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h @@ -32,6 +32,8 @@ #define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 #define MM_DATA__MM_DATA_MASK 0xffffffff #define MM_DATA__MM_DATA__SHIFT 0x0 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1 #define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2 #define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1 #define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h index be4249adb356..eddf83ec1c39 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h @@ -9859,6 +9859,8 @@ #define mmDP0_DP_STEER_FIFO_BASE_IDX 2 #define mmDP0_DP_MSA_MISC 0x210e #define mmDP0_DP_MSA_MISC_BASE_IDX 2 +#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f +#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP0_DP_VID_TIMING 0x2110 #define mmDP0_DP_VID_TIMING_BASE_IDX 2 #define mmDP0_DP_VID_N 0x2111 @@ -10187,6 +10189,8 @@ #define mmDP1_DP_STEER_FIFO_BASE_IDX 2 #define mmDP1_DP_MSA_MISC 0x220e #define mmDP1_DP_MSA_MISC_BASE_IDX 2 +#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f +#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_VID_TIMING 0x2210 #define mmDP1_DP_VID_TIMING_BASE_IDX 2 #define mmDP1_DP_VID_N 0x2211 @@ -10515,6 +10519,8 @@ #define mmDP2_DP_STEER_FIFO_BASE_IDX 2 #define mmDP2_DP_MSA_MISC 0x230e #define mmDP2_DP_MSA_MISC_BASE_IDX 2 +#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f +#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_VID_TIMING 0x2310 #define mmDP2_DP_VID_TIMING_BASE_IDX 2 #define mmDP2_DP_VID_N 0x2311 @@ -10843,6 +10849,8 @@ #define mmDP3_DP_STEER_FIFO_BASE_IDX 2 #define mmDP3_DP_MSA_MISC 0x240e #define mmDP3_DP_MSA_MISC_BASE_IDX 2 +#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f +#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_VID_TIMING 0x2410 #define mmDP3_DP_VID_TIMING_BASE_IDX 2 #define mmDP3_DP_VID_N 0x2411 @@ -11171,6 +11179,8 @@ #define mmDP4_DP_STEER_FIFO_BASE_IDX 2 #define mmDP4_DP_MSA_MISC 0x250e #define mmDP4_DP_MSA_MISC_BASE_IDX 2 +#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f +#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_VID_TIMING 0x2510 #define mmDP4_DP_VID_TIMING_BASE_IDX 2 #define mmDP4_DP_VID_N 0x2511 diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h index ca16d9125fbc..2bfaaa8157d0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h @@ -1146,7 +1146,14 @@ #define mmATC_L2_MEM_POWER_LS_BASE_IDX 0 #define mmATC_L2_CGTT_CLK_CTRL 0x080c #define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0 - +#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e +#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f +#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810 +#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811 +#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0 // addressBlock: gc_utcl2_vml2pfdec // base address: 0xa100 @@ -1206,7 +1213,14 @@ #define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0 #define mmVM_L2_CGTT_CLK_CTRL 0x085e #define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0 - +#define mmVM_L2_MEM_ECC_INDEX 0x0860 +#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861 +#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVM_L2_MEM_ECC_CNT 0x0862 +#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0 +#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863 +#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0 // addressBlock: gc_utcl2_vml2vcdec // base address: 0xa200 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index 064c4bb1dc62..d4c613a85352 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -6661,7 +6661,6 @@ #define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L #define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L - // addressBlock: gc_utcl2_vml2pfdec //VM_L2_CNTL #define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0 @@ -6991,7 +6990,22 @@ #define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L #define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L #define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L - +//VM_L2_MEM_ECC_INDEX +#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VM_L2_WALKER_MEM_ECC_INDEX +#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VM_L2_MEM_ECC_CNT +#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc +#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe +#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L +#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L +//VM_L2_WALKER_MEM_ECC_CNT +#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc +#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe +#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L +#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L // addressBlock: gc_utcl2_vml2vcdec //VM_CONTEXT0_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h index 4bcacf529852..991128bb9476 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h @@ -22,6 +22,9 @@ #ifndef _nbio_7_4_0_SMN_HEADER #define _nbio_7_4_0_SMN_HEADER +// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk +// base address: 0x10100000 +#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c #define smnCPM_CONTROL 0x11180460 @@ 
-53,4 +56,13 @@ #define smnPCIE_RX_NUM_NAK 0x11180038 #define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c +// addressBlock: nbio_iohub_nb_misc_misc_cfgdec +// base address: 0x13a10000 +#define smnIOHC_INTERRUPT_EOI 0x13a10120 + +// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec +// base address: 0x13a20000 +#define smnRAS_GLOBAL_STATUS_LO 0x13a20020 +#define smnRAS_GLOBAL_STATUS_HI 0x13a20024 + #endif // _nbio_7_4_0_SMN_HEADER diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h index 994e796a28d7..ce5830ebe095 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -2793,8 +2793,8 @@ #define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2 #define mmBIF_FB_EN 0x00ff #define mmBIF_FB_EN_BASE_IDX 2 -#define mmBIF_BUSY_DELAY_CNTR 0x0100 -#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2 +#define mmBIF_INTR_CNTL 0x0100 +#define mmBIF_INTR_CNTL_BASE_IDX 2 #define mmBIF_MST_TRANS_PENDING_VF 0x0109 #define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2 #define mmBIF_SLV_TRANS_PENDING_VF 0x010a diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h index d467b939c971..07f04b2b5bdd 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h @@ -20420,9 +20420,9 @@ #define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1 #define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L #define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L -//BIF_BUSY_DELAY_CNTR -#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0 -#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL +//BIF_INTR_CNTL +#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0 +#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L //BIF_MST_TRANS_PENDING_VF #define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0 #define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL @@ -48436,4 +48436,47 @@ #define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L #define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L +//IOHC_INTERRUPT_EOI +#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0 +#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1 +#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2 +#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L +#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L +#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L + +//RAS_GLOBAL_STATUS_LO +#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0 +#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1 +#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2 +#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8 +#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9 +#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa +#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb +#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd +#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf +#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L +#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L +#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L +#define 
RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L +#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L +#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L +#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L +#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L +#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L +//RAS_GLOBAL_STATUS_HI +#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0 +#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1 +#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L +#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h index dc9895a684fe..096d878eb1de 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h @@ -588,11 +588,15 @@ #define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L #define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L //IH_CLK_CTRL +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19 +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a #define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b #define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c #define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d #define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e #define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L #define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L #define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L #define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h index dbc2e723f659..71169daa701a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h @@ -49,6 +49,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 6af9f0217b34..61a9a84e0c3a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h @@ -194,6 +194,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff 
--git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index bd3685166779..351446754c72 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h @@ -49,6 +49,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h index 627906674fe8..4bfd5f8ba66c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h @@ -194,6 +194,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index f35aba72e640..21da61c398f5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -52,6 +52,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h index 481ee6560aa9..f64fe0fbcb32 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -220,6 +220,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h index d3876052562b..687d6843c258 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h @@ -121,6 +121,98 @@ #define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0 #define mmCKSVII2C_IC_COMP_TYPE 0x006d #define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0 +#define mmCKSVII2C1_IC_CON 0x0080 +#define mmCKSVII2C1_IC_CON_BASE_IDX 0 +#define mmCKSVII2C1_IC_TAR 0x0081 +#define mmCKSVII2C1_IC_TAR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SAR 0x0082 +#define mmCKSVII2C1_IC_SAR_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_MADDR 0x0083 +#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DATA_CMD 0x0084 +#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0 
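/*
 * Editorial aside -- illustration only, not part of the patch. Every
 * register header touched in this series follows one convention: each
 * register gets an offset define (plus a _BASE_IDX segment index for the
 * mm-prefixed MMIO registers), and each field gets a paired __SHIFT and
 * _MASK define. Below is a minimal, self-contained sketch of the decode
 * idiom, reusing the CG_SPLL_STATUS field added above; the two defines
 * are copied from the hunk, the helper name is hypothetical:
 */
#include <stdint.h>

#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK   0x2u
#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1u

/* Mask first to isolate the field, then shift it down to bit zero. */
static inline uint32_t spll_chg_status(uint32_t reg_val)
{
	return (reg_val & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK) >>
	       CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT;
}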
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085 +#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086 +#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087 +#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088 +#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089 +#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a +#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_INTR_STAT 0x008b +#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0 +#define mmCKSVII2C1_IC_INTR_MASK 0x008c +#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0 +#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d +#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0 +#define mmCKSVII2C1_IC_RX_TL 0x008e +#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0 +#define mmCKSVII2C1_IC_TX_TL 0x008f +#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_INTR 0x0090 +#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091 +#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092 +#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093 +#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094 +#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095 +#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096 +#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097 +#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098 +#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_START_DET 0x0099 +#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a +#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0 +#define mmCKSVII2C1_IC_ENABLE 0x009b +#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0 +#define mmCKSVII2C1_IC_STATUS 0x009c +#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0 +#define mmCKSVII2C1_IC_TXFLR 0x009d +#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_RXFLR 0x009e +#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SDA_HOLD 0x009f +#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0 +#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0 +#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1 +#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_CR 0x00a2 +#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3 +#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4 +#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5 +#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0 +#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6 +#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7 +#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8 +#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9 +#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa +#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab +#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac +#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0 +#define 
mmCKSVII2C1_IC_COMP_TYPE 0x00ad +#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0 #define mmSMUIO_MP_RESET_INTR 0x00c1 #define mmSMUIO_MP_RESET_INTR_BASE_IDX 0 #define mmSMUIO_SOC_HALT 0x00c2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h index f8afa3518bf2..6905a9618127 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h @@ -268,6 +268,182 @@ //CKSVII2C_IC_COMP_TYPE #define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0 #define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL +//CKSVII2C1_IC_CON +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5 +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C1_IC_TAR +#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0 +#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa +#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL +#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L +#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C1_IC_SAR +#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0 +#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL +//CKSVII2C1_IC_HS_MADDR +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0 +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L +//CKSVII2C1_IC_DATA_CMD +#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0 +#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8 +#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9 +#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa +#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL +#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L +#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L +#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L +//CKSVII2C1_IC_SS_SCL_HCNT +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_SS_SCL_LCNT +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_HCNT +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_LCNT +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 
0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_HCNT +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_LCNT +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_INTR_STAT +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_INTR_MASK +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L 
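/*
 * Editorial aside -- illustration only, not part of the patch. The
 * CKSVII2C1_IC_INTR_STAT / _INTR_MASK pairs above mirror the standard
 * DesignWare-style I2C interrupt layout for the second controller
 * instance. A hedged sketch of how such status bits are typically
 * tested follows; the mask values are copied from this hunk, while the
 * helper name and the "transfer complete" policy (TX FIFO drained and
 * STOP detected) are illustrative assumptions, not the driver's actual
 * logic:
 */
#include <stdbool.h>
#include <stdint.h>

#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010u
#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200u

static inline bool i2c1_xfer_complete(uint32_t intr_stat)
{
	/* Both bits observed: TX FIFO empty and a STOP condition seen. */
	return (intr_stat & CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK) &&
	       (intr_stat & CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK);
}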
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_RAW_INTR_STAT +//CKSVII2C1_IC_RX_TL +//CKSVII2C1_IC_TX_TL +//CKSVII2C1_IC_CLR_INTR +//CKSVII2C1_IC_CLR_RX_UNDER +//CKSVII2C1_IC_CLR_RX_OVER +//CKSVII2C1_IC_CLR_TX_OVER +//CKSVII2C1_IC_CLR_RD_REQ +//CKSVII2C1_IC_CLR_TX_ABRT +//CKSVII2C1_IC_CLR_RX_DONE +//CKSVII2C1_IC_CLR_ACTIVITY +//CKSVII2C1_IC_CLR_STOP_DET +//CKSVII2C1_IC_CLR_START_DET +//CKSVII2C1_IC_CLR_GEN_CALL +//CKSVII2C1_IC_ENABLE +#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L +//CKSVII2C1_IC_STATUS +#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0 +#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1 +#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2 +#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3 +#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4 +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5 +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6 +#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L +#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L +#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L +#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L +#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L +//CKSVII2C1_IC_TXFLR +//CKSVII2C1_IC_RXFLR +//CKSVII2C1_IC_SDA_HOLD +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL +//CKSVII2C1_IC_TX_ABRT_SOURCE +//CKSVII2C1_IC_SLV_DATA_NACK_ONLY +//CKSVII2C1_IC_DMA_CR +//CKSVII2C1_IC_DMA_TDLR +//CKSVII2C1_IC_DMA_RDLR +//CKSVII2C1_IC_SDA_SETUP +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL +//CKSVII2C1_IC_ACK_GENERAL_CALL +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C1_IC_ENABLE_STATUS +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L //SMUIO_MP_RESET_INTR #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h index cf2149cc12ee..90350f46a0c4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h @@ -24,6 +24,18 @@ // addressBlock: uvd0_mmsch_dec // base address: 0x1e000 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define 
mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 // addressBlock: uvd0_jpegnpdec diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index e88541d67aa0..dd7cbc00a0aa 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1 /* Total 32bit cap indication */ enum atombios_firmware_capability { - ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001, - ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002, - ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040, - ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080, - ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, - ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, + ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001, + ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002, + ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040, + ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080, + ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, + ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, + ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400, }; enum atom_cooling_solution_id{ @@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1 uint16_t used_by_driver_in_kb; }; +/* This is part of vram_usagebyfirmware_v2_1 */ +struct vram_reserve_block +{ + uint32_t start_address_in_kb; + uint16_t used_by_firmware_in_kb; + uint16_t used_by_driver_in_kb; +}; + +/* Definitions for constants */ +enum atomfirmware_internal_constants +{ + ONE_KiB = 0x400, + ONE_MiB = 0x100000, +}; /* *************************************************************************** diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index 5dcb776548d8..7ec4331e67f2 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -25,7 +25,6 @@ #define _DISCOVERY_H_ #define PSP_HEADER_SIZE 256 -#define BINARY_MAX_SIZE (64 << 10) #define BINARY_SIGNATURE 0x28211407 #define DISCOVERY_TABLE_SIGNATURE 0x53445049 diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h new file mode 100644 index 000000000000..79af4258f259 --- /dev/null +++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __IRQSRCS_NBIF_7_4_H__ +#define __IRQSRCS_NBIF_7_4_H__ + +#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated +#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off +#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller +#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB +#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF) +#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF) +#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF) +#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF) +#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT +#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT +#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives unsupported atomic opcode from MC +#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW 0xCF // BIF receive atomic request from MC while AtomicOp Requester is not enabled in PCIE config space + +#endif // __IRQSRCS_NBIF_7_4_H__ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 98b9533e672b..2cd217e60125 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -291,15 +291,18 @@ struct kfd2kgd_calls { uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); - bool (*get_atc_vmid_pasid_mapping_valid)( + bool (*get_atc_vmid_pasid_mapping_info)( struct kgd_dev *kgd, - uint8_t vmid); - uint16_t (*get_atc_vmid_pasid_mapping_pasid)( - struct kgd_dev *kgd, - uint8_t vmid); + uint8_t vmid, + uint16_t *p_pasid); + /* No longer needed from GFXv9 onward. The scratch base address is + * passed to the shader by the CP. It's the user mode driver's + * responsibility. 
+ */ void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); + int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 27cf0afaa0b4..a7f92d0b3a90 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -179,6 +179,11 @@ enum pp_mp1_state { PP_MP1_STATE_RESET, }; +enum pp_df_cstate { + DF_CSTATE_DISALLOW = 0, + DF_CSTATE_ALLOW, +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -215,6 +220,9 @@ enum pp_mp1_state { ((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \ (support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT) +#define XGMI_MODE_PSTATE_D3 0 +#define XGMI_MODE_PSTATE_D0 1 + struct seq_file; enum amd_pp_clock_type; struct amd_pp_simple_clock_info; @@ -312,6 +320,8 @@ struct amd_pm_funcs { int (*get_ppfeature_status)(void *handle, char *buf); int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks); int (*asic_reset_mode_2)(void *handle); + int (*set_df_cstate)(void *handle, enum pp_df_cstate state); + int (*set_xgmi_pstate)(void *handle, uint32_t pstate); }; #endif diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h index 094648cac392..07633e22e99a 100644 --- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h +++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h @@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, @@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300 #define UVD0_BASE__INST6_SEG3 0 #define UVD0_BASE__INST6_SEG4 0 +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0 +#define DCN_BASE__INST0_SEG4 0 + +#define DCN_BASE__INST1_SEG0 0 +#define DCN_BASE__INST1_SEG1 0 +#define DCN_BASE__INST1_SEG2 0 +#define DCN_BASE__INST1_SEG3 0 +#define DCN_BASE__INST1_SEG4 0 + +#define DCN_BASE__INST2_SEG0 0 +#define DCN_BASE__INST2_SEG1 0 +#define DCN_BASE__INST2_SEG2 0 +#define DCN_BASE__INST2_SEG3 0 +#define DCN_BASE__INST2_SEG4 0 + +#define DCN_BASE__INST3_SEG0 0 +#define DCN_BASE__INST3_SEG1 0 +#define DCN_BASE__INST3_SEG2 0 +#define DCN_BASE__INST3_SEG3 0 +#define DCN_BASE__INST3_SEG4 0 + +#define DCN_BASE__INST4_SEG0 0 +#define DCN_BASE__INST4_SEG1 0 +#define DCN_BASE__INST4_SEG2 0 +#define DCN_BASE__INST4_SEG3 0 +#define DCN_BASE__INST4_SEG4 0 #endif diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h index c14ba65a2415..adf1b754666e 100644 --- a/drivers/gpu/drm/amd/include/vega10_enum.h +++ b/drivers/gpu/drm/amd/include/vega10_enum.h @@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001, typedef enum MTYPE { MTYPE_NC = 0x00000000, MTYPE_WC = 0x00000001, +MTYPE_RW = 0x00000001, MTYPE_CC = 0x00000002, MTYPE_UC = 0x00000003, } MTYPE; diff --git 
a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index fa8ad7db2b3a..7932eb163a00 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -969,6 +969,14 @@ static int pp_dpm_switch_power_profile(void *handle, workload = hwmgr->workload_setting[index]; } + if (type == PP_SMC_POWER_PROFILE_COMPUTE && + hwmgr->hwmgr_func->disable_power_features_for_compute_performance) { + if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) { + mutex_unlock(&hwmgr->smu_lock); + return -EINVAL; + } + } + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); mutex_unlock(&hwmgr->smu_lock); @@ -1421,6 +1429,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) { struct pp_hwmgr *hwmgr = handle; + *cap = false; if (!hwmgr) return -EINVAL; @@ -1548,6 +1557,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire) return ret; } +static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_df_cstate(hwmgr, state); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_xgmi_pstate(void *handle, uint32_t pstate) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1606,4 +1649,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_ppfeature_status = pp_set_ppfeature_status, .asic_reset_mode_2 = pp_asic_reset_mode_2, .smu_i2c_bus_access = pp_smu_i2c_bus_access, + .set_df_cstate = pp_set_df_cstate, + .set_xgmi_pstate = pp_set_xgmi_pstate, }; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 4acf139ea014..1e2da4d37567 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -25,11 +25,16 @@ #include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "soc15_common.h" #include "smu_v11_0.h" #include "smu_v12_0.h" #include "atom.h" #include "amd_pcie.h" +#include "vega20_ppt.h" +#include "arcturus_ppt.h" +#include "navi10_ppt.h" +#include "renoir_ppt.h" #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) #type @@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) uint32_t sort_feature[SMU_FEATURE_COUNT]; uint64_t hw_feature_count = 0; + mutex_lock(&smu->mutex); + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) goto failed; @@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) } failed: + mutex_unlock(&smu->mutex); + return size; } +static int smu_feature_update_enable_state(struct smu_context *smu, + uint64_t feature_mask, + bool enabled) +{ + struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_low = 0, feature_high = 0; + int ret = 0; + + if (!smu->pm_enabled) + return ret; + + feature_low = (feature_mask >> 0 ) & 0xffffffff; + feature_high = 
(feature_mask >> 32) & 0xffffffff; + + if (enabled) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } + + mutex_lock(&feature->mutex); + if (enabled) + bitmap_or(feature->enabled, feature->enabled, + (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); + else + bitmap_andnot(feature->enabled, feature->enabled, + (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); + mutex_unlock(&feature->mutex); + + return ret; +} + int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) { int ret = 0; @@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) uint64_t feature_2_disabled = 0; uint64_t feature_enables = 0; + mutex_lock(&smu->mutex); + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) - return ret; + goto out; feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); @@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) if (feature_2_enabled) { ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); if (ret) - return ret; + goto out; } if (feature_2_disabled) { ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); if (ret) - return ret; + goto out; } +out: + mutex_unlock(&smu->mutex); + return ret; } @@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max) { - int ret = 0, clk_id = 0; - uint32_t param; + int ret = 0; if (min <= 0 && max <= 0) return -EINVAL; @@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (!smu_clk_dpm_is_enabled(smu, clk_type)) return 0; - clk_id = smu_clk_get_index(smu, clk_type); - if (clk_id < 0) - return clk_id; - - if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, - param); - if (ret) - return ret; - } - - if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, - param); - if (ret) - return ret; - } - - + ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); return ret; } @@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, } int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max) + uint32_t *min, uint32_t *max, bool lock_needed) { uint32_t clock_limit; int ret = 0; @@ -237,6 +276,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (!min && !max) return -EINVAL; + if (lock_needed) + mutex_lock(&smu->mutex); + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { switch (clk_type) { case SMU_MCLK: @@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, *min = clock_limit / 100; if (max) *max = clock_limit / 100; - - return 0; + } else { + /* + * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the + * core driver 
and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). + */ + ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); } - /* - * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the - * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). - */ - ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); + + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) return true; } - +/** + * smu_dpm_set_power_gate - power gate/ungate the specific IP block + * + * @smu: smu_context pointer + * @block_type: the IP block to power gate/ungate + * @gate: to power gate if true, ungate otherwise + * + * This API uses no smu->mutex lock protection due to: + * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). + * This is guarded to be race condition free by the caller. + * 2. Or get called on user setting request of power_dpm_force_performance_level. + * Under this case, the smu->mutex lock protection is already enforced on + * the parent API smu_force_performance_level of the call path. + */ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate) { @@ -364,12 +422,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, return ret; } -enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) -{ - /* not support power state */ - return POWER_STATE_TYPE_DEFAULT; -} - int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info) { @@ -439,7 +491,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int int ret = 0; int table_id = smu_table_get_index(smu, table_index); - if (!table_data || table_id >= smu_table->table_count || table_id < 0) + if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) return -EINVAL; table = &smu_table->tables[table_index]; @@ -463,7 +515,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int return ret; /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); if (!drv2smu) memcpy(table_data, table->cpu_addr, table->size); @@ -483,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) { - if (amdgpu_dpm != 1) + if (!is_support_sw_smu(adev)) return false; if (adev->asic_type == CHIP_VEGA20) @@ -495,16 +547,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) int smu_sys_get_pp_table(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; + uint32_t powerplay_table_size; if (!smu_table->power_play_table && !smu_table->hardcode_pptable) return -EINVAL; + mutex_lock(&smu->mutex); + if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; else *table = smu_table->power_play_table; - return smu_table->power_play_table_size; + powerplay_table_size = smu_table->power_play_table_size; + + mutex_unlock(&smu->mutex); + + return powerplay_table_size; } int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) @@ -531,14 +590,11 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) memcpy(smu_table->hardcode_pptable, buf, size); smu_table->power_play_table = smu_table->hardcode_pptable; smu_table->power_play_table_size = size; - mutex_unlock(&smu->mutex); ret = smu_reset(smu); if (ret) pr_info("smu reset failed, ret = 
%d\n", ret); - return ret; - failed: mutex_unlock(&smu->mutex); return ret; @@ -569,41 +625,7 @@ int smu_feature_init_dpm(struct smu_context *smu) return ret; } -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled) -{ - uint32_t feature_low = 0, feature_high = 0; - int ret = 0; - - if (!smu->pm_enabled) - return ret; - - feature_low = (feature_mask >> 0 ) & 0xffffffff; - feature_high = (feature_mask >> 32) & 0xffffffff; - - if (enabled) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - - } - return ret; -} int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { @@ -633,8 +655,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, { struct smu_feature *feature = &smu->smu_feature; int feature_id; - uint64_t feature_mask = 0; - int ret = 0; feature_id = smu_feature_get_index(smu, mask); if (feature_id < 0) @@ -642,22 +662,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, WARN_ON(feature_id > feature->feature_num); - feature_mask = 1ULL << feature_id; - - mutex_lock(&feature->mutex); - ret = smu_feature_update_enable_state(smu, feature_mask, enable); - if (ret) - goto failed; - - if (enable) - test_and_set_bit(feature_id, feature->enabled); - else - test_and_clear_bit(feature_id, feature->enabled); - -failed: - mutex_unlock(&feature->mutex); - - return ret; + return smu_feature_update_enable_state(smu, + 1ULL << feature_id, + enable); } int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) @@ -707,20 +714,25 @@ static int smu_set_funcs(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; + if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) + smu->od_enabled = true; + switch (adev->asic_type) { case CHIP_VEGA20: + vega20_set_ppt_funcs(smu); + break; case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: + navi10_set_ppt_funcs(smu); + break; case CHIP_ARCTURUS: - if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) - smu->od_enabled = true; - smu_v11_0_set_smu_funcs(smu); + arcturus_set_ppt_funcs(smu); + /* OD is not supported on Arcturus */ + smu->od_enabled =false; break; case CHIP_RENOIR: - if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) - smu->od_enabled = true; - smu_v12_0_set_smu_funcs(smu); + renoir_set_ppt_funcs(smu); break; default: return -EINVAL; @@ -736,6 +748,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; + smu->is_apu = false; mutex_init(&smu->mutex); return smu_set_funcs(adev); @@ -749,11 +762,10 @@ static int smu_late_init(void *handle) if (!smu->pm_enabled) return 0; - mutex_lock(&smu->mutex); smu_handle_task(&adev->smu, smu->smu_dpm.dpm_level, - AMD_PP_TASK_COMPLETE_INIT); - mutex_unlock(&smu->mutex); + AMD_PP_TASK_COMPLETE_INIT, + false); return 0; } @@ -919,14 +931,9 @@ static int smu_init_fb_allocations(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - uint32_t table_count = smu_table->table_count; - uint32_t i = 0; - int32_t ret = 0; + 
int ret, i; - if (table_count <= 0) - return -EINVAL; - - for (i = 0 ; i < table_count; i++) { + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; ret = amdgpu_bo_create_kernel(adev, @@ -942,7 +949,7 @@ static int smu_init_fb_allocations(struct smu_context *smu) return 0; failed: - for (; i > 0; i--) { + while (--i >= 0) { if (tables[i].size == 0) continue; amdgpu_bo_free_kernel(&tables[i].bo, @@ -957,13 +964,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - uint32_t table_count = smu_table->table_count; uint32_t i = 0; - if (table_count == 0 || tables == NULL) + if (!tables) return 0; - for (i = 0 ; i < table_count; i++) { + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; amdgpu_bo_free_kernel(&tables[i].bo, @@ -974,50 +980,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } -static int smu_override_pcie_parameters(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; - int ret; - - if (adev->flags & AMD_IS_APU) - return 0; - - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) - pcie_gen = 3; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - pcie_gen = 2; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - pcie_gen = 1; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) - pcie_gen = 0; - - /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 - * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 - * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 - */ - if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) - pcie_width = 6; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) - pcie_width = 5; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) - pcie_width = 4; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) - pcie_width = 3; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) - pcie_width = 2; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) - pcie_width = 1; - - smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; - ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg); - if (ret) - pr_err("[%s] Attempt to override pcie params failed!\n", __func__); - return ret; -} - static int smu_smc_table_hw_init(struct smu_context *smu, bool initialize) { @@ -1092,8 +1054,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - /* issue RunAfllBtc msg */ - ret = smu_run_afll_btc(smu); + /* issue Run*Btc msg */ + ret = smu_run_btc(smu); if (ret) return ret; @@ -1147,7 +1109,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - ret = smu_get_power_limit(smu, &smu->default_power_limit, true); + ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false); if (ret) return ret; } @@ -1226,29 +1188,46 @@ static int smu_free_memory_pool(struct smu_context *smu) return ret; } -static int smu_hw_init(void *handle) +static int smu_start_smc_engine(struct smu_context *smu) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct amdgpu_device *adev = smu->adev; + int ret = 0; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (adev->asic_type < CHIP_NAVI10) { - ret = 
smu_load_microcode(smu); - if (ret) - return ret; + if (smu->ppt_funcs->load_microcode) { + ret = smu->ppt_funcs->load_microcode(smu); + if (ret) + return ret; + } } } - ret = smu_check_fw_status(smu); + if (smu->ppt_funcs->check_fw_status) { + ret = smu->ppt_funcs->check_fw_status(smu); + if (ret) + pr_err("SMC is not ready\n"); + } + + return ret; +} + +static int smu_hw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct smu_context *smu = &adev->smu; + + ret = smu_start_smc_engine(smu); if (ret) { - pr_err("SMC firmware status is not correct\n"); + pr_err("SMU is not ready yet!\n"); return ret; } if (adev->flags & AMD_IS_APU) { smu_powergate_sdma(&adev->smu, false); smu_powergate_vcn(&adev->smu, false); + smu_set_gfx_cgpg(&adev->smu, true); } if (!smu->pm_enabled) @@ -1291,6 +1270,11 @@ failed: return ret; } +static int smu_stop_dpms(struct smu_context *smu) +{ + return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); +} + static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1303,6 +1287,18 @@ static int smu_hw_fini(void *handle) smu_powergate_vcn(&adev->smu, true); } + ret = smu_stop_thermal_control(smu); + if (ret) { + pr_warn("Fail to stop thermal control!\n"); + return ret; + } + + ret = smu_stop_dpms(smu); + if (ret) { + pr_warn("Fail to stop Dpms!\n"); + return ret; + } + kfree(table_context->driver_pptable); table_context->driver_pptable = NULL; @@ -1344,7 +1340,10 @@ static int smu_suspend(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); + bool baco_feature_is_enabled = false; + + if(!(adev->flags & AMD_IS_APU)) + baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); ret = smu_system_features_control(smu, false); if (ret) @@ -1363,6 +1362,8 @@ static int smu_suspend(void *handle) if (adev->asic_type >= CHIP_NAVI10 && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); + if (smu->is_apu) + smu_set_gfx_cgpg(&adev->smu, false); return 0; } @@ -1375,7 +1376,11 @@ static int smu_resume(void *handle) pr_info("SMU is resuming...\n"); - mutex_lock(&smu->mutex); + ret = smu_start_smc_engine(smu); + if (ret) { + pr_err("SMU is not ready yet!\n"); + goto failed; + } ret = smu_smc_table_hw_init(smu, false); if (ret) @@ -1385,13 +1390,16 @@ static int smu_resume(void *handle) if (ret) goto failed; - mutex_unlock(&smu->mutex); + if (smu->is_apu) + smu_set_gfx_cgpg(&adev->smu, true); + + smu->disable_uclk_switch = 0; pr_info("SMU is resumed successfully!\n"); return 0; + failed: - mutex_unlock(&smu->mutex); return ret; } @@ -1409,8 +1417,9 @@ int smu_display_configuration_change(struct smu_context *smu, mutex_lock(&smu->mutex); - smu_set_deep_sleep_dcefclk(smu, - display_config->min_dcef_deep_sleep_set_clk / 100); + if (smu->ppt_funcs->set_deep_sleep_dcefclk) + smu->ppt_funcs->set_deep_sleep_dcefclk(smu, + display_config->min_dcef_deep_sleep_set_clk / 100); for (index = 0; index < display_config->num_path_including_non_display; index++) { if (display_config->displays[index].controller_id != 0) @@ -1529,7 +1538,8 @@ static int smu_enable_umd_pstate(void *handle, struct smu_context *smu = (struct smu_context*)(handle); struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context) + + if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)) return 
-EINVAL; if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { @@ -1587,9 +1597,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d &soc_mask); if (ret) return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1653,7 +1663,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, workload = smu->workload_setting[index]; if (smu->power_profile_mode != workload) - smu_set_power_profile_mode(smu, &workload, 0); + smu_set_power_profile_mode(smu, &workload, 0, false); } return ret; @@ -1661,18 +1671,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id) + enum amd_pp_task task_id, + bool lock_needed) { int ret = 0; + if (lock_needed) + mutex_lock(&smu->mutex); + switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: ret = smu_pre_display_config_changed(smu); if (ret) - return ret; + goto out; ret = smu_set_cpu_power_state(smu); if (ret) - return ret; + goto out; ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: @@ -1683,6 +1697,10 @@ int smu_handle_task(struct smu_context *smu, break; } +out: + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -1715,7 +1733,7 @@ int smu_switch_power_profile(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - smu_set_power_profile_mode(smu, &workload, 0); + smu_set_power_profile_mode(smu, &workload, 0, false); mutex_unlock(&smu->mutex); @@ -1727,7 +1745,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); enum amd_dpm_forced_level level; - if (!smu_dpm_ctx->dpm_context) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; mutex_lock(&(smu->mutex)); @@ -1742,15 +1760,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int ret = 0; - if (!smu_dpm_ctx->dpm_context) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; + mutex_lock(&smu->mutex); + ret = smu_enable_umd_pstate(smu, &level); - if (ret) + if (ret) { + mutex_unlock(&smu->mutex); return ret; + } ret = smu_handle_task(smu, level, - AMD_PP_TASK_READJUST_POWER_STATE); + AMD_PP_TASK_READJUST_POWER_STATE, + false); + + mutex_unlock(&smu->mutex); return ret; } @@ -1766,6 +1791,144 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count) return ret; } +int smu_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask, + bool lock_needed) +{ + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int ret = 0; + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + pr_debug("force clock level is for dpm manual mode only.\n"); + return -EINVAL; + } + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) + ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_mp1_state(struct smu_context 
*smu, + enum pp_mp1_state mp1_state) +{ + uint16_t msg; + int ret; + + /* + * The SMC is not fully ready. That may be + * expected as the IP may be masked. + * So, just return without error. + */ + if (!smu->pm_enabled) + return 0; + + mutex_lock(&smu->mutex); + + switch (mp1_state) { + case PP_MP1_STATE_SHUTDOWN: + msg = SMU_MSG_PrepareMp1ForShutdown; + break; + case PP_MP1_STATE_UNLOAD: + msg = SMU_MSG_PrepareMp1ForUnload; + break; + case PP_MP1_STATE_RESET: + msg = SMU_MSG_PrepareMp1ForReset; + break; + case PP_MP1_STATE_NONE: + default: + mutex_unlock(&smu->mutex); + return 0; + } + + /* some asics may not support those messages */ + if (smu_msg_get_index(smu, msg) < 0) { + mutex_unlock(&smu->mutex); + return 0; + } + + ret = smu_send_smc_msg(smu, msg); + if (ret) + pr_err("[PrepareMp1] Failed!\n"); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + int ret = 0; + + /* + * The SMC is not fully ready. That may be + * expected as the IP may be masked. + * So, just return without error. + */ + if (!smu->pm_enabled) + return 0; + + if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) + return 0; + + mutex_lock(&smu->mutex); + + ret = smu->ppt_funcs->set_df_cstate(smu, state); + if (ret) + pr_err("[SetDfCstate] failed!\n"); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_write_watermarks_table(struct smu_context *smu) +{ + int ret = 0; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = NULL; + + table = &smu_table->tables[SMU_TABLE_WATERMARKS]; + + if (!table->cpu_addr) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, + true); + + return ret; +} + +int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) +{ + int ret = 0; + struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; + void *table = watermarks->cpu_addr; + + mutex_lock(&smu->mutex); + + if (!smu->disable_watermark && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + smu_set_watermarks_table(smu, table, clock_ranges); + smu->watermarks_bitmap |= WATERMARKS_EXIST; + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + } + + mutex_unlock(&smu->mutex); + + return ret; +} + const struct amd_ip_funcs smu_ip_funcs = { .name = "smu", .early_init = smu_early_init, @@ -1802,3 +1965,549 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = .rev = 0, .funcs = &smu_ip_funcs, }; + +int smu_load_microcode(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->load_microcode) + ret = smu->ppt_funcs->load_microcode(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_check_fw_status(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->check_fw_status) + ret = smu->ppt_funcs->check_fw_status(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_gfx_cgpg) + ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_speed_rpm) + ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); + + 
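/* when no set_fan_speed_rpm callback is registered, ret stays 0 and the request is silently ignored */ +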
mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool def, + bool lock_needed) +{ + int ret = 0; + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_power_limit) + ret = smu->ppt_funcs->get_power_limit(smu, limit, def); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_power_limit(struct smu_context *smu, uint32_t limit) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_power_limit) + ret = smu->ppt_funcs->set_power_limit(smu, limit); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->print_clk_levels) + ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_od_percentage) + ret = smu->ppt_funcs->get_od_percentage(smu, type); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_od_percentage) + ret = smu->ppt_funcs->set_od_percentage(smu, type, value); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->od_edit_dpm_table) + ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->read_sensor) + ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_power_profile_mode(struct smu_context *smu, char *buf) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_power_profile_mode) + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size, + bool lock_needed) +{ + int ret = 0; + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_get_fan_control_mode(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_control_mode) + ret = smu->ppt_funcs->get_fan_control_mode(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_control_mode(struct smu_context *smu, int value) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_control_mode) + ret = smu->ppt_funcs->set_fan_control_mode(smu, value); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_speed_percent) + ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed); + + 
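/* *speed is left untouched when the ASIC provides no get_fan_speed_percent callback */ +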
mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_speed_percent) + ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_speed_rpm) + ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_deep_sleep_dcefclk) + ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_active_display_count(struct smu_context *smu, uint32_t count) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_active_display_count) + ret = smu->ppt_funcs->set_active_display_count(smu, count); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type(struct smu_context *smu, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type) + ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_max_high_clocks(struct smu_context *smu, + struct amd_pp_simple_clock_info *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_max_high_clocks) + ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type_with_latency) + ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type_with_voltage(struct smu_context *smu, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type_with_voltage) + ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request *clock_req) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->display_clock_voltage_request) + ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); + + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) +{ + int ret = -EINVAL; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->display_disable_memory_clock_switch) + ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_notify_smu_enable_pwe(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->notify_smu_enable_pwe) + ret = smu->ppt_funcs->notify_smu_enable_pwe(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate) +{ + int ret 
= 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_xgmi_pstate) + ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_azalia_d3_pme(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_azalia_d3_pme) + ret = smu->ppt_funcs->set_azalia_d3_pme(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +bool smu_baco_is_support(struct smu_context *smu) +{ + bool ret = false; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_is_support) + ret = smu->ppt_funcs->baco_is_support(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) +{ + if (!smu->ppt_funcs->baco_get_state) + return -EINVAL; + + mutex_lock(&smu->mutex); + *state = smu->ppt_funcs->baco_get_state(smu); + mutex_unlock(&smu->mutex); + + return 0; +} + +int smu_baco_reset(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_reset) + ret = smu->ppt_funcs->baco_reset(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->mode2_reset) + ret = smu->ppt_funcs->mode2_reset(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) + ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_uclk_dpm_states(struct smu_context *smu, + unsigned int *clock_values_in_khz, + unsigned int *num_states) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_uclk_dpm_states) + ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); + + mutex_unlock(&smu->mutex); + + return ret; +} + +enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) +{ + enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_current_power_state) + pm_state = smu->ppt_funcs->get_current_power_state(smu); + + mutex_unlock(&smu->mutex); + + return pm_state; +} + +int smu_get_dpm_clock_table(struct smu_context *smu, + struct dpm_clocks *clock_table) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_dpm_clock_table) + ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); + + mutex_unlock(&smu->mutex); + + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index d493a3f8c07a..3099ac256bd3 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -36,6 +37,12 @@ #include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" #include "nbio/nbio_7_4_sh_mask.h" +#include "amdgpu_xgmi.h" +#include <linux/i2c.h> +#include <linux/pci.h> +#include "amdgpu_ras.h" + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev #define CTF_OFFSET_EDGE 5 #define CTF_OFFSET_HOTSPOT 5 @@ -112,8 +119,7 @@ static struct 
smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown), MSG_MAP(SoftReset, PPSMC_MSG_SoftReset), MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc), - MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc), - MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize), @@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = { TAB_MAP(SMU_METRICS), TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(OVERDRIVE), + TAB_MAP(I2C_COMMANDS), }; static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -294,6 +301,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -528,9 +538,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu) return 0; } -static int arcturus_run_btc_afll(struct smu_context *smu) +static int arcturus_run_btc(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + int ret = 0; + + ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + if (ret) { + pr_err("RunAfllBtc failed!\n"); + return ret; + } + + return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc); } static int arcturus_populate_umd_state_clk(struct smu_context *smu) @@ -610,12 +628,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu, return ret; } + /* + * For DPM disabled case, there will be only one clock level. + * And it's safe to assume that is always the current clock. + */ for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_MCLK: @@ -635,9 +658,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_SOCCLK: @@ -657,9 +681,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_FCLK: @@ -679,9 +704,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < single_dpm_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, single_dpm_table->dpm_levels[i].value, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? 
"*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; default: @@ -756,8 +782,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu, uint32_t soft_min_level, soft_max_level; int ret = 0; - mutex_lock(&(smu->mutex)); - soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0; @@ -792,91 +816,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu, break; case SMU_MCLK: - single_dpm_table = &(dpm_table->mem_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - - break; - case SMU_SOCCLK: - single_dpm_table = &(dpm_table->soc_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - - break; - case SMU_FCLK: - single_dpm_table = &(dpm_table->fclk_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - + /* + * Should not arrive here since Arcturus does not + * support mclk/socclk/fclk softmin/softmax settings + */ + ret = -EINVAL; break; default: break; } - mutex_unlock(&(smu->mutex)); return ret; } @@ -1043,7 +995,7 @@ static int arcturus_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -1186,6 +1138,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) { struct arcturus_dpm_table *dpm_table = (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0); uint32_t soft_level; int ret = 0; @@ -1199,40 +1152,27 
@@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) dpm_table->gfx_table.dpm_state.soft_max_level = dpm_table->gfx_table.dpm_levels[soft_level].value; - /* uclk */ - if (highest) - soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); - else - soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); - - dpm_table->mem_table.dpm_state.soft_min_level = - dpm_table->mem_table.dpm_state.soft_max_level = - dpm_table->mem_table.dpm_levels[soft_level].value; - - /* socclk */ - if (highest) - soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); - else - soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); - - dpm_table->soc_table.dpm_state.soft_min_level = - dpm_table->soc_table.dpm_state.soft_max_level = - dpm_table->soc_table.dpm_levels[soft_level].value; - - ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload boot level to %s!\n", highest ? "highest" : "lowest"); return ret; } - ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload dpm max level to %s!\n!", highest ? "highest" : "lowest"); return ret; } + if (hive) + /* + * Force XGMI Pstate to highest or lowest + * TODO: revise this when xgmi dpm is functional + */ + ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0); + return ret; } @@ -1240,6 +1180,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu) { struct arcturus_dpm_table *dpm_table = (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0); uint32_t soft_min_level, soft_max_level; int ret = 0; @@ -1251,34 +1192,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu) dpm_table->gfx_table.dpm_state.soft_max_level = dpm_table->gfx_table.dpm_levels[soft_max_level].value; - /* uclk */ - soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); - soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); - dpm_table->mem_table.dpm_state.soft_min_level = - dpm_table->gfx_table.dpm_levels[soft_min_level].value; - dpm_table->mem_table.dpm_state.soft_max_level = - dpm_table->gfx_table.dpm_levels[soft_max_level].value; - - /* socclk */ - soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); - soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); - dpm_table->soc_table.dpm_state.soft_min_level = - dpm_table->soc_table.dpm_levels[soft_min_level].value; - dpm_table->soc_table.dpm_state.soft_max_level = - dpm_table->soc_table.dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload DPM Bootup Levels!"); return ret; } - ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload DPM Max Levels!"); return ret; } + if (hive) + /* + * Reset XGMI Pstate back to default + * TODO: revise this when xgmi dpm is functional + */ + ret = smu_v11_0_set_xgmi_pstate(smu, 0); + return ret; } @@ -1891,6 +1823,253 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } +static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, 
bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1); + if (ret) { + pr_err("[EnableVCNDPM] failed!\n"); + return ret; + } + } + power_gate->vcn_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0); + if (ret) { + pr_err("[DisableVCNDPM] failed!\n"); + return ret; + } + } + power_gate->vcn_gated = true; + } + + return ret; +} + + +static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write, + uint8_t address, uint32_t numbytes, + uint8_t *data) +{ + int i; + + BUG_ON(numbytes > MAX_SW_I2C_COMMANDS); + + req->I2CcontrollerPort = 0; + req->I2CSpeed = 2; + req->SlaveAddress = address; + req->NumCmds = numbytes; + + for (i = 0; i < numbytes; i++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[i]; + + /* First 2 bytes are always write for lower 2b EEPROM address */ + if (i < 2) + cmd->Cmd = 1; + else + cmd->Cmd = write; + + + /* Add RESTART for read after address filled */ + cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0; + + /* Add STOP in the end */ + cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0; + + /* Fill with data regardless if read or write to simplify code */ + cmd->RegisterAddr = data[i]; + } +} + +static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t i, ret = 0; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS]; + + memset(&req, 0, sizeof(req)); + arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + /* Now read data starting with that address */ + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, + true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr; + + /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */ + for (i = 0; i < numbytes; i++) + data[i] = res->SwI2cCmds[i].Data; + + pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + } else + pr_err("arcturus_i2c_eeprom_read_data - error occurred :%x", ret); + + return ret; +} + +static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t ret; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + + memset(&req, 0, sizeof(req)); + arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + /* + * According to EEPROM spec there is a MAX of 10 ms required for + * EEPROM to flush internal RX buffer after STOP was issued at the + * end of write transaction. 
During this time the EEPROM will not be + * responsive to any more commands - so wait a bit more. + */ + msleep(10); + + } else + pr_err("arcturus_i2c_write- error occurred :%x", ret); + + return ret; +} + +static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0; + uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 }; + + for (i = 0; i < num; i++) { + /* + * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at + * once and hence the data needs to be spliced into chunks and sent each + * chunk separately + */ + data_size = msgs[i].len - 2; + data_chunk_size = MAX_SW_I2C_COMMANDS - 2; + next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff); + data_ptr = msgs[i].buf + 2; + + for (j = 0; j < data_size / data_chunk_size; j++) { + /* Insert the EEPROM dest addess, bits 0-15 */ + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = arcturus_i2c_eeprom_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + + memcpy(data_ptr, data_chunk + 2, data_chunk_size); + } else { + + memcpy(data_chunk + 2, data_ptr, data_chunk_size); + + ret = arcturus_i2c_eeprom_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + } + + if (ret) { + num = -EIO; + goto fail; + } + + next_eeprom_addr += data_chunk_size; + data_ptr += data_chunk_size; + } + + if (data_size % data_chunk_size) { + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = arcturus_i2c_eeprom_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + + memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size); + } else { + memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size); + + ret = arcturus_i2c_eeprom_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + } + + if (ret) { + num = -EIO; + goto fail; + } + } + } + +fail: + return num; +} + +static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + + +static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = { + .master_xfer = arcturus_i2c_eeprom_i2c_xfer, + .functionality = arcturus_i2c_eeprom_i2c_func, +}; + +static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int res; + + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &arcturus_i2c_eeprom_i2c_algo; + snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + + res = i2c_add_adapter(control); + if (res) + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + + return res; +} + +static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) +{ + i2c_del_adapter(control); +} + static const struct pptable_funcs arcturus_ppt_funcs = { /* translate smu index into arcturus specific index */ .get_smu_msg_index = arcturus_get_smu_msg_index, @@ -1909,7 +2088,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = arcturus_get_allowed_feature_mask, /* btc */ - .run_afll_btc = arcturus_run_btc_afll, + .run_btc = arcturus_run_btc, /* dpm/clk tables */ .set_default_dpm_table = 
arcturus_set_default_dpm_table, .populate_umd_state_clk = arcturus_populate_umd_state_clk, @@ -1929,12 +2108,61 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, + .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable, + .i2c_eeprom_init = arcturus_i2c_eeprom_control_init, + .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg = smu_v11_0_send_msg, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, }; void arcturus_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &arcturus_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index cc63705920dc..2773966ae434 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \ pp_overdriver.o smu_helper.o \ vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \ vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \ - vega12_baco.o smu9_baco.o + vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \ + ci_baco.o smu7_baco.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c new file mode 100644 index 000000000000..3be40114e63d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c @@ -0,0 +1,195 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "ci_baco.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, 
CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 
BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_DELAY_MS, 0, 0, 0, 20, 0 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 } +}; + +int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h new file mode 100644 index 000000000000..17041f187020 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __CI_BACO_H__ +#define __CI_BACO_H__ +#include "smu7_baco.h" + +extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c index 9c57c1f67749..1c73776bd606 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c @@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m return ret; } +bool baco_program_registers(struct pp_hwmgr *hwmgr, + const struct baco_cmd_entry *entry, + const u32 array_size) +{ + u32 i, reg = 0; + + for (i = 0; i < array_size; i++) { + if ((entry[i].cmd == CMD_WRITE) || + (entry[i].cmd == CMD_READMODIFYWRITE) || + (entry[i].cmd == CMD_WAITFOR)) + reg = entry[i].reg_offset; + if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask, + entry[i].shift, entry[i].val, entry[i].timeout)) + return false; + } + + return true; +} + bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, const struct soc15_baco_cmd_entry *entry, const u32 array_size) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h index 95296c916f4e..8393eb62706d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h @@ -33,6 +33,15 @@ enum baco_cmd_type { CMD_DELAY_US, }; +struct baco_cmd_entry { + enum baco_cmd_type cmd; + uint32_t reg_offset; + uint32_t mask; + uint32_t shift; + uint32_t timeout; + uint32_t val; +}; + struct soc15_baco_cmd_entry { enum baco_cmd_type cmd; uint32_t hwip; @@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry { uint32_t timeout; uint32_t val; }; + +extern bool baco_program_registers(struct pp_hwmgr *hwmgr, + const struct baco_cmd_entry *entry, + const u32 array_size); extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, const struct soc15_baco_cmd_entry *entry, const u32 array_size); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c new file mode 100644 index 000000000000..c0368f2dfb21 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "fiji_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 
CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 } +}; + +static const struct baco_cmd_entry clk_req_b_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } 
+}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 } +}; + +int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h new file mode 100644 index 000000000000..47f402900bdb --- /dev/null +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __FIJI_BACO_H__ +#define __FIJI_BACO_H__ +#include "smu7_baco.h" + +extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c new file mode 100644 index 000000000000..8f8e296f2fe9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c @@ -0,0 +1,222 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
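The CMD_WAITFOR rows in the tables above pair a mask with an expected value and a retry budget (0xffffffff reads as an unbounded wait). baco_cmd_handler() itself is not part of this hunk, so the poll loop below is only an assumed reading of those fields, not the shipped implementation:

/* Assumed CMD_WAITFOR semantics; RREG32()/udelay() as used elsewhere in amdgpu */
static bool waitfor_sketch(struct amdgpu_device *adev, u32 reg,
			   u32 mask, u32 timeout, u32 val)
{
	u32 tries = 0;

	while ((RREG32(reg) & mask) != val) {
		if (timeout != 0xffffffff && ++tries > timeout)
			return false;	/* entry fails and the whole table aborts */
		udelay(1);
	}

	return true;
}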
+ * + */ +#include "amdgpu.h" +#include "polaris_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_11_0_d.h" +#include "dce/dce_11_0_sh_mask.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 }, + { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } +}; + +static const struct baco_cmd_entry clk_req_b_tbl[] = +{ 
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, 
BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl_vg[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl_vg[] = +{ + { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } +}; + +int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + if (hwmgr->chip_id == CHIP_VEGAM) { + baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg)); + baco_program_registers(hwmgr, turn_off_plls_tbl_vg, + ARRAY_SIZE(turn_off_plls_tbl_vg)); + } else { + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + } + baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + 
ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h new file mode 100644 index 000000000000..87a5fa0a157a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __POLARIS_BACO_H__ +#define __POLARIS_BACO_H__ +#include "smu7_baco.h" + +extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c new file mode 100644 index 000000000000..044cda005aed --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
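fiji_baco_set_state() and polaris_baco_set_state() share one contract: return 0 when the ASIC is already in the requested state or when the closing enter/exit sequence programs cleanly, and -EINVAL otherwise; the intermediate tables are programmed best-effort with their return values ignored. An illustrative round trip on a Polaris part (the caller is hypothetical):

static int demo_baco_cycle(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = polaris_baco_set_state(hwmgr, BACO_STATE_IN);
	if (ret)
		return ret;

	/* the exit path sleeps 20 ms itself before touching BACO_CNTL */
	return polaris_baco_set_state(hwmgr, BACO_STATE_OUT);
}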
+ * + */ +#include "amdgpu.h" +#include "smu7_baco.h" +#include "tonga_baco.h" +#include "fiji_baco.h" +#include "polaris_baco.h" +#include "ci_baco.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + *cap = false; + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) + return 0; + + reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); + + if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) + *cap = true; + + return 0; +} + +int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + reg = RREG32(mmBACO_CNTL); + + if (reg & BACO_CNTL__BACO_MODE_MASK) + /* gfx has already entered BACO state */ + *state = BACO_STATE_IN; + else + *state = BACO_STATE_OUT; + return 0; +} + +int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + case CHIP_TONGA: + return tonga_baco_set_state(hwmgr, state); + case CHIP_FIJI: + return fiji_baco_set_state(hwmgr, state); + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + return polaris_baco_set_state(hwmgr, state); +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + return ci_baco_set_state(hwmgr, state); +#endif + default: + return -EINVAL; + } +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h new file mode 100644 index 000000000000..be0d98abb536 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU7_BACO_H__ +#define __SMU7_BACO_H__ +#include "hwmgr.h" +#include "common_baco.h" + +extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); +extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 34f95e0e3ea4..c805c6fcdba2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -48,6 +48,7 @@ #include "smu7_clockpowergating.h" #include "processpptables.h" #include "pp_thermal.h" +#include "smu7_baco.h" #include "ivsrcid/ivsrcid_vislands30.h" @@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_voltage_lookup_table *lookup_table) { uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; table_size = lookup_table->count; PP_ASSERT_WITH_CODE(0 != lookup_table->count, @@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, for (j = i + 1; j > 0; j--) { if (lookup_table->entries[j].us_vdd < lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; + swap(lookup_table->entries[j - 1], + lookup_table->entries[j]); } } } @@ -5145,6 +5144,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, + .get_asic_baco_capability = smu7_baco_get_capability, + .get_asic_baco_state = smu7_baco_get_state, + .set_asic_baco_state = smu7_baco_set_state, .power_off_asic = smu7_power_off_asic, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c new file mode 100644 index 000000000000..ea743bea8e29 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c @@ -0,0 +1,231 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
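Besides wiring up the three BACO hooks, the smu7_hwmgr.c hunk replaces the hand-rolled three-assignment exchange in smu7_sort_lookup_table() with the kernel's swap() macro and drops the phm_ppt_v1_voltage_lookup_record temporary; vega10_hwmgr.c below gets the same cleanup. The pattern in isolation, over a hypothetical flat array:

#include <linux/kernel.h>	/* swap(a, b): exchange two same-typed lvalues */

/* Hypothetical mirror of the lookup-table sort: ascending by voltage. */
static void demo_sort_vdd(u16 *us_vdd, unsigned int count)
{
	unsigned int i, j;

	for (i = 0; i + 1 < count; i++)
		for (j = i + 1; j > 0; j--)
			if (us_vdd[j] < us_vdd[j - 1])
				swap(us_vdd[j - 1], us_vdd[j]);
}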
+ * + */ +#include "amdgpu.h" +#include "tonga_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, 
mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 
0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +static const struct baco_cmd_entry gpio_tbl_iceland[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff } +}; + +static const struct baco_cmd_entry exit_baco_tbl_iceland[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_DELAY_MS, 0, 0, 0, 20, 0 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl_iceland[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state == 
BACO_STATE_IN) { + if (hwmgr->chip_id == CHIP_TOPAZ) + baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland)); + else + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (hwmgr->chip_id == CHIP_TOPAZ) { + if (baco_program_registers(hwmgr, exit_baco_tbl_iceland, + ARRAY_SIZE(exit_baco_tbl_iceland))) { + if (baco_program_registers(hwmgr, clean_baco_tbl_iceland, + ARRAY_SIZE(clean_baco_tbl_iceland))) + return 0; + } + } else { + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h new file mode 100644 index 000000000000..5dc16cc8a295 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
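tonga_baco.c is the only one of the new platform files that special-cases a chip in both directions: Topaz (Iceland) gets the trimmed gpio_tbl_iceland, and its exit table folds the 20 ms regulator gap into the sequence itself as a { CMD_DELAY_MS, 0, 0, 0, 20, 0 } entry on top of the unconditional msleep(20). Reading the delay rows against the field order { cmd, reg_offset, mask, shift, timeout, val }, the duration appears to travel in the timeout slot; the dispatch below is an assumption, since the handler body is outside this patch:

#include <linux/delay.h>	/* udelay(), msleep() */

/* Assumed mapping of the delay opcodes; duration taken from 'timeout'. */
static bool demo_delay(enum baco_cmd_type cmd, u32 timeout)
{
	switch (cmd) {
	case CMD_DELAY_US:
		udelay(timeout);
		return true;
	case CMD_DELAY_MS:
		msleep(timeout);
		return true;
	default:
		return false;
	}
}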
+ * + */ +#ifndef __TONGA_BACO_H__ +#define __TONGA_BACO_H__ +#include "smu7_baco.h" + +extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d08493b67b67..d71a492c87a3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_voltage_lookup_table *lookup_table) { uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count, "Lookup table is empty", return -EINVAL); @@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, for (j = i + 1; j > 0; j--) { if (lookup_table->entries[j].us_vdd < lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; + swap(lookup_table->entries[j - 1], + lookup_table->entries[j]); } } } @@ -3691,6 +3689,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if(hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr); /* @@ -5098,9 +5103,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; - for (i = 0; i < podn_vdd_dep->count - 1; i++) - od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; - if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc) + for (i = 0; i < podn_vdd_dep->count; i++) od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; @@ -5267,6 +5270,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return 0; } +static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable) +{ + struct vega10_hwmgr *data = hwmgr->backend; + uint32_t feature_mask = 0; + + if (disable) { + feature_mask |= data->smu_features[GNLD_ULV].enabled ? + data->smu_features[GNLD_ULV].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ? + data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ? + data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ? + data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ? + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0; + } else { + feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ? + data->smu_features[GNLD_ULV].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ? + data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ? 
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ? + data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ? + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0; + } + + if (feature_mask) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, + !disable, feature_mask), + "enable/disable power features for compute performance Failed!", + return -EINVAL); + + if (disable) { + data->smu_features[GNLD_ULV].enabled = false; + data->smu_features[GNLD_DS_GFXCLK].enabled = false; + data->smu_features[GNLD_DS_SOCCLK].enabled = false; + data->smu_features[GNLD_DS_LCLK].enabled = false; + data->smu_features[GNLD_DS_DCEFCLK].enabled = false; + } else { + data->smu_features[GNLD_ULV].enabled = true; + data->smu_features[GNLD_DS_GFXCLK].enabled = true; + data->smu_features[GNLD_DS_SOCCLK].enabled = true; + data->smu_features[GNLD_DS_LCLK].enabled = true; + data->smu_features[GNLD_DS_DCEFCLK].enabled = true; + } + + return 0; + +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -5334,6 +5390,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_ppfeature_status = vega10_get_ppfeature_status, .set_ppfeature_status = vega10_set_ppfeature_status, .set_mp1_state = vega10_set_mp1_state, + .disable_power_features_for_compute_performance = + vega10_disable_power_features_for_compute_performance, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c index df6ff9252401..9b5e72bdceca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c @@ -29,7 +29,7 @@ #include "vega20_baco.h" #include "vega20_smumgr.h" - +#include "amdgpu_ras.h" static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { @@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum BACO_STATE cur_state; uint32_t data; @@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) return 0; if (state == BACO_STATE_IN) { - data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); - data |= 0x80000000; - WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - - - if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) - return -EINVAL; + if (!ras || !ras->supported) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + + if(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnterBaco, 0)) + return -EINVAL; + } else { + if(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnterBaco, 1)) + return -EINVAL; + } } else if (state == BACO_STATE_OUT) { if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f5915308e643..5bcf0d684151 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) 
PHM_PlatformCaps_TablelessHardwareInterface); phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_BACO); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableSMU7ThermalManagement); if (adev->pg_flags & AMD_PG_SUPPORT_UVD) @@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) "Failed to init sclk threshold!", return ret); - if (adev->in_baco_reset) { - adev->in_baco_reset = 0; + if (adev->in_gpu_reset && + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) { ret = vega20_baco_apply_vdci_flush_workaround(hwmgr); if (ret) @@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire) return res; } +static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr, + enum pp_df_cstate state) +{ + int ret; + + /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ + if (hwmgr->smu_version < 0x283200) { + pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); + return -EINVAL; + } + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state); + if (ret) + pr_err("SetDfCstate failed!\n"); + + return ret; +} + +static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, + uint32_t pstate) +{ + int ret; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetXgmiMode, + pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + if (ret) + pr_err("SetXgmiPstate failed!\n"); + + return ret; +} + static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* init/fini related */ .backend_init = vega20_hwmgr_backend_init, @@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .set_asic_baco_state = vega20_baco_set_state, .set_mp1_state = vega20_set_mp1_state, .smu_i2c_bus_access = vega20_smu_i2c_bus_access, + .set_df_cstate = vega20_set_df_cstate, + .set_xgmi_pstate = vega20_set_xgmi_pstate, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 23171a4d9a31..8120e7587585 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -259,7 +259,6 @@ struct smu_table_context struct smu_bios_boot_up_values boot_values; void *driver_pptable; struct smu_table *tables; - uint32_t table_count; struct smu_table memory_pool; uint8_t thermal_controller_type; uint16_t TDPODLimit; @@ -322,6 +321,13 @@ struct mclock_latency_table { struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; }; +enum smu_reset_mode +{ + SMU_RESET_MODE_0, + SMU_RESET_MODE_1, + SMU_RESET_MODE_2, +}; + enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, @@ -341,7 +347,6 @@ struct smu_context struct amdgpu_device *adev; struct amdgpu_irq_src *irq_source; - const struct smu_funcs *funcs; const struct pptable_funcs *ppt_funcs; struct mutex mutex; struct mutex sensor_lock; @@ -382,11 +387,14 @@ struct smu_context uint32_t power_profile_mode; uint32_t default_power_profile_mode; bool pm_enabled; + bool is_apu; uint32_t smc_if_version; }; +struct i2c_adapter; + struct pptable_funcs { int (*alloc_dpm_context)(struct smu_context *smu); int (*store_powerplay_table)(struct smu_context *smu); @@ -398,7 +406,7 @@ struct pptable_funcs { int (*get_smu_table_index)(struct smu_context *smu, uint32_t index); int (*get_smu_power_index)(struct smu_context *smu, uint32_t index); int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile); - int (*run_afll_btc)(struct smu_context *smu); + 
int (*run_btc)(struct smu_context *smu); int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu); int (*set_default_dpm_table)(struct smu_context *smu); @@ -459,17 +467,19 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); void (*dump_pptable)(struct smu_context *smu); int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default); - int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max); -}; - -struct smu_funcs -{ + int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t dpm_level, uint32_t *freq); + int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); + int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap); + int (*i2c_eeprom_init)(struct i2c_adapter *control); + void (*i2c_eeprom_fini)(struct i2c_adapter *control); + int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table); int (*init_microcode)(struct smu_context *smu); + int (*load_microcode)(struct smu_context *smu); int (*init_smc_tables)(struct smu_context *smu); int (*fini_smc_tables)(struct smu_context *smu); int (*init_power)(struct smu_context *smu); int (*fini_power)(struct smu_context *smu); - int (*load_microcode)(struct smu_context *smu); int (*check_fw_status)(struct smu_context *smu); int (*setup_pptable)(struct smu_context *smu); int (*get_vbios_bootup_values)(struct smu_context *smu); @@ -485,7 +495,6 @@ struct smu_funcs int (*set_min_dcef_deep_sleep)(struct smu_context *smu); int (*set_tool_table_location)(struct smu_context *smu); int (*notify_memory_pool_location)(struct smu_context *smu); - int (*write_watermarks_table)(struct smu_context *smu); int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); int (*send_smc_msg)(struct smu_context *smu, uint16_t msg); @@ -499,8 +508,7 @@ struct smu_funcs int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value); int (*init_max_sustainable_clocks)(struct smu_context *smu); int (*start_thermal_control)(struct smu_context *smu); - int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, - void *data, uint32_t *size); + int (*stop_thermal_control)(struct smu_context *smu); int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk); int (*set_active_display_count)(struct smu_context *smu, uint32_t count); int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time, @@ -522,8 +530,6 @@ struct smu_funcs int (*get_current_shallow_sleep_clocks)(struct smu_context *smu, struct smu_clock_info *clocks); int (*notify_smu_enable_pwe)(struct smu_context *smu); - int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, - struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); @@ -538,234 +544,89 @@ struct smu_funcs enum smu_baco_state (*baco_get_state)(struct smu_context *smu); int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); int (*baco_reset)(struct smu_context *smu); + int (*mode2_reset)(struct smu_context *smu); int (*get_dpm_ultimate_freq)(struct 
smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int (*override_pcie_parameters)(struct smu_context *smu); }; -#define smu_init_microcode(smu) \ - ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0) -#define smu_init_smc_tables(smu) \ - ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0) -#define smu_fini_smc_tables(smu) \ - ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0) -#define smu_init_power(smu) \ - ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0) -#define smu_fini_power(smu) \ - ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0) -#define smu_load_microcode(smu) \ - ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0) -#define smu_check_fw_status(smu) \ - ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0) -#define smu_setup_pptable(smu) \ - ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0) -#define smu_powergate_sdma(smu, gate) \ - ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0) -#define smu_powergate_vcn(smu, gate) \ - ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0) -#define smu_set_gfx_cgpg(smu, enabled) \ - ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0) -#define smu_get_vbios_bootup_values(smu) \ - ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0) -#define smu_get_clk_info_from_vbios(smu) \ - ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0) -#define smu_check_pptable(smu) \ - ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0) -#define smu_parse_pptable(smu) \ - ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0) -#define smu_populate_smc_tables(smu) \ - ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0) -#define smu_check_fw_version(smu) \ - ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0) -#define smu_write_pptable(smu) \ - ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0) -#define smu_set_min_dcef_deep_sleep(smu) \ - ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0) -#define smu_set_tool_table_location(smu) \ - ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0) -#define smu_notify_memory_pool_location(smu) \ - ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0) -#define smu_gfx_off_control(smu, enable) \ - ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0) - -#define smu_write_watermarks_table(smu) \ - ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0) -#define smu_set_last_dcef_min_deep_sleep_clk(smu) \ - ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0) -#define smu_system_features_control(smu, en) \ - ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0) -#define smu_init_max_sustainable_clocks(smu) \ - ((smu)->funcs->init_max_sustainable_clocks ? 
(smu)->funcs->init_max_sustainable_clocks((smu)) : 0) -#define smu_set_default_od_settings(smu, initialize) \ - ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_set_fan_speed_rpm(smu, speed) \ - ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) -#define smu_send_smc_msg(smu, msg) \ - ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0) -#define smu_send_smc_msg_with_param(smu, msg, param) \ - ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) -#define smu_read_smc_arg(smu, arg) \ - ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0) -#define smu_alloc_dpm_context(smu) \ - ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) -#define smu_init_display_count(smu, count) \ - ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0) -#define smu_feature_set_allowed_mask(smu) \ - ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0) -#define smu_feature_get_enabled_mask(smu, mask, num) \ - ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0) -#define smu_is_dpm_running(smu) \ - ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) -#define smu_notify_display_change(smu) \ - ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0) -#define smu_store_powerplay_table(smu) \ - ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0) -#define smu_check_powerplay_table(smu) \ - ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0) -#define smu_append_powerplay_table(smu) \ - ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0) -#define smu_set_default_dpm_table(smu) \ - ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0) -#define smu_populate_umd_state_clk(smu) \ - ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) -#define smu_set_default_od8_settings(smu) \ - ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) -#define smu_get_power_limit(smu, limit, def) \ - ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0) -#define smu_set_power_limit(smu, limit) \ - ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0) -#define smu_get_current_clk_freq(smu, clk_id, value) \ - ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) -#define smu_print_clk_levels(smu, clk_type, buf) \ - ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0) -#define smu_force_clk_levels(smu, clk_type, level) \ - ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0) -#define smu_get_od_percentage(smu, type) \ - ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0) -#define smu_set_od_percentage(smu, type, value) \ - ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0) -#define smu_od_edit_dpm_table(smu, type, input, size) \ - ((smu)->ppt_funcs->od_edit_dpm_table ? 
(smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0) -#define smu_tables_init(smu, tab) \ - ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0) -#define smu_set_thermal_fan_table(smu) \ - ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0) -#define smu_start_thermal_control(smu) \ - ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0) -#define smu_read_sensor(smu, sensor, data, size) \ - ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0) -#define smu_smc_read_sensor(smu, sensor, data, size) \ - ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL) -#define smu_get_power_profile_mode(smu, buf) \ - ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0) -#define smu_set_power_profile_mode(smu, param, param_size) \ - ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0) -#define smu_pre_display_config_changed(smu) \ - ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) -#define smu_display_config_changed(smu) \ - ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) -#define smu_apply_clocks_adjust_rules(smu) \ - ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) -#define smu_notify_smc_dispaly_config(smu) \ - ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) -#define smu_force_dpm_limit_value(smu, highest) \ - ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) -#define smu_unforce_dpm_levels(smu) \ - ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0) -#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \ - ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) -#define smu_set_cpu_power_state(smu) \ - ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0) -#define smu_get_fan_control_mode(smu) \ - ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0) -#define smu_set_fan_control_mode(smu, value) \ - ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0) -#define smu_get_fan_speed_percent(smu, speed) \ - ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) -#define smu_set_fan_speed_percent(smu, speed) \ - ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) -#define smu_get_fan_speed_rpm(smu, speed) \ - ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) - -#define smu_msg_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_clk_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_feature_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? 
(smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_table_get_index(smu, tab) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL) -#define smu_power_get_index(smu, src) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL) -#define smu_workload_get_type(smu, profile) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL) -#define smu_run_afll_btc(smu) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0) -#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) -#define smu_set_deep_sleep_dcefclk(smu, clk) \ - ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0) -#define smu_set_active_display_count(smu, count) \ - ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0) -#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \ - ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0) -#define smu_get_clock_by_type(smu, type, clocks) \ - ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0) -#define smu_get_max_high_clocks(smu, clocks) \ - ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0) -#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0) -#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0) -#define smu_display_clock_voltage_request(smu, clock_req) \ - ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0) -#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \ - ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL) -#define smu_get_dal_power_level(smu, clocks) \ - ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0) -#define smu_get_perf_level(smu, designation, level) \ - ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0) -#define smu_get_current_shallow_sleep_clocks(smu, clocks) \ - ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0) -#define smu_notify_smu_enable_pwe(smu) \ - ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0) -#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \ - ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0) -#define smu_dpm_set_uvd_enable(smu, enable) \ - ((smu)->ppt_funcs->dpm_set_uvd_enable ? 
(smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) -#define smu_dpm_set_vce_enable(smu, enable) \ - ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) -#define smu_set_xgmi_pstate(smu, pstate) \ - ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) -#define smu_set_watermarks_table(smu, tab, clock_ranges) \ - ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) -#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ - ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0) -#define smu_thermal_temperature_range_update(smu, range, rw) \ - ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0) -#define smu_get_thermal_temperature_range(smu, range) \ - ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) -#define smu_register_irq_handler(smu) \ - ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) -#define smu_set_azalia_d3_pme(smu) \ - ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) -#define smu_get_dpm_ultimate_freq(smu, param, min, max) \ - ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0) -#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ - ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) -#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ - ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) -#define smu_baco_is_support(smu) \ - ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false) -#define smu_baco_get_state(smu, state) \ - ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) -#define smu_baco_reset(smu) \ - ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) -#define smu_asic_set_performance_level(smu, level) \ - ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); -#define smu_dump_pptable(smu) \ - ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0) -#define smu_get_dpm_uclk_limited(smu, clock, max) \ - ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL) +int smu_load_microcode(struct smu_context *smu); + +int smu_check_fw_status(struct smu_context *smu); + +int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); + +#define smu_i2c_eeprom_init(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL) +#define smu_i2c_eeprom_fini(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_fini ? 
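The wall of deleted smu_* wrappers above is one guarded-dispatch pattern repeated dozens of times, in two variants worth noting: most assume the ops table pointer is valid and only test the individual hook, defaulting to 0 (silent success), while the get-index wrappers also test ppt_funcs itself and default to -EINVAL, and smu_baco_is_support defaults to false. A standalone model of both variants, with illustrative names rather than the driver's:

#include <stdio.h>

/* Toy ops table with optional hooks, mirroring the macro pattern above.
 * -22 mirrors the value of -EINVAL used by the real wrappers. */
struct ops {
    int (*load)(int unit);   /* optional */
    int (*reset)(int unit);  /* optional */
};

struct dev { const struct ops *ops; };

/* Variant A: a missing hook is a harmless no-op that "succeeds". */
#define dev_load(d, u) ((d)->ops->load ? (d)->ops->load(u) : 0)

/* Variant B: the table itself may be NULL and a missing hook is an error. */
#define dev_reset(d, u) \
    ((d)->ops ? ((d)->ops->reset ? (d)->ops->reset(u) : -22) : -22)

static int my_load(int unit) { printf("loading unit %d\n", unit); return 0; }

int main(void)
{
    const struct ops partial = { .load = my_load }; /* no .reset hook */
    struct dev d = { .ops = &partial };

    printf("load  -> %d\n", dev_load(&d, 0));  /* dispatches to my_load */
    printf("reset -> %d\n", dev_reset(&d, 0)); /* -22: hook absent */
    return 0;
}

The declarations that follow show the replacement direction: the macros give way to ordinary exported functions (smu_load_microcode() and friends), which are type-checked and debuggable in a way token-pasted wrappers are not.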
(smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL) + +int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); + +int smu_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool def, + bool lock_needed); + +int smu_set_power_limit(struct smu_context *smu, uint32_t limit); +int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); +int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type); +int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value); + +int smu_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); + +int smu_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); +int smu_get_power_profile_mode(struct smu_context *smu, char *buf); + +int smu_set_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size, + bool lock_needed); +int smu_get_fan_control_mode(struct smu_context *smu); +int smu_set_fan_control_mode(struct smu_context *smu, int value); +int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed); +int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); +int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed); + +int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk); +int smu_set_active_display_count(struct smu_context *smu, uint32_t count); + +int smu_get_clock_by_type(struct smu_context *smu, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks); + +int smu_get_max_high_clocks(struct smu_context *smu, + struct amd_pp_simple_clock_info *clocks); + +int smu_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks); + +int smu_get_clock_by_type_with_voltage(struct smu_context *smu, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); + +int smu_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request *clock_req); +int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch); +int smu_notify_smu_enable_pwe(struct smu_context *smu); + +int smu_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_set_azalia_d3_pme(struct smu_context *smu); + +bool smu_baco_is_support(struct smu_context *smu); + +int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state); + +int smu_baco_reset(struct smu_context *smu); +int smu_mode2_reset(struct smu_context *smu); extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, @@ -799,6 +660,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table); int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size); int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info); enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu); +int smu_write_watermarks_table(struct smu_context *smu); +int smu_set_watermarks_for_clock_ranges( + struct smu_context *smu, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); /* smu to display interface */ extern int smu_display_configuration_change(struct smu_context *smu, const @@ -809,7 +674,8 @@ extern int smu_get_current_clocks(struct smu_context *smu, extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate); extern int smu_handle_task(struct 
smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id); + enum amd_pp_task task_id, + bool lock_needed); int smu_switch_power_profile(struct smu_context *smu, enum PP_SMC_POWER_PROFILE type, bool en); @@ -819,7 +685,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max); + uint32_t *min, uint32_t *max, bool lock_needed); int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -828,10 +694,27 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf); int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask); +int smu_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask, + bool lock_needed); +int smu_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state); +int smu_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state); + +int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +int smu_get_uclk_dpm_states(struct smu_context *smu, + unsigned int *clock_values_in_khz, + unsigned int *num_states); + +int smu_get_dpm_clock_table(struct smu_context *smu, + struct dpm_clocks *clock_table); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h index 78e5927b7711..e3291259b249 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h @@ -95,8 +95,7 @@ //BTC #define PPSMC_MSG_RunAfllBtc 0x30 -#define PPSMC_MSG_RunGfxDcBtc 0x31 -#define PPSMC_MSG_RunSocDcBtc 0x32 +#define PPSMC_MSG_RunDcBtc 0x31 //Debug #define PPSMC_MSG_DramLogSetDramAddrHigh 0x33 diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7bf9a14bfa0b..af977675fd33 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -355,6 +355,10 @@ struct pp_hwmgr_func { int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode); int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire); + int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state); + int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); + int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, + bool disable); }; struct pp_table_func { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h index 
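Several prototypes above (smu_get_power_limit, smu_set_power_profile_mode, smu_handle_task, smu_get_dpm_freq_range, smu_force_clk_levels) grow a trailing bool lock_needed. The usual reading of this convention, and the one consistent with callers later in this diff passing false from paths that are already serialized, is that external entry points pass true so the function takes the context lock itself, while internal callers that already hold it pass false to avoid self-deadlock. A minimal userspace sketch of the idea, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the "bool lock_needed" convention: pass true from
 * outside, false from code that already holds the mutex. */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static int soft_min = 300, soft_max = 1200;

static void get_freq_range(int *min, int *max, bool lock_needed)
{
    if (lock_needed)
        pthread_mutex_lock(&ctx_lock);
    *min = soft_min;
    *max = soft_max;
    if (lock_needed)
        pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
    int min, max;

    get_freq_range(&min, &max, true);   /* external caller: lock for me */

    pthread_mutex_lock(&ctx_lock);      /* task handler holds the lock... */
    get_freq_range(&min, &max, false);  /* ...so nested call must not relock */
    pthread_mutex_unlock(&ctx_lock);

    printf("range %d-%d MHz\n", min, max);
    return 0;
}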
e02950b505fa..a886f0644d24 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -137,29 +137,29 @@ #define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) #define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT ) #define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT ) -#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT ) +#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT ) #define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT ) #define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT ) #define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) #define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) -#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT ) +#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT ) #define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) #define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) #define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) #define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) #define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) -#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT ) -#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK ) +#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT ) +#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT ) //FIXME need updating // Debug Overrides Bitmask #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002 // I2C Config Bit Defines #define I2C_CONTROLLER_ENABLED 1 @@ -423,18 +423,30 @@ typedef enum { } PwrConfig_e; typedef enum { - XGMI_LINK_RATE_12 = 0, // 12Gbps - XGMI_LINK_RATE_16, // 16Gbps - XGMI_LINK_RATE_22, // 22Gbps - XGMI_LINK_RATE_25, // 25Gbps + XGMI_LINK_RATE_2 = 2, // 2Gbps + XGMI_LINK_RATE_4 = 4, // 4Gbps + XGMI_LINK_RATE_8 = 8, // 8Gbps + XGMI_LINK_RATE_12 = 12, // 12Gbps + XGMI_LINK_RATE_16 = 16, // 16Gbps + XGMI_LINK_RATE_17 = 17, // 17Gbps + XGMI_LINK_RATE_18 = 18, // 18Gbps + XGMI_LINK_RATE_19 = 19, // 19Gbps + XGMI_LINK_RATE_20 = 20, // 20Gbps + XGMI_LINK_RATE_21 = 21, // 21Gbps + XGMI_LINK_RATE_22 = 22, // 22Gbps + XGMI_LINK_RATE_23 = 23, // 23Gbps + XGMI_LINK_RATE_24 = 24, // 24Gbps + XGMI_LINK_RATE_25 = 25, // 25Gbps XGMI_LINK_RATE_COUNT } XGMI_LINK_RATE_e; typedef enum { - XGMI_LINK_WIDTH_2 = 0, // x2 - XGMI_LINK_WIDTH_4, // x4 - XGMI_LINK_WIDTH_8, // x8 - XGMI_LINK_WIDTH_16, // x16 + XGMI_LINK_WIDTH_1 = 1, // x1 + XGMI_LINK_WIDTH_2 = 2, // x2 + XGMI_LINK_WIDTH_4 = 4, // x4 + XGMI_LINK_WIDTH_8 = 8, // x8 + XGMI_LINK_WIDTH_9 = 9, // x9 + XGMI_LINK_WIDTH_16 = 16, // x16 XGMI_LINK_WIDTH_COUNT } XGMI_LINK_WIDTH_e; @@ -696,7 +708,11 @@ typedef struct { uint8_t GpioI2cSda; // Serial Data uint16_t GpioPadding; - uint32_t BoardReserved[9]; + // Platform input telemetry voltage coefficient + uint32_t BoardVoltageCoeffA; // decode by /1000 + uint32_t BoardVoltageCoeffB; // decode by /1000 + + uint32_t BoardReserved[7]; // Padding for MMHUB - do not modify this uint32_t MmHubPadding[8]; // SMU internal use @@ -802,7 +818,7 @@ typedef struct { uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units - uint32_t EnabledAvfsModules[2]; + uint32_t EnabledAvfsModules[3]; uint32_t MmHubPadding[8]; 
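The smu11_driver_if_arcturus.h hunk above is a catalogue of hand-maintained bitmask drift: a mask defined twice under one name (the second FEATURE_DS_LCLK_MASK really meant FEATURE_DS_UCLK_MASK), a shift taken from a misspelled bit name (EATURE_OUT_OF_BAND_MONITOR_BIT), and a mask shifted by itself (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK rather than ..._BIT). Deriving every mask from its bit index through one generator macro makes all three mistakes unrepresentable; a sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Each mask is token-pasted from its _BIT enumerator, so a mask can never
 * reference the wrong bit, a typo'd name, or itself. Names are examples,
 * not the driver's. */
enum {
    FEAT_DS_SOCCLK_BIT = 0,
    FEAT_DS_LCLK_BIT   = 1,
    FEAT_DS_UCLK_BIT   = 2,
};

#define FEAT_MASK(feat) (UINT32_C(1) << feat##_BIT)

int main(void)
{
    printf("FEAT_DS_UCLK mask = 0x%08x\n",
           (unsigned)FEAT_MASK(FEAT_DS_UCLK)); /* 0x00000004 */
    return 0;
}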
// SMU internal use } AvfsFuseOverride_t; @@ -865,7 +881,8 @@ typedef struct { //#define TABLE_ACTIVITY_MONITOR_COEFF 7 #define TABLE_OVERDRIVE 7 #define TABLE_WAFL_XGMI_TOPOLOGY 8 -#define TABLE_COUNT 9 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_COUNT 10 // These defines are used with the SMC_MSG_SetUclkFastSwitch message. typedef enum { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index b0dd05d431dd..d8c9b7f91fcc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -114,6 +114,7 @@ __SMU_DUMMY_MAP(PowerDownJpeg), \ __SMU_DUMMY_MAP(BacoAudioD3PME), \ __SMU_DUMMY_MAP(ArmD3), \ + __SMU_DUMMY_MAP(RunDcBtc), \ __SMU_DUMMY_MAP(RunGfxDcBtc), \ __SMU_DUMMY_MAP(RunSocDcBtc), \ __SMU_DUMMY_MAP(SetMemoryChannelEnable), \ @@ -168,6 +169,7 @@ __SMU_DUMMY_MAP(PowerGateAtHub), \ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ + __SMU_DUMMY_MAP(DFCstateControl), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -251,6 +253,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \ __SMU_DUMMY_MAP(MMHUB_PG), \ __SMU_DUMMY_MAP(ATHUB_PG), \ + __SMU_DUMMY_MAP(APCC_DFLL), \ __SMU_DUMMY_MAP(WAFL_CG), #undef __SMU_DUMMY_MAP diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 5bda8539447a..fd6ec9033d06 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -27,7 +27,7 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_VG20 0x13 -#define SMU11_DRIVER_IF_VERSION_ARCT 0x09 +#define SMU11_DRIVER_IF_VERSION_ARCT 0x10 #define SMU11_DRIVER_IF_VERSION_NV10 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x34 @@ -130,6 +130,124 @@ enum smu_v11_0_baco_seq { BACO_SEQ_COUNT, }; -void smu_v11_0_set_smu_funcs(struct smu_context *smu); +int smu_v11_0_init_microcode(struct smu_context *smu); + +int smu_v11_0_load_microcode(struct smu_context *smu); + +int smu_v11_0_init_smc_tables(struct smu_context *smu); + +int smu_v11_0_fini_smc_tables(struct smu_context *smu); + +int smu_v11_0_init_power(struct smu_context *smu); + +int smu_v11_0_fini_power(struct smu_context *smu); + +int smu_v11_0_check_fw_status(struct smu_context *smu); + +int smu_v11_0_setup_pptable(struct smu_context *smu); + +int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu); + +int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu); + +int smu_v11_0_check_pptable(struct smu_context *smu); + +int smu_v11_0_parse_pptable(struct smu_context *smu); + +int smu_v11_0_populate_smc_pptable(struct smu_context *smu); + +int smu_v11_0_check_fw_version(struct smu_context *smu); + +int smu_v11_0_write_pptable(struct smu_context *smu); + +int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu); + +int smu_v11_0_set_tool_table_location(struct smu_context *smu); + +int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); + +int smu_v11_0_system_features_control(struct smu_context *smu, + bool en); + +int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg); + +int +smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, + uint32_t param); + +int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); + +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); + +int smu_v11_0_set_allowed_mask(struct smu_context *smu); + +int smu_v11_0_get_enabled_mask(struct smu_context *smu, + 
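The smu_types.h changes above ride on an X-macro: each __SMU_DUMMY_MAP(...) entry is written once in a list, then the list is expanded under different definitions of the macro (here into SMU_MSG_* enumerators via the #undef/#define pair visible in the hunk), so adding RunDcBtc or DFCstateControl in one place updates every expansion. A self-contained model of the technique:

#include <stdio.h>

/* One list, two expansions: the same entries become both an enum and a
 * matching name-string table. Mirrors the __SMU_DUMMY_MAP mechanism. */
#define MSG_LIST        \
    X(TestMessage)      \
    X(RunDcBtc)         \
    X(DFCstateControl)

#define X(name) MSG_##name,
enum msg_type { MSG_LIST MSG_COUNT };
#undef X

#define X(name) #name,
static const char *const msg_names[] = { MSG_LIST };
#undef X

int main(void)
{
    printf("%d: %s\n", MSG_RunDcBtc, msg_names[MSG_RunDcBtc]); /* 1: RunDcBtc */
    return 0;
}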
uint32_t *feature_mask, uint32_t num); + +int smu_v11_0_notify_display_change(struct smu_context *smu); + +int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n); + +int smu_v11_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value); + +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); + +int smu_v11_0_start_thermal_control(struct smu_context *smu); + +int smu_v11_0_stop_thermal_control(struct smu_context *smu); + +int smu_v11_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); + +int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); + +int +smu_v11_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req); + +uint32_t +smu_v11_0_get_fan_control_mode(struct smu_context *smu); + +int +smu_v11_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode); + +int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed); + +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v11_0_register_irq_handler(struct smu_context *smu); + +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); + +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +bool smu_v11_0_baco_is_support(struct smu_context *smu); + +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); + +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); + +int smu_v11_0_baco_reset(struct smu_context *smu); + +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_v11_0_override_pcie_parameters(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index acf3db12f59f..9b9f5df0911c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -37,6 +37,45 @@ struct smu_12_0_cmn2aisc_mapping { int map_to; }; -void smu_v12_0_set_smu_funcs(struct smu_context *smu); +int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, + uint16_t msg); + +int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); + +int smu_v12_0_wait_for_response(struct smu_context *smu); + +int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg); + +int +smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, + uint32_t param); + +int smu_v12_0_check_fw_status(struct smu_context *smu); + +int smu_v12_0_check_fw_version(struct smu_context *smu); + +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate); + +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate); + +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable); + +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu); + +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v12_0_init_smc_tables(struct smu_context *smu); + +int smu_v12_0_fini_smc_tables(struct smu_context *smu); + +int smu_v12_0_populate_smc_tables(struct smu_context *smu); + +int smu_v12_0_get_dpm_ultimate_freq(struct 
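The smu_v12_0.h prototypes above split message delivery into three phases: smu_v12_0_send_msg_without_waiting, smu_v12_0_wait_for_response, and smu_v12_0_read_arg, with smu_v12_0_send_msg presumably composing them. The exact register handshake is not shown in this header, but the names fit a conventional doorbell mailbox: stage an argument, write the message ID, poll for a response code, read the result back. A toy model under that assumption, with globals standing in for registers and an instantly answering fake firmware:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mailbox model; the ordering and register roles are
 * assumptions inferred from the function names, not the real protocol. */
static uint32_t reg_arg, reg_msg, reg_resp;

static void send_msg_without_waiting(uint16_t msg, uint32_t arg)
{
    reg_resp = 0;               /* clear stale status */
    reg_arg  = arg;             /* stage the argument */
    reg_msg  = msg;             /* writing the ID rings the doorbell */

    /* Fake firmware: respond immediately with OK and a doubled argument. */
    reg_arg  = arg * 2;
    reg_resp = 1;
}

static int wait_for_response(void)
{
    /* Real code polls with a timeout; the fake firmware is always done. */
    return reg_resp == 1 ? 0 : -5;  /* -5 mirrors -EIO */
}

int main(void)
{
    send_msg_without_waiting(0x30 /* hypothetical message ID */, 21);
    if (wait_for_response() == 0)
        printf("arg readback = %u\n", (unsigned)reg_arg);  /* 42 */
    return 0;
}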
smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v12_0_mode2_reset(struct smu_context *smu); + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h index a0883038f3c3..0c66f0fe1aaf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h @@ -120,7 +120,8 @@ #define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D #define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F #define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60 -#define PPSMC_Message_Count 0x61 +#define PPSMC_MSG_DFCstateControl 0x63 +#define PPSMC_Message_Count 0x64 typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 0b461404af6b..354f70978f82 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -26,6 +26,7 @@ #include <linux/pci.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -177,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN FEA_MAP(TEMP_DEPENDENT_VMIN), FEA_MAP(MMHUB_PG), FEA_MAP(ATHUB_PG), + FEA_MAP(APCC_DFLL), }; static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = { @@ -205,7 +207,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; @@ -327,40 +329,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, memset(feature_mask, 0, sizeof(uint32_t) * num); *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) - | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) - | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) - | FEATURE_MASK(FEATURE_DPM_LINK_BIT) - | FEATURE_MASK(FEATURE_GFX_ULV_BIT) | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT) | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT) | FEATURE_MASK(FEATURE_PPT_BIT) | FEATURE_MASK(FEATURE_TDC_BIT) | FEATURE_MASK(FEATURE_GFX_EDC_BIT) + | FEATURE_MASK(FEATURE_APCC_PLUS_BIT) | FEATURE_MASK(FEATURE_VR0HOT_BIT) | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT) | FEATURE_MASK(FEATURE_THERMAL_BIT) | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT) - | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) - | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT) + | FEATURE_MASK(FEATURE_DS_LCLK_BIT) | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) | FEATURE_MASK(FEATURE_FW_DSTATE_BIT) | FEATURE_MASK(FEATURE_BACO_BIT) | FEATURE_MASK(FEATURE_ACDC_BIT) | FEATURE_MASK(FEATURE_GFX_SS_BIT) | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) - | FEATURE_MASK(FEATURE_FW_CTF_BIT); + | FEATURE_MASK(FEATURE_FW_CTF_BIT) + | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); + + if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) + *(uint64_t *)feature_mask |= 
FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + + if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); + + if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT); if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT) | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT); - if (adev->pm.pp_feature & PP_GFXOFF_MASK) { + if (adev->pm.pp_feature & PP_ULV_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (adev->pm.pp_feature & PP_GFXOFF_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); - /* TODO: remove it once fw fix the bug */ - *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT); - } if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT); @@ -783,13 +797,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) int ret = 0; uint32_t min_sclk_freq = 0, min_mclk_freq = 0; - ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL); + ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false); if (ret) return ret; smu->pstate_sclk = min_sclk_freq * 100; - ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL); + ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false); if (ret) return ret; @@ -842,7 +856,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { - ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq); + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false); if (ret) return ret; ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq); @@ -892,7 +906,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) for (i = 0; i < ARRAY_SIZE(clks); i++) { clk_type = clks[i]; - ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); if (ret) return ret; @@ -919,7 +933,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) for (i = 0; i < ARRAY_SIZE(clks); i++) { clk_type = clks[i]; - ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); if (ret) return ret; @@ -1254,7 +1268,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; - if (!smu_display_clock_voltage_request(smu, &clock_req)) { + + ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req); + if (!ret) { if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, @@ -1408,7 +1424,7 @@ static int navi10_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -1451,18 +1467,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t 
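navi10_get_allowed_feature_mask above zeroes a uint32_t array with memset and then ORs 64-bit FEATURE_MASK() words in through *(uint64_t *)feature_mask. On the little-endian platforms this driver targets, feature bits 0..31 land in feature_mask[0] and bits 32..63 in feature_mask[1]. A worked example of the layout (using memcpy instead of the cast to stay strict-aliasing clean in standalone C):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t feature_mask[2];
    uint64_t bits = 0;

    memset(feature_mask, 0, sizeof(feature_mask));
    bits |= UINT64_C(1) << 3;   /* a low-word feature bit  */
    bits |= UINT64_C(1) << 40;  /* a high-word feature bit */
    memcpy(feature_mask, &bits, sizeof(bits));

    /* On little-endian: word0=0x00000008 word1=0x00000100 */
    printf("word0=0x%08x word1=0x%08x\n",
           (unsigned)feature_mask[0], (unsigned)feature_mask[1]);
    return 0;
}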
uclk_level = 0; - switch (adev->pdev->revision) { - case 0xf0: /* XTX */ - case 0xc0: - sclk_freq = NAVI10_PEAK_SCLK_XTX; - break; - case 0xf1: /* XT */ - case 0xc1: - sclk_freq = NAVI10_PEAK_SCLK_XT; + switch (adev->asic_type) { + case CHIP_NAVI10: + switch (adev->pdev->revision) { + case 0xf0: /* XTX */ + case 0xc0: + sclk_freq = NAVI10_PEAK_SCLK_XTX; + break; + case 0xf1: /* XT */ + case 0xc1: + sclk_freq = NAVI10_PEAK_SCLK_XT; + break; + default: /* XL */ + sclk_freq = NAVI10_PEAK_SCLK_XL; + break; + } break; - default: /* XL */ - sclk_freq = NAVI10_PEAK_SCLK_XL; + case CHIP_NAVI14: + switch (adev->pdev->revision) { + case 0xc7: /* XT */ + case 0xf4: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK; + break; + case 0xc1: /* XTM */ + case 0xf2: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK; + break; + case 0xc3: /* XLM */ + case 0xf3: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK; + break; + case 0xc5: /* XTX */ + case 0xf6: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK; + break; + default: /* XL */ + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK; + break; + } break; + default: + return -EINVAL; } ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); @@ -1485,10 +1530,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { int ret = 0; - struct amdgpu_device *adev = smu->adev; - - if (adev->asic_type != CHIP_NAVI10) - return -EINVAL; switch (level) { case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: @@ -1591,6 +1632,28 @@ static int navi10_get_power_limit(struct smu_context *smu, return 0; } +static int navi10_update_pcie_parameters(struct smu_context *smu, + uint32_t pcie_gen_cap, + uint32_t pcie_width_cap) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + int ret, i; + uint32_t smu_pcie_arg; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | + ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : + (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? 
+ pptable->PcieLaneCount[i] : pcie_width_cap); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg); + } + + return ret; +} + + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1629,12 +1692,59 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_thermal_temperature_range = navi10_get_thermal_temperature_range, .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch, .get_power_limit = navi10_get_power_limit, + .update_pcie_parameters = navi10_update_pcie_parameters, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg = smu_v11_0_send_msg, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, }; void navi10_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &navi10_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h 
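navi10_update_pcie_parameters above packs one message argument per link level: the level index in bits 16 and up, the gen speed clamped to pcie_gen_cap in bits 8..15, and the lane-count code clamped to pcie_width_cap in bits 0..7. (Note that the loop overwrites ret on every iteration, so only the last send's status reaches the caller.) A worked example of the packing:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the smu_pcie_arg expression above, with the clamping
 * factored out for readability. */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t width,
                              uint32_t gen_cap, uint32_t width_cap)
{
    uint32_t g = gen   <= gen_cap   ? gen   : gen_cap;
    uint32_t w = width <= width_cap ? width : width_cap;

    return (level << 16) | (g << 8) | w;
}

int main(void)
{
    /* Level 1: the pptable asks for gen 4, width code 16, but the
     * platform caps the link at gen 3, width code 8. */
    printf("0x%06x\n", (unsigned)pack_pcie_arg(1, 4, 16, 3, 8)); /* 0x010308 */
    return 0;
}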
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 620ff17c2fef..a37e37c5f105 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -27,6 +27,12 @@ #define NAVI10_PEAK_SCLK_XT (1755) #define NAVI10_PEAK_SCLK_XL (1625) +#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670) +#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448) +#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181) +#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717) +#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448) + extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index e62bfba51562..04daf7e9fe05 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -23,6 +23,7 @@ #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "soc15_common.h" #include "smu_v12_0_ppsmc.h" #include "smu12_driver_if.h" @@ -160,21 +161,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables) * This interface just for getting uclk ultimate freq and should't introduce * other likewise function result in overmuch callback. */ -static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max) +static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t dpm_level, uint32_t *freq) { + DpmClocks_t *clk_table = smu->smu_table.clocks_table; - DpmClocks_t *table = smu->smu_table.clocks_table; - - if (!clock || !table) + if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; - if (max) - *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq; - else - *clock = table->FClocks[0].Freq; + GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq); return 0; - } static int renoir_print_clk_levels(struct smu_context *smu, @@ -183,11 +180,13 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_t metrics = {0}; + SmuMetrics_t metrics; if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; + memset(&metrics, 0, sizeof(metrics)); + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); if (ret) @@ -198,7 +197,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, case SMU_SCLK: /* retirve table returned paramters unit is MHz */ cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; - ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max); + ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false); if (!ret) { /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ if (cur_value == max) @@ -246,20 +245,474 @@ static int renoir_print_clk_levels(struct smu_context *smu, return size; } +static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) +{ + enum amd_pm_state_type pm_type; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + + if (!smu_dpm_ctx->dpm_context || + !smu_dpm_ctx->dpm_current_power_state) + return -EINVAL; + + switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { + case SMU_STATE_UI_LABEL_BATTERY: + pm_type = POWER_STATE_TYPE_BATTERY; + break; + case SMU_STATE_UI_LABEL_BALLANCED: + pm_type = POWER_STATE_TYPE_BALANCED; + break; + case SMU_STATE_UI_LABEL_PERFORMANCE: + pm_type = POWER_STATE_TYPE_PERFORMANCE; + break; + default: + if 
(smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT) + pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; + else + pm_type = POWER_STATE_TYPE_DEFAULT; + break; + } + + return pm_type; +} + +static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + /* vcn dpm on is a prerequisite for vcn power gate messages */ + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0); + if (ret) + return ret; + } + power_gate->vcn_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; + } + power_gate->vcn_gated = true; + } + + return ret; +} + +static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) +{ + int ret = 0, i = 0; + uint32_t min_freq, max_freq, force_freq; + enum smu_clk_type clk_type; + + enum smu_clk_type clks[] = { + SMU_GFXCLK, + SMU_MCLK, + SMU_SOCCLK, + }; + + for (i = 0; i < ARRAY_SIZE(clks); i++) { + clk_type = clks[i]; + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); + if (ret) + return ret; + + force_freq = highest ? max_freq : min_freq; + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + if (ret) + return ret; + } + + return ret; +} + +static int renoir_unforce_dpm_levels(struct smu_context *smu) { + + int ret = 0, i = 0; + uint32_t min_freq, max_freq; + enum smu_clk_type clk_type; + + struct clk_feature_map { + enum smu_clk_type clk_type; + uint32_t feature; + } clk_feature_map[] = { + {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT}, + {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT}, + {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, + }; + + for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { + if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature)) + continue; + + clk_type = clk_feature_map[i].clk_type; + + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + if (ret) + return ret; + } + + return ret; +} + +static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile) +{ + + uint32_t pplib_workload = 0; + + switch (profile) { + case PP_SMC_POWER_PROFILE_FULLSCREEN3D: + pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; + break; + case PP_SMC_POWER_PROFILE_CUSTOM: + pplib_workload = WORKLOAD_PPLIB_COUNT; + break; + case PP_SMC_POWER_PROFILE_VIDEO: + pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; + break; + case PP_SMC_POWER_PROFILE_VR: + pplib_workload = WORKLOAD_PPLIB_VR_BIT; + break; + case PP_SMC_POWER_PROFILE_COMPUTE: + pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; + break; + default: + return -EINVAL; + } + + return pplib_workload; +} + +static int renoir_get_profiling_clk_mask(struct smu_context *smu, + enum amd_dpm_forced_level level, + uint32_t *sclk_mask, + uint32_t *mclk_mask, + uint32_t *soc_mask) +{ + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + if (sclk_mask) + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + if (mclk_mask) + *mclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + if(sclk_mask) + /* The sclk as gfxclk and has three level about max/min/current */ + *sclk_mask = 3 - 1; + + if(mclk_mask) + *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; + + 
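renoir_unforce_dpm_levels above is table-driven: a small {clock type, DPM feature} map is walked, domains whose feature is disabled are skipped, and the rest are reset to their full min..max range. The same shape in a standalone sketch, with illustrative names and a simple bitmask standing in for smu_feature_is_enabled():

#include <stdbool.h>
#include <stdio.h>

enum clk { CLK_GFX, CLK_MEM, CLK_SOC };

static const struct {
    enum clk clk;
    unsigned feature_bit;       /* DPM feature gating this domain */
} clk_feature_map[] = {
    { CLK_GFX, 0 },
    { CLK_MEM, 1 },
    { CLK_SOC, 2 },
};

static bool feature_enabled(unsigned bit, unsigned enabled_mask)
{
    return enabled_mask & (1u << bit);
}

int main(void)
{
    unsigned enabled = 0x5;     /* GFX and SOC DPM on, MEM DPM off */
    size_t i;

    for (i = 0; i < sizeof(clk_feature_map) / sizeof(clk_feature_map[0]); i++) {
        if (!feature_enabled(clk_feature_map[i].feature_bit, enabled))
            continue;           /* skip domains with DPM disabled */
        printf("unforce clk %d to full range\n", (int)clk_feature_map[i].clk);
    }
    return 0;
}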
if(soc_mask) + *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1; + } + + return 0; +} + +/** + * This interface get dpm clock table for dc + */ +static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) +{ + DpmClocks_t *table = smu->smu_table.clocks_table; + int i; + + if (!clock_table || !table) + return -EINVAL; + + for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) { + clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq; + clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol; + } + + for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { + clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq; + clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol; + } + + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { + clock_table->FClocks[i].Freq = table->FClocks[i].Freq; + clock_table->FClocks[i].Vol = table->FClocks[i].Vol; + } + + for (i = 0; i< NUM_MEMCLK_DPM_LEVELS; i++) { + clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq; + clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol; + } + + return 0; +} + +static int renoir_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, uint32_t mask) +{ + + int ret = 0 ; + uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + if (soft_min_level > 2 || soft_max_level > 2) { + pr_info("Currently sclk only support 3 levels on APU\n"); + return -EINVAL; + } + + ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, + soft_max_level == 0 ? min_freq : + soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, + soft_min_level == 2 ? max_freq : + soft_min_level == 1 ? 
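renoir_force_clk_levels above converts the sysfs level bitmask into a range with soft_min_level = ffs(mask) - 1 (lowest set bit) and soft_max_level = fls(mask) - 1 (highest set bit). Worked through: mask 0x6 selects levels 1 and 2, since ffs(0x6) is 2 and fls(0x6) is 3. A portable check (glibc has ffs() but not the kernel's fls(), so one is sketched):

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Equivalent of the kernel's fls(): index of the highest set bit, 1-based,
 * 0 when no bits are set. */
static int fls_portable(unsigned int x)
{
    int bit = 0;

    while (x) {
        bit++;
        x >>= 1;
    }
    return bit;
}

int main(void)
{
    unsigned int mask = 0x6;    /* user requested levels 1 and 2 */

    printf("min level %d, max level %d\n",
           mask ? ffs((int)mask) - 1 : 0,       /* 1 */
           mask ? fls_portable(mask) - 1 : 0);  /* 2 */
    return 0;
}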
RENOIR_UMD_PSTATE_GFXCLK : min_freq); + if (ret) + return ret; + break; + case SMU_SOCCLK: + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq); + if (ret) + return ret; + break; + case SMU_MCLK: + case SMU_FCLK: + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq); + if (ret) + return ret; + break; + default: + break; + } + + return ret; +} + +static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +{ + int workload_type, ret; + uint32_t profile_mode = input[size]; + + if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_workload_get_type(smu, smu->power_profile_mode); + if (workload_type < 0) { + pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode); + return -EINVAL; + } + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + 1 << workload_type); + if (ret) { + pr_err("Fail to set workload type %d\n", workload_type); + return ret; + } + + smu->power_profile_mode = profile_mode; + + return 0; +} + +static int renoir_set_peak_clock_by_device(struct smu_context *smu) +{ + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + + ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = renoir_set_peak_clock_by_device(smu); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* save watermark settings into pplib smu structure, + * also pass data to smu controller + */ +static int renoir_set_watermarks_table( + struct smu_context *smu, + void *watermarks, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) +{ + int i; + int ret = 0; + Watermarks_t *table = watermarks; + + if (!table || !clock_ranges) + return -EINVAL; + + if (clock_ranges->num_wm_dmif_sets > 4 || + clock_ranges->num_wm_mcif_sets > 4) + return -EINVAL; + + /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/ + for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) { + table->WatermarkRow[WM_DCFCLK][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].MinMclk = + cpu_to_le16((uint16_t) + 
(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].MaxMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t) + clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; + } + + for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) { + table->WatermarkRow[WM_SOCCLK][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].MinMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].MaxMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) + clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; + } + + /* pass data to smu controller */ + ret = smu_write_watermarks_table(smu); + + return ret; +} + +static int renoir_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + static const char *profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM"}; + uint32_t i, size = 0; + int16_t workload_type = 0; + + if (!smu->pm_enabled || !buf) + return -EINVAL; + + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* + * Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT + * Not all profile modes are supported on renoir. + */ + workload_type = smu_workload_get_type(smu, i); + if (workload_type < 0) + continue; + + size += sprintf(buf + size, "%2d %14s%s\n", + i, profile_name[i], (i == smu->power_profile_mode) ?
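/*
 * Given the "%2d %14s%s\n" format above, the returned buffer looks
 * roughly like this (illustrative output; VIDEO shown as the active
 * profile, unsupported modes simply skipped):
 *
 *    0 BOOTUP_DEFAULT
 *    1 3D_FULL_SCREEN
 *    2   POWER_SAVING
 *    3          VIDEO*
 *    4             VR
 *    5        COMPUTE
 *    6         CUSTOM
 */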
"*" : " "); + } + + return size; +} + static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, .get_smu_table_index = renoir_get_smu_table_index, .tables_init = renoir_tables_init, .set_power_state = NULL, - .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited, + .get_dpm_clk_limited = renoir_get_dpm_clk_limited, .print_clk_levels = renoir_print_clk_levels, + .get_current_power_state = renoir_get_current_power_state, + .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable, + .force_dpm_limit_value = renoir_force_dpm_limit_value, + .unforce_dpm_levels = renoir_unforce_dpm_levels, + .get_workload_type = renoir_get_workload_type, + .get_profiling_clk_mask = renoir_get_profiling_clk_mask, + .force_clk_levels = renoir_force_clk_levels, + .set_power_profile_mode = renoir_set_power_profile_mode, + .set_performance_level = renoir_set_performance_level, + .get_dpm_clock_table = renoir_get_dpm_clock_table, + .set_watermarks_table = renoir_set_watermarks_table, + .get_power_profile_mode = renoir_get_power_profile_mode, + .check_fw_status = smu_v12_0_check_fw_status, + .check_fw_version = smu_v12_0_check_fw_version, + .powergate_sdma = smu_v12_0_powergate_sdma, + .powergate_vcn = smu_v12_0_powergate_vcn, + .send_smc_msg = smu_v12_0_send_msg, + .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, + .read_smc_arg = smu_v12_0_read_arg, + .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, + .gfx_off_control = smu_v12_0_gfx_off_control, + .init_smc_tables = smu_v12_0_init_smc_tables, + .fini_smc_tables = smu_v12_0_fini_smc_tables, + .populate_smc_tables = smu_v12_0_populate_smc_tables, + .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, + .mode2_reset = smu_v12_0_mode2_reset, + .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, }; void renoir_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &renoir_ppt_funcs; smu->smc_if_version = SMU12_DRIVER_IF_VERSION; - smu_table->table_count = TABLE_COUNT; + smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h new file mode 100644 index 000000000000..8bcda7871309 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -0,0 +1,204 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __SMU_INTERNAL_H__ +#define __SMU_INTERNAL_H__ + +#include "amdgpu_smu.h" + +#define smu_init_microcode(smu) \ + ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0) +#define smu_init_smc_tables(smu) \ + ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0) +#define smu_fini_smc_tables(smu) \ + ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0) +#define smu_init_power(smu) \ + ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0) +#define smu_fini_power(smu) \ + ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0) + +#define smu_setup_pptable(smu) \ + ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0) +#define smu_powergate_sdma(smu, gate) \ + ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0) +#define smu_powergate_vcn(smu, gate) \ + ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0) + +#define smu_get_vbios_bootup_values(smu) \ + ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0) +#define smu_get_clk_info_from_vbios(smu) \ + ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0) +#define smu_check_pptable(smu) \ + ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0) +#define smu_parse_pptable(smu) \ + ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0) +#define smu_populate_smc_tables(smu) \ + ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0) +#define smu_check_fw_version(smu) \ + ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0) +#define smu_write_pptable(smu) \ + ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0) +#define smu_set_min_dcef_deep_sleep(smu) \ + ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0) +#define smu_set_tool_table_location(smu) \ + ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0) +#define smu_notify_memory_pool_location(smu) \ + ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0) +#define smu_gfx_off_control(smu, enable) \ + ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0) + +#define smu_set_last_dcef_min_deep_sleep_clk(smu) \ + ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0) +#define smu_system_features_control(smu, en) \ + ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0) +#define smu_init_max_sustainable_clocks(smu) \ + ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0) +#define smu_set_default_od_settings(smu, initialize) \ + ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) + +#define smu_send_smc_msg(smu, msg) \ + ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0) +#define smu_send_smc_msg_with_param(smu, msg, param) \ + ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) +#define smu_read_smc_arg(smu, arg) \ + ((smu)->ppt_funcs->read_smc_arg? 
(smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0) +#define smu_alloc_dpm_context(smu) \ + ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) +#define smu_init_display_count(smu, count) \ + ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0) +#define smu_feature_set_allowed_mask(smu) \ + ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0) +#define smu_feature_get_enabled_mask(smu, mask, num) \ + ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0) +#define smu_is_dpm_running(smu) \ + ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) +#define smu_notify_display_change(smu) \ + ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0) +#define smu_store_powerplay_table(smu) \ + ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0) +#define smu_check_powerplay_table(smu) \ + ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0) +#define smu_append_powerplay_table(smu) \ + ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0) +#define smu_set_default_dpm_table(smu) \ + ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0) +#define smu_populate_umd_state_clk(smu) \ + ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) +#define smu_set_default_od8_settings(smu) \ + ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) + +#define smu_get_current_clk_freq(smu, clk_id, value) \ + ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) + +#define smu_tables_init(smu, tab) \ + ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0) +#define smu_set_thermal_fan_table(smu) \ + ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0) +#define smu_start_thermal_control(smu) \ + ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0) +#define smu_stop_thermal_control(smu) \ + ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0) + +#define smu_smc_read_sensor(smu, sensor, data, size) \ + ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL) + +#define smu_pre_display_config_changed(smu) \ + ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) +#define smu_display_config_changed(smu) \ + ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) +#define smu_apply_clocks_adjust_rules(smu) \ + ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) +#define smu_notify_smc_dispaly_config(smu) \ + ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) +#define smu_force_dpm_limit_value(smu, highest) \ + ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) +#define smu_unforce_dpm_levels(smu) \ + ((smu)->ppt_funcs->unforce_dpm_levels ? 
(smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0) +#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \ + ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) +#define smu_set_cpu_power_state(smu) \ + ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0) + +#define smu_msg_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_clk_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_feature_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_table_get_index(smu, tab) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL) +#define smu_power_get_index(smu, src) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL) +#define smu_workload_get_type(smu, profile) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL) +#define smu_run_btc(smu) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0) +#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) + + +#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \ + ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0) + +#define smu_get_dal_power_level(smu, clocks) \ + ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0) +#define smu_get_perf_level(smu, designation, level) \ + ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0) +#define smu_get_current_shallow_sleep_clocks(smu, clocks) \ + ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0) + +#define smu_dpm_set_uvd_enable(smu, enable) \ + ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) +#define smu_dpm_set_vce_enable(smu, enable) \ + ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) + +#define smu_set_watermarks_table(smu, tab, clock_ranges) \ + ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) +#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ + ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0) +#define smu_thermal_temperature_range_update(smu, range, rw) \ + ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0) +#define smu_get_thermal_temperature_range(smu, range) \ + ((smu)->ppt_funcs->get_thermal_temperature_range? 
(smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) +#define smu_register_irq_handler(smu) \ + ((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0) + +#define smu_get_dpm_ultimate_freq(smu, param, min, max) \ + ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0) + +#define smu_asic_set_performance_level(smu, level) \ + ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); +#define smu_dump_pptable(smu) \ + ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0) +#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \ + ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL) + +#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \ + ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL) + +#define smu_override_pcie_parameters(smu) \ + ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0) + +#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \ + ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c5257ae3188a..e859bb1132ac 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -27,14 +27,13 @@ #include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" #include "soc15_common.h" #include "atom.h" -#include "vega20_ppt.h" -#include "arcturus_ppt.h" -#include "navi10_ppt.h" +#include "amd_pcie.h" #include "asic_reg/thm/thm_11_0_2_offset.h" #include "asic_reg/thm/thm_11_0_2_sh_mask.h" @@ -61,7 +60,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) +int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -88,7 +87,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 
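/*
 * All of the smu_internal.h wrappers above share one guarded-dispatch
 * idiom. A self-contained miniature (hypothetical ops table, not the
 * driver's types):
 *
 *   struct ops { int (*do_thing)(void *obj); };
 *   struct obj { struct ops *ops; };
 *
 *   #define obj_do_thing(o) \
 *           ((o)->ops->do_thing ? (o)->ops->do_thing((o)) : 0)
 *
 * Optional callbacks fall back to 0 ("success, nothing to do"), while
 * required ones such as smu_smc_read_sensor() fall back to -EINVAL, so a
 * missing implementation surfaces as an error rather than a NULL
 * dereference.
 */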
0 : -EIO; } -static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) +int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -113,7 +112,7 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) } -static int +int smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, uint32_t param) { @@ -144,7 +143,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, return ret; } -static int smu_v11_0_init_microcode(struct smu_context *smu) +int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; const char *chip_name; @@ -206,7 +205,7 @@ out: return err; } -static int smu_v11_0_load_microcode(struct smu_context *smu) +int smu_v11_0_load_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; const uint32_t *src; @@ -244,7 +243,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu) return 0; } -static int smu_v11_0_check_fw_status(struct smu_context *smu) +int smu_v11_0_check_fw_status(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; @@ -259,7 +258,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu) return -EIO; } -static int smu_v11_0_check_fw_version(struct smu_context *smu) +int smu_v11_0_check_fw_version(struct smu_context *smu) { uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; @@ -354,7 +353,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, return 0; } -static int smu_v11_0_setup_pptable(struct smu_context *smu) +int smu_v11_0_setup_pptable(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; const struct smc_firmware_header_v1_0 *hdr; @@ -369,6 +368,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) version_major = le16_to_cpu(hdr->header.header_version_major); version_minor = le16_to_cpu(hdr->header.header_version_minor); if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) { + pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id); switch (version_minor) { case 0: ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size); @@ -385,6 +385,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) return ret; } else { + pr_info("use vbios provided pptable\n"); index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, powerplayinfo); @@ -433,13 +434,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu) return 0; } -static int smu_v11_0_init_smc_tables(struct smu_context *smu) +int smu_v11_0_init_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = NULL; int ret = 0; - if (smu_table->tables || smu_table->table_count == 0) + if (smu_table->tables) return -EINVAL; tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table), @@ -460,18 +461,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu) return 0; } -static int smu_v11_0_fini_smc_tables(struct smu_context *smu) +int smu_v11_0_fini_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; int ret = 0; - if (!smu_table->tables || smu_table->table_count == 0) + if (!smu_table->tables) return -EINVAL; kfree(smu_table->tables); kfree(smu_table->metrics_table); smu_table->tables = NULL; - smu_table->table_count = 0; smu_table->metrics_table = NULL; smu_table->metrics_time = 0; @@ -481,7 +481,7 @@ static int 
smu_v11_0_fini_smc_tables(struct smu_context *smu) return 0; } -static int smu_v11_0_init_power(struct smu_context *smu) +int smu_v11_0_init_power(struct smu_context *smu) { struct smu_power_context *smu_power = &smu->smu_power; @@ -499,7 +499,7 @@ static int smu_v11_0_init_power(struct smu_context *smu) return 0; } -static int smu_v11_0_fini_power(struct smu_context *smu) +int smu_v11_0_fini_power(struct smu_context *smu) { struct smu_power_context *smu_power = &smu->smu_power; @@ -576,7 +576,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu) return 0; } -static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu) +int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu) { int ret, index; struct amdgpu_device *adev = smu->adev; @@ -673,7 +673,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu) return 0; } -static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) +int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *memory_pool = &smu_table->memory_pool; @@ -719,7 +719,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) return ret; } -static int smu_v11_0_check_pptable(struct smu_context *smu) +int smu_v11_0_check_pptable(struct smu_context *smu) { int ret; @@ -727,7 +727,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_parse_pptable(struct smu_context *smu) +int smu_v11_0_parse_pptable(struct smu_context *smu) { int ret; @@ -751,7 +751,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_populate_smc_pptable(struct smu_context *smu) +int smu_v11_0_populate_smc_pptable(struct smu_context *smu) { int ret; @@ -760,7 +760,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_write_pptable(struct smu_context *smu) +int smu_v11_0_write_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; int ret = 0; @@ -771,24 +771,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_write_watermarks_table(struct smu_context *smu) -{ - int ret = 0; - struct smu_table_context *smu_table = &smu->smu_table; - struct smu_table *table = NULL; - - table = &smu_table->tables[SMU_TABLE_WATERMARKS]; - - if (!table->cpu_addr) - return -EINVAL; - - ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, - true); - - return ret; -} - -static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) +int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) { int ret; @@ -800,7 +783,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl return ret; } -static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) +int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -809,11 +792,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) if (!table_context) return -EINVAL; - return smu_set_deep_sleep_dcefclk(smu, - table_context->boot_values.dcefclk / 100); + return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100); } -static int smu_v11_0_set_tool_table_location(struct smu_context *smu) +int smu_v11_0_set_tool_table_location(struct smu_context *smu) { int ret = 0; struct smu_table *tool_table = 
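/*
 * Round-trip sketch for the now-exported message helpers (the message
 * name here is illustrative): the argument register is reused for the
 * reply, so a query is a send followed by a read:
 *
 *   uint32_t reply = 0;
 *   int err = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
 *   if (!err)
 *           err = smu_read_smc_arg(smu, &reply);
 */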
&smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; @@ -831,7 +813,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu) return ret; } -static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) { int ret = 0; @@ -843,7 +825,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) } -static int smu_v11_0_set_allowed_mask(struct smu_context *smu) +int smu_v11_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; int ret = 0; @@ -870,7 +852,7 @@ failed: return ret; } -static int smu_v11_0_get_enabled_mask(struct smu_context *smu, +int smu_v11_0_get_enabled_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) { uint32_t feature_mask_high = 0, feature_mask_low = 0; @@ -899,7 +881,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu, return ret; } -static int smu_v11_0_system_features_control(struct smu_context *smu, +int smu_v11_0_system_features_control(struct smu_context *smu, bool en) { struct smu_feature *feature = &smu->smu_feature; @@ -925,7 +907,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu, return ret; } -static int smu_v11_0_notify_display_change(struct smu_context *smu) +int smu_v11_0_notify_display_change(struct smu_context *smu) { int ret = 0; @@ -983,7 +965,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, return ret; } -static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) { struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks; int ret = 0; @@ -1063,7 +1045,7 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) return 0; } -static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) +int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) { int ret = 0; @@ -1091,7 +1073,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return 0; } -static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, +int smu_v11_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value) { @@ -1170,7 +1152,7 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu) return 0; } -static int smu_v11_0_start_thermal_control(struct smu_context *smu) +int smu_v11_0_start_thermal_control(struct smu_context *smu) { int ret = 0; struct smu_temperature_range range; @@ -1212,6 +1194,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } +int smu_v11_0_stop_thermal_control(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); + + return 0; +} + static uint16_t convert_to_vddc(uint8_t vid) { return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); @@ -1236,7 +1227,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) } -static int smu_v11_0_read_sensor(struct smu_context *smu, +int smu_v11_0_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { @@ -1273,7 +1264,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu, return ret; } -static int +int smu_v11_0_display_clock_voltage_request(struct smu_context *smu, struct pp_display_clock_request *clock_req) @@ -1316,9 +1307,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, if 
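/*
 * Worked example for convert_to_vddc() above, assuming the SVI2-style
 * encoding its constants imply (1.55 V ceiling, 6.25 mV per VID step,
 * SMU11_VOLTAGE_SCALE == 4, result read as millivolts):
 *
 *   vid = 0x00:  (6200 -  0 * 25) / 4 = 6200 / 4 = 1550   // 1.55 V
 *   vid = 0x40:  (6200 - 64 * 25) / 4 = 4600 / 4 = 1150   // 1.15 V
 */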
(clk_select == SMU_UCLK && smu->disable_uclk_switch) return 0; - mutex_lock(&smu->mutex); ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0); - mutex_unlock(&smu->mutex); if(clk_select == SMU_UCLK) smu->hard_min_uclk_req_from_dal = clk_freq; @@ -1328,27 +1317,7 @@ failed: return ret; } -static int -smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct - dm_pp_wm_sets_with_clock_ranges_soc15 - *clock_ranges) -{ - int ret = 0; - struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; - void *table = watermarks->cpu_addr; - - if (!smu->disable_watermark && - smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && - smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { - smu_set_watermarks_table(smu, table, clock_ranges); - smu->watermarks_bitmap |= WATERMARKS_EXIST; - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; - } - - return ret; -} - -static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) { int ret = 0; struct amdgpu_device *adev = smu->adev; @@ -1361,12 +1330,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case CHIP_NAVI12: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; - mutex_lock(&smu->mutex); if (enable) ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); else ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); - mutex_unlock(&smu->mutex); break; default: break; @@ -1375,7 +1342,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static uint32_t +uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) @@ -1415,7 +1382,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) return 0; } -static int +int smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; @@ -1444,7 +1411,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); } -static int +int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) { @@ -1472,7 +1439,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, return ret; } -static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; @@ -1482,10 +1449,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - mutex_lock(&(smu->mutex)); ret = smu_v11_0_auto_fan_control(smu, 0); if (ret) - goto set_fan_speed_rpm_failed; + return ret; crystal_clock_freq = amdgpu_asic_get_xclk(adev); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); @@ -1496,23 +1462,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); -set_fan_speed_rpm_failed: - mutex_unlock(&(smu->mutex)); return ret; } -#define XGMI_STATE_D0 1 -#define XGMI_STATE_D3 0 - -static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { int ret = 0; - mutex_lock(&(smu->mutex)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetXgmiMode, - pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); - mutex_unlock(&(smu->mutex)); + pstate ? 
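/*
 * The RPM path above computes tach_period = 60 * xclk * 10000 / (8 * rpm),
 * and the relation is its own inverse (rpm * tach_period is constant for
 * a given crystal clock). A sketch of reading a speed back out, under the
 * same unit assumptions the driver makes about amdgpu_asic_get_xclk():
 *
 *   static uint32_t tach_period_to_rpm(uint32_t xclk, uint32_t tach_period)
 *   {
 *           if (!tach_period)
 *                   return 0;  // no measurement yet; avoid divide-by-zero
 *           // 64-bit intermediate: 60 * xclk * 10000 can exceed 32 bits
 *           return (uint32_t)(60ULL * xclk * 10000 / (8 * tach_period));
 *   }
 */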
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); return ret; } @@ -1559,7 +1518,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = .process = smu_v11_0_irq_process, }; -static int smu_v11_0_register_irq_handler(struct smu_context *smu) +int smu_v11_0_register_irq_handler(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct amdgpu_irq_src *irq_src = smu->irq_source; @@ -1591,7 +1550,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu) return ret; } -static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks) { struct smu_table_context *table_context = &smu->smu_table; @@ -1621,13 +1580,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, return 0; } -static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) { int ret = 0; - mutex_lock(&smu->mutex); ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); - mutex_unlock(&smu->mutex); return ret; } @@ -1637,7 +1594,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); } -static bool smu_v11_0_baco_is_support(struct smu_context *smu) +bool smu_v11_0_baco_is_support(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -1661,7 +1618,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu) return false; } -static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; enum smu_baco_state baco_state; @@ -1673,7 +1630,7 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) return baco_state; } -static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -1697,7 +1654,7 @@ out: return ret; } -static int smu_v11_0_baco_reset(struct smu_context *smu) +int smu_v11_0_baco_reset(struct smu_context *smu) { int ret = 0; @@ -1718,13 +1675,12 @@ static int smu_v11_0_baco_reset(struct smu_context *smu) return ret; } -static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { int ret = 0, clk_id = 0; uint32_t param = 0; - mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); if (clk_id < 0) { ret = -EINVAL; @@ -1751,80 +1707,75 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk } failed: - mutex_unlock(&smu->mutex); return ret; } -static const struct smu_funcs smu_v11_0_funcs = { - .init_microcode = smu_v11_0_init_microcode, - .load_microcode = smu_v11_0_load_microcode, - .check_fw_status = smu_v11_0_check_fw_status, - .check_fw_version = smu_v11_0_check_fw_version, - .send_smc_msg = smu_v11_0_send_msg, - .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, - .setup_pptable = smu_v11_0_setup_pptable, - .init_smc_tables = smu_v11_0_init_smc_tables, - .fini_smc_tables = smu_v11_0_fini_smc_tables, - .init_power = smu_v11_0_init_power, - 
.fini_power = smu_v11_0_fini_power, - .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, - .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, - .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, - .check_pptable = smu_v11_0_check_pptable, - .parse_pptable = smu_v11_0_parse_pptable, - .populate_smc_tables = smu_v11_0_populate_smc_pptable, - .write_pptable = smu_v11_0_write_pptable, - .write_watermarks_table = smu_v11_0_write_watermarks_table, - .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, - .set_tool_table_location = smu_v11_0_set_tool_table_location, - .init_display_count = smu_v11_0_init_display_count, - .set_allowed_mask = smu_v11_0_set_allowed_mask, - .get_enabled_mask = smu_v11_0_get_enabled_mask, - .system_features_control = smu_v11_0_system_features_control, - .notify_display_change = smu_v11_0_notify_display_change, - .set_power_limit = smu_v11_0_set_power_limit, - .get_current_clk_freq = smu_v11_0_get_current_clk_freq, - .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, - .start_thermal_control = smu_v11_0_start_thermal_control, - .read_sensor = smu_v11_0_read_sensor, - .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, - .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, - .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_fan_control_mode = smu_v11_0_get_fan_control_mode, - .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, - .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, - .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, - .gfx_off_control = smu_v11_0_gfx_off_control, - .register_irq_handler = smu_v11_0_register_irq_handler, - .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, - .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, - .baco_reset = smu_v11_0_baco_reset, - .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, -}; +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) +{ + int ret = 0, clk_id = 0; + uint32_t param; -void smu_v11_0_set_smu_funcs(struct smu_context *smu) + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return clk_id; + + if (max > 0) { + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, + param); + if (ret) + return ret; + } + + if (min > 0) { + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, + param); + if (ret) + return ret; + } + + return ret; +} + +int smu_v11_0_override_pcie_parameters(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + uint32_t pcie_gen = 0, pcie_width = 0; + int ret; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + /* Bit 31:16: LCLK DPM level. 
0 is DPM0, and 1 is DPM1 + * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 + * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32 + */ + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); + + if (ret) + pr_err("[%s] Attempt to override pcie params failed!\n", __func__); + + return ret; - smu->funcs = &smu_v11_0_funcs; - switch (adev->asic_type) { - case CHIP_VEGA20: - vega20_set_ppt_funcs(smu); - break; - case CHIP_ARCTURUS: - arcturus_set_ppt_funcs(smu); - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - navi10_set_ppt_funcs(smu); - break; - default: - pr_warn("Unknown asic for smu11\n"); - } } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 9d2280ca1f4b..139dd737eaa5 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -24,12 +24,12 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v12_0.h" #include "soc15_common.h" #include "atom.h" -#include "renoir_ppt.h" #include "asic_reg/mp/mp_12_0_0_offset.h" #include "asic_reg/mp/mp_12_0_0_sh_mask.h" @@ -41,7 +41,7 @@ #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 -static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, +int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; @@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) +int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) return 0; } -static int smu_v12_0_wait_for_response(struct smu_context *smu) +int smu_v12_0_wait_for_response(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t cur_value, i; @@ -77,7 +77,7 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu) return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ?
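/*
 * Per the bit-layout comment above, the word eventually sent with
 * SMU_MSG_OverridePcieParameters packs link level, generation and width
 * together. A sketch of that encoding (helper name is illustrative):
 *
 *   static uint32_t pack_pcie_arg(uint32_t lclk_level, uint32_t gen,
 *                                 uint32_t width)
 *   {
 *           return (lclk_level << 16) | (gen << 8) | width;
 *   }
 *
 *   // DPM1, Gen4 (gen code 3), x16 (width code 6):
 *   // pack_pcie_arg(1, 3, 6) == 0x00010306
 */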
0 : -EIO; } -static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) +int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -102,7 +102,7 @@ static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) } -static int +int smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, uint32_t param) { @@ -132,7 +132,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, return ret; } -static int smu_v12_0_check_fw_status(struct smu_context *smu) +int smu_v12_0_check_fw_status(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; @@ -147,7 +147,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu) return -EIO; } -static int smu_v12_0_check_fw_version(struct smu_context *smu) +int smu_v12_0_check_fw_version(struct smu_context *smu) { uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; @@ -181,7 +181,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu) return ret; } -static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) { if (!(smu->adev->flags & AMD_IS_APU)) return 0; @@ -192,7 +192,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma); } -static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) { if (!(smu->adev->flags & AMD_IS_APU)) return 0; @@ -203,7 +203,7 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); } -static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) { if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) return 0; @@ -224,7 +224,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) * Returns 2=Not in GFXOFF. * Returns 3=Transition into GFXOFF. 
*/ -static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) { uint32_t reg; uint32_t gfxOff_Status = 0; @@ -237,22 +237,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) return gfxOff_Status; } -static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) { int ret = 0, timeout = 500; if (enable) { ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); - /* confirm gfx is back to "off" state, timeout is 5 seconds */ - while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) { - msleep(10); - timeout--; - if (timeout == 0) { - DRM_ERROR("enable gfxoff timeout and failed!\n"); - break; - } - } } else { ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); @@ -270,12 +261,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v12_0_init_smc_tables(struct smu_context *smu) +int smu_v12_0_init_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = NULL; - if (smu_table->tables || smu_table->table_count == 0) + if (smu_table->tables) return -EINVAL; tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table), @@ -288,11 +279,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu) return smu_tables_init(smu, tables); } -static int smu_v12_0_fini_smc_tables(struct smu_context *smu) +int smu_v12_0_fini_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - if (!smu_table->tables || smu_table->table_count == 0) + if (!smu_table->tables) return -EINVAL; kfree(smu_table->clocks_table); @@ -304,7 +295,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu) return 0; } -static int smu_v12_0_populate_smc_tables(struct smu_context *smu) +int smu_v12_0_populate_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = NULL; @@ -319,14 +310,20 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu) return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } -static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { int ret = 0; - - mutex_lock(&smu->mutex); + uint32_t mclk_mask, soc_mask; if (max) { + ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, + NULL, + &mclk_mask, + &soc_mask); + if (ret) + goto failed; + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -340,14 +337,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk goto failed; break; case SMU_UCLK: - ret = smu_get_dpm_uclk_limited(smu, max, true); + case SMU_FCLK: + case SMU_MCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); + if (ret) + goto failed; + break; + case SMU_SOCCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max); if (ret) goto failed; break; default: ret = -EINVAL; goto failed; - } } @@ -365,7 +368,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk goto failed; break; case SMU_UCLK: - ret = smu_get_dpm_uclk_limited(smu, min, false); + case SMU_FCLK: + case SMU_MCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min); + if (ret) + goto failed; + break; + case SMU_SOCCLK: + ret = 
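/*
 * For reference, the synchronous confirmation that the hunk above drops
 * from the enable path had this shape (reconstructed from the deleted
 * lines; roughly a five-second poll):
 *
 *   int timeout = 500;
 *   while (smu_v12_0_get_gfxoff_status(smu) != 0) {  // 0 == in GFXOFF
 *           msleep(10);
 *           if (--timeout == 0)
 *                   break;  // gave up after ~5 s
 *   }
 */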
smu_get_dpm_clk_limited(smu, clk_type, 0, min); if (ret) goto failed; break; @@ -373,40 +383,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ret = -EINVAL; goto failed; } - } failed: - mutex_unlock(&smu->mutex); return ret; } -static const struct smu_funcs smu_v12_0_funcs = { - .check_fw_status = smu_v12_0_check_fw_status, - .check_fw_version = smu_v12_0_check_fw_version, - .powergate_sdma = smu_v12_0_powergate_sdma, - .powergate_vcn = smu_v12_0_powergate_vcn, - .send_smc_msg = smu_v12_0_send_msg, - .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, - .read_smc_arg = smu_v12_0_read_arg, - .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, - .gfx_off_control = smu_v12_0_gfx_off_control, - .init_smc_tables = smu_v12_0_init_smc_tables, - .fini_smc_tables = smu_v12_0_fini_smc_tables, - .populate_smc_tables = smu_v12_0_populate_smc_tables, - .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, -}; - -void smu_v12_0_set_smu_funcs(struct smu_context *smu) +int smu_v12_0_mode2_reset(struct smu_context *smu){ + return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2); +} + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) { - struct amdgpu_device *adev = smu->adev; + int ret = 0; - smu->funcs = &smu_v12_0_funcs; + if (max < min) + return -EINVAL; - switch (adev->asic_type) { - case CHIP_RENOIR: - renoir_set_ppt_funcs(smu); - break; + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max); + if (ret) + return ret; + break; + case SMU_FCLK: + case SMU_MCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max); + if (ret) + return ret; + break; + case SMU_SOCCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max); + if (ret) + return ret; + break; + case SMU_VCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max); + if (ret) + return ret; + break; default: - pr_warn("Unknown asic for smu12\n"); + return -EINVAL; } + + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index dc754447f0dd..23c12018dbc1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, count = SMU_MAX_SMIO_LEVELS; for (level = 0; level < count; level++) { table->SmioTable2.Pattern[level].Voltage = - PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE); /* Index into DpmTable.Smio. 
Drive bits from Smio entry to get this voltage level.*/ table->SmioTable2.Pattern[level].Smio = (uint8_t) level; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 3f12cf341511..aa0ee2b46135 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index 4728aa23a818..7dca04a89217 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr) uint32_t tmp; int ret = 0; struct cgs_firmware_info info = {0}; - struct smu8_smumgr *smu8_smu; if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - smu8_smu = hwmgr->smu_backend; ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 0dbdde69f2d9..0f3836fd9666 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index f9589806bf83..90c782c132d2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, return -EINVAL); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index b9089c6bea85..f604612f411f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c 
index 7c960b07746f..ae18fbcb26fb 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, count = SMU_MAX_SMIO_LEVELS; for (level = 0; level < count; level++) { table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US( - data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE); /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ table->SmioTable2.Pattern[level].Smio = (uint8_t) level; diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bbd8ebd58434..5b21386f558d 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PrepareMp1ForShutdown), MSG_MAP(SetMGpuFanBoostLimitRpm), MSG_MAP(GetAVFSVoltageByDpm), + MSG_MAP(DFCstateControl), }; static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = { @@ -219,7 +221,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; @@ -634,7 +636,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) !smu_dpm_ctx->dpm_current_power_state) return -EINVAL; - mutex_lock(&(smu->mutex)); switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { case SMU_STATE_UI_LABEL_BATTERY: pm_type = POWER_STATE_TYPE_BATTERY; @@ -652,7 +653,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) pm_type = POWER_STATE_TYPE_DEFAULT; break; } - mutex_unlock(&(smu->mutex)); return pm_type; } @@ -1274,16 +1274,8 @@ static int vega20_force_clk_levels(struct smu_context *smu, struct vega20_dpm_table *dpm_table; struct vega20_single_dpm_table *single_dpm_table; uint32_t soft_min_level, soft_max_level, hard_min_level; - struct smu_dpm_context *smu_dpm = &smu->smu_dpm; int ret = 0; - if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { - pr_info("force clock level is for dpm manual mode only.\n"); - return -EINVAL; - } - - mutex_lock(&(smu->mutex)); - soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? 
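/*
 * The polaris10 and vegam hunks nearby fix the same copy-paste defect:
 * indexing the source table with the loop bound instead of the loop
 * variable. In miniature:
 *
 *   for (level = 0; level < count; level++)
 *           dst[level] = src[count];   // bug: reads one past the end,
 *                                      // and the same value every time
 *   for (level = 0; level < count; level++)
 *           dst[level] = src[level];   // fixed
 */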
(fls(mask) - 1) : 0; @@ -1436,7 +1428,6 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; } - mutex_unlock(&(smu->mutex)); return ret; } @@ -1451,8 +1442,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, dpm_table = smu_dpm->dpm_context; - mutex_lock(&smu->mutex); - switch (clk_type) { case SMU_GFXCLK: single_dpm_table = &(dpm_table->gfx_table); @@ -1474,7 +1463,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, ret = -EINVAL; } - mutex_unlock(&smu->mutex); return ret; } @@ -2260,7 +2248,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; - if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) { + if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) { if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, @@ -2547,8 +2535,6 @@ static int vega20_set_od_percentage(struct smu_context *smu, int feature_enabled; PPCLK_e clk_id; - mutex_lock(&(smu->mutex)); - dpm_table = smu_dpm->dpm_context; golden_table = smu_dpm->golden_dpm_context; @@ -2598,11 +2584,10 @@ static int vega20_set_od_percentage(struct smu_context *smu, } ret = smu_handle_task(smu, smu_dpm->dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE); + AMD_PP_TASK_READJUST_POWER_STATE, + false); set_od_failed: - mutex_unlock(&(smu->mutex)); - return ret; } @@ -2827,10 +2812,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, } if (type == PP_OD_COMMIT_DPM_TABLE) { - mutex_lock(&(smu->mutex)); ret = smu_handle_task(smu, smu_dpm->dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE); - mutex_unlock(&(smu->mutex)); + AMD_PP_TASK_READJUST_POWER_STATE, + false); } return ret; @@ -3047,7 +3031,7 @@ static int vega20_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -3141,6 +3125,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu, return 0; } +static int vega20_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + uint32_t smu_version; + int ret; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version!\n"); + return ret; + } + + /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ + if (smu_version < 0x283200) { + pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); + return -EINVAL; + } + + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); +} + +static int vega20_update_pcie_parameters(struct smu_context *smu, + uint32_t pcie_gen_cap, + uint32_t pcie_width_cap) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + int ret, i; + uint32_t smu_pcie_arg; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | + ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : + (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? 
+ pptable->PcieLaneCount[i] : pcie_width_cap); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg); + } + + return ret; +} + + static const struct pptable_funcs vega20_ppt_funcs = { .tables_init = vega20_tables_init, .alloc_dpm_context = vega20_allocate_dpm_context, @@ -3153,7 +3180,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_smu_table_index = vega20_get_smu_table_index, .get_smu_power_index = vega20_get_pwr_src_index, .get_workload_type = vega20_get_workload_type, - .run_afll_btc = vega20_run_btc_afll, + .run_btc = vega20_run_btc_afll, .get_allowed_feature_mask = vega20_get_allowed_feature_mask, .get_current_power_state = vega20_get_current_power_state, .set_default_dpm_table = vega20_set_default_dpm_table, @@ -3183,13 +3210,61 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_fan_speed_percent = vega20_get_fan_speed_percent, .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, - .get_thermal_temperature_range = vega20_get_thermal_temperature_range + .get_thermal_temperature_range = vega20_get_thermal_temperature_range, + .set_df_cstate = vega20_set_df_cstate, + .update_pcie_parameters = vega20_update_pcie_parameters, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg = smu_v11_0_send_msg, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + 
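+	/* BACO (Bus Active, Chip Off) support comes from the common smu_v11_0 helpers below */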
.baco_is_support = smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, }; void vega20_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &vega20_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 6b7f791685ec..d6a6692db0ac 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <linux/dma-mapping.h> #include <linux/module.h> @@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct arcpgu_drm_private *arcpgu; - struct device_node *encoder_node; + struct device_node *encoder_node = NULL, *endpoint_node = NULL; struct resource *res; int ret; @@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm) if (arc_pgu_setup_crtc(drm) < 0) return -ENODEV; - /* find the encoder node and initialize it */ - encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); + /* + * There is only one output port inside each device. It is linked with + * the encoder endpoint. + */ + endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (endpoint_node) { + encoder_node = of_graph_get_remote_port_parent(endpoint_node); + of_node_put(endpoint_node); + } + if (encoder_node) { ret = arcpgu_drm_hdmi_init(drm, encoder_node); of_node_put(encoder_node); if (ret < 0) return ret; } else { + dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n"); ret = arcpgu_drm_sim_init(drm, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c index c3d29c0b051b..f0ba26e282c3 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c @@ -106,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg) i, hdr.output_ids[i]); } +/* On D71, we use the global line size. From D32, every component has + a line size register to indicate the FIFO size.
+ */ +static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg, + u32 max_default) +{ + if (!d71->periph_addr) + max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE); + + return max_default; +} + +static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg) +{ + return __get_blk_line_size(d71, reg, d71->max_line_size); +} + static u32 to_rot_ctrl(u32 rot) { u32 lr_ctrl = 0; @@ -332,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf) seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]); } +static int d71_layer_validate(struct komeda_component *c, + struct komeda_component_state *state) +{ + struct komeda_layer_state *st = to_layer_st(state); + struct komeda_layer *layer = to_layer(c); + struct drm_plane_state *plane_st; + struct drm_framebuffer *fb; + u32 fourcc, line_sz, max_line_sz; + + plane_st = drm_atomic_get_new_plane_state(state->obj.state, + state->plane); + fb = plane_st->fb; + fourcc = fb->format->format; + + if (drm_rotation_90_or_270(st->rot)) + line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b; + else + line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r; + + if (fb->modifier) { + if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) == + AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) + max_line_sz = layer->line_sz; + else + max_line_sz = layer->line_sz / 2; + + if (line_sz > max_line_sz) { + DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceeds the max afbc line_sz: %d.\n", + line_sz, max_line_sz); + return -EINVAL; + } + } + + if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) { + DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceeds the max size 2046.\n", + line_sz); + return -EINVAL; + } + + if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) { + DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceeds the max size 2046.\n", + line_sz); + return -EINVAL; + } + + return 0; +} + static const struct komeda_component_funcs d71_layer_funcs = { + .validate = d71_layer_validate, .update = d71_layer_update, .disable = d71_layer_disable, .dump_register = d71_layer_dump, @@ -365,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71, else layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER; - set_range(&layer->hsize_in, 4, d71->max_line_size); + if (!d71->periph_addr) { + /* D32 or newer product */ + layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE); + layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info); + } else if (d71->max_line_size > 2048) { + /* D71 4K */ + layer->line_sz = d71->max_line_size; + layer->yuv_line_sz = layer->line_sz / 2; + } else { + /* D71 2K */ + if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) { + /* rich layer is 4K configuration */ + layer->line_sz = d71->max_line_size * 2; + layer->yuv_line_sz = layer->line_sz / 2; + } else { + layer->line_sz = d71->max_line_size; + layer->yuv_line_sz = 0; + } + } + + set_range(&layer->hsize_in, 4, layer->line_sz); + set_range(&layer->vsize_in, 4, d71->max_vsize); malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP); @@ -456,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71, wb_layer = to_layer(c); wb_layer->layer_type = KOMEDA_FMT_WB_LAYER; + wb_layer->line_sz = get_blk_line_size(d71, reg); + wb_layer->yuv_line_sz = wb_layer->line_sz; - set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size); - set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize); + set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz); + set_range(&wb_layer->vsize_in, 64, d71->max_vsize); return 0; } @@ -595,8 +684,8 @@
static int d71_compiz_init(struct d71_dev *d71, compiz = to_compiz(c); - set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size); - set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize); + set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg)); + set_range(&compiz->vsize, 64, d71->max_vsize); return 0; } @@ -703,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c, static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf) { - u32 v[9]; + u32 v[10]; dump_block_header(sf, c->reg); @@ -723,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf) seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]); seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]); seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]); + + get_values_from_reg(c->reg, 0x130, 10, v); + seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]); + seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]); + seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]); + seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]); + seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]); + seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]); + seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]); + seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]); + seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]); + seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]); } static const struct komeda_component_funcs d71_scaler_funcs = { @@ -753,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71, } scaler = to_scaler(c); - set_range(&scaler->hsize, 4, 2048); + set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048)); set_range(&scaler->vsize, 4, 4096); scaler->max_downscaling = 6; scaler->max_upscaling = 64; @@ -862,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71, splitter = to_splitter(c); - set_range(&splitter->hsize, 4, d71->max_line_size); + set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg)); set_range(&splitter->vsize, 4, d71->max_vsize); return 0; @@ -933,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71, merger = to_merger(c); - set_range(&merger->hsize_merged, 4, 4032); + set_range(&merger->hsize_merged, 4, + __get_blk_line_size(d71, reg, 4032)); set_range(&merger->vsize_merged, 4, 4096); return 0; @@ -944,13 +1046,26 @@ static void d71_improc_update(struct komeda_component *c, { struct komeda_improc_state *st = to_improc_st(state); u32 __iomem *reg = c->reg; - u32 index; + u32 index, mask = 0, ctrl = 0; for_each_changed_input(state, index) malidp_write32(reg, BLK_INPUT_ID0 + index * 4, to_d71_input_id(state, index)); malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize)); + malidp_write32(reg, IPS_DEPTH, st->color_depth); + + mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420; + + /* config color format */ + if (st->color_format == DRM_COLOR_FORMAT_YCRCB420) + ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420; + else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422) + ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422; + else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444) + ctrl |= IPS_CTRL_YUV; + + malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl); } static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf) diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h index 2d5e6d00b42c..1727dc993909 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h @@ -10,6 +10,7 @@ /* Common block registers offset */ #define 
BLK_BLOCK_INFO 0x000 #define BLK_PIPELINE_INFO 0x004 +#define BLK_MAX_LINE_SIZE 0x008 #define BLK_VALID_INPUT_ID0 0x020 #define BLK_OUTPUT_ID0 0x060 #define BLK_INPUT_ID0 0x080 @@ -321,6 +322,7 @@ #define L_INFO_RF BIT(0) #define L_INFO_CM BIT(1) #define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7) +#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF) /* Scaler registers */ #define SC_COEFFTAB 0x0DC @@ -494,13 +496,6 @@ enum d71_blk_type { #define D71_DEFAULT_PREPRETCH_LINE 5 #define D71_BUS_WIDTH_16_BYTES 16 -#define D71_MIN_LINE_SIZE 64 -#define D71_MIN_VERTICAL_SIZE 64 -#define D71_SC_MIN_LIN_SIZE 4 -#define D71_SC_MIN_VERTICAL_SIZE 4 -#define D71_SC_MAX_LIN_SIZE 2048 -#define D71_SC_MAX_VERTICAL_SIZE 4096 - #define D71_SC_MAX_UPSCALING 64 #define D71_SC_MAX_DOWNSCALING 6 #define D71_SC_SPLIT_OVERLAP 8 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 75263d8cd0bd..252015210fbc 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -17,6 +17,33 @@ #include "komeda_dev.h" #include "komeda_kms.h" +void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st, + u32 *color_depths, u32 *color_formats) +{ + struct drm_connector *conn; + struct drm_connector_state *conn_st; + u32 conn_color_formats = ~0u; + int i, min_bpc = 31, conn_bpc = 0; + + for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) { + if (conn_st->crtc != crtc_st->crtc) + continue; + + conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8; + conn_color_formats &= conn->display_info.color_formats; + + if (conn_bpc < min_bpc) + min_bpc = conn_bpc; + } + + /* connector doesn't config any color_format, use RGB444 as default */ + if (!conn_color_formats) + conn_color_formats = DRM_COLOR_FORMAT_RGB444; + + *color_depths = GENMASK(min_bpc, 0); + *color_formats = conn_color_formats; +} + static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st) { u64 pxlclk, aclk; @@ -296,7 +323,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc, struct komeda_crtc_state *old_st = to_kcrtc_st(old); struct komeda_pipeline *master = kcrtc->master; struct komeda_pipeline *slave = kcrtc->slave; - struct completion *disable_done = &crtc->state->commit->flip_done; + struct completion *disable_done; bool needs_phase2 = false; DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n", diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index d49772de93e0..52648b4008bc 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -84,7 +84,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, 0); + drm_atomic_helper_commit_planes(dev, old_state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_modeset_enables(dev, old_state); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h index 45c498e15e7a..456f3c435719 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h @@ -166,6 +166,8 @@ static inline bool has_flip_h(u32 rot) return !!(rotation & DRM_MODE_REFLECT_X); } +void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st, + u32 *color_depths, u32 *color_formats); unsigned long 
komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st); int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h index b322f52ba8f2..bd6ca7c87037 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -227,6 +227,8 @@ struct komeda_layer { /* accepted h/v input range before rotation */ struct malidp_range hsize_in, vsize_in; u32 layer_type; /* RICH, SIMPLE or WB */ + u32 line_sz; + u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */ u32 supported_rots; /* komeda supports layer split which splits a whole image to two parts * left and right and handle them by two individual layer processors @@ -323,6 +325,7 @@ struct komeda_improc { struct komeda_improc_state { struct komeda_component_state base; + u8 color_format, color_depth; u16 hsize, vsize; }; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index 0ba9c6aa3708..52750116aa19 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer, struct komeda_data_flow_cfg *dflow) { u32 src_x, src_y, src_w, src_h; + u32 line_sz, max_line_sz; if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot)) return -EINVAL; @@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer, return -EINVAL; } + if (drm_rotation_90_or_270(dflow->rot)) + line_sz = dflow->in_h; + else + line_sz = dflow->in_w; + + if (kfb->base.format->hsub > 1) + max_line_sz = layer->yuv_line_sz; + else + max_line_sz = layer->line_sz; + + if (line_sz > max_line_sz) { + DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n", + line_sz, max_line_sz); + return -EINVAL; + } + return 0; } @@ -564,8 +581,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter, } if (!in_range(&splitter->vsize, dflow->in_h)) { - DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n", - dflow->in_w); + DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n", + dflow->in_h); return -EINVAL; } @@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc, struct komeda_data_flow_cfg *dflow) { struct drm_crtc *crtc = kcrtc_st->base.crtc; + struct drm_crtc_state *crtc_st = &kcrtc_st->base; struct komeda_component_state *c_st; struct komeda_improc_state *st; @@ -756,6 +774,34 @@ komeda_improc_validate(struct komeda_improc *improc, st->hsize = dflow->in_w; st->vsize = dflow->in_h; + if (drm_atomic_crtc_needs_modeset(crtc_st)) { + u32 output_depths, output_formats; + u32 avail_depths, avail_formats; + + komeda_crtc_get_color_config(crtc_st, &output_depths, + &output_formats); + + avail_depths = output_depths & improc->supported_color_depths; + if (avail_depths == 0) { + DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n", + output_depths, + improc->supported_color_depths); + return -EINVAL; + } + + avail_formats = output_formats & + improc->supported_color_formats; + if (!avail_formats) { + DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n", + output_formats, + improc->supported_color_formats); + return -EINVAL; + } + + st->color_depth = __fls(avail_depths); + st->color_format = BIT(__ffs(avail_formats)); + } + 
komeda_component_add_input(&st->base, &dflow->input, 0); komeda_component_set_output(&dflow->input, &improc->base, 0); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index b72840c06ab7..e465cc4879c9 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ -141,6 +141,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, struct komeda_dev *mdev = kms->base.dev_private; struct komeda_wb_connector *kwb_conn; struct drm_writeback_connector *wb_conn; + struct drm_display_info *info; u32 *formats, n_formats = 0; int err; @@ -172,6 +173,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs); + info = &kwb_conn->base.base.display_info; + info->bpc = __fls(kcrtc->master->improc->supported_color_depths); + info->color_formats = kcrtc->master->improc->supported_color_formats; + kcrtc->wb_conn = kwb_conn; return 0; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 333b88a5efb0..37d92a06318e 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -368,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file, return false; } -struct drm_framebuffer * +static struct drm_framebuffer * malidp_fb_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) { @@ -491,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp, spin_unlock_irqrestore(&malidp->errors_lock, irqflags); } -void malidp_error_stats_dump(const char *prefix, - struct malidp_error_stats error_stats, - struct seq_file *m) +static void malidp_error_stats_dump(const char *prefix, - struct malidp_error_stats error_stats, - struct seq_file *m) +static void malidp_error_stats_dump(const char *prefix, + struct malidp_error_stats error_stats, + struct seq_file *m) { seq_printf(m, "[%s] num_errors : %d\n", prefix, error_stats.num_errors); @@ -665,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id); } -DEVICE_ATTR_RO(core_id); +static DEVICE_ATTR_RO(core_id); static int malidp_init_sysfs(struct device *dev) { @@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev) malidp->core_id = version; + ret = of_property_read_u32(dev->of_node, + "arm,malidp-arqos-value", + &hwdev->arqos_value); + if (ret) + hwdev->arqos_value = 0x0; + /* set the number of lines used for output of RGB data */ ret = of_property_read_u8_array(dev->of_node, "arm,malidp-output-port-lines", diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index bd8265f02e0b..ca570b135478 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode * malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); else malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + + /* + * Program the RQoS register to avoid a flicker issue seen at high + * resolutions on the LS1028A.
+ */ + if (hwdev->arqos_value) { + val = hwdev->arqos_value; + malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY); + } } int malidp_format_get_bpp(u32 fmt) diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 968a65eed371..e4c36bc90bda 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -251,6 +251,9 @@ struct malidp_hw_device { /* size of memory used for rotating layers, up to two banks available */ u32 rotation_memory[2]; + + /* priority level of the RQOS register used for driving the ARQOS signal */ + u32 arqos_value; }; static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 993031542fa1..514c50dcb74d 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -210,6 +210,16 @@ #define MALIDP500_CONFIG_VALID 0x00f00 #define MALIDP500_CONFIG_ID 0x00fd4 +/* + * The quality of service (QoS) register on the DP500. RQOS register values + * are driven by the ARQOS signal, using AXI transactions, dependent on the + * FIFO input level. + * The RQOS register can also set QoS levels for: + * - RED_ARQOS @ A 4-bit signal value for close to underflow conditions + * - GREEN_ARQOS @ A 4-bit signal value for normal conditions + */ +#define MALIDP500_RQOS_QUALITY 0x00500 + /* register offsets and bits specific to DP550/DP650 */ #define MALIDP550_ADDR_SPACE_SIZE 0x10000 #define MALIDP550_DE_CONTROL 0x00010 diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index e0e8770462bc..1f17794b0890 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -200,10 +200,7 @@ static struct pci_driver ast_pci_driver = { .driver.pm = &ast_pm_ops, }; -static const struct file_operations ast_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(ast_fops); static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 89f5a756fa37..034f202dfe8f 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -601,7 +601,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, struct drm_framebuffer *fb = state->base.fb; const struct drm_display_mode *mode; struct drm_crtc_state *crtc_state; - unsigned int tmp; int ret; int i; @@ -694,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, * Swap width and height in case of 90 or 270 degrees rotation */ if (drm_rotation_90_or_270(state->base.rotation)) { - tmp = state->src_w; - state->src_w = state->src_h; - state->src_h = tmp; + swap(state->src_w, state->src_h); } if (!desc->layout.size && diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index 3b9b0d9bbc14..10460878414e 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -58,10 +58,7 @@ err: return ret; } -static const struct file_operations bochs_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(bochs_fops); static struct drm_driver bochs_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 02a9c1ed165b..3f0006c2470d 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@
-69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, } } -static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state) -{ - struct drm_gem_vram_object *gbo; - - if (!new_state->fb) - return 0; - gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]); - return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); -} - -static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) -{ - struct drm_gem_vram_object *gbo; - - if (!old_state->fb) - return; - gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]); - drm_gem_vram_unpin(gbo); -} - static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { .enable = bochs_pipe_enable, .update = bochs_pipe_update, - .prepare_fb = bochs_pipe_prepare_fb, - .cleanup_fb = bochs_pipe_cleanup_fb, + .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, + .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, }; static int bochs_connector_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 1cc9f502c1f2..34362976cd6f 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -87,8 +87,7 @@ config DRM_SIL_SII8620 depends on OF select DRM_KMS_HELPER imply EXTCON - select INPUT - select RC_CORE + depends on RC_CORE || !RC_CORE help Silicon Image SII8620 HDMI/MHL bridge chip driver. diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index 31118d385580..274989f96a91 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -39,12 +39,20 @@ #define AUX_CH_BUFFER_SIZE 16 #define AUX_WAIT_TIMEOUT_MS 15 -static const u8 anx78xx_i2c_addresses[] = { - [I2C_IDX_TX_P0] = TX_P0, - [I2C_IDX_TX_P1] = TX_P1, - [I2C_IDX_TX_P2] = TX_P2, - [I2C_IDX_RX_P0] = RX_P0, - [I2C_IDX_RX_P1] = RX_P1, +static const u8 anx7808_i2c_addresses[] = { + [I2C_IDX_TX_P0] = 0x78, + [I2C_IDX_TX_P1] = 0x7a, + [I2C_IDX_TX_P2] = 0x72, + [I2C_IDX_RX_P0] = 0x7e, + [I2C_IDX_RX_P1] = 0x80, +}; + +static const u8 anx781x_i2c_addresses[] = { + [I2C_IDX_TX_P0] = 0x70, + [I2C_IDX_TX_P1] = 0x7a, + [I2C_IDX_TX_P2] = 0x72, + [I2C_IDX_RX_P0] = 0x7e, + [I2C_IDX_RX_P1] = 0x80, }; struct anx78xx_platform_data { @@ -63,7 +71,6 @@ struct anx78xx { struct i2c_client *client; struct edid *edid; struct drm_connector connector; - struct drm_dp_link link; struct anx78xx_platform_data pdata; struct mutex lock; @@ -740,7 +747,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx) static int anx78xx_dp_link_training(struct anx78xx *anx78xx) { - u8 dp_bw, value; + u8 dp_bw, dpcd[2]; int err; err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG, @@ -793,18 +800,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx) if (err) return err; - /* Check link capabilities */ - err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link); - if (err < 0) { - DRM_ERROR("Failed to probe link capabilities: %d\n", err); - return err; - } + /* + * Power up the sink (DP_SET_POWER register is only available on DPCD + * v1.1 and later). 
+ */ + if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) { + err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]); + if (err < 0) { + DRM_ERROR("Failed to read DP_SET_POWER register: %d\n", + err); + return err; + } - /* Power up the sink */ - err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link); - if (err < 0) { - DRM_ERROR("Failed to power up DisplayPort link: %d\n", err); - return err; + dpcd[0] &= ~DP_SET_POWER_MASK; + dpcd[0] |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]); + if (err < 0) { + DRM_ERROR("Failed to power up DisplayPort link: %d\n", + err); + return err; + } + + /* + * According to the DP 1.1 specification, a "Sink Device must + * exit the power saving state within 1 ms" (Section 2.5.3.1, + * Table 5-52, "Sink Control Field" (register 0x600). + */ + usleep_range(1000, 2000); } /* Possibly enable downspread on the sink */ @@ -843,15 +866,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx) if (err) return err; - value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate); + dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd); + dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]); err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], - SP_DP_MAIN_LINK_BW_SET_REG, value); + SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]); if (err) return err; - err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link); + dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd); + + if (drm_dp_enhanced_frame_cap(anx78xx->dpcd)) + dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd, + sizeof(dpcd)); if (err < 0) { - DRM_ERROR("Failed to configure DisplayPort link: %d\n", err); + DRM_ERROR("Failed to configure link: %d\n", err); return err; } @@ -1316,6 +1346,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client, struct anx78xx *anx78xx; struct anx78xx_platform_data *pdata; unsigned int i, idl, idh, version; + const u8 *i2c_addresses; bool found = false; int err; @@ -1355,15 +1386,16 @@ static int anx78xx_i2c_probe(struct i2c_client *client, } /* Map slave addresses of ANX7814 */ + i2c_addresses = device_get_match_data(&client->dev); for (i = 0; i < I2C_NUM_ADDRESSES; i++) { struct i2c_client *i2c_dummy; i2c_dummy = i2c_new_dummy_device(client->adapter, - anx78xx_i2c_addresses[i] >> 1); + i2c_addresses[i] >> 1); if (IS_ERR(i2c_dummy)) { err = PTR_ERR(i2c_dummy); DRM_ERROR("Failed to reserve I2C bus %02x: %d\n", - anx78xx_i2c_addresses[i], err); + i2c_addresses[i], err); goto err_unregister_i2c; } @@ -1373,7 +1405,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client, if (IS_ERR(anx78xx->map[i])) { err = PTR_ERR(anx78xx->map[i]); DRM_ERROR("Failed regmap initialization %02x\n", - anx78xx_i2c_addresses[i]); + i2c_addresses[i]); goto err_unregister_i2c; } } @@ -1472,10 +1504,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id); #if IS_ENABLED(CONFIG_OF) static const struct of_device_id anx78xx_match_table[] = { - { .compatible = "analogix,anx7808", }, - { .compatible = "analogix,anx7812", }, - { .compatible = "analogix,anx7814", }, - { .compatible = "analogix,anx7818", }, + { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses }, + { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses }, + { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses }, + { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, anx78xx_match_table); diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h index 
25e063bcecbc..55d6c2109740 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.h +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h @@ -6,15 +6,8 @@ #ifndef __ANX78xx_H #define __ANX78xx_H -#define TX_P0 0x70 -#define TX_P1 0x7a -#define TX_P2 0x72 - -#define RX_P0 0x7e -#define RX_P1 0x80 - /***************************************************************/ -/* Register definition of device address 0x7e */ +/* Register definitions for RX_P0 */ /***************************************************************/ /* @@ -171,7 +164,7 @@ #define SP_VSI_RCVD BIT(1) /***************************************************************/ -/* Register definition of device address 0x80 */ +/* Register definitions for RX_P1 */ /***************************************************************/ /* HDCP BCAPS Shadow Register */ @@ -217,7 +210,7 @@ #define SP_SET_AVMUTE BIT(0) /***************************************************************/ -/* Register definition of device address 0x70 */ +/* Register definitions for TX_P0 */ /***************************************************************/ /* HDCP Status Register */ @@ -451,7 +444,7 @@ #define SP_DP_BUF_DATA0_REG 0xf0 /***************************************************************/ -/* Register definition of device address 0x72 */ +/* Register definitions for TX_P2 */ /***************************************************************/ /* @@ -674,7 +667,7 @@ #define SP_INT_CTRL_REG 0xff /***************************************************************/ -/* Register definition of device address 0x7a */ +/* Register definitions for TX_P1 */ /***************************************************************/ /* DP TX Link Training Control Register */ diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c index ad00d841ed9e..f81f81b7051f 100644 --- a/drivers/gpu/drm/bridge/sii9234.c +++ b/drivers/gpu/drm/bridge/sii9234.c @@ -842,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx, ctx->client[I2C_MHL] = client; - ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR); - if (!ctx->client[I2C_TPI]) { + ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter, + I2C_TPI_ADDR); + if (IS_ERR(ctx->client[I2C_TPI])) { dev_err(ctx->dev, "failed to create TPI client\n"); - return -ENODEV; + return PTR_ERR(ctx->client[I2C_TPI]); } - ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR); - if (!ctx->client[I2C_HDMI]) { + ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter, + I2C_HDMI_ADDR); + if (IS_ERR(ctx->client[I2C_HDMI])) { dev_err(ctx->dev, "failed to create HDMI RX client\n"); - goto fail_tpi; + return PTR_ERR(ctx->client[I2C_HDMI]); } - ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR); - if (!ctx->client[I2C_CBUS]) { + ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter, + I2C_CBUS_ADDR); + if (IS_ERR(ctx->client[I2C_CBUS])) { dev_err(ctx->dev, "failed to create CBUS client\n"); - goto fail_hdmi; + return PTR_ERR(ctx->client[I2C_CBUS]); } return 0; - -fail_hdmi: - i2c_unregister_device(ctx->client[I2C_HDMI]); -fail_tpi: - i2c_unregister_device(ctx->client[I2C_TPI]); - - return -ENODEV; -} - -static void sii9234_deinit_resources(struct sii9234 *ctx) -{ - i2c_unregister_device(ctx->client[I2C_CBUS]); - i2c_unregister_device(ctx->client[I2C_HDMI]); - i2c_unregister_device(ctx->client[I2C_TPI]); } static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge) @@ -951,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client)
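	/* the dummy I2C clients are devm-managed now, so remove() no longer unregisters them by hand */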
sii9234_cable_out(ctx); drm_bridge_remove(&ctx->bridge); - sii9234_deinit_resources(ctx); return 0; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 14643923a721..4c0eef406eb1 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -1760,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode) scancode &= MHL_RCP_KEY_ID_MASK; - if (!ctx->rc_dev) { - dev_dbg(ctx->dev, "RCP input device not initialized\n"); + if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev) return false; - } if (pressed) rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0); @@ -2100,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) struct rc_dev *rc_dev; int ret; + if (!IS_ENABLED(CONFIG_RC_CORE)) + return; + rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); if (!rc_dev) { dev_err(ctx->dev, "Failed to allocate RC device\n"); @@ -2214,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge) { struct sii8620 *ctx = bridge_to_sii8620(bridge); + if (!IS_ENABLED(CONFIG_RC_CORE)) + return; + rc_unregister_device(ctx->rc_dev); } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index a15fbf71b9d7..dbe38a54870b 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -25,6 +25,7 @@ #include <uapi/linux/videodev2.h> #include <drm/bridge/dw_hdmi.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -1743,6 +1744,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi, HDMI_FC_DATAUTO0_VSD_MASK); } +static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi) +{ + const struct drm_connector_state *conn_state = hdmi->connector.state; + struct hdmi_drm_infoframe frame; + u8 buffer[30]; + ssize_t err; + int i; + + if (!hdmi->plat_data->use_drm_infoframe) + return; + + hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE, + HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); + + err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state); + if (err < 0) + return; + + err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err); + return; + } + + hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0); + hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1); + + for (i = 0; i < frame.length; i++) + hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i); + + hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP); + hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE, + HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); +} + static void hdmi_av_composer(struct dw_hdmi *hdmi, const struct drm_display_mode *mode) { @@ -2054,7 +2090,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); - hdmi_enable_audio_clk(hdmi, true); + hdmi_enable_audio_clk(hdmi, hdmi->audio_enable); } /* not for DVI mode */ @@ -2064,6 +2100,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step F - Configure AVI InfoFrame */ hdmi_config_AVI(hdmi, mode); hdmi_config_vendor_specific_infoframe(hdmi, mode); + hdmi_config_drm_infoframe(hdmi); } else { dev_dbg(hdmi->dev, "%s DVI mode\n", __func__); } @@ -2230,6 +2267,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } +static bool 
hdr_metadata_equal(const struct drm_connector_state *old_state, + const struct drm_connector_state *new_state) +{ + struct drm_property_blob *old_blob = old_state->hdr_output_metadata; + struct drm_property_blob *new_blob = new_state->hdr_output_metadata; + + if (!old_blob || !new_blob) + return old_blob == new_blob; + + if (old_blob->length != new_blob->length) + return false; + + return !memcmp(old_blob->data, new_blob->data, old_blob->length); +} + +static int dw_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_connector_state *old_state = + drm_atomic_get_old_connector_state(state, connector); + struct drm_connector_state *new_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc *crtc = new_state->crtc; + struct drm_crtc_state *crtc_state; + + if (!crtc) + return 0; + + if (!hdr_metadata_equal(old_state, new_state)) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->mode_changed = true; + } + + return 0; +} + static void dw_hdmi_connector_force(struct drm_connector *connector) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, @@ -2254,6 +2330,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, + .atomic_check = dw_hdmi_connector_atomic_check, }; static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) @@ -2274,6 +2351,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) DRM_MODE_CONNECTOR_HDMIA, hdmi->ddc); + if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.hdr_output_metadata_property, 0); + drm_connector_attach_encoder(connector, encoder); cec_fill_conn_info_from_drm(&conn_info, connector); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h index fcff5059db24..1999db05bc3b 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h @@ -254,6 +254,7 @@ #define HDMI_FC_POL2 0x10DB #define HDMI_FC_PRCONF 0x10E0 #define HDMI_FC_SCRAMBLER_CTRL 0x10E1 +#define HDMI_FC_PACKET_TX_EN 0x10E3 #define HDMI_FC_GMD_STAT 0x1100 #define HDMI_FC_GMD_EN 0x1101 @@ -289,6 +290,37 @@ #define HDMI_FC_GMD_PB26 0x111F #define HDMI_FC_GMD_PB27 0x1120 +#define HDMI_FC_DRM_UP 0x1167 +#define HDMI_FC_DRM_HB0 0x1168 +#define HDMI_FC_DRM_HB1 0x1169 +#define HDMI_FC_DRM_PB0 0x116A +#define HDMI_FC_DRM_PB1 0x116B +#define HDMI_FC_DRM_PB2 0x116C +#define HDMI_FC_DRM_PB3 0x116D +#define HDMI_FC_DRM_PB4 0x116E +#define HDMI_FC_DRM_PB5 0x116F +#define HDMI_FC_DRM_PB6 0x1170 +#define HDMI_FC_DRM_PB7 0x1171 +#define HDMI_FC_DRM_PB8 0x1172 +#define HDMI_FC_DRM_PB9 0x1173 +#define HDMI_FC_DRM_PB10 0x1174 +#define HDMI_FC_DRM_PB11 0x1175 +#define HDMI_FC_DRM_PB12 0x1176 +#define HDMI_FC_DRM_PB13 0x1177 +#define HDMI_FC_DRM_PB14 0x1178 +#define HDMI_FC_DRM_PB15 0x1179 +#define HDMI_FC_DRM_PB16 0x117A +#define HDMI_FC_DRM_PB17 0x117B +#define HDMI_FC_DRM_PB18 0x117C +#define HDMI_FC_DRM_PB19 0x117D +#define HDMI_FC_DRM_PB20 0x117E +#define HDMI_FC_DRM_PB21 0x117F +#define HDMI_FC_DRM_PB22 0x1180 +#define HDMI_FC_DRM_PB23 0x1181 +#define HDMI_FC_DRM_PB24 0x1182 +#define HDMI_FC_DRM_PB25 0x1183 +#define HDMI_FC_DRM_PB26 0x1184 + #define HDMI_FC_DBGFORCE 0x1200 #define HDMI_FC_DBGAUD0CH0 0x1201 #define 
HDMI_FC_DBGAUD1CH0 0x1202 @@ -744,6 +776,11 @@ enum { HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F, HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0, +/* FC_PACKET_TX_EN field values */ + HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80, + HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80, + HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00, + /* FC_AVICONF0-FC_AVICONF3 field values */ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03, HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00, diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 51664a2df731..8029478ffebb 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -229,7 +229,9 @@ static bool tc_test_pattern; module_param_named(test, tc_test_pattern, bool, 0644); struct tc_edp_link { - struct drm_dp_link base; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + unsigned int rate; + u8 num_lanes; u8 assr; bool scrambler_dis; bool spread; @@ -438,9 +440,9 @@ static u32 tc_srcctrl(struct tc_data *tc) reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */ if (tc->link.spread) reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */ - if (tc->link.base.num_lanes == 2) + if (tc->link.num_lanes == 2) reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */ - if (tc->link.base.rate != 162000) + if (tc->link.rate != 162000) reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */ return reg; } @@ -663,23 +665,35 @@ err: static int tc_get_display_props(struct tc_data *tc) { + u8 revision, num_lanes; + unsigned int rate; int ret; u8 reg; /* Read DP Rx Link Capability */ - ret = drm_dp_link_probe(&tc->aux, &tc->link.base); + ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd, + DP_RECEIVER_CAP_SIZE); if (ret < 0) goto err_dpcd_read; - if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { + + revision = tc->link.dpcd[DP_DPCD_REV]; + rate = drm_dp_max_link_rate(tc->link.dpcd); + num_lanes = drm_dp_max_lane_count(tc->link.dpcd); + + if (rate != 162000 && rate != 270000) { dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); - tc->link.base.rate = 270000; + rate = 270000; } - if (tc->link.base.num_lanes > 2) { + tc->link.rate = rate; + + if (num_lanes > 2) { dev_dbg(tc->dev, "Falling to 2 lanes\n"); - tc->link.base.num_lanes = 2; + num_lanes = 2; } + tc->link.num_lanes = num_lanes; + ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, ®); if (ret < 0) goto err_dpcd_read; @@ -697,11 +711,11 @@ static int tc_get_display_props(struct tc_data *tc) tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE; dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n", - tc->link.base.revision >> 4, tc->link.base.revision & 0x0f, - (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps", - tc->link.base.num_lanes, - (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ? - "enhanced" : "non-enhanced"); + revision >> 4, revision & 0x0f, + (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps", + tc->link.num_lanes, + drm_dp_enhanced_frame_cap(tc->link.dpcd) ? + "enhanced" : "default"); dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n", tc->link.spread ? "0.5%" : "0.0%", tc->link.scrambler_dis ? 
"disabled" : "enabled"); @@ -729,6 +743,8 @@ static int tc_set_video_mode(struct tc_data *tc, int lower_margin = mode->vsync_start - mode->vdisplay; int vsync_len = mode->vsync_end - mode->vsync_start; u32 dp0_syncval; + u32 bits_per_pixel = 24; + u32 in_bw, out_bw; /* * Recommended maximum number of symbols transferred in a transfer unit: @@ -736,7 +752,10 @@ static int tc_set_video_mode(struct tc_data *tc, * (output active video bandwidth in bytes)) * Must be less than tu_size. */ - max_tu_symbol = TU_SIZE_RECOMMENDED - 1; + + in_bw = mode->clock * bits_per_pixel / 8; + out_bw = tc->link.num_lanes * tc->link.rate; + max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw); dev_dbg(tc->dev, "set mode %dx%d\n", mode->hdisplay, mode->vdisplay); @@ -897,7 +916,7 @@ static int tc_main_link_enable(struct tc_data *tc) /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ ret = regmap_write(tc->regmap, DP1_SRCCTRL, (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | - ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); + ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); if (ret) return ret; @@ -907,7 +926,7 @@ static int tc_main_link_enable(struct tc_data *tc) /* Setup Main Link */ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; - if (tc->link.base.num_lanes == 2) + if (tc->link.num_lanes == 2) dp_phy_ctrl |= PHY_2LANE; ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); @@ -970,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc) } /* Setup Link & DPRx Config for Training */ - ret = drm_dp_link_configure(aux, &tc->link.base); + tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate); + tmp[1] = tc->link.num_lanes; + + if (drm_dp_enhanced_frame_cap(tc->link.dpcd)) + tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2); if (ret < 0) goto err_dpcd_write; @@ -1014,9 +1039,8 @@ static int tc_main_link_enable(struct tc_data *tc) /* Enable DP0 to start Link Training */ ret = regmap_write(tc->regmap, DP0CTL, - ((tc->link.base.capabilities & - DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) | - DP_EN); + (drm_dp_enhanced_frame_cap(tc->link.dpcd) ? 
+ EF_EN : 0) | DP_EN); if (ret) return ret; @@ -1095,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc) ret = -ENODEV; } - if (tc->link.base.num_lanes == 2) { + if (tc->link.num_lanes == 2) { value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS; if (value != DP_CHANNEL_EQ_BITS) { @@ -1166,7 +1190,7 @@ static int tc_stream_enable(struct tc_data *tc) return ret; value = VID_MN_GEN | DP_EN; - if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (drm_dp_enhanced_frame_cap(tc->link.dpcd)) value |= EF_EN; ret = regmap_write(tc->regmap, DP0CTL, value); if (ret) @@ -1292,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge, return MODE_CLOCK_HIGH; req = mode->clock * bits_per_pixel / 8; - avail = tc->link.base.num_lanes * tc->link.base.rate; + avail = tc->link.num_lanes * tc->link.rate; if (req > avail) return MODE_BAD; diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c index 89d9e6fdeb8c..248c9f765c45 100644 --- a/drivers/gpu/drm/cirrus/cirrus.c +++ b/drivers/gpu/drm/cirrus/cirrus.c @@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus) /* ------------------------------------------------------------------ */ /* cirrus (simple) display pipe */ -static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc, +static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0) @@ -510,7 +510,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus) /* ------------------------------------------------------------------ */ -DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops); +DEFINE_DRM_GEM_FOPS(cirrus_fops); static struct drm_driver cirrus_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h deleted file mode 100644 index 1f73916e528e..000000000000 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ /dev/null @@ -1,247 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2012 Red Hat - * - * Authors: Matthew Garrett - * Dave Airlie - */ -#ifndef __CIRRUS_DRV_H__ -#define __CIRRUS_DRV_H__ - -#include <video/vga.h> - -#include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> - -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_memory.h> -#include <drm/ttm/ttm_module.h> - -#include <drm/drm_gem.h> - -#define DRIVER_AUTHOR "Matthew Garrett" - -#define DRIVER_NAME "cirrus" -#define DRIVER_DESC "qemu Cirrus emulation" -#define DRIVER_DATE "20110418" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define CIRRUSFB_CONN_LIMIT 1 - -#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg)) -#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg)) -#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg)) -#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg)) - -#define SEQ_INDEX 4 -#define SEQ_DATA 5 - -#define WREG_SEQ(reg, v) \ - do { \ - WREG8(SEQ_INDEX, reg); \ - WREG8(SEQ_DATA, v); \ - } while (0) \ - -#define CRT_INDEX 0x14 -#define CRT_DATA 0x15 - -#define WREG_CRT(reg, v) \ - do { \ - WREG8(CRT_INDEX, reg); \ - WREG8(CRT_DATA, v); \ - } while (0) \ - -#define GFX_INDEX 0xe -#define GFX_DATA 0xf - -#define WREG_GFX(reg, v) \ - do { \ - WREG8(GFX_INDEX, reg); \ - WREG8(GFX_DATA, v); \ - } 
while (0) \ - -/* - * Cirrus has a "hidden" DAC register that can be accessed by writing to - * the pixel mask register to reset the state, then reading from the register - * four times. The next write will then pass to the DAC - */ -#define VGA_DAC_MASK 0x6 - -#define WREG_HDR(v) \ - do { \ - RREG8(VGA_DAC_MASK); \ - RREG8(VGA_DAC_MASK); \ - RREG8(VGA_DAC_MASK); \ - RREG8(VGA_DAC_MASK); \ - WREG8(VGA_DAC_MASK, v); \ - } while (0) \ - - -#define CIRRUS_MAX_FB_HEIGHT 4096 -#define CIRRUS_MAX_FB_WIDTH 4096 - -#define CIRRUS_DPMS_CLEARED (-1) - -#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base) -#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base) - -struct cirrus_crtc { - struct drm_crtc base; - int last_dpms; - bool enabled; -}; - -struct cirrus_fbdev; -struct cirrus_mode_info { - struct cirrus_crtc *crtc; - /* pointer to fbdev info structure */ - struct cirrus_fbdev *gfbdev; -}; - -struct cirrus_encoder { - struct drm_encoder base; - int last_dpms; -}; - -struct cirrus_connector { - struct drm_connector base; -}; - -struct cirrus_mc { - resource_size_t vram_size; - resource_size_t vram_base; -}; - -struct cirrus_device { - struct drm_device *dev; - unsigned long flags; - - resource_size_t rmmio_base; - resource_size_t rmmio_size; - void __iomem *rmmio; - - struct cirrus_mc mc; - struct cirrus_mode_info mode_info; - - int num_crtc; - int fb_mtrr; - - struct { - struct ttm_bo_device bdev; - } ttm; - bool mm_inited; -}; - - -struct cirrus_fbdev { - struct drm_fb_helper helper; /* must be first */ - struct drm_framebuffer *gfb; - void *sysram; - int size; - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; -}; - -struct cirrus_bo { - struct ttm_buffer_object bo; - struct ttm_placement placement; - struct ttm_bo_kmap_obj kmap; - struct drm_gem_object gem; - struct ttm_place placements[3]; - int pin_count; -}; -#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem) - -static inline struct cirrus_bo * -cirrus_bo(struct ttm_buffer_object *bo) -{ - return container_of(bo, struct cirrus_bo, bo); -} - - -#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base) - - /* cirrus_main.c */ -int cirrus_device_init(struct cirrus_device *cdev, - struct drm_device *ddev, - struct pci_dev *pdev, - uint32_t flags); -void cirrus_device_fini(struct cirrus_device *cdev); -void cirrus_gem_free_object(struct drm_gem_object *obj); -int cirrus_dumb_mmap_offset(struct drm_file *file, - struct drm_device *dev, - uint32_t handle, - uint64_t *offset); -int cirrus_gem_create(struct drm_device *dev, - u32 size, bool iskernel, - struct drm_gem_object **obj); -int cirrus_dumb_create(struct drm_file *file, - struct drm_device *dev, - struct drm_mode_create_dumb *args); - -int cirrus_framebuffer_init(struct drm_device *dev, - struct drm_framebuffer *gfb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj); - -bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, - int bpp, int pitch); - - /* cirrus_display.c */ -int cirrus_modeset_init(struct cirrus_device *cdev); -void cirrus_modeset_fini(struct cirrus_device *cdev); - - /* cirrus_fbdev.c */ -int cirrus_fbdev_init(struct cirrus_device *cdev); -void cirrus_fbdev_fini(struct cirrus_device *cdev); - - - - /* cirrus_irq.c */ -void cirrus_driver_irq_preinstall(struct drm_device *dev); -int cirrus_driver_irq_postinstall(struct drm_device *dev); -void cirrus_driver_irq_uninstall(struct drm_device *dev); -irqreturn_t cirrus_driver_irq_handler(int irq, 
void *arg); - - /* cirrus_kms.c */ -int cirrus_driver_load(struct drm_device *dev, unsigned long flags); -void cirrus_driver_unload(struct drm_device *dev); -extern struct drm_ioctl_desc cirrus_ioctls[]; -extern int cirrus_max_ioctl; - -int cirrus_mm_init(struct cirrus_device *cirrus); -void cirrus_mm_fini(struct cirrus_device *cirrus); -void cirrus_ttm_placement(struct cirrus_bo *bo, int domain); -int cirrus_bo_create(struct drm_device *dev, int size, int align, - uint32_t flags, struct cirrus_bo **pcirrusbo); -int cirrus_mmap(struct file *filp, struct vm_area_struct *vma); - -static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) -{ - int ret; - - ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); - if (ret) { - if (ret != -ERESTARTSYS && ret != -EBUSY) - DRM_ERROR("reserve failed %p\n", bo); - return ret; - } - return 0; -} - -static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) -{ - ttm_bo_unreserve(&bo->bo); -} - -int cirrus_bo_push_sysram(struct cirrus_bo *bo); -int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); - -extern int cirrus_bpp; - -#endif /* __CIRRUS_DRV_H__ */ diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 587052751b48..b191d39c071d 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1571,8 +1571,11 @@ static void commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; const struct drm_mode_config_helper_funcs *funcs; + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; ktime_t start; s64 commit_time_ms; + unsigned int i, new_self_refresh_mask = 0; funcs = dev->mode_config.helper_private; @@ -1592,6 +1595,15 @@ static void commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_wait_for_dependencies(old_state); + /* + * We cannot safely access new_crtc_state after + * drm_atomic_helper_commit_hw_done(), so figure out which crtcs have + * self-refresh active beforehand: + */ + for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) + if (new_crtc_state->self_refresh_active) + new_self_refresh_mask |= BIT(i); + if (funcs && funcs->atomic_commit_tail) funcs->atomic_commit_tail(old_state); else @@ -1600,7 +1612,8 @@ static void commit_tail(struct drm_atomic_state *old_state) commit_time_ms = ktime_ms_delta(ktime_get(), start); if (commit_time_ms > 0) drm_self_refresh_helper_update_avg_times(old_state, - (unsigned long)commit_time_ms); + (unsigned long)commit_time_ms, + new_self_refresh_mask); drm_atomic_helper_commit_cleanup_done(old_state); diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index d02709dd2d4a..121481f6aa71 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -132,10 +132,10 @@ * planes. Without this property the primary plane is always below the cursor * plane, and ordering between all other planes is undefined. The positive * Z axis points towards the user, i.e. planes with lower Z position values - * are underneath planes with higher Z position values. Note that the Z - * position value can also be immutable, to inform userspace about the - * hard-coded stacking of overlay planes, see - * drm_plane_create_zpos_immutable_property(). + * are underneath planes with higher Z position values. Two planes with the + * same Z position value have undefined ordering.
Note that the Z position + * value can also be immutable, to inform userspace about the hard-coded + * stacking of planes, see drm_plane_create_zpos_immutable_property(). * * pixel blend mode: * Pixel blend mode is set up with drm_plane_create_blend_mode_property(). diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 3bd76e918b5d..03e01b000f7a 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[], { unsigned long i; - mb(); + mb(); /* Full memory barrier used before so that CLFLUSH is ordered */ for (i = 0; i < num_pages; i++) drm_clflush_page(*pages++); - mb(); + mb(); /* Also used after CLFLUSH so that all cache lines are flushed */ } #endif @@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) #elif defined(__powerpc__) unsigned long i; + for (i = 0; i < num_pages; i++) { struct page *page = pages[i]; void *page_virtual; @@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st) if (static_cpu_has(X86_FEATURE_CLFLUSH)) { struct sg_page_iter sg_iter; - mb(); + mb(); /* CLFLUSH is ordered only by using memory barriers */ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) drm_clflush_page(sg_page_iter_page(&sg_iter)); - mb(); + mb(); /* Make sure that every cache line entry is flushed */ return; } @@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length) if (static_cpu_has(X86_FEATURE_CLFLUSH)) { const int size = boot_cpu_data.x86_clflush_size; void *end = addr + length; + addr = (void *)(((unsigned long)addr) & -size); - mb(); + mb(); /* CLFLUSH is only ordered with a full memory barrier */ for (; addr < end; addr += size) clflushopt(addr); clflushopt(end - 1); /* force serialisation */ - mb(); + mb(); /* Ensure that every data cache line entry is flushed */ return; } diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c index b457c16c3a8b..3ab2609f9ec7 100644 --- a/drivers/gpu/drm/drm_dp_cec.c +++ b/drivers/gpu/drm/drm_dp_cec.c @@ -8,10 +8,12 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> + +#include <media/cec.h> + #include <drm/drm_connector.h> +#include <drm/drm_device.h> #include <drm/drm_dp_helper.h> -#include <drm/drmP.h> -#include <media/cec.h> /* * Unfortunately it turns out that we have a chicken-and-egg situation diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index f373798d82f6..2c7870aef469 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -120,33 +120,49 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI } EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); -void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_TRAINING_AUX_RD_MASK; +u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane) +{ + unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2; + u8 value = dp_link_status(link_status, offset); + + return (value >> (lane << 1)) & 0x3; +} +EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor); + +void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; if (rd_interval > 4) - DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n", rd_interval); if (rd_interval == 0
|| dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) - udelay(100); + rd_interval = 100; else - mdelay(rd_interval * 4); + rd_interval *= 4 * USEC_PER_MSEC; + + usleep_range(rd_interval, rd_interval * 2); } EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); -void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_TRAINING_AUX_RD_MASK; +void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; if (rd_interval > 4) - DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n", rd_interval); if (rd_interval == 0) - udelay(400); + rd_interval = 400; else - mdelay(rd_interval * 4); + rd_interval *= 4 * USEC_PER_MSEC; + + usleep_range(rd_interval, rd_interval * 2); } EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); @@ -220,7 +236,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, } ret = aux->transfer(aux, &msg); - if (ret >= 0) { native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK; if (native_reply == DP_AUX_NATIVE_REPLY_ACK) { @@ -337,134 +352,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); /** - * drm_dp_link_probe() - probe a DisplayPort link for capabilities - * @aux: DisplayPort AUX channel - * @link: pointer to structure in which to return link capabilities - * - * The structure filled in by this function can usually be passed directly - * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and - * configure the link based on the link's capabilities. - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 values[3]; - int err; - - memset(link, 0, sizeof(*link)); - - err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); - if (err < 0) - return err; - - link->revision = values[0]; - link->rate = drm_dp_bw_code_to_link_rate(values[1]); - link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; - - if (values[2] & DP_ENHANCED_FRAME_CAP) - link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_probe); - -/** - * drm_dp_link_power_up() - power up a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D0; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - /* - * According to the DP 1.1 specification, a "Sink Device must exit the - * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink - * Control Field" (register 0x600). - */ - usleep_range(1000, 2000); - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_power_up); - -/** - * drm_dp_link_power_down() - power down a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. 
- */ -int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D3; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_power_down); - -/** - * drm_dp_link_configure() - configure a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 values[2]; - int err; - - values[0] = drm_dp_link_rate_to_bw_code(link->rate); - values[1] = link->num_lanes; - - if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) - values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - - err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); - if (err < 0) - return err; - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_configure); - -/** * drm_dp_downstream_max_clock() - extract branch device max * pixel rate for legacy VGA * converter or max TMDS clock diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6b14b63b8d62..ae5809a1f19a 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -28,6 +28,13 @@ #include <linux/sched.h> #include <linux/seq_file.h> +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stacktrace.h> +#include <linux/sort.h> +#include <linux/timekeeping.h> +#include <linux/math64.h> +#endif + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> @@ -45,6 +52,12 @@ * protocol. The helpers contain a topology manager and bandwidth manager. * The helpers encapsulate the sending and receiving of sideband msgs.
*/ +struct drm_dp_pending_up_req { + struct drm_dp_sideband_msg_hdr hdr; + struct drm_dp_sideband_msg_req_body msg; + struct list_head next; +}; + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf); @@ -61,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -1393,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) } EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); -static void drm_dp_destroy_mst_branch_device(struct kref *kref) +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + +#define STACK_DEPTH 8 + +static noinline void +__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_ref_history *history, + enum drm_dp_mst_topology_ref_type type) { - struct drm_dp_mst_branch *mstb = - container_of(kref, struct drm_dp_mst_branch, topology_kref); - struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; - struct drm_dp_mst_port *port, *tmp; - bool wake_tx = false; + struct drm_dp_mst_topology_ref_entry *entry = NULL; + depot_stack_handle_t backtrace; + ulong stack_entries[STACK_DEPTH]; + uint n; + int i; - mutex_lock(&mgr->lock); - list_for_each_entry_safe(port, tmp, &mstb->ports, next) { - list_del(&port->next); - drm_dp_mst_topology_put_port(port); + n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1); + backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL); + if (!backtrace) + return; + + /* Try to find an existing entry for this backtrace */ + for (i = 0; i < history->len; i++) { + if (history->entries[i].backtrace == backtrace) { + entry = &history->entries[i]; + break; + } } - mutex_unlock(&mgr->lock); - /* drop any tx slots msg */ - mutex_lock(&mstb->mgr->qlock); - if (mstb->tx_slots[0]) { - mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[0] = NULL; - wake_tx = true; + /* Otherwise add one */ + if (!entry) { + struct drm_dp_mst_topology_ref_entry *new; + int new_len = history->len + 1; + + new = krealloc(history->entries, sizeof(*new) * new_len, + GFP_KERNEL); + if (!new) + return; + + entry = &new[history->len]; + history->len = new_len; + history->entries = new; + + entry->backtrace = backtrace; + entry->type = type; + entry->count = 0; } - if (mstb->tx_slots[1]) { - mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[1] = NULL; - wake_tx = true; + entry->count++; + entry->ts_nsec = ktime_get_ns(); +} + +static int +topology_ref_history_cmp(const void *a, const void *b) +{ + const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b; + + if (entry_a->ts_nsec > entry_b->ts_nsec) + return 1; + else if (entry_a->ts_nsec < entry_b->ts_nsec) + return -1; + else + return 0; +} + +static inline const char * +topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type) +{ + if (type == DRM_DP_MST_TOPOLOGY_REF_GET) + return "get"; + else + return "put"; +} + +static void +__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, + void *ptr, const char *type_str) +{ + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + int i; + 
+ if (!buf) + return; + + if (!history->len) + goto out; + + /* First, sort the list so that it goes from oldest to newest + * reference entry + */ + sort(history->entries, history->len, sizeof(*history->entries), + topology_ref_history_cmp, NULL); + + drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", + type_str, ptr); + + for (i = 0; i < history->len; i++) { + const struct drm_dp_mst_topology_ref_entry *entry = + &history->entries[i]; + ulong *entries; + uint nr_entries; + u64 ts_nsec = entry->ts_nsec; + u32 rem_nsec = do_div(ts_nsec, 1000000000); + + nr_entries = stack_depot_fetch(entry->backtrace, &entries); + stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + + drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s", + entry->count, + topology_ref_type_to_str(entry->type), + ts_nsec, rem_nsec / 1000, buf); } - mutex_unlock(&mstb->mgr->qlock); - if (wake_tx) - wake_up_all(&mstb->mgr->tx_waitq); + /* Now free the history, since this is the only time we expose it */ + kfree(history->entries); +out: + kfree(buf); +} - drm_dp_mst_put_mstb_malloc(mstb); +static __always_inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) +{ + __dump_topology_ref_history(&mstb->topology_ref_history, mstb, + "MSTB"); +} + +static __always_inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) +{ + __dump_topology_ref_history(&port->topology_ref_history, port, + "Port"); +} + +static __always_inline void +save_mstb_topology_ref(struct drm_dp_mst_branch *mstb, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); +} + +static __always_inline void +save_port_topology_ref(struct drm_dp_mst_port *port, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(port->mgr, &port->topology_ref_history, type); +} + +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->topology_ref_history_lock); +} + +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_unlock(&mgr->topology_ref_history_lock); +} +#else +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {} +static inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {} +#define save_mstb_topology_ref(mstb, type) +#define save_port_topology_ref(port, type) +#endif + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + + drm_dp_mst_dump_mstb_topology_history(mstb); + + INIT_LIST_HEAD(&mstb->destroy_next); + + /* + * This can get called under mgr->mutex, so we need to perform the + * actual destruction of the mstb in another worker + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1453,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) { - int ret = kref_get_unless_zero(&mstb->topology_kref); + int ret; - if (ret) - DRM_DEBUG("mstb %p (%d)\n", mstb, - 
kref_read(&mstb->topology_kref)); + topology_ref_history_lock(mstb->mgr); + ret = kref_get_unless_zero(&mstb->topology_kref); + if (ret) { + DRM_DEBUG("mstb %p (%d)\n", + mstb, kref_read(&mstb->topology_kref)); + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); + } + + topology_ref_history_unlock(mstb->mgr); return ret; } @@ -1478,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) */ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); WARN_ON(kref_read(&mstb->topology_kref) == 0); kref_get(&mstb->topology_kref); DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); + + topology_ref_history_unlock(mstb->mgr); } /** @@ -1498,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); - kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); -} - -static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) -{ - struct drm_dp_mst_branch *mstb; + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); - switch (old_pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mstb = port->mstb; - port->mstb = NULL; - drm_dp_mst_topology_put_mstb(mstb); - break; - } + topology_ref_history_unlock(mstb->mgr); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); } static void drm_dp_destroy_port(struct kref *kref) @@ -1527,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref) container_of(kref, struct drm_dp_mst_port, topology_kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; - if (!port->input) { - kfree(port->cached_edid); + drm_dp_mst_dump_port_topology_history(port); - /* - * The only time we don't have a connector - * on an output port is if the connector init - * fails. 
- */ - if (port->connector) { - /* we can't destroy the connector here, as - * we might be holding the mode_config.mutex - * from an EDID retrieval */ - - mutex_lock(&mgr->destroy_connector_lock); - list_add(&port->next, &mgr->destroy_connector_list); - mutex_unlock(&mgr->destroy_connector_lock); - schedule_work(&mgr->destroy_connector_work); - return; - } - /* no need to clean up vcpi - * as if we have no connector we never setup a vcpi */ - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + /* There's nothing that needs locking to destroy an input port yet */ + if (port->input) { + drm_dp_mst_put_port_malloc(port); + return; } - drm_dp_mst_put_port_malloc(port); + + kfree(port->cached_edid); + + /* + * we can't destroy the connector here, as we might be holding the + * mode_config.mutex from an EDID retrieval + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&port->next, &mgr->destroy_port_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1579,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) { - int ret = kref_get_unless_zero(&port->topology_kref); + int ret; - if (ret) - DRM_DEBUG("port %p (%d)\n", port, - kref_read(&port->topology_kref)); + topology_ref_history_lock(port->mgr); + ret = kref_get_unless_zero(&port->topology_kref); + if (ret) { + DRM_DEBUG("port %p (%d)\n", + port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + } + topology_ref_history_unlock(port->mgr); return ret; } @@ -1603,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + WARN_ON(kref_read(&port->topology_kref) == 0); kref_get(&port->topology_kref); DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + + topology_ref_history_unlock(port->mgr); } /** @@ -1621,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); + + topology_ref_history_unlock(port->mgr); kref_put(&port->topology_kref, drm_dp_destroy_port); } @@ -1739,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -/* - * return sends link address for new mstb - */ -static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) { - int ret; - u8 rad[6], lct; - bool send_link = false; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + struct drm_dp_mst_branch *mstb; + u8 rad[8], lct; + int ret = 0; + + if (port->pdt == new_pdt) + return 0; + + /* Teardown the old pdt, if there is one */ + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* + * If the new PDT would also have an i2c bus, don't bother + * with reregistering it + */ + if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + new_pdt == DP_PEER_DEVICE_SST_SINK) { + port->pdt = new_pdt; + return 0; + } + + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case 
DP_PEER_DEVICE_MST_BRANCHING: + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + break; + } + + port->pdt = new_pdt; switch (port->pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); break; + case DP_PEER_DEVICE_MST_BRANCHING: lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", port); + goto out; + } - port->mstb = drm_dp_add_mst_branch_device(lct, rad); - if (port->mstb) { - port->mstb->mgr = port->mgr; - port->mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - send_link = true; - } + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); + + /* And make sure we send a link address for this */ + ret = 1; break; } - return send_link; + +out: + if (ret < 0) + port->pdt = DP_PEER_DEVICE_NONE; + return ret; } /** @@ -1903,44 +2118,130 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); static void +drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + char proppath[255]; + int ret; + + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = mgr->cbs->add_connector(mgr, port, proppath); + if (!port->connector) { + ret = -ENOMEM; + goto error; + } + + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { + port->cached_edid = drm_get_edid(port->connector, + &port->aux.ddc); + drm_connector_set_tile_property(port->connector); + } + + mgr->cbs->register_connector(port->connector); + return; + +error: + DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); +} + +/* + * Drop a topology reference, and unlink the port from the in-memory topology + * layout + */ +static void +drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + mutex_lock(&mgr->lock); + list_del(&port->next); + mutex_unlock(&mgr->lock); + drm_dp_mst_topology_put_port(port); +} + +static struct drm_dp_mst_port * +drm_dp_mst_add_port(struct drm_device *dev, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, u8 port_number) +{ + struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); + + if (!port) + return NULL; + + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); + port->parent = mstb; + port->port_num = port_number; + port->mgr = mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev->dev; + port->aux.is_remote = true; + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + + return port; +} + +static int drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_device *dev, struct drm_dp_link_addr_reply_port *port_msg) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port 
*port; - bool ret; - bool created = false; - int old_pdt = 0; - int old_ddps = 0; + int old_ddps = 0, ret; + u8 new_pdt = DP_PEER_DEVICE_NONE; + bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); if (!port) { - port = kzalloc(sizeof(*port), GFP_KERNEL); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); if (!port) - return; - kref_init(&port->topology_kref); - kref_init(&port->malloc_kref); - port->parent = mstb; - port->port_num = port_msg->port_number; - port->mgr = mstb->mgr; - port->aux.name = "DPMST"; - port->aux.dev = dev->dev; - port->aux.is_remote = true; - - /* - * Make sure the memory allocation for our parent branch stays - * around until our own memory allocation is released + return -ENOMEM; + created = true; + changed = true; + } else if (!port->input && port_msg->input_port && port->connector) { + /* Since port->connector can't be changed here, we create a + * new port if input_port changes from 0 to 1 */ - drm_dp_mst_get_mstb_malloc(mstb); - + drm_dp_mst_topology_unlink_port(mgr, port); + drm_dp_mst_topology_put_port(port); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); + if (!port) + return -ENOMEM; + changed = true; created = true; - } else { - old_pdt = port->pdt; + } else if (port->input && !port_msg->input_port) { + changed = true; + } else if (port->connector) { + /* We're updating a port that's exposed to userspace, so do it + * under lock + */ + drm_modeset_lock(&mgr->base.lock, NULL); + old_ddps = port->ddps; + changed = port->ddps != port_msg->ddps || + (port->ddps && + (port->ldps != port_msg->legacy_device_plug_status || + port->dpcd_rev != port_msg->dpcd_revision || + port->mcs != port_msg->mcs || + port->pdt != port_msg->peer_device_type || + port->num_sdp_stream_sinks != + port_msg->num_sdp_stream_sinks)); } - port->pdt = port_msg->peer_device_type; port->input = port_msg->input_port; + if (!port->input) + new_pdt = port_msg->peer_device_type; port->mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; @@ -1951,78 +2252,104 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, /* manage mstb port lists with mgr lock - take a reference for this list */ if (created) { - mutex_lock(&mstb->mgr->lock); + mutex_lock(&mgr->lock); drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); - mutex_unlock(&mstb->mgr->lock); + mutex_unlock(&mgr->lock); } if (old_ddps != port->ddps) { if (port->ddps) { if (!port->input) { - drm_dp_send_enum_path_resources(mstb->mgr, - mstb, port); + drm_dp_send_enum_path_resources(mgr, mstb, + port); } } else { port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - - ret = drm_dp_port_setup_pdt(port); - if (ret == true) - drm_dp_send_link_address(mstb->mgr, port->mstb); + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + send_link_addr = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT on port %p: %d\n", + port, ret); + goto fail; } - if (created && !port->input) { - char proppath[255]; - - build_mst_prop_path(mstb, port->port_num, proppath, - sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, - port, - proppath); - if (!port->connector) { - /* remove it from the port list */ - mutex_lock(&mstb->mgr->lock); - list_del(&port->next); - mutex_unlock(&mstb->mgr->lock); - /* drop port list reference */ - 
drm_dp_mst_topology_put_port(port); - goto out; - } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, - &port->aux.ddc); - drm_connector_set_tile_property(port->connector); - } - (*mstb->mgr->cbs->register_connector)(port->connector); + /* + * If this port wasn't just created, then we're reprobing because + * we're coming out of suspend. In this case, always resend the link + * address if there's an MSTB on this port + */ + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + send_link_addr = true; + + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (!port->input) + drm_dp_mst_port_add_connector(mstb, port); + + if (send_link_addr && port->mstb) { + ret = drm_dp_send_link_address(mgr, port->mstb); + if (ret == 1) /* MSTB below us changed */ + changed = true; + else if (ret < 0) + goto fail_put; } -out: /* put reference to this port */ drm_dp_mst_topology_put_port(port); + return changed; + +fail: + drm_dp_mst_topology_unlink_port(mgr, port); + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); +fail_put: + drm_dp_mst_topology_put_port(port); + return ret; } static void drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, struct drm_dp_connection_status_notify *conn_stat) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_pdt; - int old_ddps; - bool dowork = false; + int old_ddps, ret; + u8 new_pdt; + bool dowork = false, create_connector = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); if (!port) return; + if (port->connector) { + if (!port->input && conn_stat->input_port) { + /* + * We can't remove a connector from an already exposed + * port, so just throw the port out and make sure we + * reprobe the link address of its parent MSTB + */ + drm_dp_mst_topology_unlink_port(mgr, port); + mstb->link_address_sent = false; + dowork = true; + goto out; + } + + /* Locking is only needed if the port's exposed to userspace */ + drm_modeset_lock(&mgr->base.lock, NULL); + } else if (port->input && !conn_stat->input_port) { + create_connector = true; + /* Reprobe link address so we get num_sdp_streams */ + mstb->link_address_sent = false; + dowork = true; + } + old_ddps = port->ddps; - old_pdt = port->pdt; - port->pdt = conn_stat->peer_device_type; + port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2034,17 +2361,27 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - if (drm_dp_port_setup_pdt(port)) - dowork = true; + new_pdt = port->input ?
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; + + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + dowork = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT for port %p: %d\n", + port, ret); + dowork = false; } + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (create_connector) + drm_dp_mst_port_add_connector(mstb, port); + +out: drm_dp_mst_topology_put_port(port); if (dowork) queue_work(system_long_wq, &mstb->mgr->work); - } static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, @@ -2130,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, return mstb; } -static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_port *port; - struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) - drm_dp_send_link_address(mgr, mstb); + int ret; + bool changed = false; + + if (!mstb->link_address_sent) { + ret = drm_dp_send_link_address(mgr, mstb); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; + } list_for_each_entry(port, &mstb->ports, next) { - if (port->input) - continue; + struct drm_dp_mst_branch *mstb_child = NULL; - if (!port->ddps) + if (port->input || !port->ddps) continue; - if (!port->available_pbn) + if (!port->available_pbn) { + drm_modeset_lock(&mgr->base.lock, NULL); drm_dp_send_enum_path_resources(mgr, mstb, port); + drm_modeset_unlock(&mgr->base.lock); + changed = true; + } - if (port->mstb) { + if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); - if (mstb_child) { - drm_dp_check_and_send_link_address(mgr, mstb_child); - drm_dp_mst_topology_put_mstb(mstb_child); - } + + if (mstb_child) { + ret = drm_dp_check_and_send_link_address(mgr, + mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; } } + + return changed; } static void drm_dp_mst_link_probe_work(struct work_struct *work) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_device *dev = mgr->dev; struct drm_dp_mst_branch *mstb; int ret; + mutex_lock(&mgr->probe_lock); + mutex_lock(&mgr->lock); mstb = mgr->mst_primary; if (mstb) { @@ -2173,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) mstb = NULL; } mutex_unlock(&mgr->lock); - if (mstb) { - drm_dp_check_and_send_link_address(mgr, mstb); - drm_dp_mst_topology_put_mstb(mstb); + if (!mstb) { + mutex_unlock(&mgr->probe_lock); + return; } + + ret = drm_dp_check_and_send_link_address(mgr, mstb); + drm_dp_mst_topology_put_mstb(mstb); + + mutex_unlock(&mgr->probe_lock); + if (ret) + drm_kms_helper_hotplug_event(dev); } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -2422,16 +2787,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) } } -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_link_address_ack_reply *reply; - int i, len, ret; + struct drm_dp_mst_port *port, *tmp; + int i, len, ret, port_mask = 0; + bool changed = false; txmsg = 
kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return; + return -ENOMEM; txmsg->dst = mstb; len = build_link_address(txmsg); @@ -2457,16 +2824,39 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, drm_dp_check_mstb_guid(mstb, reply->guid); - for (i = 0; i < reply->nports; i++) - drm_dp_mst_handle_link_address_port(mstb, mgr->dev, - &reply->ports[i]); + for (i = 0; i < reply->nports; i++) { + port_mask |= BIT(reply->ports[i].port_number); + ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, + &reply->ports[i]); + if (ret == 1) + changed = true; + else if (ret < 0) + goto out; + } - drm_kms_helper_hotplug_event(mgr->dev); + /* Prune any ports that are currently a part of mstb in our in-memory + * topology, but were not seen in this link address. Usually this + * means that they were removed while the topology was out of sync, + * e.g. during suspend/resume + */ + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + if (port_mask & BIT(port->port_num)) + continue; + + DRM_DEBUG_KMS("port %d was not in link address, removing\n", + port->port_num); + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + changed = true; + } + mutex_unlock(&mgr->lock); out: if (ret <= 0) mstb->link_address_sent = false; kfree(txmsg); + return ret < 0 ? ret : changed; } static int @@ -3071,6 +3461,23 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); +static void +drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + /* The link address will need to be re-sent on resume */ + mstb->link_address_sent = false; + + list_for_each_entry(port, &mstb->ports, next) { + /* The PBN for each port will also need to be re-probed */ + port->available_pbn = 0; + + if (port->mstb) + drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); + } +} + /** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend @@ -3084,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->up_req_work); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + flush_work(&mgr->delayed_destroy_work); + + mutex_lock(&mgr->lock); + if (mgr->mst_state && mgr->mst_primary) + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + mutex_unlock(&mgr->lock); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); /** * drm_dp_mst_topology_mgr_resume() - resume the MST manager * @mgr: manager to resume + * @sync: whether or not to perform topology reprobing synchronously * * This will fetch DPCD and see if the device is still there; * if it is, it will rewrite the MSTM control bits, and return. * - * if the device fails this returns -1, and the driver should do + * If the device fails this returns -1, and the driver should do * a full MST reprobe, in case we were undocked. + * + * During system resume (where it is assumed that the driver will be calling + * drm_atomic_helper_resume()) this function should be called beforehand with + * @sync set to true. In contexts like runtime resume where the driver is not + * expected to be calling drm_atomic_helper_resume(), this function should be + * called with @sync set to false in order to avoid deadlocking. + * + * Returns: -1 if the MST topology was removed while we were suspended, 0 + * otherwise.
*/ -int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync) { - int ret = 0; + int ret; + u8 guid[16]; mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; - if (mgr->mst_primary) { - int sret; - u8 guid[16]; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } - sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (sret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + goto out_fail; + } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); - if (ret < 0) { - DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + drm_dp_check_mstb_guid(mgr->mst_primary, guid); - /* Some hubs forget their guids after they resume */ - sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (sret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } - drm_dp_check_mstb_guid(mgr->mst_primary, guid); + /* + * For the final step of resuming the topology, we need to bring the + * state of our in-memory topology back into sync with reality. 
So, + * restart the probing process as if we're probing a new hub + */ + queue_work(system_long_wq, &mgr->work); + mutex_unlock(&mgr->lock); - ret = 0; - } else - ret = -1; + if (sync) { + DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); + flush_work(&mgr->work); + } -out_unlock: + return 0; + +out_fail: mutex_unlock(&mgr->lock); - return ret; + return -1; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); @@ -3256,12 +3690,78 @@ clear_down_rep_recv: return 0; } +static inline bool +drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_pending_up_req *up_req) +{ + struct drm_dp_mst_branch *mstb = NULL; + struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; + struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; + bool hotplug = false; + + if (hdr->broadcast) { + const u8 *guid = NULL; + + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) + guid = msg->u.conn_stat.guid; + else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) + guid = msg->u.resource_stat.guid; + + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); + } else { + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); + } + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr->lct); + return false; + } + + /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } + + drm_dp_mst_topology_put_mstb(mstb); + return hotplug; +} + +static void drm_dp_mst_up_req_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + up_req_work); + struct drm_dp_pending_up_req *up_req; + bool send_hotplug = false; + + mutex_lock(&mgr->probe_lock); + while (true) { + mutex_lock(&mgr->up_req_lock); + up_req = list_first_entry_or_null(&mgr->up_req_list, + struct drm_dp_pending_up_req, + next); + if (up_req) + list_del(&up_req->next); + mutex_unlock(&mgr->up_req_lock); + + if (!up_req) + break; + + send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); + kfree(up_req); + } + mutex_unlock(&mgr->probe_lock); + + if (send_hotplug) + drm_kms_helper_hotplug_event(mgr->dev); +} + static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { - struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; - struct drm_dp_mst_branch *mstb = NULL; - const u8 *guid; + struct drm_dp_pending_up_req *up_req; bool seqno; if (!drm_dp_get_one_sb_msg(mgr, true)) @@ -3270,56 +3770,53 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (!mgr->up_req_recv.have_eomt) return 0; - if (!hdr->broadcast) { - mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } + up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); + if (!up_req) { + DRM_ERROR("Not enough memory to process MST up req\n"); + return -ENOMEM; } + INIT_LIST_HEAD(&up_req->next); seqno = hdr->seqno; - drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) - guid = msg.u.conn_stat.guid; - else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) - guid = msg.u.resource_stat.guid; - else + if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && + up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { + DRM_DEBUG_KMS("Received 
unknown up req type, ignoring: %x\n", + up_req->msg.req_type); + kfree(up_req); goto out; - - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, - false); - - if (!mstb) { - mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } } - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + seqno, false); + + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + const struct drm_dp_connection_status_notify *conn_stat = + &up_req->msg.u.conn_stat; DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", - msg.u.conn_stat.port_number, - msg.u.conn_stat.legacy_device_plug_status, - msg.u.conn_stat.displayport_device_plug_status, - msg.u.conn_stat.message_capability_status, - msg.u.conn_stat.input_port, - msg.u.conn_stat.peer_device_type); + conn_stat->port_number, + conn_stat->legacy_device_plug_status, + conn_stat->displayport_device_plug_status, + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); + } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + const struct drm_dp_resource_status_notify *res_stat = + &up_req->msg.u.resource_stat; - drm_kms_helper_hotplug_event(mgr->dev); - } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", - msg.u.resource_stat.port_number, - msg.u.resource_stat.available_pbn); + res_stat->port_number, + res_stat->available_pbn); } - drm_dp_mst_topology_put_mstb(mstb); + up_req->hdr = *hdr; + mutex_lock(&mgr->up_req_lock); + list_add_tail(&up_req->next, &mgr->up_req_list); + mutex_unlock(&mgr->up_req_lock); + queue_work(system_long_wq, &mgr->up_req_work); + out: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -3366,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); /** * drm_dp_mst_detect_port() - get connection status for an MST port * @connector: DRM connector for this port + * @ctx: The acquisition context to use for grabbing locks * @mgr: manager for this port - * @port: unverified pointer to a port + * @port: pointer to a port * - * This returns the current connection state for a port. It validates the - * port pointer still exists so the caller doesn't require a reference + * This returns the current connection state for a port. 
*/ -enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, - struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { - enum drm_connector_status status = connector_status_disconnected; + int ret; /* we need to search for the port in the mgr in case it's gone */ port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return connector_status_disconnected; + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + goto out; + + ret = connector_status_disconnected; + if (!port->ddps) goto out; @@ -3391,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_SST_SINK: - status = connector_status_connected; + ret = connector_status_connected; /* for logical ports - cache the EDID */ if (port->port_num >= 8 && !port->cached_edid) { port->cached_edid = drm_get_edid(connector, &port->aux.ddc); @@ -3399,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) - status = connector_status_connected; + ret = connector_status_connected; break; } out: drm_dp_mst_topology_put_port(port); - return status; + return ret; } EXPORT_SYMBOL(drm_dp_mst_detect_port); @@ -3540,7 +4046,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; - int prev_slots, req_slots, ret; + int prev_slots, req_slots; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) @@ -3587,8 +4093,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, } vcpi->vcpi = req_slots; - ret = req_slots; - return ret; + return req_slots; } EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); @@ -3995,34 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } -static void drm_dp_destroy_connector_work(struct work_struct *work) +static inline void +drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); - struct drm_dp_mst_port *port; - bool send_hotplug = false; + if (port->connector) + port->mgr->cbs->destroy_connector(port->mgr, port->connector); + + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_mst_put_port_malloc(port); +} + +static inline void +drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + } + mutex_unlock(&mgr->lock); + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up_all(&mstb->mgr->tx_waitq); + + drm_dp_mst_put_mstb_malloc(mstb); +} + +static void drm_dp_delayed_destroy_work(struct work_struct *work) +{ + struct 
drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + delayed_destroy_work); + bool send_hotplug = false, go_again; + /* * Not a regular list traverse as we have to drop the destroy - * connector lock before destroying the connector, to avoid AB->BA + * connector lock before destroying the mstb/port, to avoid AB->BA * ordering between this lock and the config mutex. */ - for (;;) { - mutex_lock(&mgr->destroy_connector_lock); - port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); - if (!port) { - mutex_unlock(&mgr->destroy_connector_lock); - break; + do { + go_again = false; + + for (;;) { + struct drm_dp_mst_branch *mstb; + + mutex_lock(&mgr->delayed_destroy_lock); + mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, + struct drm_dp_mst_branch, + destroy_next); + if (mstb) + list_del(&mstb->destroy_next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!mstb) + break; + + drm_dp_delayed_destroy_mstb(mstb); + go_again = true; } - list_del(&port->next); - mutex_unlock(&mgr->destroy_connector_lock); - mgr->cbs->destroy_connector(mgr, port->connector); + for (;;) { + struct drm_dp_mst_port *port; - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + mutex_lock(&mgr->delayed_destroy_lock); + port = list_first_entry_or_null(&mgr->destroy_port_list, + struct drm_dp_mst_port, + next); + if (port) + list_del(&port->next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!port) + break; + + drm_dp_delayed_destroy_port(port); + send_hotplug = true; + go_again = true; + } + } while (go_again); - drm_dp_mst_put_port_malloc(port); - send_hotplug = true; - } if (send_hotplug) drm_kms_helper_hotplug_event(mgr->dev); } @@ -4184,9 +4758,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) { - struct drm_device *dev = mgr->dev; - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); } EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); @@ -4212,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->lock); mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); - mutex_init(&mgr->destroy_connector_lock); + mutex_init(&mgr->delayed_destroy_lock); + mutex_init(&mgr->up_req_lock); + mutex_init(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_init(&mgr->topology_ref_history_lock); +#endif INIT_LIST_HEAD(&mgr->tx_msg_downq); - INIT_LIST_HEAD(&mgr->destroy_connector_list); + INIT_LIST_HEAD(&mgr->destroy_port_list); + INIT_LIST_HEAD(&mgr->destroy_branch_device_list); + INIT_LIST_HEAD(&mgr->up_req_list); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->tx_work, drm_dp_tx_work); - INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); + INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); + INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); init_waitqueue_head(&mgr->tx_waitq); mgr->dev = dev; mgr->aux = aux; @@ -4258,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { drm_dp_mst_topology_mgr_set_mst(mgr, false); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + cancel_work_sync(&mgr->delayed_destroy_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); mgr->payloads = 
NULL; @@ -4270,10 +4849,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) drm_atomic_private_obj_fini(&mgr->base); mgr->funcs = NULL; - mutex_destroy(&mgr->destroy_connector_lock); + mutex_destroy(&mgr->delayed_destroy_lock); mutex_destroy(&mgr->payload_lock); mutex_destroy(&mgr->qlock); mutex_destroy(&mgr->lock); + mutex_destroy(&mgr->up_req_lock); + mutex_destroy(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_destroy(&mgr->topology_ref_history_lock); +#endif } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 0552175313cb..474ac04d5600 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -159,6 +159,9 @@ static const struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + /* Lenovo G50 */ + { "SDC", 18514, EDID_QUIRK_FORCE_6BPC }, + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, @@ -2189,7 +2192,8 @@ static int standard_timing_level(struct edid *edid) return LEVEL_CVT; if (drm_gtf2_hbreak(edid)) return LEVEL_GTF2; - return LEVEL_GTF; + if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) + return LEVEL_GTF; } return LEVEL_DMT; } @@ -3205,18 +3209,10 @@ static bool drm_valid_cea_vic(u8 vic) return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes); } -/** - * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to - * the input VIC from the CEA mode list - * @video_code: ID given to each of the CEA modes - * - * Returns picture aspect ratio - */ -enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) +static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) { return edid_cea_modes[video_code].picture_aspect_ratio; } -EXPORT_SYMBOL(drm_get_cea_aspect_ratio); /* * Calculate the alternate clock for HDMI modes (those from the HDMI vendor @@ -5168,6 +5164,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata); +static u8 drm_mode_hdmi_vic(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + bool has_hdmi_infoframe = connector ? + connector->display_info.has_hdmi_infoframe : false; + + if (!has_hdmi_infoframe) + return 0; + + /* No HDMI VIC when signalling 3D video format */ + if (mode->flags & DRM_MODE_FLAG_3D_MASK) + return 0; + + return drm_match_hdmi_mode(mode); +} + +static u8 drm_mode_cea_vic(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + u8 vic; + + /* + * HDMI spec says if a mode is found in HDMI 1.4b 4K modes + * we should send its VIC in vendor infoframes, else send the + * VIC in AVI infoframes. Lets check if this mode is present in + * HDMI 1.4b 4K modes + */ + if (drm_mode_hdmi_vic(connector, mode)) + return 0; + + vic = drm_match_cea_mode(mode); + + /* + * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but + * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we + * have to make sure we dont break HDMI 1.4 sinks. 
+ */ + if (!is_hdmi2_sink(connector) && vic > 64) + return 0; + + return vic; +} + /** * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with * data from a DRM display mode @@ -5195,29 +5234,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, if (mode->flags & DRM_MODE_FLAG_DBLCLK) frame->pixel_repeat = 1; - frame->video_code = drm_match_cea_mode(mode); - - /* - * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but - * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we - * have to make sure we dont break HDMI 1.4 sinks. - */ - if (!is_hdmi2_sink(connector) && frame->video_code > 64) - frame->video_code = 0; - - /* - * HDMI spec says if a mode is found in HDMI 1.4b 4K modes - * we should send its VIC in vendor infoframes, else send the - * VIC in AVI infoframes. Lets check if this mode is present in - * HDMI 1.4b 4K modes - */ - if (frame->video_code) { - u8 vendor_if_vic = drm_match_hdmi_mode(mode); - bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK; - - if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d) - frame->video_code = 0; - } + frame->video_code = drm_mode_cea_vic(connector, mode); frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; @@ -5382,6 +5399,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range); +/** + * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe + * bar information + * @frame: HDMI AVI infoframe + * @conn_state: connector state + */ +void +drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state) +{ + frame->right_bar = conn_state->tv.margins.right; + frame->left_bar = conn_state->tv.margins.left; + frame->top_bar = conn_state->tv.margins.top; + frame->bottom_bar = conn_state->tv.margins.bottom; +} +EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars); + static enum hdmi_3d_structure s3d_structure_from_display_mode(const struct drm_display_mode *mode) { @@ -5434,8 +5468,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, bool has_hdmi_infoframe = connector ? connector->display_info.has_hdmi_infoframe : false; int err; - u32 s3d_flags; - u8 vic; if (!frame || !mode) return -EINVAL; @@ -5443,8 +5475,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, if (!has_hdmi_infoframe) return -EINVAL; - vic = drm_match_hdmi_mode(mode); - s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK; + err = hdmi_vendor_infoframe_init(frame); + if (err < 0) + return err; /* * Even if it's not absolutely necessary to send the infoframe @@ -5455,15 +5488,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, * mode if the source simply stops sending the infoframe when * it wants to switch from 3D to 2D. */ - - if (vic && s3d_flags) - return -EINVAL; - - err = hdmi_vendor_infoframe_init(frame); - if (err < 0) - return err; - - frame->vic = vic; + frame->vic = drm_mode_hdmi_vic(connector, mode); frame->s3d_struct = s3d_structure_from_display_mode(mode); return 0; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index b75ae8555baf..8ebeccdeed23 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -92,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * * Drivers that support a dumb buffer with a virtual address and mmap support, * should try out the generic fbdev emulation using drm_fbdev_generic_setup(). 
+ * It will automatically set up deferred I/O if the driver requires a shadow + * buffer. * - * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it - * down by calling drm_fb_helper_fbdev_teardown(). + * For other drivers, setup fbdev emulation by calling + * drm_fb_helper_fbdev_setup() and tear it down by calling + * drm_fb_helper_fbdev_teardown(). * * At runtime drivers should restore the fbdev console by using * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. @@ -127,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * always run in process context since the fb_*() function could be running in * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io * callback it will also schedule dirty_work with the damage collected from the - * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup - * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()). + * mmap page writes. + * + * Deferred I/O is not compatible with SHMEM. Such drivers should request an + * fbdev shadow buffer and call drm_fbdev_generic_setup() instead. */ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) @@ -680,49 +685,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info, EXPORT_SYMBOL(drm_fb_helper_deferred_io); /** - * drm_fb_helper_defio_init - fbdev deferred I/O initialization - * @fb_helper: driver-allocated fbdev helper - * - * This function allocates &fb_deferred_io, sets callback to - * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init(). - * It should be called from the &drm_fb_helper_funcs->fb_probe callback. - * drm_fb_helper_fbdev_teardown() cleans up deferred I/O. - * - * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done - * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby - * affect other instances of that &fb_ops. - * - * Returns: - * 0 on success or a negative error code on failure. - */ -int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) -{ - struct fb_info *info = fb_helper->fbdev; - struct fb_deferred_io *fbdefio; - struct fb_ops *fbops; - - fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL); - fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); - if (!fbdefio || !fbops) { - kfree(fbdefio); - kfree(fbops); - return -ENOMEM; - } - - info->fbdefio = fbdefio; - fbdefio->delay = msecs_to_jiffies(50); - fbdefio->deferred_io = drm_fb_helper_deferred_io; - - *fbops = *info->fbops; - info->fbops = fbops; - - fb_deferred_io_init(info); - - return 0; -} -EXPORT_SYMBOL(drm_fb_helper_defio_init); - -/** * drm_fb_helper_sys_read - wrapper around fb_sys_read * @info: fb_info struct pointer * @buf: userspace buffer to read from framebuffer memory @@ -2356,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { * * Drivers that set the dirty callback on their framebuffer will get a shadow * fbdev buffer that is blitted onto the real buffer. This is done in order to - * make deferred I/O work with all kinds of buffers. + * make deferred I/O work with all kinds of buffers. A shadow buffer can be + * requested explicitly by setting struct drm_mode_config.prefer_shadow or + * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is + * required to use generic fbdev emulation with SHMEM helpers. * * This function is safe to call even when there are no connectors present. * Setup will be retried on the next hotplug event. 
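The drm_fb_helper.c changes above drop drm_fb_helper_defio_init() and point drivers at drm_fbdev_generic_setup() with an explicit shadow buffer instead. A minimal sketch of that setup path, assuming a hypothetical driver init function (only the mode_config field and drm_fbdev_generic_setup() come from the diff; the rest is illustrative):

#include <drm/drm_device.h>
#include <drm/drm_fb_helper.h>

/* Hypothetical probe-time helper; error handling elided. */
static void sketch_fbdev_setup(struct drm_device *dev)
{
	/*
	 * Request a shadow buffer for the emulated fbdev; per the updated
	 * comments this is required for SHMEM-backed framebuffers.
	 */
	dev->mode_config.prefer_shadow_fbdev = true;

	/* Enable generic fbdev emulation with a preferred bpp of 32. */
	drm_fbdev_generic_setup(dev, 32);
}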
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6854f5867d51..2f2b889096b0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1099,22 +1099,34 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma) { struct drm_device *dev = obj->dev; + int ret; /* Check for valid size. */ if (obj_size < vma->vm_end - vma->vm_start) return -EINVAL; - if (obj->funcs && obj->funcs->vm_ops) - vma->vm_ops = obj->funcs->vm_ops; - else if (dev->driver->gem_vm_ops) - vma->vm_ops = dev->driver->gem_vm_ops; - else - return -EINVAL; + if (obj->funcs && obj->funcs->mmap) { + /* Remove the fake offset */ + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); + + ret = obj->funcs->mmap(obj, vma); + if (ret) + return ret; + WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); + } else { + if (obj->funcs && obj->funcs->vm_ops) + vma->vm_ops = obj->funcs->vm_ops; + else if (dev->driver->gem_vm_ops) + vma->vm_ops = dev->driver->gem_vm_ops; + else + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + } - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = obj; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); /* Take a ref for this mapping of the object, so that the fault * handler can dereference the mmap offset's pointer to the object. diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index f5918707672f..0810d3ef6961 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -32,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /** @@ -505,39 +505,30 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) drm_gem_vm_close(vma); } -const struct vm_operations_struct drm_gem_shmem_vm_ops = { +static const struct vm_operations_struct drm_gem_shmem_vm_ops = { .fault = drm_gem_shmem_fault, .open = drm_gem_shmem_vm_open, .close = drm_gem_shmem_vm_close, }; -EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops); /** * drm_gem_shmem_mmap - Memory-map a shmem GEM object - * @filp: File object + * @obj: gem object * @vma: VMA for the area to be mapped * * This function implements an augmented version of the GEM DRM file mmap * operation for shmem objects. Drivers which employ the shmem helpers should - * use this function as their &file_operations.mmap handler in the DRM device file's - * file_operations structure. - * - * Instead of directly referencing this function, drivers should use the - * DEFINE_DRM_GEM_SHMEM_FOPS() macro. + * use this function as their &drm_gem_object_funcs.mmap handler. * * Returns: * 0 on success or a negative error code on failure. 
*/ -int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) +int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct drm_gem_shmem_object *shmem; int ret; - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - shmem = to_drm_gem_shmem_obj(vma->vm_private_data); + shmem = to_drm_gem_shmem_obj(obj); ret = drm_gem_shmem_get_pages(shmem); if (ret) { @@ -545,12 +536,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } - /* VM_PFNMAP was set by drm_gem_mmap() */ - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; - - /* Remove the fake offset */ - vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node); + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_ops = &drm_gem_shmem_vm_ops; return 0; } diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index a534104d8bee..7412bfc5c05a 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -52,5 +52,22 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, } EXPORT_SYMBOL(drm_gem_ttm_print_info); +/** + * drm_gem_ttm_mmap() - mmap &ttm_buffer_object + * @gem: GEM object. + * @vma: vm area. + * + * This function can be used as &drm_gem_object_funcs.mmap + * callback. + */ +int drm_gem_ttm_mmap(struct drm_gem_object *gem, + struct vm_area_struct *vma) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + + return ttm_bo_mmap_obj(vma, bo); +} +EXPORT_SYMBOL(drm_gem_ttm_mmap); + MODULE_DESCRIPTION("DRM gem ttm helpers"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index dc7942981f4a..666cb4c22bb9 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -3,10 +3,13 @@ #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_file.h> +#include <drm/drm_framebuffer.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_mode.h> +#include <drm/drm_plane.h> #include <drm/drm_prime.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/ttm/ttm_page_alloc.h> static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; @@ -552,13 +555,6 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, *pl = gbo->placement; } -static int drm_gem_vram_bo_driver_verify_access(struct drm_gem_vram_object *gbo, - struct file *filp) -{ - return drm_vma_node_verify_access(&gbo->bo.base.vma_node, - filp->private_data); -} - static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, bool evict, struct ttm_mem_reg *new_mem) @@ -654,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); /* + * Helpers for struct drm_plane_helper_funcs + */ + +/** + * drm_gem_vram_plane_helper_prepare_fb() - \ + * Implements &struct drm_plane_helper_funcs.prepare_fb + * @plane: a DRM plane + * @new_state: the plane's new state + * + * During plane updates, this function pins the GEM VRAM + * objects of the plane's new framebuffer to VRAM. Call + * drm_gem_vram_plane_helper_cleanup_fb() to unpin them. + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. 
+ */ +int +drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + size_t i; + struct drm_gem_vram_object *gbo; + int ret; + + if (!new_state->fb) + return 0; + + for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) { + if (!new_state->fb->obj[i]) + continue; + gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); + if (ret) + goto err_drm_gem_vram_unpin; + } + + return 0; + +err_drm_gem_vram_unpin: + while (i) { + --i; + gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); + drm_gem_vram_unpin(gbo); + } + return ret; +} +EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb); + +/** + * drm_gem_vram_plane_helper_cleanup_fb() - \ + * Implements &struct drm_plane_helper_funcs.cleanup_fb + * @plane: a DRM plane + * @old_state: the plane's old state + * + * During plane updates, this function unpins the GEM VRAM + * objects of the plane's old framebuffer from VRAM. Complements + * drm_gem_vram_plane_helper_prepare_fb(). + */ +void +drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + size_t i; + struct drm_gem_vram_object *gbo; + + if (!old_state->fb) + return; + + for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) { + if (!old_state->fb->obj[i]) + continue; + gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]); + drm_gem_vram_unpin(gbo); + } +} +EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); + +/* + * Helpers for struct drm_simple_display_pipe_funcs + */ + +/** + * drm_gem_vram_simple_display_pipe_prepare_fb() - \ + * Implements &struct drm_simple_display_pipe_funcs.prepare_fb + * @pipe: a simple display pipe + * @new_state: the plane's new state + * + * During plane updates, this function pins the GEM VRAM + * objects of the plane's new framebuffer to VRAM. Call + * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. + */ +int drm_gem_vram_simple_display_pipe_prepare_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *new_state) +{ + return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); +} +EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); + +/** + * drm_gem_vram_simple_display_pipe_cleanup_fb() - \ + * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb + * @pipe: a simple display pipe + * @old_state: the plane's old state + * + * During plane updates, this function unpins the GEM VRAM + * objects of the plane's old framebuffer from VRAM. Complements + * drm_gem_vram_simple_display_pipe_prepare_fb(). + */ +void drm_gem_vram_simple_display_pipe_cleanup_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state) +{ + drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state); +} +EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb); + +/* * PRIME helpers */ @@ -737,6 +856,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { .unpin = drm_gem_vram_object_unpin, .vmap = drm_gem_vram_object_vmap, .vunmap = drm_gem_vram_object_vunmap, + .mmap = drm_gem_ttm_mmap, .print_info = drm_gem_ttm_print_info, }; @@ -822,20 +942,6 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo, drm_gem_vram_bo_driver_evict_flags(gbo, placement); } -static int bo_driver_verify_access(struct ttm_buffer_object *bo, - struct file *filp) -{ - struct drm_gem_vram_object *gbo; - - /* TTM may pass BOs that are not GEM VRAM BOs. 
*/ - if (!drm_is_gem_vram(bo)) - return -EINVAL; - - gbo = drm_gem_vram_of_bo(bo); - - return drm_gem_vram_bo_driver_verify_access(gbo, filp); -} - static void bo_driver_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_mem_reg *new_mem) @@ -892,7 +998,6 @@ static struct ttm_bo_driver bo_driver = { .init_mem_type = bo_driver_init_mem_type, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = bo_driver_evict_flags, - .verify_access = bo_driver_verify_access, .move_notify = bo_driver_move_notify, .io_mem_reserve = bo_driver_io_mem_reserve, .io_mem_free = bo_driver_io_mem_free, @@ -908,12 +1013,11 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_vram_mm *vmm = node->minor->dev->vram_mm; struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv; - struct ttm_bo_global *glob = vmm->bdev.glob; struct drm_printer p = drm_seq_file_printer(m); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); drm_mm_print(mm, &p); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } @@ -971,12 +1075,6 @@ static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) ttm_bo_device_release(&vmm->bdev); } -static int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma, - struct drm_vram_mm *vmm) -{ - return ttm_bo_mmap(filp, vma, &vmm->bdev); -} - /* * Helpers for integration with struct drm_device */ @@ -1032,30 +1130,3 @@ void drm_vram_helper_release_mm(struct drm_device *dev) dev->vram_mm = NULL; } EXPORT_SYMBOL(drm_vram_helper_release_mm); - -/* - * Helpers for &struct file_operations - */ - -/** - * drm_vram_mm_file_operations_mmap() - \ - Implements &struct file_operations.mmap() - * @filp: the mapping's file structure - * @vma: the mapping's memory area - * - * Returns: - * 0 on success, or - * a negative error code otherwise. - */ -int drm_vram_mm_file_operations_mmap( - struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv = filp->private_data; - struct drm_device *dev = file_priv->minor->dev; - - if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized")) - return -EINVAL; - - return drm_vram_mm_mmap(filp, vma, dev->vram_mm); -} -EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index ccfb5b33c5e3..e34058c721be 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -1021,7 +1021,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd, unsigned int i; for (i = 0; i < len; i++) - data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); + data[i] = (buf[i] << 1) | (buf[i + 1] >> 7); } MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 7bc03c3c154f..3b570a404933 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -428,8 +428,6 @@ EXPORT_SYMBOL(drm_mode_config_init); * Note that since this /should/ happen single-threaded at driver/device * teardown time, no locking is required. It's the driver's job to ensure that * this guarantee actually holds true. 
- * - * FIXME: cleanup any dangling user buffer objects too */ void drm_mode_config_cleanup(struct drm_device *dev) { diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 0a2316e0e812..0814211b0f3f 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -713,6 +713,15 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct file *fil; int ret; + if (obj->funcs && obj->funcs->mmap) { + ret = obj->funcs->mmap(obj, vma); + if (ret) + return ret; + vma->vm_private_data = obj; + drm_gem_object_get(obj); + return 0; + } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); fil = kzalloc(sizeof(*fil), GFP_KERNEL); if (!priv || !fil) { diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c index 68f4765a5896..dd33fec5aabd 100644 --- a/drivers/gpu/drm/drm_self_refresh_helper.c +++ b/drivers/gpu/drm/drm_self_refresh_helper.c @@ -133,29 +133,33 @@ out_drop_locks: * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages * @state: the state which has just been applied to hardware * @commit_time_ms: the amount of time in ms that this commit took to complete + * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in + * new state * * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will * update the average entry/exit self refresh times on self refresh transitions. * These averages will be used when calculating how long to delay before * entering self refresh mode after activity. */ -void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, - unsigned int commit_time_ms) +void +drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, + unsigned int commit_time_ms, + unsigned int new_self_refresh_mask) { struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc_state *old_crtc_state; int i; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + bool new_self_refresh_active = new_self_refresh_mask & BIT(i); struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; struct ewma_psr_time *time; if (old_crtc_state->self_refresh_active == - new_crtc_state->self_refresh_active) + new_self_refresh_active) continue; - if (new_crtc_state->self_refresh_active) + if (new_self_refresh_active) time = &sr_data->entry_avg_ms; else time = &sr_data->exit_avg_ms; diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 046055719245..15fb516ae2d8 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -43,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc, /* Anything goes */ return MODE_OK; - return pipe->funcs->mode_valid(crtc, mode); + return pipe->funcs->mode_valid(pipe, mode); } static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 9ec334663c2d..669c93fe2500 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1280,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->pad != 0) + if (args->flags != 0) return -EINVAL; if (args->count_handles == 0) @@ -1351,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, if 
(!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->pad != 0) + if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) return -EINVAL; if (args->count_handles == 0) @@ -1372,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, fence = drm_syncobj_fence_get(syncobjs[i]); chain = to_dma_fence_chain(fence); if (chain) { - struct dma_fence *iter, *last_signaled = NULL; - - dma_fence_chain_for_each(iter, fence) { - if (iter->context != fence->context) { - dma_fence_put(iter); - /* It is most likely that timeline has - * unorder points. */ - break; + struct dma_fence *iter, *last_signaled = + dma_fence_get(fence); + + if (args->flags & + DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) { + point = fence->seqno; + } else { + dma_fence_chain_for_each(iter, fence) { + if (iter->context != fence->context) { + dma_fence_put(iter); + /* It is most likely that timeline has + * unorder points. */ + break; + } + dma_fence_put(last_signaled); + last_signaled = dma_fence_get(iter); } - dma_fence_put(last_signaled); - last_signaled = dma_fence_get(iter); + point = dma_fence_is_signaled(last_signaled) ? + last_signaled->seqno : + to_dma_fence_chain(last_signaled)->prev_seqno; } - point = dma_fence_is_signaled(last_signaled) ? - last_signaled->seqno : - to_dma_fence_chain(last_signaled)->prev_seqno; dma_fence_put(last_signaled); } else { point = 0; } + dma_fence_put(fence); ret = copy_to_user(&points[i], &point, sizeof(uint64_t)); ret = ret ? -EFAULT : 0; if (ret) diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 4f7962b6427b..1659b13b178c 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1610,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, unsigned int flags, pipe, high_pipe; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return -EINVAL; @@ -1876,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, return -EOPNOTSUPP; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); if (!crtc) @@ -1934,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, return -EOPNOTSUPP; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); if (!crtc) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 698db540972c..648cf0207309 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit) etnaviv_cmdbuf_get_va(&submit->cmdbuf, &gpu->mmu_context->cmdbuf_mapping)); + mutex_unlock(&gpu->mmu_context->lock); + /* Reserve space for the bomap */ if (n_bomap_pages) { bomap_start = bomap = iter.data; @@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit) obj->base.size); } - mutex_unlock(&gpu->mmu_context->lock); - etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index 043111a1d60c..f8bf488e9d71 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, 
void *bu memcpy(buf, v2_context->mtlb_cpu, SZ_4K); buf += SZ_4K; - for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K) - if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) + for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) + if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) { memcpy(buf, v2_context->stlb_cpu[i], SZ_4K); + buf += SZ_4K; + } } static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 35ebae6a1be7..3607d348c298 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global, ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping, global->memory_base); - if (ret) { - global->ops->free(ctx); - return NULL; + if (ret) + goto out_free; + + if (global->version == ETNAVIV_IOMMU_V1 && + ctx->cmdbuf_mapping.iova > 0x80000000) { + dev_err(global->dev, + "command buffer outside valid memory window\n"); + goto out_unmap; } return ctx; + +out_unmap: + etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping); +out_free: + global->ops->free(ctx); + return NULL; } void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 7b24338fad3c..6cfdb95fef2f 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc, struct mixer_context *ctx = crtc->ctx; int width = mode->hdisplay, height = mode->vdisplay, i; - struct { + static const struct { int hdisplay, vdisplay, htotal, vtotal, scan_val; - } static const modes[] = { + } modes[] = { { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD }, { 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD }, { 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD }, diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index f56852a503e8..8b784947ed3b 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct gma_clock_t clock; + memset(&clock, 0, sizeof(clock)); + switch (refclk) { case 27000: if (target < 200000) { diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 03023fa0fb6f..f350ac1ead18 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev, return; } - /*create a new connetor*/ + /*create a new connector*/ dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL); if (!dsi_connector) { DRM_ERROR("No memory"); diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 167c10767dd4..900e5499249d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit, s32 freq_error, min_error = 100000; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; @@ -185,6 +186,7 @@ static bool 
mrst_lvds_find_best_pll(const struct gma_limit_t *limit, int err = target; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index cc4c41748cfb..6527a97f68a3 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, { struct drm_plane_state *state = plane->state; u32 reg; - int ret; s64 gpu_addr = 0; unsigned int line_l; struct hibmc_drm_private *priv = plane->dev->dev_private; @@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, hibmc_fb = to_hibmc_framebuffer(state->fb); gbo = drm_gem_vram_of_gem(hibmc_fb->obj); - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) { - DRM_ERROR("failed to pin bo: %d", ret); - return; - } gpu_addr = drm_gem_vram_offset(gbo); - if (gpu_addr < 0) { - drm_gem_vram_unpin(gbo); - return; - } + if (WARN_ON_ONCE(gpu_addr < 0)) + return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); @@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = { }; static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = { + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, .atomic_check = hibmc_plane_atomic_check, .atomic_update = hibmc_plane_atomic_update, }; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 4f52c83b9b4c..2fd4ca91a62d 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -26,10 +26,7 @@ #include "hibmc_drm_drv.h" #include "hibmc_drm_regs.h" -static const struct file_operations hibmc_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(hibmc_fops); static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) { diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 6c218bace2ce..a63790d32d75 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -806,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) tda998x_edid_delay_start(priv); } else { schedule_work(&priv->detect_work); - cec_notifier_set_phys_addr(priv->cec_notify, - CEC_PHYS_ADDR_INVALID); + cec_notifier_phys_addr_invalidate( + priv->cec_notify); } handled = true; @@ -1791,8 +1791,7 @@ static void tda998x_destroy(struct device *dev) i2c_unregister_device(priv->cec); - if (priv->cec_notify) - cec_notifier_put(priv->cec_notify); + cec_notifier_conn_unregister(priv->cec_notify); } static int tda998x_create(struct device *dev) @@ -1917,7 +1916,7 @@ static int tda998x_create(struct device *dev) cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD); } - priv->cec_notify = cec_notifier_get(dev); + priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL); if (!priv->cec_notify) { ret = -ENOMEM; goto fail; diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index e28face9fd9f..a1cf437d2e99 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -37,7 +37,6 @@ config DRM_I915_DEBUG select DRM_I915_SELFTEST select 
DRM_I915_DEBUG_RUNTIME_PM select DRM_I915_DEBUG_MMIO - select BROKEN # for prototype uAPI default n help Choose this option to turn on extra driver debugging that may affect diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8c6c3690e929..9e9593965a9a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -7640,7 +7640,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) if (!intel_dp->can_mst) continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, + true); if (ret) { intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 21e81830d525..9fc606c6dbee 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -407,20 +407,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) return ret; } -static enum drm_connector_status -intel_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_dp *intel_dp = intel_connector->mst_port; - - if (drm_connector_is_unregistered(connector)) - return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, - intel_connector->port); -} - static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { - .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, @@ -481,11 +468,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c return &intel_dp->mst_encoders[crtc->pipe]->base.base; } +static int +intel_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + if (drm_connector_is_unregistered(connector)) + return connector_status_disconnected; + + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, + intel_connector->port); +} + static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, .atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_check = intel_dp_mst_atomic_check, + .detect_ctx = intel_dp_mst_detect, }; static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 17f395672e5e..1284f47303fa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -274,6 +274,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) free_engines(rcu_access_pointer(ctx->engines)); mutex_destroy(&ctx->engines_mutex); + kfree(ctx->jump_whitelist); + if (ctx->timeline) intel_timeline_put(ctx->timeline); @@ -581,6 +583,9 @@ __create_context(struct drm_i915_private *i915) for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; + ctx->jump_whitelist = NULL; + ctx->jump_whitelist_cmds = 0; + spin_lock(&i915->gem.contexts.lock); 
list_add_tail(&ctx->link, &i915->gem.contexts.list); spin_unlock(&i915->gem.contexts.lock); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 69df5459c350..c060bc428f49 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -168,6 +168,13 @@ struct i915_gem_context { */ struct radix_tree_root handles_vma; + /** jump_whitelist: Bit array for tracking cmds during cmdparsing + * Guarded by struct_mutex + */ + unsigned long *jump_whitelist; + /** jump_whitelist_cmds: No of cmd slots available */ + u32 jump_whitelist_cmds; + /** * @name: arbitrary name, used for user debug * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index e4f5c269150a..f0998f1225af 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -298,7 +298,9 @@ static inline u64 gen8_noncanonical_addr(u64 address) static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { - return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len; + return intel_engine_requires_cmd_parser(eb->engine) || + (intel_engine_using_cmd_parser(eb->engine) && + eb->args->batch_len); } static int eb_create(struct i915_execbuffer *eb) @@ -1990,40 +1992,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) return 0; } -static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) +static struct i915_vma * +shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = eb->i915; + struct i915_vma * const vma = *eb->vma; + struct i915_address_space *vm; + u64 flags; + + /* + * PPGTT backed shadow buffers must be mapped RO, to prevent + * post-scan tampering + */ + if (CMDPARSER_USES_GGTT(dev_priv)) { + flags = PIN_GLOBAL; + vm = &dev_priv->ggtt.vm; + } else if (vma->vm->has_read_only) { + flags = PIN_USER; + vm = vma->vm; + i915_gem_object_set_readonly(obj); + } else { + DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n"); + return ERR_PTR(-EINVAL); + } + + return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags); +} + +static struct i915_vma *eb_parse(struct i915_execbuffer *eb) { struct intel_engine_pool_node *pool; struct i915_vma *vma; + u64 batch_start; + u64 shadow_batch_start; int err; pool = intel_engine_get_pool(eb->engine, eb->batch_len); if (IS_ERR(pool)) return ERR_CAST(pool); - err = intel_engine_cmd_parser(eb->engine, + vma = shadow_batch_pin(eb, pool->obj); + if (IS_ERR(vma)) + goto err; + + batch_start = gen8_canonical_addr(eb->batch->node.start) + + eb->batch_start_offset; + + shadow_batch_start = gen8_canonical_addr(vma->node.start); + + err = intel_engine_cmd_parser(eb->gem_context, + eb->engine, eb->batch->obj, - pool->obj, + batch_start, eb->batch_start_offset, eb->batch_len, - is_master); + pool->obj, + shadow_batch_start); + if (err) { - if (err == -EACCES) /* unhandled chained batch */ + i915_vma_unpin(vma); + + /* + * Unsafe GGTT-backed buffers can still be submitted safely + * as non-secure. 
+ * For PPGTT backing however, we have no choice but to forcibly + * reject unsafe buffers + */ + if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES)) + /* Execute original buffer non-secure */ vma = NULL; else vma = ERR_PTR(err); goto err; } - vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) - goto err; - eb->vma[eb->buffer_count] = i915_vma_get(vma); eb->flags[eb->buffer_count] = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; vma->exec_flags = &eb->flags[eb->buffer_count]; eb->buffer_count++; + eb->batch_start_offset = 0; + eb->batch = vma; + + if (CMDPARSER_USES_GGTT(eb->i915)) + eb->batch_flags |= I915_DISPATCH_SECURE; + + /* eb->batch_len unchanged */ + vma->private = pool; return vma; @@ -2430,6 +2486,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct drm_i915_gem_exec_object2 *exec, struct drm_syncobj **fences) { + struct drm_i915_private *i915 = to_i915(dev); struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct dma_fence *exec_fence = NULL; @@ -2441,7 +2498,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); - eb.i915 = to_i915(dev); + eb.i915 = i915; eb.file = file; eb.args = args; if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) @@ -2461,8 +2518,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { + if (INTEL_GEN(i915) >= 11) + return -ENODEV; + + /* Return -EPERM to trigger fallback code on old binaries. */ + if (!HAS_SECURE_BATCHES(i915)) + return -EPERM; + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) - return -EPERM; + return -EPERM; eb.batch_flags |= I915_DISPATCH_SECURE; } @@ -2539,34 +2603,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } + if (eb.batch_len == 0) + eb.batch_len = eb.batch->size - eb.batch_start_offset; + if (eb_use_cmdparser(&eb)) { struct i915_vma *vma; - vma = eb_parse(&eb, drm_is_current_master(file)); + vma = eb_parse(&eb); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_vma; } - - if (vma) { - /* - * Batch parsed and accepted: - * - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in - * the dispatch_execbuffer implementations. We - * specifically don't want that set on batches the - * command parser has accepted. - */ - eb.batch_flags |= I915_DISPATCH_SECURE; - eb.batch_start_offset = 0; - eb.batch = vma; - } } - if (eb.batch_len == 0) - eb.batch_len = eb.batch->size - eb.batch_start_offset; - /* * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. 
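The execbuffer rework above makes command parsing either opportunistic (USING, the old gen7 behaviour with a non-secure GGTT fallback) or mandatory (REQUIRES, for the gen9 blitter), combined in eb_use_cmdparser(); the engine flags backing it follow in intel_engine_types.h below. A standalone model of that decision, using simplified stand-in types rather than the real i915 structures:

#include <stdbool.h>

/* Stand-ins for the two engine flag bits added below. */
struct engine_model {
	bool requires_cmd_parser;	/* I915_ENGINE_REQUIRES_CMD_PARSER */
	bool using_cmd_parser;		/* I915_ENGINE_USING_CMD_PARSER */
};

/*
 * Mirrors eb_use_cmdparser(): mandatory parsing always wins, while
 * opportunistic parsing is skipped for zero-length batches.
 */
static bool model_use_cmdparser(const struct engine_model *e,
				unsigned long batch_len)
{
	return e->requires_cmd_parser ||
	       (e->using_cmd_parser && batch_len != 0);
}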
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index c5d1047a4bc5..758f0e8ec672 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -454,13 +454,14 @@ struct intel_engine_cs { /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; -#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) +#define I915_ENGINE_USING_CMD_PARSER BIT(0) #define I915_ENGINE_SUPPORTS_STATS BIT(1) #define I915_ENGINE_HAS_PREEMPTION BIT(2) #define I915_ENGINE_HAS_SEMAPHORES BIT(3) #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) #define I915_ENGINE_IS_VIRTUAL BIT(5) #define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7) unsigned int flags; /* @@ -528,9 +529,15 @@ struct intel_engine_cs { }; static inline bool -intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) +intel_engine_using_cmd_parser(const struct intel_engine_cs *engine) { - return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; + return engine->flags & I915_ENGINE_USING_CMD_PARSER; +} + +static inline bool +intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER; } static inline bool diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 060a27d9af34..6187cdd06646 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -61,6 +61,9 @@ static int __gt_unpark(struct intel_wakeref *wf) gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); GEM_BUG_ON(!gt->awake); + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + intel_rps_unpark(&gt->rps); i915_pmu_gt_unparked(i915); @@ -86,6 +89,11 @@ static int __gt_park(struct intel_wakeref *wf) /* Everything switched off, flush any residual interrupt just in case */ intel_synchronize_irq(i915); + if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) { + intel_rc6_ctx_wa_check(&i915->gt.rc6); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + } + GEM_BUG_ON(!wakeref); intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 7c0d9eb5c6fc..602a02d01850 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -488,6 +488,66 @@ static void rpm_put(struct intel_rc6 *rc6) rc6->wakeref = false; } +static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6) +{ + return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO); +} + +static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (intel_rc6_ctx_corrupted(rc6)) { + DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); + rc6->ctx_corrupted = true; + } +} + +/** + * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA + * @rc6: rc6 state + * + * Perform any steps needed to re-init the RC6 CTX WA after system resume. 
+ */ +void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6) +{ + if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) { + DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); + rc6->ctx_corrupted = false; + } +} + +/** + * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption + * @rc6: rc6 state + * + * Check if an RC6 CTX corruption has happened since the last check and if so + * disable RC6 and runtime power management. +*/ +void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (rc6->ctx_corrupted) + return; + + if (!intel_rc6_ctx_corrupted(rc6)) + return; + + DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); + + intel_rc6_disable(rc6); + rc6->ctx_corrupted = true; + + return; +} + static void __intel_rc6_disable(struct intel_rc6 *rc6) { struct drm_i915_private *i915 = rc6_to_i915(rc6); @@ -512,6 +572,8 @@ void intel_rc6_init(struct intel_rc6 *rc6) if (!rc6_supported(rc6)) return; + intel_rc6_ctx_wa_init(rc6); + if (IS_CHERRYVIEW(i915)) err = chv_rc6_init(rc6); else if (IS_VALLEYVIEW(i915)) @@ -546,6 +608,9 @@ void intel_rc6_enable(struct intel_rc6 *rc6) GEM_BUG_ON(rc6->enabled); + if (rc6->ctx_corrupted) + return; + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); if (IS_CHERRYVIEW(i915)) diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h index 5e6711f36457..1370f6834a4c 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.h +++ b/drivers/gpu/drm/i915/gt/intel_rc6.h @@ -22,4 +22,7 @@ void intel_rc6_disable(struct intel_rc6 *rc6); u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg); u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg); +void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6); +void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6); + #endif /* INTEL_RC6_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h index 214f354d6ae4..89ad5697a8d4 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h @@ -23,6 +23,7 @@ struct intel_rc6 { bool supported : 1; bool enabled : 1; bool wakeref : 1; + bool ctx_corrupted : 1; }; #endif /* INTEL_RC6_TYPES_H */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 24555102e198..f24096e27bef 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -53,13 +53,11 @@ * granting userspace undue privileges. There are three categories of privilege. * * First, commands which are explicitly defined as privileged or which should - * only be used by the kernel driver. The parser generally rejects such - * commands, though it may allow some from the drm master process. + * only be used by the kernel driver. The parser rejects such commands * * Second, commands which access registers. To support correct/enhanced * userspace functionality, particularly certain OpenGL extensions, the parser - * provides a whitelist of registers which userspace may safely access (for both - * normal and drm master processes). + * provides a whitelist of registers which userspace may safely access * * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc). * The parser always rejects such commands. @@ -84,9 +82,9 @@ * in the per-engine command tables. 
* * Other command table entries map fairly directly to high level categories - * mentioned above: rejected, master-only, register whitelist. The parser - * implements a number of checks, including the privileged memory checks, via a - * general bitmasking mechanism. + * mentioned above: rejected, register whitelist. The parser implements a number + * of checks, including the privileged memory checks, via a general bitmasking + * mechanism. */ /* @@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor { * CMD_DESC_REJECT: The command is never allowed * CMD_DESC_REGISTER: The command should be checked against the * register whitelist for the appropriate ring - * CMD_DESC_MASTER: The command is allowed if the submitting process - * is the DRM master */ u32 flags; #define CMD_DESC_FIXED (1<<0) @@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor { #define CMD_DESC_REJECT (1<<2) #define CMD_DESC_REGISTER (1<<3) #define CMD_DESC_BITMASK (1<<4) -#define CMD_DESC_MASTER (1<<5) /* * The command's unique identification bits and the bitmask to get them. @@ -194,7 +189,7 @@ struct drm_i915_cmd_table { #define CMD(op, opm, f, lm, fl, ...) \ { \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ - .cmd = { (op), ~0u << (opm) }, \ + .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } @@ -209,14 +204,13 @@ struct drm_i915_cmd_table { #define R CMD_DESC_REJECT #define W CMD_DESC_REGISTER #define B CMD_DESC_BITMASK -#define M CMD_DESC_MASTER /* Command Mask Fixed Len Action ---------------------------------------------------------- */ -static const struct drm_i915_cmd_descriptor common_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = { CMD( MI_NOOP, SMI, F, 1, S ), CMD( MI_USER_INTERRUPT, SMI, F, 1, R ), - CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ), CMD( MI_ARB_CHECK, SMI, F, 1, S ), CMD( MI_REPORT_HEAD, SMI, F, 1, S ), CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), @@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = { CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), }; -static const struct drm_i915_cmd_descriptor render_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = { CMD( MI_FLUSH, SMI, F, 1, S ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_PREDICATE, SMI, F, 1, S ), @@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ), - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), @@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ), }; -static const struct drm_i915_cmd_descriptor video_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = { CMD( MFX_WAIT, SMFX, F, 1, S ), }; -static const struct drm_i915_cmd_descriptor vecs_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = { CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, @@ -412,7 +406,7 @@ 
static const struct drm_i915_cmd_descriptor vecs_cmds[] = { }}, ), }; -static const struct drm_i915_cmd_descriptor blt_cmds[] = { +static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = { CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ), CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B, .bits = {{ @@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = { }; static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { - CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +/* + * For Gen9 we can still rely on the h/w to enforce cmd security, and only + * need to re-enforce the register access checks. We therefore only need to + * teach the cmdparser how to find the end of each command, and identify + * register accesses. The table doesn't need to reject any commands, and so + * the only commands listed here are: + * 1) Those that touch registers + * 2) Those that do not have the default 8-bit length + * + * Note that the default MI length mask chosen for this table is 0xFF, not + * the 0x3F used on older devices. This is because the vast majority of MI + * cmds on Gen9 use a standard 8-bit Length field. + * All the Gen9 blitter instructions are standard 0xFF length mask, and + * none allow access to non-general registers, so in fact no BLT cmds are + * included in the table at all. + * + */ +static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = { + CMD( MI_NOOP, SMI, F, 1, S ), + CMD( MI_USER_INTERRUPT, SMI, F, 1, S ), + CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ), + CMD( MI_FLUSH, SMI, F, 1, S ), + CMD( MI_ARB_CHECK, SMI, F, 1, S ), + CMD( MI_REPORT_HEAD, SMI, F, 1, S ), + CMD( MI_ARB_ON_OFF, SMI, F, 1, S ), + CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), + CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ), + CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ), + CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ), + CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ), + CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ), + CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W, + .reg = { .offset = 1, .mask = 0x007FFFFC } ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), + + /* + * We allow BB_START but apply further checks. We just sanitize the + * basic fields here. 
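+ * The one field we cannot fully check here, the jump target address,
+ * is relocated into the shadow batch and validated against the jump
+ * whitelist later, in check_bbstart().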
+ */ +#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0) +#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1) + CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B, + .bits = {{ + .offset = 0, + .mask = MI_BB_START_OPERAND_MASK, + .expected = MI_BB_START_OPERAND_EXPECT, + }}, ), +}; + static const struct drm_i915_cmd_descriptor noop_desc = CMD(MI_NOOP, SMI, F, 1, S); @@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc = #undef R #undef W #undef B -#undef M -static const struct drm_i915_cmd_table gen7_render_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table gen7_render_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, }; -static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { render_cmds, ARRAY_SIZE(render_cmds) }, +static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) }, { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) }, }; -static const struct drm_i915_cmd_table gen7_video_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { video_cmds, ARRAY_SIZE(video_cmds) }, +static const struct drm_i915_cmd_table gen7_video_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) }, }; -static const struct drm_i915_cmd_table hsw_vebox_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { vecs_cmds, ARRAY_SIZE(vecs_cmds) }, +static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) }, }; -static const struct drm_i915_cmd_table gen7_blt_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, }; -static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { - { common_cmds, ARRAY_SIZE(common_cmds) }, - { blt_cmds, ARRAY_SIZE(blt_cmds) }, +static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = { + { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) }, + { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) }, { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) }, }; +static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = { + { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) }, +}; + + /* * Register whitelists, sorted by increasing register offset. 
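 * (Kept sorted because the parser binary-searches each table for every
 * register access it scans; see __find_reg() further below.)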
*/ @@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; -static const struct drm_i915_reg_descriptor ivb_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)), - REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)), -}; - -static const struct drm_i915_reg_descriptor hsw_master_regs[] = { - REG32(FORCEWAKE_MT), - REG32(DERRMR), +static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), + REG64_IDX(BCS_GPR, 3), + REG64_IDX(BCS_GPR, 4), + REG64_IDX(BCS_GPR, 5), + REG64_IDX(BCS_GPR, 6), + REG64_IDX(BCS_GPR, 7), + REG64_IDX(BCS_GPR, 8), + REG64_IDX(BCS_GPR, 9), + REG64_IDX(BCS_GPR, 10), + REG64_IDX(BCS_GPR, 11), + REG64_IDX(BCS_GPR, 12), + REG64_IDX(BCS_GPR, 13), + REG64_IDX(BCS_GPR, 14), + REG64_IDX(BCS_GPR, 15), }; #undef REG64 @@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = { struct drm_i915_reg_table { const struct drm_i915_reg_descriptor *regs; int num_regs; - bool master; }; static const struct drm_i915_reg_table ivb_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, }; static const struct drm_i915_reg_table ivb_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, }; static const struct drm_i915_reg_table hsw_render_reg_tables[] = { - { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, - { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) }, + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) }, }; static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { - { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, - { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) }, +}; + +static const struct drm_i915_reg_table gen9_blt_reg_tables[] = { + { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) }, }; static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) @@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } +static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header) +{ + u32 client = cmd_header >> INSTR_CLIENT_SHIFT; + + if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT) + return 0xFF; + + DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 
0x%08X\n", cmd_header); + return 0; +} + static bool validate_cmds_sorted(const struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) @@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN(engine->i915, 7)) + if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) && + engine->class == COPY_ENGINE_CLASS)) return; switch (engine->class) { case RENDER_CLASS: if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_render_ring_cmds; + cmd_tables = hsw_render_ring_cmd_table; cmd_table_count = - ARRAY_SIZE(hsw_render_ring_cmds); + ARRAY_SIZE(hsw_render_ring_cmd_table); } else { - cmd_tables = gen7_render_cmds; - cmd_table_count = ARRAY_SIZE(gen7_render_cmds); + cmd_tables = gen7_render_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table); } if (IS_HASWELL(engine->i915)) { @@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->reg_tables = ivb_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables); } - engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; case VIDEO_DECODE_CLASS: - cmd_tables = gen7_video_cmds; - cmd_table_count = ARRAY_SIZE(gen7_video_cmds); + cmd_tables = gen7_video_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case COPY_ENGINE_CLASS: - if (IS_HASWELL(engine->i915)) { - cmd_tables = hsw_blt_ring_cmds; - cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; + if (IS_GEN(engine->i915, 9)) { + cmd_tables = gen9_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table); + engine->get_cmd_length_mask = + gen9_blt_get_cmd_length_mask; + + /* BCS Engine unsafe without parser */ + engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER; + } else if (IS_HASWELL(engine->i915)) { + cmd_tables = hsw_blt_ring_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table); } else { - cmd_tables = gen7_blt_cmds; - cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); + cmd_tables = gen7_blt_cmd_table; + cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table); } - if (IS_HASWELL(engine->i915)) { + if (IS_GEN(engine->i915, 9)) { + engine->reg_tables = gen9_blt_reg_tables; + engine->reg_table_count = + ARRAY_SIZE(gen9_blt_reg_tables); + } else if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { engine->reg_tables = ivb_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables); } - - engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; case VIDEO_ENHANCEMENT_CLASS: - cmd_tables = hsw_vebox_cmds; - cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); + cmd_tables = hsw_vebox_cmd_table; + cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table); /* VECS can use the same length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; @@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) return; } - engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER; + engine->flags |= I915_ENGINE_USING_CMD_PARSER; } /** @@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) */ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine) { - if (!intel_engine_needs_cmd_parser(engine)) + if (!intel_engine_using_cmd_parser(engine)) return; fini_hash_table(engine); @@ -1029,22 
+1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) } static const struct drm_i915_reg_descriptor * -find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, u32 addr) { const struct drm_i915_reg_table *table = engine->reg_tables; + const struct drm_i915_reg_descriptor *reg = NULL; int count = engine->reg_table_count; - for (; count > 0; ++table, --count) { - if (!table->master || is_master) { - const struct drm_i915_reg_descriptor *reg; + for (; !reg && (count > 0); ++table, --count) + reg = __find_reg(table->regs, table->num_regs, addr); - reg = __find_reg(table->regs, table->num_regs, addr); - if (reg != NULL) - return reg; - } - } - - return NULL; + return reg; } /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ @@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, - const u32 *cmd, u32 length, - const bool is_master) + const u32 *cmd, u32 length) { if (desc->flags & CMD_DESC_SKIP) return true; @@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } - if ((desc->flags & CMD_DESC_MASTER) && !is_master) { - DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n", - *cmd); - return false; - } - if (desc->flags & CMD_DESC_REGISTER) { /* * Get the distance between individual register offset @@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg(engine, is_master, reg_addr); + find_reg(engine, reg_addr); if (!reg) { DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n", @@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine, return true; } +static int check_bbstart(const struct i915_gem_context *ctx, + u32 *cmd, u32 offset, u32 length, + u32 batch_len, + u64 batch_start, + u64 shadow_batch_start) +{ + u64 jump_offset, jump_target; + u32 target_cmd_offset, target_cmd_index; + + /* For igt compatibility on older platforms */ + if (CMDPARSER_USES_GGTT(ctx->i915)) { + DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n"); + return -EACCES; + } + + if (length != 3) { + DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n", + length); + return -EINVAL; + } + + jump_target = *(u64*)(cmd+1); + jump_offset = jump_target - batch_start; + + /* + * Any underflow of jump_target is guaranteed to be outside the range + * of a u32, so >= test catches both too large and too small + */ + if (jump_offset >= batch_len) { + DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n", + jump_target); + return -EINVAL; + } + + /* + * This cannot overflow a u32 because we already checked jump_offset + * is within the BB, and the batch_len is a u32 + */ + target_cmd_offset = lower_32_bits(jump_offset); + target_cmd_index = target_cmd_offset / sizeof(u32); + + *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset; + + if (target_cmd_index == offset) + return 0; + + if (ctx->jump_whitelist_cmds <= target_cmd_index) { + DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n"); + return -EINVAL; + } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) { + DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n", + jump_target); + return -EINVAL; + } + + return 0; +} + +static void 
init_whitelist(struct i915_gem_context *ctx, u32 batch_len) +{ + const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32)); + const u32 exact_size = BITS_TO_LONGS(batch_cmds); + u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds)); + unsigned long *next_whitelist; + + if (CMDPARSER_USES_GGTT(ctx->i915)) + return; + + if (batch_cmds <= ctx->jump_whitelist_cmds) { + bitmap_zero(ctx->jump_whitelist, batch_cmds); + return; + } + +again: + next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL); + if (next_whitelist) { + kfree(ctx->jump_whitelist); + ctx->jump_whitelist = next_whitelist; + ctx->jump_whitelist_cmds = + next_size * BITS_PER_BYTE * sizeof(long); + return; + } + + if (next_size > exact_size) { + next_size = exact_size; + goto again; + } + + DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n"); + bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds); + + return; +} + #define LENGTH_BIAS 2 /** * i915_parse_cmds() - parse a submitted batch buffer for privilege violations + * @ctx: the context in which the batch is to execute * @engine: the engine on which the batch is to execute * @batch_obj: the batch buffer in question - * @shadow_batch_obj: copy of the batch buffer in question + * @batch_start: Canonical base address of batch * @batch_start_offset: byte offset in the batch at which execution starts * @batch_len: length of the commands in batch_obj - * @is_master: is the submitting process the drm master? + * @shadow_batch_obj: copy of the batch buffer in question + * @shadow_batch_start: Canonical base address of shadow_batch_obj * * Parses the specified batch buffer looking for privilege violations as * described in the overview. @@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ -int intel_engine_cmd_parser(struct intel_engine_cs *engine, + +int intel_engine_cmd_parser(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master) + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start) { - u32 *cmd, *batch_end; + u32 *cmd, *batch_end, offset = 0; struct drm_i915_cmd_descriptor default_desc = noop_desc; const struct drm_i915_cmd_descriptor *desc = &default_desc; bool needs_clflush_after = false; @@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, return PTR_ERR(cmd); } + init_whitelist(ctx, batch_len); + /* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra @@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, do { u32 length; - if (*cmd == MI_BATCH_BUFFER_END) { - if (needs_clflush_after) { - void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); - drm_clflush_virt_range(ptr, - (void *)(cmd + 1) - ptr); - } + if (*cmd == MI_BATCH_BUFFER_END) break; - } desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) { DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); ret = -EINVAL; - break; - } - - /* - * If the batch buffer contains a chained batch, return an - * error that tells the caller to abort and dispatch the - * workload as a non-secure batch. 
- */ - if (desc->cmd.value == MI_BATCH_BUFFER_START) { - ret = -EACCES; - break; + goto err; } if (desc->flags & CMD_DESC_FIXED) @@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, length, batch_end - cmd); ret = -EINVAL; - break; + goto err; } - if (!check_cmd(engine, desc, cmd, length, is_master)) { + if (!check_cmd(engine, desc, cmd, length)) { ret = -EACCES; + goto err; + } + + if (desc->cmd.value == MI_BATCH_BUFFER_START) { + ret = check_bbstart(ctx, cmd, offset, length, + batch_len, batch_start, + shadow_batch_start); + + if (ret) + goto err; break; } + if (ctx->jump_whitelist_cmds > offset) + set_bit(offset, ctx->jump_whitelist); + cmd += length; + offset += length; if (cmd >= batch_end) { DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n"); ret = -EINVAL; - break; + goto err; } } while (1); + if (needs_clflush_after) { + void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); + + drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr); + } + +err: i915_gem_object_unpin_map(shadow_batch_obj); return ret; } @@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_uabi_engine(engine, dev_priv) { - if (intel_engine_needs_cmd_parser(engine)) { + if (intel_engine_using_cmd_parser(engine)) { active = true; break; } @@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) * the parser enabled. * 9. Don't whitelist or handle oacontrol specially, as ownership * for oacontrol state is moving to i915-perf. + * 10. Support for Gen9 BCS Parsing */ - return 9; + return 10; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 64f8ba3449ed..fc8286809e62 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -63,6 +63,7 @@ #include "gem/i915_gem_ioctls.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_rc6.h" #include "i915_debugfs.h" #include "i915_drv.h" @@ -1816,6 +1817,8 @@ static int i915_drm_resume(struct drm_device *dev) disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6); + intel_gt_sanitize(&dev_priv->gt, true); ret = i915_ggtt_enable_hw(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 50809ffdc6b2..1779f600fcfb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1613,9 +1613,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define VEBOX_MASK(dev_priv) \ ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) +/* + * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution + * All later gens can run the final buffer from the ppgtt + */ +#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7) + #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) +#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) @@ -1648,10 +1655,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, /* Early gen2 have a totally busted CS tlb and require pinned batches. 
*/ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) +#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ + (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) + /* WaRsDisableCoarsePowerGating:skl,cnl */ #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ - (IS_CANNONLAKE(dev_priv) || \ - IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) + (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9)) #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ @@ -1833,6 +1842,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, unsigned long flags); #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) +struct i915_vma * __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags); + void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); static inline int __must_check @@ -1938,12 +1955,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -int intel_engine_cmd_parser(struct intel_engine_cs *engine, +int intel_engine_cmd_parser(struct i915_gem_context *cxt, + struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, + u64 user_batch_start, u32 batch_start_offset, u32 batch_len, - bool is_master); + struct drm_i915_gem_object *shadow_batch_obj, + u64 shadow_batch_start); /* intel_device_info.c */ static inline struct intel_device_info * diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 143a8952b736..43c532756c7c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -893,6 +893,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_address_space *vm = &dev_priv->ggtt.vm; + + return i915_gem_object_pin(obj, vm, view, size, alignment, + flags | PIN_GLOBAL); +} + +struct i915_vma * +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; int ret; @@ -958,7 +972,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + ret = i915_vma_pin(vma, size, alignment, flags); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index ad33fbe90a28..cf8a8c3ef047 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -63,7 +63,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES); break; case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); + value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN); break; case I915_PARAM_CMD_PARSER_VERSION: value = i915_cmd_parser_get_version(i915); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1cd07d789a17..478f5ff6c554 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -474,6 
+474,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOCHK_PPGTT_WT_HSW (0x2 << 3) #define ECOCHK_PPGTT_WB_HSW (0x3 << 3) +#define GEN8_RC6_CTX_INFO _MMIO(0x8504) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1 << 13) #define ECOBITS_PPGTT_CACHE64B (3 << 8) @@ -560,6 +562,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define BCS_SWCTRL _MMIO(0x22200) +/* There are 16 GPR registers */ +#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) +#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) + #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) #define HS_INVOCATION_COUNT _MMIO(0x2300) @@ -7363,6 +7369,10 @@ enum { #define DMC_DEBUG3 _MMIO(0x101090) +/* Display Internal Timeout Register */ +#define RM_TIMEOUT _MMIO(0x42060) +#define MMIO_TIMEOUT_US(us) ((us) << 0) + /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) #define DE_SPRITEB_FLIP_DONE (1 << 29) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2d389e437e87..87f96157e156 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -107,6 +107,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); + + /* + * Lower the display internal timeout. + * This is needed to avoid any hard hangs when DSI port PLL + * is off and a MMIO access is attempted by any privilege + * application, using batch buffers or any other means. + */ + I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig index bb4ddc6bb0a6..571dc369a7e9 100644 --- a/drivers/gpu/drm/lima/Kconfig +++ b/drivers/gpu/drm/lima/Kconfig @@ -9,5 +9,6 @@ config DRM_LIMA depends on COMMON_CLK depends on OF select DRM_SCHED + select DRM_GEM_SHMEM_HELPER help DRM driver for ARM Mali 400/450 GPUs. 
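Before moving on to the lima and mediatek changes, it is worth making the core of the i915 parser rework concrete. The new init_whitelist()/check_bbstart() pair above reduces to a bitmap with one bit per batch dword: a bit is set as each command is scanned, and a recursive MI_BATCH_BUFFER_START is only allowed to jump to a dword whose bit is already set. The following is a minimal, self-contained userspace sketch of that bookkeeping; all names are local to the sketch, and the kernel's grow-or-shrink reallocation strategy and error paths are elided.

/* sketch.c - standalone illustration of the jump whitelist; not kernel code */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct jump_whitelist {
	unsigned long *bits;   /* like ctx->jump_whitelist */
	size_t ncmds;          /* capacity in bits, like ctx->jump_whitelist_cmds */
};

/* Size the bitmap for batch_len bytes: one bit per dword, as init_whitelist()
 * does with DIV_ROUND_UP(batch_len, sizeof(u32)). */
static int jw_init(struct jump_whitelist *jw, uint32_t batch_len)
{
	size_t ncmds = (batch_len + 3) / 4;
	size_t nlongs = (ncmds + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *bits = calloc(nlongs ? nlongs : 1, sizeof(*bits));

	if (!bits)
		return -1;
	free(jw->bits);
	jw->bits = bits;
	jw->ncmds = nlongs * BITS_PER_LONG;
	return 0;
}

/* set_bit(offset, ctx->jump_whitelist): mark a scanned command boundary */
static void jw_mark(struct jump_whitelist *jw, size_t idx)
{
	if (idx < jw->ncmds)
		jw->bits[idx / BITS_PER_LONG] |= 1ul << (idx % BITS_PER_LONG);
}

/* The tail of check_bbstart(): the jump must stay inside the batch (the
 * unsigned compare also catches targets below batch_start) and must land
 * on a previously scanned command boundary. */
static int jw_check(const struct jump_whitelist *jw, uint64_t jump_target,
		    uint64_t batch_start, uint32_t batch_len)
{
	uint64_t off = jump_target - batch_start;
	size_t idx;

	if (off >= batch_len)
		return -1;
	idx = (size_t)(off / 4);
	if (idx >= jw->ncmds ||
	    !(jw->bits[idx / BITS_PER_LONG] & (1ul << (idx % BITS_PER_LONG))))
		return -1;
	return 0;
}

int main(void)
{
	struct jump_whitelist jw = { 0 };
	uint64_t start = 0x100000;

	jw_init(&jw, 64);   /* a 16-dword batch */
	jw_mark(&jw, 0);    /* parser scanned a command at dword 0 */
	printf("jump to dword 0: %d\n", jw_check(&jw, start, start, 64));     /* 0  */
	printf("jump to dword 1: %d\n", jw_check(&jw, start + 4, start, 64)); /* -1 */
	free(jw.bits);
	return 0;
}

Note how rewriting the BB_START operand to point at shadow_batch_start + offset, as check_bbstart() does, means the hardware only ever follows jumps into the already-validated shadow copy.
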
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile index 38cc70281ba5..a85444b0a1d4 100644 --- a/drivers/gpu/drm/lima/Makefile +++ b/drivers/gpu/drm/lima/Makefile @@ -13,9 +13,7 @@ lima-y := \ lima_vm.o \ lima_sched.o \ lima_ctx.o \ - lima_gem_prime.o \ lima_dlbu.o \ - lima_bcast.o \ - lima_object.o + lima_bcast.o obj-$(CONFIG_DRM_LIMA) += lima.o diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index e3e0ca11382e..19829b543024 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -314,7 +314,7 @@ int lima_device_init(struct lima_device *ldev) ldev->va_end = LIMA_VA_RESERVE_START; ldev->dlbu_cpu = dma_alloc_wc( ldev->dev, LIMA_PAGE_SIZE, - &ldev->dlbu_dma, GFP_KERNEL); + &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN); if (!ldev->dlbu_cpu) { err = -ENOMEM; goto err_out2; diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 75ec703d22e0..124efe4fa97b 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -12,7 +12,6 @@ #include "lima_drv.h" #include "lima_gem.h" -#include "lima_gem_prime.h" #include "lima_vm.h" int lima_sched_timeout_ms; @@ -240,16 +239,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = { DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW), }; -static const struct file_operations lima_drm_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, -#endif - .mmap = lima_gem_mmap, -}; +DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops); static struct drm_driver lima_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -258,10 +248,6 @@ static struct drm_driver lima_drm_driver = { .ioctls = lima_drm_driver_ioctls, .num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls), .fops = &lima_drm_driver_fops, - .gem_free_object_unlocked = lima_gem_free_object, - .gem_open_object = lima_gem_object_open, - .gem_close_object = lima_gem_object_close, - .gem_vm_ops = &lima_gem_vm_ops, .name = "lima", .desc = "lima DRM", .date = "20190217", @@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = { .minor = 0, .patchlevel = 0, + .gem_create_object = lima_gem_create_object, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import_sg_table = lima_gem_prime_import_sg_table, + .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .gem_prime_get_sg_table = lima_gem_prime_get_sg_table, - .gem_prime_mmap = lima_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, }; static int lima_pdev_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 4da21353c3a2..d0059d8c97d8 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -3,7 +3,7 @@ #include <linux/mm.h> #include <linux/sync_file.h> -#include <linux/pfn_t.h> +#include <linux/pagemap.h> #include <drm/drm_file.h> #include <drm/drm_syncobj.h> @@ -13,40 +13,55 @@ #include "lima_drv.h" #include "lima_gem.h" -#include "lima_gem_prime.h" #include "lima_vm.h" -#include "lima_object.h" int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, u32 size, u32 flags, u32 *handle) { int err; - struct lima_bo *bo; - struct lima_device *ldev = to_lima_dev(dev); + gfp_t mask; + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj; + struct 
sg_table *sgt; + + shmem = drm_gem_shmem_create(dev, size); + if (IS_ERR(shmem)) + return PTR_ERR(shmem); + + obj = &shmem->base; - bo = lima_bo_create(ldev, size, flags, NULL); - if (IS_ERR(bo)) - return PTR_ERR(bo); + /* Mali Utgard GPU can only support 32bit address space */ + mask = mapping_gfp_mask(obj->filp->f_mapping); + mask &= ~__GFP_HIGHMEM; + mask |= __GFP_DMA32; + mapping_set_gfp_mask(obj->filp->f_mapping, mask); - err = drm_gem_handle_create(file, &bo->gem, handle); + sgt = drm_gem_shmem_get_pages_sgt(obj); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto out; + } + + err = drm_gem_handle_create(file, obj, handle); +out: /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(&bo->gem); + drm_gem_object_put_unlocked(obj); return err; } -void lima_gem_free_object(struct drm_gem_object *obj) +static void lima_gem_free_object(struct drm_gem_object *obj) { struct lima_bo *bo = to_lima_bo(obj); if (!list_empty(&bo->va)) dev_err(obj->dev->dev, "lima gem free bo still has va\n"); - lima_bo_destroy(bo); + drm_gem_shmem_free_object(obj); } -int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) +static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) { struct lima_bo *bo = to_lima_bo(obj); struct lima_drm_priv *priv = to_lima_drm_priv(file); @@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) return lima_vm_bo_add(vm, bo, true); } -void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) +static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) { struct lima_bo *bo = to_lima_bo(obj); struct lima_drm_priv *priv = to_lima_drm_priv(file); @@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) lima_vm_bo_del(vm, bo); } +static const struct drm_gem_object_funcs lima_gem_funcs = { + .free = lima_gem_free_object, + .open = lima_gem_object_open, + .close = lima_gem_object_close, + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .mmap = drm_gem_shmem_mmap, +}; + +struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size) +{ + struct lima_bo *bo; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + + mutex_init(&bo->lock); + INIT_LIST_HEAD(&bo->va); + + bo->base.base.funcs = &lima_gem_funcs; + + return &bo->base.base; +} + int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset) { struct drm_gem_object *obj; struct lima_bo *bo; struct lima_drm_priv *priv = to_lima_drm_priv(file); struct lima_vm *vm = priv->vm; - int err; obj = drm_gem_object_lookup(file, handle); if (!obj) @@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset) *va = lima_vm_get_va(vm, bo); - err = drm_gem_create_mmap_offset(obj); - if (!err) - *offset = drm_vma_node_offset_addr(&obj->vma_node); + *offset = drm_vma_node_offset_addr(&obj->vma_node); drm_gem_object_put_unlocked(obj); - return err; -} - -static vm_fault_t lima_gem_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_gem_object *obj = vma->vm_private_data; - struct lima_bo *bo = to_lima_bo(obj); - pfn_t pfn; - pgoff_t pgoff; - - /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - pfn = 
__pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV); - - return vmf_insert_mixed(vma, vmf->address, pfn); -} - -const struct vm_operations_struct lima_gem_vm_ops = { - .fault = lima_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - -void lima_set_vma_flags(struct vm_area_struct *vma) -{ - pgprot_t prot = vm_get_page_prot(vma->vm_flags); - - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_page_prot = pgprot_writecombine(prot); -} - -int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - lima_set_vma_flags(vma); return 0; } @@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, int err = 0; if (!write) { - err = dma_resv_reserve_shared(bo->gem.resv, 1); + err = dma_resv_reserve_shared(lima_bo_resv(bo), 1); if (err) return err; } @@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, if (explicit) return 0; - return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write); -} - -static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos, - struct ww_acquire_ctx *ctx) -{ - int i, ret = 0, contended, slow_locked = -1; - - ww_acquire_init(ctx, &reservation_ww_class); - -retry: - for (i = 0; i < nr_bos; i++) { - if (i == slow_locked) { - slow_locked = -1; - continue; - } - - ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx); - if (ret < 0) { - contended = i; - goto err; - } - } - - ww_acquire_done(ctx); - return 0; - -err: - for (i--; i >= 0; i--) - ww_mutex_unlock(&bos[i]->gem.resv->lock); - - if (slow_locked >= 0) - ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock); - - if (ret == -EDEADLK) { - /* we lost out in a seqno race, lock and retry.. 
*/ - ret = ww_mutex_lock_slow_interruptible( - &bos[contended]->gem.resv->lock, ctx); - if (!ret) { - slow_locked = contended; - goto retry; - } - } - ww_acquire_fini(ctx); - - return ret; -} - -static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos, - struct ww_acquire_ctx *ctx) -{ - int i; - - for (i = 0; i < nr_bos; i++) - ww_mutex_unlock(&bos[i]->gem.resv->lock); - ww_acquire_fini(ctx); + return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write); } static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) @@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) bos[i] = bo; } - err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx); + err = drm_gem_lock_reservations((struct drm_gem_object **)bos, + submit->nr_bos, &ctx); if (err) goto err_out0; @@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) for (i = 0; i < submit->nr_bos; i++) { if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(bos[i]->gem.resv, fence); + dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence); else - dma_resv_add_shared_fence(bos[i]->gem.resv, fence); + dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence); } - lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)bos, + submit->nr_bos, &ctx); for (i = 0; i < submit->nr_bos; i++) - drm_gem_object_put_unlocked(&bos[i]->gem); + drm_gem_object_put_unlocked(&bos[i]->base.base); if (out_sync) { drm_syncobj_replace_fence(out_sync, fence); @@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) err_out2: lima_sched_task_fini(submit->task); err_out1: - lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)bos, + submit->nr_bos, &ctx); err_out0: for (i = 0; i < submit->nr_bos; i++) { if (!bos[i]) break; lima_vm_bo_del(vm, bos[i]); - drm_gem_object_put_unlocked(&bos[i]->gem); + drm_gem_object_put_unlocked(&bos[i]->base.base); } if (out_sync) drm_syncobj_put(out_sync); diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h index 556111a01135..1800feb3e47f 100644 --- a/drivers/gpu/drm/lima/lima_gem.h +++ b/drivers/gpu/drm/lima/lima_gem.h @@ -4,19 +4,37 @@ #ifndef __LIMA_GEM_H__ #define __LIMA_GEM_H__ -struct lima_bo; +#include <drm/drm_gem_shmem_helper.h> + struct lima_submit; -extern const struct vm_operations_struct lima_gem_vm_ops; +struct lima_bo { + struct drm_gem_shmem_object base; + + struct mutex lock; + struct list_head va; +}; + +static inline struct lima_bo * +to_lima_bo(struct drm_gem_object *obj) +{ + return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base); +} + +static inline size_t lima_bo_size(struct lima_bo *bo) +{ + return bo->base.base.size; +} + +static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo) +{ + return bo->base.base.resv; +} -struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags); +struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size); int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, u32 size, u32 flags, u32 *handle); -void lima_gem_free_object(struct drm_gem_object *obj); -int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); -void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file); int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset); -int lima_gem_mmap(struct file *filp, 
struct vm_area_struct *vma); int lima_gem_submit(struct drm_file *file, struct lima_submit *submit); int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns); diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c deleted file mode 100644 index e3eb251e0a12..000000000000 --- a/drivers/gpu/drm/lima/lima_gem_prime.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#include <linux/dma-buf.h> -#include <drm/drm_prime.h> -#include <drm/drm_drv.h> -#include <drm/drm_file.h> - -#include "lima_device.h" -#include "lima_object.h" -#include "lima_gem.h" -#include "lima_gem_prime.h" - -struct drm_gem_object *lima_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *sgt) -{ - struct lima_device *ldev = to_lima_dev(dev); - struct lima_bo *bo; - - bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt); - if (IS_ERR(bo)) - return ERR_CAST(bo); - - return &bo->gem; -} - -struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct lima_bo *bo = to_lima_bo(obj); - int npages = obj->size >> PAGE_SHIFT; - - return drm_prime_pages_to_sg(bo->pages, npages); -} - -int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret) - return ret; - - lima_set_vma_flags(vma); - return 0; -} diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h deleted file mode 100644 index 34b4d35c21e3..000000000000 --- a/drivers/gpu/drm/lima/lima_gem_prime.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#ifndef __LIMA_GEM_PRIME_H__ -#define __LIMA_GEM_PRIME_H__ - -struct drm_gem_object *lima_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *sgt); -struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj); -int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); - -#endif diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c index 8e1651d6a61f..97ec09dee572 100644 --- a/drivers/gpu/drm/lima/lima_mmu.c +++ b/drivers/gpu/drm/lima/lima_mmu.c @@ -8,7 +8,6 @@ #include "lima_device.h" #include "lima_mmu.h" #include "lima_vm.h" -#include "lima_object.h" #include "lima_regs.h" #define mmu_write(reg, data) writel(data, ip->iomem + reg) diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c deleted file mode 100644 index 87123b1d083c..000000000000 --- a/drivers/gpu/drm/lima/lima_object.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#include <drm/drm_prime.h> -#include <linux/pagemap.h> -#include <linux/dma-mapping.h> - -#include "lima_object.h" - -void lima_bo_destroy(struct lima_bo *bo) -{ - if (bo->sgt) { - kfree(bo->pages); - drm_prime_gem_destroy(&bo->gem, bo->sgt); - } else { - if (bo->pages_dma_addr) { - int i, npages = bo->gem.size >> PAGE_SHIFT; - - for (i = 0; i < npages; i++) { - if (bo->pages_dma_addr[i]) - dma_unmap_page(bo->gem.dev->dev, - bo->pages_dma_addr[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); - } - } - - if (bo->pages) - drm_gem_put_pages(&bo->gem, bo->pages, true, true); - } - - kfree(bo->pages_dma_addr); - drm_gem_object_release(&bo->gem); - kfree(bo); -} - 
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags) -{ - struct lima_bo *bo; - int err; - - size = PAGE_ALIGN(size); - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return ERR_PTR(-ENOMEM); - - mutex_init(&bo->lock); - INIT_LIST_HEAD(&bo->va); - - err = drm_gem_object_init(dev->ddev, &bo->gem, size); - if (err) { - kfree(bo); - return ERR_PTR(err); - } - - return bo; -} - -struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size, - u32 flags, struct sg_table *sgt) -{ - int i, err; - size_t npages; - struct lima_bo *bo, *ret; - - bo = lima_bo_create_struct(dev, size, flags); - if (IS_ERR(bo)) - return bo; - - npages = bo->gem.size >> PAGE_SHIFT; - - bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL); - if (!bo->pages_dma_addr) { - ret = ERR_PTR(-ENOMEM); - goto err_out; - } - - if (sgt) { - bo->sgt = sgt; - - bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); - if (!bo->pages) { - ret = ERR_PTR(-ENOMEM); - goto err_out; - } - - err = drm_prime_sg_to_page_addr_arrays( - sgt, bo->pages, bo->pages_dma_addr, npages); - if (err) { - ret = ERR_PTR(err); - goto err_out; - } - } else { - mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32); - bo->pages = drm_gem_get_pages(&bo->gem); - if (IS_ERR(bo->pages)) { - ret = ERR_CAST(bo->pages); - bo->pages = NULL; - goto err_out; - } - - for (i = 0; i < npages; i++) { - dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev->dev, addr)) { - ret = ERR_PTR(-EFAULT); - goto err_out; - } - bo->pages_dma_addr[i] = addr; - } - - } - - return bo; - -err_out: - lima_bo_destroy(bo); - return ret; -} diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h deleted file mode 100644 index 31ca2d8dc0a1..000000000000 --- a/drivers/gpu/drm/lima/lima_object.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#ifndef __LIMA_OBJECT_H__ -#define __LIMA_OBJECT_H__ - -#include <drm/drm_gem.h> - -#include "lima_device.h" - -struct lima_bo { - struct drm_gem_object gem; - - struct page **pages; - dma_addr_t *pages_dma_addr; - struct sg_table *sgt; - void *vaddr; - - struct mutex lock; - struct list_head va; -}; - -static inline struct lima_bo * -to_lima_bo(struct drm_gem_object *obj) -{ - return container_of(obj, struct lima_bo, gem); -} - -struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size, - u32 flags, struct sg_table *sgt); -void lima_bo_destroy(struct lima_bo *bo); -void *lima_bo_vmap(struct lima_bo *bo); -void lima_bo_vunmap(struct lima_bo *bo); - -#endif diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 4127cacac454..f522c5f99729 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -10,7 +10,7 @@ #include "lima_vm.h" #include "lima_mmu.h" #include "lima_l2_cache.h" -#include "lima_object.h" +#include "lima_gem.h" struct lima_fence { struct dma_fence base; @@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task, return -ENOMEM; for (i = 0; i < num_bos; i++) - drm_gem_object_get(&bos[i]->gem); + drm_gem_object_get(&bos[i]->base.base); err = drm_sched_job_init(&task->base, &context->base, vm); if (err) { @@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task) if (task->bos) { for (i = 0; i < task->num_bos; i++) - drm_gem_object_put_unlocked(&task->bos[i]->gem); + 
drm_gem_object_put_unlocked(&task->bos[i]->base.base); kfree(task->bos); } diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c index 19e88ca16527..840e2350d872 100644 --- a/drivers/gpu/drm/lima/lima_vm.c +++ b/drivers/gpu/drm/lima/lima_vm.c @@ -6,7 +6,7 @@ #include "lima_device.h" #include "lima_vm.h" -#include "lima_object.h" +#include "lima_gem.h" #include "lima_regs.h" struct lima_bo_va { @@ -32,7 +32,7 @@ struct lima_bo_va { #define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT) -static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end) +static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end) { u32 addr; @@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end) } } -static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma, - u32 start, u32 end) +static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va) { - u64 addr; - int i = 0; - - for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) { - u32 pbe = LIMA_PBE(addr); - u32 bte = LIMA_BTE(addr); - - if (!vm->bts[pbe].cpu) { - dma_addr_t pts; - u32 *pd; - int j; - - vm->bts[pbe].cpu = dma_alloc_wc( - vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, - &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO); - if (!vm->bts[pbe].cpu) { - if (addr != start) - lima_vm_unmap_page_table(vm, start, addr - 1); - return -ENOMEM; - } - - pts = vm->bts[pbe].dma; - pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT); - for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) { - pd[j] = pts | LIMA_VM_FLAG_PRESENT; - pts += LIMA_PAGE_SIZE; - } + u32 pbe = LIMA_PBE(va); + u32 bte = LIMA_BTE(va); + + if (!vm->bts[pbe].cpu) { + dma_addr_t pts; + u32 *pd; + int j; + + vm->bts[pbe].cpu = dma_alloc_wc( + vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, + &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!vm->bts[pbe].cpu) + return -ENOMEM; + + pts = vm->bts[pbe].dma; + pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT); + for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) { + pd[j] = pts | LIMA_VM_FLAG_PRESENT; + pts += LIMA_PAGE_SIZE; } - - vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE; } + vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE; + return 0; } @@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo) int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) { struct lima_bo_va *bo_va; - int err; + struct sg_dma_page_iter sg_iter; + int offset = 0, err; mutex_lock(&bo->lock); @@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) mutex_lock(&vm->lock); - err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size); + err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo)); if (err) goto err_out1; - err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start, - bo_va->node.start + bo_va->node.size - 1); - if (err) - goto err_out2; + for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) { + err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), + bo_va->node.start + offset); + if (err) + goto err_out2; + + offset += PAGE_SIZE; + } mutex_unlock(&vm->lock); @@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) return 0; err_out2: + if (offset) + lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1); drm_mm_remove_node(&bo_va->node); err_out1: mutex_unlock(&vm->lock); @@ -168,8 +166,8 @@ void lima_vm_bo_del(struct 
lima_vm *vm, struct lima_bo *bo) mutex_lock(&vm->lock); - lima_vm_unmap_page_table(vm, bo_va->node.start, - bo_va->node.start + bo_va->node.size - 1); + lima_vm_unmap_range(vm, bo_va->node.start, + bo_va->node.start + bo_va->node.size - 1); drm_mm_remove_node(&bo_va->node); @@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev) kref_init(&vm->refcount); vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (!vm->pd.cpu) goto err_out0; if (dev->dlbu_cpu) { - int err = lima_vm_map_page_table( - vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU, - LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1); + int err = lima_vm_map_page( + vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU); if (err) goto err_out1; } diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 82ae49c64221..8067a4be8311 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -12,6 +12,8 @@ mediatek-drm-y := mtk_disp_color.o \ mtk_drm_plane.o \ mtk_dsi.o \ mtk_mipi_tx.o \ + mtk_mt8173_mipi_tx.o \ + mtk_mt8183_mipi_tx.o \ mtk_dpi.o obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 21851756c579..4a55bb6e2213 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -3,6 +3,8 @@ * Copyright (c) 2015 MediaTek Inc. */ +#include <drm/drm_fourcc.h> + #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> @@ -19,6 +21,8 @@ #define DISP_REG_OVL_EN 0x000c #define DISP_REG_OVL_RST 0x0014 #define DISP_REG_OVL_ROI_SIZE 0x0020 +#define DISP_REG_OVL_DATAPATH_CON 0x0024 +#define OVL_BGCLR_SEL_IN BIT(2) #define DISP_REG_OVL_ROI_BGCLR 0x0028 #define DISP_REG_OVL_SRC_CON 0x002c #define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n)) @@ -31,7 +35,9 @@ #define DISP_REG_OVL_ADDR_MT8173 0x0f40 #define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n)) -#define OVL_RDMA_MEM_GMC 0x40402020 +#define GMC_THRESHOLD_BITS 16 +#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4) +#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8) #define OVL_CON_BYTE_SWAP BIT(24) #define OVL_CON_MTX_YUV_TO_RGB (6 << 16) @@ -46,9 +52,13 @@ OVL_CON_CLRFMT_RGB : 0) #define OVL_CON_AEN BIT(8) #define OVL_CON_ALPHA 0xff +#define OVL_CON_VIRT_FLIP BIT(9) +#define OVL_CON_HORZ_FLIP BIT(10) struct mtk_disp_ovl_data { unsigned int addr; + unsigned int gmc_bits; + unsigned int layer_nr; bool fmt_rgb565_is_0; }; @@ -126,15 +136,65 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) { - return 4; + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); + + return ovl->data->layer_nr; +} + +static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp) +{ + return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; +} + +static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, + struct mtk_plane_state *mtk_state) +{ + struct drm_plane_state *state = &mtk_state->base; + unsigned int rotation = 0; + + rotation = drm_rotation_simplify(state->rotation, + DRM_MODE_ROTATE_0 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); + rotation &= ~DRM_MODE_ROTATE_0; + + /* We can only do reflection, not rotation */ + if ((rotation & DRM_MODE_ROTATE_MASK) != 0) + return -EINVAL; + + /* + * TODO: Rotating/reflecting YUV buffers is not supported at 
this time. + * Only RGB[AX] variants are supported. + */ + if (state->fb->format->is_yuv && rotation != 0) + return -EINVAL; + + state->rotation = rotation; + + return 0; } static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { unsigned int reg; + unsigned int gmc_thrshd_l; + unsigned int gmc_thrshd_h; + unsigned int gmc_value; + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); - writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx)); + + gmc_thrshd_l = GMC_THRESHOLD_LOW >> + (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); + gmc_thrshd_h = GMC_THRESHOLD_HIGH >> + (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); + if (ovl->data->gmc_bits == 10) + gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16; + else + gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 | + gmc_thrshd_h << 16 | gmc_thrshd_h << 24; + writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx)); reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); reg = reg | BIT(idx); @@ -207,6 +267,16 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, if (idx != 0) con |= OVL_CON_AEN | OVL_CON_ALPHA; + if (pending->rotation & DRM_MODE_REFLECT_Y) { + con |= OVL_CON_VIRT_FLIP; + addr += (pending->height - 1) * pending->pitch; + } + + if (pending->rotation & DRM_MODE_REFLECT_X) { + con |= OVL_CON_HORZ_FLIP; + addr += pending->pitch - 1; + } + writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx)); writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); @@ -217,16 +287,38 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, mtk_ovl_layer_on(comp, idx); } +static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp) +{ + unsigned int reg; + + reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg | OVL_BGCLR_SEL_IN; + writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON); +} + +static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp) +{ + unsigned int reg; + + reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg & ~OVL_BGCLR_SEL_IN; + writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON); +} + static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .config = mtk_ovl_config, .start = mtk_ovl_start, .stop = mtk_ovl_stop, .enable_vblank = mtk_ovl_enable_vblank, .disable_vblank = mtk_ovl_disable_vblank, + .supported_rotations = mtk_ovl_supported_rotations, .layer_nr = mtk_ovl_layer_nr, .layer_on = mtk_ovl_layer_on, .layer_off = mtk_ovl_layer_off, + .layer_check = mtk_ovl_layer_check, .layer_config = mtk_ovl_layer_config, + .bgclr_in_on = mtk_ovl_bgclr_in_on, + .bgclr_in_off = mtk_ovl_bgclr_in_off, }; static int mtk_disp_ovl_bind(struct device *dev, struct device *master, @@ -276,7 +368,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) if (irq < 0) return irq; - comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); + priv->data = of_device_get_match_data(dev); + + comp_id = mtk_ddp_comp_get_id(dev->of_node, + priv->data->layer_nr == 4 ? 
+ MTK_DISP_OVL : + MTK_DISP_OVL_2L); if (comp_id < 0) { dev_err(dev, "Failed to identify by alias: %d\n", comp_id); return comp_id; @@ -289,8 +386,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) return ret; } - priv->data = of_device_get_match_data(dev); - platform_set_drvdata(pdev, priv); ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, @@ -316,11 +411,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev) static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = { .addr = DISP_REG_OVL_ADDR_MT2701, + .gmc_bits = 8, + .layer_nr = 4, .fmt_rgb565_is_0 = false, }; static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = { .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 8, + .layer_nr = 4, .fmt_rgb565_is_0 = true, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 34a731755791..f80a8ba75977 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -207,6 +207,28 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk); } +static +struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, + struct drm_plane *plane, + unsigned int *local_layer) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp; + int i, count = 0; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + comp = mtk_crtc->ddp_comp[i]; + if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = plane->index - count; + return comp; + } + count += mtk_ddp_comp_layer_nr(comp); + } + + WARN(1, "Failed to find component for plane %d\n", plane->index); + return NULL; +} + static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; @@ -272,6 +294,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i]; + if (i == 1) + mtk_ddp_comp_bgclr_in_on(comp); + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); mtk_ddp_comp_start(comp); } @@ -280,10 +305,12 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; + struct mtk_ddp_comp *comp; + unsigned int local_layer; plane_state = to_mtk_plane_state(plane->state); - mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i, - plane_state); + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); + mtk_ddp_comp_layer_config(comp, local_layer, plane_state); } return 0; @@ -301,8 +328,12 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) int i; DRM_DEBUG_DRIVER("%s\n", __func__); - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); + if (i == 1) + mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]); + } + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); @@ -327,6 +358,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; unsigned int i; + unsigned int local_layer; /* * TODO: instead of updating the registers here, we should prepare @@ -348,15 +380,30 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) plane_state = to_mtk_plane_state(plane->state); - 
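
mtk_drm_ddp_comp_for_plane above turns a CRTC-global plane index into a component plus a component-local layer index by walking the per-component layer counts. A self-contained sketch of the same search, with a hypothetical layer-count array standing in for the real ddp components:

#include <stdio.h>

static int comp_for_plane(const unsigned int *layer_nr, int ncomp,
			  unsigned int plane_idx, unsigned int *local)
{
	unsigned int count = 0;
	int i;

	for (i = 0; i < ncomp; i++) {
		if (plane_idx < count + layer_nr[i]) {
			*local = plane_idx - count;
			return i;	/* index of the owning component */
		}
		count += layer_nr[i];
	}
	return -1;	/* no component owns this plane */
}

int main(void)
{
	unsigned int layers[] = { 4, 2 };	/* e.g. OVL0 then OVL_2L0 */
	unsigned int local;
	int comp = comp_for_plane(layers, 2, 5, &local);

	printf("plane 5 -> comp %d, local layer %u\n", comp, local);
	return 0;	/* prints comp 1, local layer 1 */
}
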
if (plane_state->pending.config) { - mtk_ddp_comp_layer_config(comp, i, plane_state); - plane_state->pending.config = false; - } + if (!plane_state->pending.config) + continue; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, + &local_layer); + + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state); + plane_state->pending.config = false; } mtk_crtc->pending_planes = false; } } +int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state) +{ + unsigned int local_layer; + struct mtk_ddp_comp *comp; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); + return mtk_ddp_comp_layer_check(comp, local_layer, state); +} + static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -518,14 +565,65 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) mtk_drm_finish_page_flip(mtk_crtc); } +static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc, + int comp_idx) +{ + struct mtk_ddp_comp *comp; + + if (comp_idx > 1) + return 0; + + comp = mtk_crtc->ddp_comp[comp_idx]; + if (!comp->funcs) + return 0; + + if (comp_idx == 1 && !comp->funcs->bgclr_in_on) + return 0; + + return mtk_ddp_comp_layer_nr(comp); +} + +static inline +enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx) +{ + if (plane_idx == 0) + return DRM_PLANE_TYPE_PRIMARY; + else if (plane_idx == 1) + return DRM_PLANE_TYPE_CURSOR; + else + return DRM_PLANE_TYPE_OVERLAY; + +} + +static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev, + struct mtk_drm_crtc *mtk_crtc, + int comp_idx, int pipe) +{ + int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx]; + int i, ret; + + for (i = 0; i < num_planes; i++) { + ret = mtk_plane_init(drm_dev, + &mtk_crtc->planes[mtk_crtc->layer_nr], + BIT(pipe), + mtk_drm_crtc_plane_type(mtk_crtc->layer_nr), + mtk_ddp_comp_supported_rotations(comp)); + if (ret) + return ret; + + mtk_crtc->layer_nr++; + } + return 0; +} + int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len) { struct mtk_drm_private *priv = drm_dev->dev_private; struct device *dev = drm_dev->dev; struct mtk_drm_crtc *mtk_crtc; - enum drm_plane_type type; - unsigned int zpos; + unsigned int num_comp_planes = 0; int pipe = priv->num_pipes; int ret; int i; @@ -581,17 +679,15 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp[i] = comp; } - mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); - mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr, - sizeof(struct drm_plane), - GFP_KERNEL); - - for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { - type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : - (zpos == 1) ? 
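
The allocation in mtk_drm_crtc_create above sums layer counts over (at most) the first two components, and mtk_drm_crtc_plane_type fixes the type purely by global index: 0 is primary, 1 cursor, everything else overlay. A small sketch assuming a 4-layer OVL followed by a 2-layer OVL_2L:

#include <stdio.h>

static const char *plane_type(unsigned int idx)
{
	if (idx == 0)
		return "primary";
	if (idx == 1)
		return "cursor";
	return "overlay";
}

int main(void)
{
	unsigned int layers[] = { 4, 2 };	/* hypothetical layer counts */
	unsigned int i, total = layers[0] + layers[1];

	for (i = 0; i < total; i++)
		printf("plane %u -> %s\n", i, plane_type(i));
	return 0;	/* primary, cursor, then four overlays */
}
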
DRM_PLANE_TYPE_CURSOR : - DRM_PLANE_TYPE_OVERLAY; - ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos], - BIT(pipe), type); + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i); + + mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, + sizeof(struct drm_plane), GFP_KERNEL); + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i, + pipe); if (ret) return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index fcc134eb00c9..6afe1c19557a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -19,5 +19,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len); +int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state); #endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 8106a71a7404..13035c906035 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -33,12 +33,15 @@ #define DISP_REG_CONFIG_DSI_SEL 0x050 #define DISP_REG_CONFIG_DPI_SEL 0x064 -#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) -#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) -#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) -#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n)) -#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n)) -#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) +#define MT2701_DISP_MUTEX0_MOD0 0x2c +#define MT2701_DISP_MUTEX0_SOF0 0x30 + +#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) +#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) +#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) +#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n)) +#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n)) +#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) #define INT_MUTEX BIT(1) @@ -139,12 +142,30 @@ struct mtk_disp_mutex { bool claimed; }; +enum mtk_ddp_mutex_sof_id { + DDP_MUTEX_SOF_SINGLE_MODE, + DDP_MUTEX_SOF_DSI0, + DDP_MUTEX_SOF_DSI1, + DDP_MUTEX_SOF_DPI0, + DDP_MUTEX_SOF_DPI1, + DDP_MUTEX_SOF_DSI2, + DDP_MUTEX_SOF_DSI3, +}; + +struct mtk_ddp_data { + const unsigned int *mutex_mod; + const unsigned int *mutex_sof; + const unsigned int mutex_mod_reg; + const unsigned int mutex_sof_reg; + const bool no_clk; +}; + struct mtk_ddp { struct device *dev; struct clk *clk; void __iomem *regs; struct mtk_disp_mutex mutex[10]; - const unsigned int *mutex_mod; + const struct mtk_ddp_data *data; }; static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1, }; +static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = { + [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, + [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, + [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1, + [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0, + [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1, + [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2, + [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3, +}; + +static const struct mtk_ddp_data mt2701_ddp_driver_data = { + .mutex_mod = mt2701_mutex_mod, + .mutex_sof = mt2712_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, 
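
The reworked DISP_REG_MUTEX_MOD/_SOF macros above take a per-SoC base register while keeping the 0x20-byte per-mutex stride. A quick sketch of the resulting offsets with the MT2701 bases from the diff:

#include <stdio.h>

#define MT2701_DISP_MUTEX0_MOD0		0x2c
#define MT2701_DISP_MUTEX0_SOF0		0x30
#define DISP_REG_MUTEX_MOD(base, n)	((base) + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(base, n)	((base) + 0x20 * (n))

int main(void)
{
	unsigned int n;

	for (n = 0; n < 3; n++)
		printf("mutex %u: MOD @ 0x%03x, SOF @ 0x%03x\n", n,
		       DISP_REG_MUTEX_MOD(MT2701_DISP_MUTEX0_MOD0, n),
		       DISP_REG_MUTEX_SOF(MT2701_DISP_MUTEX0_SOF0, n));
	return 0;	/* 0x02c/0x030, 0x04c/0x050, 0x06c/0x070 */
}
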
+}; + +static const struct mtk_ddp_data mt2712_ddp_driver_data = { + .mutex_mod = mt2712_mutex_mod, + .mutex_sof = mt2712_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, +}; + +static const struct mtk_ddp_data mt8173_ddp_driver_data = { + .mutex_mod = mt8173_mutex_mod, + .mutex_sof = mt2712_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, +}; + static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, enum mtk_ddp_comp_id next, unsigned int *addr) @@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); unsigned int reg; + unsigned int sof_id; unsigned int offset; WARN_ON(&ddp->mutex[mutex->id] != mutex); switch (id) { case DDP_COMPONENT_DSI0: - reg = MUTEX_SOF_DSI0; + sof_id = DDP_MUTEX_SOF_DSI0; break; case DDP_COMPONENT_DSI1: - reg = MUTEX_SOF_DSI0; + sof_id = DDP_MUTEX_SOF_DSI0; break; case DDP_COMPONENT_DSI2: - reg = MUTEX_SOF_DSI2; + sof_id = DDP_MUTEX_SOF_DSI2; break; case DDP_COMPONENT_DSI3: - reg = MUTEX_SOF_DSI3; + sof_id = DDP_MUTEX_SOF_DSI3; break; case DDP_COMPONENT_DPI0: - reg = MUTEX_SOF_DPI0; + sof_id = DDP_MUTEX_SOF_DPI0; break; case DDP_COMPONENT_DPI1: - reg = MUTEX_SOF_DPI1; + sof_id = DDP_MUTEX_SOF_DPI1; break; default: - if (ddp->mutex_mod[id] < 32) { - offset = DISP_REG_MUTEX_MOD(mutex->id); + if (ddp->data->mutex_mod[id] < 32) { + offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, + mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg |= 1 << ddp->mutex_mod[id]; + reg |= 1 << ddp->data->mutex_mod[id]; writel_relaxed(reg, ddp->regs + offset); } else { offset = DISP_REG_MUTEX_MOD2(mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg |= 1 << (ddp->mutex_mod[id] - 32); + reg |= 1 << (ddp->data->mutex_mod[id] - 32); writel_relaxed(reg, ddp->regs + offset); } return; } - writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id)); + writel_relaxed(ddp->data->mutex_sof[sof_id], + ddp->regs + + DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id)); } void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, @@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, case DDP_COMPONENT_DPI0: case DDP_COMPONENT_DPI1: writel_relaxed(MUTEX_SOF_SINGLE_MODE, - ddp->regs + DISP_REG_MUTEX_SOF(mutex->id)); + ddp->regs + + DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, + mutex->id)); break; default: - if (ddp->mutex_mod[id] < 32) { - offset = DISP_REG_MUTEX_MOD(mutex->id); + if (ddp->data->mutex_mod[id] < 32) { + offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, + mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg &= ~(1 << ddp->mutex_mod[id]); + reg &= ~(1 << ddp->data->mutex_mod[id]); writel_relaxed(reg, ddp->regs + offset); } else { offset = DISP_REG_MUTEX_MOD2(mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg &= ~(1 << (ddp->mutex_mod[id] - 32)); + reg &= ~(1 << (ddp->data->mutex_mod[id] - 32)); writel_relaxed(reg, ddp->regs + offset); } break; @@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev) for (i = 0; i < 10; i++) ddp->mutex[i].id = i; - ddp->clk = devm_clk_get(dev, NULL); - if (IS_ERR(ddp->clk)) { - dev_err(dev, "Failed to get clock\n"); - return PTR_ERR(ddp->clk); + ddp->data = of_device_get_match_data(dev); + + if (!ddp->data->no_clk) { + ddp->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ddp->clk)) { + dev_err(dev, "Failed to get clock\n"); + return PTR_ERR(ddp->clk); + } } regs 
= platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev) return PTR_ERR(ddp->regs); } - ddp->mutex_mod = of_device_get_match_data(dev); - platform_set_drvdata(pdev, ddp); return 0; @@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev) } static const struct of_device_id ddp_driver_dt_match[] = { - { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod}, - { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod}, - { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod}, + { .compatible = "mediatek,mt2701-disp-mutex", + .data = &mt2701_ddp_driver_data}, + { .compatible = "mediatek,mt2712-disp-mutex", + .data = &mt2712_ddp_driver_data}, + { .compatible = "mediatek,mt8173-disp-mutex", + .data = &mt8173_ddp_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, ddp_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index efa85973e46b..7f21307cda75 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -33,6 +33,18 @@ #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 +#define DISP_CCORR_EN 0x0000 +#define CCORR_EN BIT(0) +#define DISP_CCORR_CFG 0x0020 +#define CCORR_RELAY_MODE BIT(0) +#define DISP_CCORR_SIZE 0x0030 + +#define DISP_DITHER_EN 0x0000 +#define DITHER_EN BIT(0) +#define DISP_DITHER_CFG 0x0020 +#define DITHER_RELAY_MODE BIT(0) +#define DISP_DITHER_SIZE 0x0030 + #define DISP_GAMMA_EN 0x0000 #define DISP_GAMMA_CFG 0x0020 #define DISP_GAMMA_SIZE 0x0030 @@ -123,6 +135,42 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp) writel_relaxed(0x0, comp->regs + DISP_AAL_EN); } +static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE); + writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG); +} + +static void mtk_ccorr_start(struct mtk_ddp_comp *comp) +{ + writel(CCORR_EN, comp->regs + DISP_CCORR_EN); +} + +static void mtk_ccorr_stop(struct mtk_ddp_comp *comp) +{ + writel_relaxed(0x0, comp->regs + DISP_CCORR_EN); +} + +static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE); + writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG); +} + +static void mtk_dither_start(struct mtk_ddp_comp *comp) +{ + writel(DITHER_EN, comp->regs + DISP_DITHER_EN); +} + +static void mtk_dither_stop(struct mtk_ddp_comp *comp) +{ + writel_relaxed(0x0, comp->regs + DISP_DITHER_EN); +} + static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc) @@ -171,6 +219,18 @@ static const struct mtk_ddp_comp_funcs ddp_aal = { .stop = mtk_aal_stop, }; +static const struct mtk_ddp_comp_funcs ddp_ccorr = { + .config = mtk_ccorr_config, + .start = mtk_ccorr_start, + .stop = mtk_ccorr_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_dither = { + .config = mtk_dither_config, + .start = mtk_dither_start, + .stop = mtk_dither_stop, +}; + static const struct mtk_ddp_comp_funcs ddp_gamma = { .gamma_set = mtk_gamma_set, .config = mtk_gamma_config, @@ -189,11 +249,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { [MTK_DISP_OVL] = "ovl", + [MTK_DISP_OVL_2L] = "ovl_2l", 
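
CCORR and DITHER are brought up below as pass-through stages: config packs the active size as height << 16 | width and selects relay mode, while start/stop toggle a single enable bit. A trivial sketch of the size packing, with illustrative dimensions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t w = 1920, h = 1080;	/* hypothetical mode */
	uint32_t size = h << 16 | w;	/* DISP_CCORR_SIZE / DISP_DITHER_SIZE */

	printf("SIZE = 0x%08x (h=%u, w=%u)\n", size,
	       size >> 16, size & 0xffff);	/* 0x04380780 */
	return 0;
}
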
[MTK_DISP_RDMA] = "rdma", [MTK_DISP_WDMA] = "wdma", [MTK_DISP_COLOR] = "color", + [MTK_DISP_CCORR] = "ccorr", [MTK_DISP_AAL] = "aal", [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_DITHER] = "dither", [MTK_DISP_UFOE] = "ufoe", [MTK_DSI] = "dsi", [MTK_DPI] = "dpi", @@ -213,8 +276,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL }, [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL }, + [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL }, [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL }, @@ -226,6 +291,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL }, [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL }, [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 0ad287f427cc..2f1e9e75b8da 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -17,9 +17,12 @@ struct drm_crtc_state; enum mtk_ddp_comp_type { MTK_DISP_OVL, + MTK_DISP_OVL_2L, MTK_DISP_RDMA, MTK_DISP_WDMA, MTK_DISP_COLOR, + MTK_DISP_CCORR, + MTK_DISP_DITHER, MTK_DISP_AAL, MTK_DISP_GAMMA, MTK_DISP_UFOE, @@ -36,8 +39,10 @@ enum mtk_ddp_comp_id { DDP_COMPONENT_AAL0, DDP_COMPONENT_AAL1, DDP_COMPONENT_BLS, + DDP_COMPONENT_CCORR, DDP_COMPONENT_COLOR0, DDP_COMPONENT_COLOR1, + DDP_COMPONENT_DITHER, DDP_COMPONENT_DPI0, DDP_COMPONENT_DPI1, DDP_COMPONENT_DSI0, @@ -48,6 +53,8 @@ enum mtk_ddp_comp_id { DDP_COMPONENT_OD0, DDP_COMPONENT_OD1, DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_OVL1, DDP_COMPONENT_PWM0, DDP_COMPONENT_PWM1, @@ -70,13 +77,19 @@ struct mtk_ddp_comp_funcs { void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); + unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); + int (*layer_check)(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); void (*gamma_set)(struct mtk_ddp_comp *comp, struct drm_crtc_state *state); + void (*bgclr_in_on)(struct mtk_ddp_comp *comp); + void (*bgclr_in_off)(struct mtk_ddp_comp *comp); }; struct mtk_ddp_comp { @@ -121,6 +134,15 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) comp->funcs->disable_vblank(comp); } +static inline +unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->supported_rotations) + return comp->funcs->supported_rotations(comp); + + return 0; +} + static 
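
The inline wrappers added to mtk_drm_ddp_comp.h here all follow one pattern: every op is optional, and callers fall back to a harmless default when a component does not implement it. A minimal sketch of that dispatch, with hypothetical names:

#include <stdio.h>
#include <stddef.h>

struct comp;

struct comp_funcs {
	unsigned int (*layer_nr)(struct comp *c);	/* optional op */
};

struct comp {
	const struct comp_funcs *funcs;
};

static unsigned int comp_layer_nr(struct comp *c)
{
	if (c->funcs && c->funcs->layer_nr)
		return c->funcs->layer_nr(c);
	return 0;	/* safe default: contributes no planes */
}

static unsigned int ovl_layer_nr(struct comp *c)
{
	(void)c;
	return 4;
}

static const struct comp_funcs ovl_funcs = { .layer_nr = ovl_layer_nr };

int main(void)
{
	struct comp ovl = { .funcs = &ovl_funcs };
	struct comp color = { .funcs = NULL };	/* implements nothing */

	printf("%u %u\n", comp_layer_nr(&ovl), comp_layer_nr(&color));
	return 0;	/* prints 4 0 */
}
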
inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) { if (comp->funcs && comp->funcs->layer_nr) @@ -143,6 +165,15 @@ static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, comp->funcs->layer_off(comp, idx); } +static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state) +{ + if (comp->funcs && comp->funcs->layer_check) + return comp->funcs->layer_check(comp, idx, state); + return 0; +} + static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) @@ -158,6 +189,18 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, comp->funcs->gamma_set(comp, state); } +static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_on) + comp->funcs->bgclr_in_on(comp); +} + +static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_off) + comp->funcs->bgclr_in_off(comp); +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 352b81a7a670..84d14213d992 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -547,6 +547,7 @@ static int mtk_drm_probe(struct platform_device *pdev) */ if (comp_type == MTK_DISP_COLOR || comp_type == MTK_DISP_OVL || + comp_type == MTK_DISP_OVL_2L || comp_type == MTK_DISP_RDMA || comp_type == MTK_DSI || comp_type == MTK_DPI) { @@ -669,8 +670,8 @@ static struct platform_driver * const mtk_drm_drivers[] = { &mtk_disp_rdma_driver, &mtk_dpi_driver, &mtk_drm_platform_driver, - &mtk_dsi_driver, &mtk_mipi_tx_driver, + &mtk_dsi_driver, }; static int __init mtk_drm_init(void) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index ca672f1d140d..b04a3c2b111e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) pgprot_writecombine(PAGE_KERNEL)); out: - kfree((void *)sgt); + kfree(sgt); return mtk_gem->kvaddr; } @@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) vunmap(vaddr); mtk_gem->kvaddr = 0; - kfree((void *)mtk_gem->pages); + kfree(mtk_gem->pages); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 584a9ecadce6..3b0cc91c7023 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -20,6 +20,12 @@ static const u32 formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, @@ -84,6 +90,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, { struct drm_framebuffer *fb = state->fb; struct drm_crtc_state *crtc_state; + int ret; if (!fb) return 0; @@ -91,6 +98,11 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, if (!state->crtc) return 0; + ret = mtk_drm_crtc_plane_check(state->crtc, plane, + to_mtk_plane_state(state)); + if (ret) + return ret; + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ 
-132,6 +144,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.y = plane->state->dst.y1; state->pending.width = drm_rect_width(&plane->state->dst); state->pending.height = drm_rect_height(&plane->state->dst); + state->pending.rotation = plane->state->rotation; wmb(); /* Make sure the above parameters are set before update */ state->pending.dirty = true; } @@ -154,7 +167,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { }; int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type) + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations) { int err; @@ -166,6 +180,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, return err; } + if (supported_rotations & ~DRM_MODE_ROTATE_0) { + err = drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, + supported_rotations); + if (err) + DRM_INFO("Create rotation property failed\n"); + } + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 6f842df722c7..760885e35b27 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -20,6 +20,7 @@ struct mtk_plane_pending_state { unsigned int y; unsigned int width; unsigned int height; + unsigned int rotation; bool dirty; }; @@ -35,6 +36,7 @@ to_mtk_plane_state(struct drm_plane_state *state) } int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type); + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations); #endif diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index a413f5ff442d..e9931bbbe846 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -40,6 +40,7 @@ #define DSI_CON_CTRL 0x10 #define DSI_RESET BIT(0) #define DSI_EN BIT(1) +#define DPHY_RESET BIT(2) #define DSI_MODE_CTRL 0x14 #define MODE (3) @@ -73,6 +74,7 @@ #define DSI_VBP_NL 0x24 #define DSI_VFP_NL 0x28 #define DSI_VACT_NL 0x2C +#define DSI_SIZE_CON 0x38 #define DSI_HSA_WC 0x50 #define DSI_HBP_WC 0x54 #define DSI_HFP_WC 0x58 @@ -126,7 +128,10 @@ #define VM_CMD_EN BIT(0) #define TS_VFP_EN BIT(5) -#define DSI_CMDQ0 0x180 +#define DSI_SHADOW_DEBUG 0x190U +#define FORCE_COMMIT BIT(0) +#define BYPASS_SHADOW BIT(1) + #define CONFIG (0xff << 0) #define SHORT_PACKET 0 #define LONG_PACKET 2 @@ -135,12 +140,6 @@ #define DATA_0 (0xff << 16) #define DATA_1 (0xff << 24) -#define T_LPX 5 -#define T_HS_PREP 6 -#define T_HS_TRAIL 8 -#define T_HS_EXIT 7 -#define T_HS_ZERO 10 - #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 
1 : 0)) #define MTK_DSI_HOST_IS_READ(type) \ @@ -149,8 +148,33 @@ (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \ (type == MIPI_DSI_DCS_READ)) +struct mtk_phy_timing { + u32 lpx; + u32 da_hs_prepare; + u32 da_hs_zero; + u32 da_hs_trail; + + u32 ta_go; + u32 ta_sure; + u32 ta_get; + u32 da_hs_exit; + + u32 clk_hs_zero; + u32 clk_hs_trail; + + u32 clk_hs_prepare; + u32 clk_hs_post; + u32 clk_hs_exit; +}; + struct phy; +struct mtk_dsi_driver_data { + const u32 reg_cmdq_off; + bool has_shadow_ctl; + bool has_size_ctl; +}; + struct mtk_dsi { struct mtk_ddp_comp ddp_comp; struct device *dev; @@ -173,10 +197,12 @@ struct mtk_dsi { enum mipi_dsi_pixel_format format; unsigned int lanes; struct videomode vm; + struct mtk_phy_timing phy_timing; int refcount; bool enabled; u32 irq_data; wait_queue_head_t irq_wait_queue; + const struct mtk_dsi_driver_data *driver_data; }; static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e) @@ -205,17 +231,36 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; u32 ui, cycle_time; + struct mtk_phy_timing *timing = &dsi->phy_timing; + + ui = DIV_ROUND_UP(1000000000, dsi->data_rate); + cycle_time = div_u64(8000000000ULL, dsi->data_rate); + + timing->lpx = NS_TO_CYCLE(60, cycle_time); + timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time); + timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time); + timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time); + + timing->ta_go = 4 * timing->lpx; + timing->ta_sure = 3 * timing->lpx / 2; + timing->ta_get = 5 * timing->lpx; + timing->da_hs_exit = 2 * timing->lpx; - ui = 1000 / dsi->data_rate + 0x01; - cycle_time = 8000 / dsi->data_rate + 0x01; + timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time); + timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10; - timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24; - timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 | - T_HS_EXIT << 24; - timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | - (NS_TO_CYCLE(0x150, cycle_time) << 16); - timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 | - NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8; + timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time); + timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time); + timing->clk_hs_exit = 2 * timing->lpx; + + timcon0 = timing->lpx | timing->da_hs_prepare << 8 | + timing->da_hs_zero << 16 | timing->da_hs_trail << 24; + timcon1 = timing->ta_go | timing->ta_sure << 8 | + timing->ta_get << 16 | timing->da_hs_exit << 24; + timcon2 = 1 << 8 | timing->clk_hs_zero << 16 | + timing->clk_hs_trail << 24; + timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 | + timing->clk_hs_exit << 16; writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); @@ -239,6 +284,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi) mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0); } +static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET); + mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0); +} + static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); @@ -402,7 +453,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) u32 horizontal_sync_active_byte; u32 horizontal_backporch_byte; u32 horizontal_frontporch_byte; - u32 dsi_tmp_buf_bpp; + u32 dsi_tmp_buf_bpp, data_phy_cycles; + struct mtk_phy_timing *timing = &dsi->phy_timing; struct videomode *vm 
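
mtk_dsi_phy_timconfig above derives every D-PHY interval from the data rate instead of the removed fixed T_* constants: ui is the bit period in ns, cycle_time the byte-clock period, and NS_TO_CYCLE a ceiling division. A standalone sketch of the data-lane fields and the TIMECON0 packing for a hypothetical 500 Mbps link:

#include <stdio.h>
#include <stdint.h>

#define NS_TO_CYCLE(n, c)	((n) / (c) + (((n) % (c)) ? 1 : 0))

int main(void)
{
	uint64_t data_rate = 500000000;		/* hypothetical, in Hz */
	uint32_t ui = (1000000000 + data_rate - 1) / data_rate;	/* 2 ns */
	uint32_t cycle_time = 8000000000ULL / data_rate;	/* 16 ns */

	uint32_t lpx = NS_TO_CYCLE(60, cycle_time);
	uint32_t da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
	uint32_t da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
	uint32_t da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);

	/* TIMECON0 packs the four fields one byte each, as in the diff */
	uint32_t timcon0 = lpx | da_hs_prepare << 8 |
			   da_hs_zero << 16 | da_hs_trail << 24;

	printf("lpx=%u prepare=%u zero=%u trail=%u\n",
	       lpx, da_hs_prepare, da_hs_zero, da_hs_trail);
	printf("TIMECON0 = 0x%08x\n", timcon0);	/* 0x06080404 */
	return 0;
}
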
= &dsi->vm; @@ -416,6 +468,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL); writel(vm->vactive, dsi->regs + DSI_VACT_NL); + if (dsi->driver_data->has_size_ctl) + writel(vm->vactive << 16 | vm->hactive, + dsi->regs + DSI_SIZE_CON); + horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) @@ -425,7 +481,34 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) * dsi_tmp_buf_bpp - 10); - horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12); + data_phy_cycles = timing->lpx + timing->da_hs_prepare + + timing->da_hs_zero + timing->da_hs_exit + 2; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + if (vm->hfront_porch * dsi_tmp_buf_bpp > + data_phy_cycles * dsi->lanes + 18) { + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp - + data_phy_cycles * + dsi->lanes - 18; + } else { + DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp; + } + } else { + if (vm->hfront_porch * dsi_tmp_buf_bpp > + data_phy_cycles * dsi->lanes + 12) { + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp - + data_phy_cycles * + dsi->lanes - 12; + } else { + DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp; + } + } writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); @@ -523,10 +606,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t) static int mtk_dsi_poweron(struct mtk_dsi *dsi) { - struct device *dev = dsi->dev; + struct device *dev = dsi->host.dev; int ret; - u64 pixel_clock, total_bits; - u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; + u32 bit_per_pixel; if (++dsi->refcount != 1) return 0; @@ -545,24 +627,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) break; } - /** - * htotal_time = htotal * byte_per_pixel / num_lanes - * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit - * mipi_ratio = (htotal_time + overhead_time) / htotal_time - * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; - */ - pixel_clock = dsi->vm.pixelclock; - htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + - dsi->vm.hsync_len; - htotal_bits = htotal * bit_per_pixel; - - overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + - T_HS_EXIT; - overhead_bits = overhead_cycles * dsi->lanes * 8; - total_bits = htotal_bits + overhead_bits; - - dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, - htotal * dsi->lanes); + dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel, + dsi->lanes); ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); if (ret < 0) { @@ -585,10 +651,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) } mtk_dsi_enable(dsi); + + if (dsi->driver_data->has_shadow_ctl) + writel(FORCE_COMMIT | BYPASS_SHADOW, + dsi->regs + DSI_SHADOW_DEBUG); + mtk_dsi_reset_engine(dsi); mtk_dsi_phy_timconfig(dsi); mtk_dsi_rxtx_control(dsi); + usleep_range(30, 100); + mtk_dsi_reset_dphy(dsi); mtk_dsi_ps_control_vact(dsi); mtk_dsi_set_vm_cmd(dsi); mtk_dsi_config_vdo_timing(dsi); @@ -939,6 +1012,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg) const char *tx_buf = msg->tx_buf; u8 config, cmdq_size, cmdq_off, type = 
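
The reworked front-porch code above budgets data_phy_cycles * lanes bytes of the porch (plus a constant 18 in burst mode, 12 otherwise) for D-PHY overhead, and only keeps the full porch, with the FPS warning, when that would underflow. A worked sketch of the non-burst branch with hypothetical numbers (phy cycles taken from the 500 Mbps sketch above):

#include <stdio.h>

int main(void)
{
	unsigned int hfp = 40, bpp = 3, lanes = 4;	/* hypothetical */
	/* lpx + da_hs_prepare + da_hs_zero + da_hs_exit + 2 */
	unsigned int data_phy_cycles = 4 + 4 + 8 + 8 + 2;
	unsigned int hfp_bytes;

	if (hfp * bpp > data_phy_cycles * lanes + 12)
		hfp_bytes = hfp * bpp - data_phy_cycles * lanes - 12;
	else
		hfp_bytes = hfp * bpp;	/* too tight: keep porch, FPS drops */

	printf("DSI_HFP_WC = %u\n", hfp_bytes);	/* 120 - 104 - 12 = 4 */
	return 0;
}
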
msg->type; u32 reg_val, cmdq_mask, i; + u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off; if (MTK_DSI_HOST_IS_READ(type)) config = BTA; @@ -958,9 +1032,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg) } for (i = 0; i < msg->tx_len; i++) - writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i); + mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U), + (0xffUL << (((i + cmdq_off) & 3U) * 8U)), + tx_buf[i] << (((i + cmdq_off) & 3U) * 8U)); - mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val); + mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val); mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size); } @@ -1050,12 +1126,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) return ret; } - ret = mipi_dsi_host_register(&dsi->host); - if (ret < 0) { - dev_err(dev, "failed to register DSI host: %d\n", ret); - goto err_ddp_comp_unregister; - } - ret = mtk_dsi_create_conn_enc(drm, dsi); if (ret) { DRM_ERROR("Encoder create failed with %d\n", ret); @@ -1065,8 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) return 0; err_unregister: - mipi_dsi_host_unregister(&dsi->host); -err_ddp_comp_unregister: mtk_ddp_comp_unregister(drm, &dsi->ddp_comp); return ret; } @@ -1078,7 +1146,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master, struct mtk_dsi *dsi = dev_get_drvdata(dev); mtk_dsi_destroy_conn_enc(dsi); - mipi_dsi_host_unregister(&dsi->host); mtk_ddp_comp_unregister(drm, &dsi->ddp_comp); } @@ -1102,31 +1169,38 @@ static int mtk_dsi_probe(struct platform_device *pdev) dsi->host.ops = &mtk_dsi_ops; dsi->host.dev = dev; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) { + dev_err(dev, "failed to register DSI host: %d\n", ret); + return ret; + } ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &dsi->panel, &dsi->bridge); if (ret) - return ret; + goto err_unregister_host; + + dsi->driver_data = of_device_get_match_data(dev); dsi->engine_clk = devm_clk_get(dev, "engine"); if (IS_ERR(dsi->engine_clk)) { ret = PTR_ERR(dsi->engine_clk); dev_err(dev, "Failed to get engine clock: %d\n", ret); - return ret; + goto err_unregister_host; } dsi->digital_clk = devm_clk_get(dev, "digital"); if (IS_ERR(dsi->digital_clk)) { ret = PTR_ERR(dsi->digital_clk); dev_err(dev, "Failed to get digital clock: %d\n", ret); - return ret; + goto err_unregister_host; } dsi->hs_clk = devm_clk_get(dev, "hs"); if (IS_ERR(dsi->hs_clk)) { ret = PTR_ERR(dsi->hs_clk); dev_err(dev, "Failed to get hs clock: %d\n", ret); - return ret; + goto err_unregister_host; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1134,33 +1208,35 @@ static int mtk_dsi_probe(struct platform_device *pdev) if (IS_ERR(dsi->regs)) { ret = PTR_ERR(dsi->regs); dev_err(dev, "Failed to ioremap memory: %d\n", ret); - return ret; + goto err_unregister_host; } dsi->phy = devm_phy_get(dev, "dphy"); if (IS_ERR(dsi->phy)) { ret = PTR_ERR(dsi->phy); dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret); - return ret; + goto err_unregister_host; } comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI); if (comp_id < 0) { dev_err(dev, "Failed to identify by alias: %d\n", comp_id); - return comp_id; + ret = comp_id; + goto err_unregister_host; } ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id, &mtk_dsi_funcs); if (ret) { dev_err(dev, "Failed to initialize component: %d\n", ret); - return ret; + goto err_unregister_host; } irq_num = platform_get_irq(pdev, 0); if (irq_num < 0) { - dev_err(&pdev->dev, "failed to 
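
The new command-queue loop above packs each payload byte into a 32-bit register with read-modify-write instead of writeb(): byte i goes to word (reg_cmdq_off + cmdq_off + i) & ~3, at byte lane (i + cmdq_off) & 3. A sketch against an in-memory register file; offsets here are relative to reg_cmdq_off, and the two-byte cmdq_off is hypothetical:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[4];	/* stands in for dsi->regs + reg_cmdq_off */

static void dsi_mask(uint32_t off, uint32_t mask, uint32_t data)
{
	uint32_t tmp = regs[off / 4];			/* readl */

	regs[off / 4] = (tmp & ~mask) | (data & mask);	/* writel */
}

int main(void)
{
	const uint8_t tx_buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t cmdq_off = 2, i;	/* hypothetical header size */

	for (i = 0; i < sizeof(tx_buf); i++)
		dsi_mask((cmdq_off + i) & ~0x3u,
			 0xffu << (((i + cmdq_off) & 3) * 8),
			 (uint32_t)tx_buf[i] << (((i + cmdq_off) & 3) * 8));

	printf("CMDQ[0]=0x%08x CMDQ[1]=0x%08x\n", regs[0], regs[1]);
	return 0;	/* 0x22110000 and 0x00554433 */
}
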
request dsi irq resource\n"); - return -EPROBE_DEFER; + dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num); + ret = irq_num; + goto err_unregister_host; } irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW); @@ -1168,14 +1244,24 @@ static int mtk_dsi_probe(struct platform_device *pdev) IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi); if (ret) { dev_err(&pdev->dev, "failed to request mediatek dsi irq\n"); - return -EPROBE_DEFER; + goto err_unregister_host; } init_waitqueue_head(&dsi->irq_wait_queue); platform_set_drvdata(pdev, dsi); - return component_add(&pdev->dev, &mtk_dsi_component_ops); + ret = component_add(&pdev->dev, &mtk_dsi_component_ops); + if (ret) { + dev_err(&pdev->dev, "failed to add component: %d\n", ret); + goto err_unregister_host; + } + + return 0; + +err_unregister_host: + mipi_dsi_host_unregister(&dsi->host); + return ret; } static int mtk_dsi_remove(struct platform_device *pdev) @@ -1184,13 +1270,32 @@ static int mtk_dsi_remove(struct platform_device *pdev) mtk_output_dsi_disable(dsi); component_del(&pdev->dev, &mtk_dsi_component_ops); + mipi_dsi_host_unregister(&dsi->host); return 0; } +static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = { + .reg_cmdq_off = 0x200, +}; + +static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = { + .reg_cmdq_off = 0x180, +}; + +static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = { + .reg_cmdq_off = 0x200, + .has_shadow_ctl = true, + .has_size_ctl = true, +}; + static const struct of_device_id mtk_dsi_of_match[] = { - { .compatible = "mediatek,mt2701-dsi" }, - { .compatible = "mediatek,mt8173-dsi" }, + { .compatible = "mediatek,mt2701-dsi", + .data = &mt2701_dsi_driver_data }, + { .compatible = "mediatek,mt8173-dsi", + .data = &mt8173_dsi_driver_data }, + { .compatible = "mediatek,mt8183-dsi", + .data = &mt8183_dsi_driver_data }, { }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index 1842dc2caae9..e4d34484ecc8 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -3,292 +3,39 @@ * Copyright (c) 2015 MediaTek Inc. 
*/ -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/phy/phy.h> - -#define MIPITX_DSI_CON 0x00 -#define RG_DSI_LDOCORE_EN BIT(0) -#define RG_DSI_CKG_LDOOUT_EN BIT(1) -#define RG_DSI_BCLK_SEL (3 << 2) -#define RG_DSI_LD_IDX_SEL (7 << 4) -#define RG_DSI_PHYCLK_SEL (2 << 8) -#define RG_DSI_DSICLK_FREQ_SEL BIT(10) -#define RG_DSI_LPTX_CLMP_EN BIT(11) - -#define MIPITX_DSI_CLOCK_LANE 0x04 -#define MIPITX_DSI_DATA_LANE0 0x08 -#define MIPITX_DSI_DATA_LANE1 0x0c -#define MIPITX_DSI_DATA_LANE2 0x10 -#define MIPITX_DSI_DATA_LANE3 0x14 -#define RG_DSI_LNTx_LDOOUT_EN BIT(0) -#define RG_DSI_LNTx_CKLANE_EN BIT(1) -#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2) -#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3) -#define RG_DSI_LNTx_LPTX_IMINUS BIT(4) -#define RG_DSI_LNTx_LPCD_IPLUS BIT(5) -#define RG_DSI_LNTx_LPCD_IMINUS BIT(6) -#define RG_DSI_LNTx_RT_CODE (0xf << 8) - -#define MIPITX_DSI_TOP_CON 0x40 -#define RG_DSI_LNT_INTR_EN BIT(0) -#define RG_DSI_LNT_HS_BIAS_EN BIT(1) -#define RG_DSI_LNT_IMP_CAL_EN BIT(2) -#define RG_DSI_LNT_TESTMODE_EN BIT(3) -#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4) -#define RG_DSI_LNT_AIO_SEL (7 << 8) -#define RG_DSI_PAD_TIE_LOW_EN BIT(11) -#define RG_DSI_DEBUG_INPUT_EN BIT(12) -#define RG_DSI_PRESERVE (7 << 13) - -#define MIPITX_DSI_BG_CON 0x44 -#define RG_DSI_BG_CORE_EN BIT(0) -#define RG_DSI_BG_CKEN BIT(1) -#define RG_DSI_BG_DIV (0x3 << 2) -#define RG_DSI_BG_FAST_CHARGE BIT(4) -#define RG_DSI_VOUT_MSK (0x3ffff << 5) -#define RG_DSI_V12_SEL (7 << 5) -#define RG_DSI_V10_SEL (7 << 8) -#define RG_DSI_V072_SEL (7 << 11) -#define RG_DSI_V04_SEL (7 << 14) -#define RG_DSI_V032_SEL (7 << 17) -#define RG_DSI_V02_SEL (7 << 20) -#define RG_DSI_BG_R1_TRIM (0xf << 24) -#define RG_DSI_BG_R2_TRIM (0xf << 28) - -#define MIPITX_DSI_PLL_CON0 0x50 -#define RG_DSI_MPPLL_PLL_EN BIT(0) -#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1) -#define RG_DSI_MPPLL_PREDIV (3 << 1) -#define RG_DSI_MPPLL_TXDIV0 (3 << 3) -#define RG_DSI_MPPLL_TXDIV1 (3 << 5) -#define RG_DSI_MPPLL_POSDIV (7 << 7) -#define RG_DSI_MPPLL_MONVC_EN BIT(10) -#define RG_DSI_MPPLL_MONREF_EN BIT(11) -#define RG_DSI_MPPLL_VOD_EN BIT(12) - -#define MIPITX_DSI_PLL_CON1 0x54 -#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0) -#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1) -#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2) -#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16) - -#define MIPITX_DSI_PLL_CON2 0x58 - -#define MIPITX_DSI_PLL_TOP 0x64 -#define RG_DSI_MPPLL_PRESERVE (0xff << 8) - -#define MIPITX_DSI_PLL_PWR 0x68 -#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0) -#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1) -#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8) - -#define MIPITX_DSI_SW_CTRL 0x80 -#define SW_CTRL_EN BIT(0) - -#define MIPITX_DSI_SW_CTRL_CON0 0x84 -#define SW_LNTC_LPTX_PRE_OE BIT(0) -#define SW_LNTC_LPTX_OE BIT(1) -#define SW_LNTC_LPTX_P BIT(2) -#define SW_LNTC_LPTX_N BIT(3) -#define SW_LNTC_HSTX_PRE_OE BIT(4) -#define SW_LNTC_HSTX_OE BIT(5) -#define SW_LNTC_HSTX_ZEROCLK BIT(6) -#define SW_LNT0_LPTX_PRE_OE BIT(7) -#define SW_LNT0_LPTX_OE BIT(8) -#define SW_LNT0_LPTX_P BIT(9) -#define SW_LNT0_LPTX_N BIT(10) -#define SW_LNT0_HSTX_PRE_OE BIT(11) -#define SW_LNT0_HSTX_OE BIT(12) -#define SW_LNT0_LPRX_EN BIT(13) -#define SW_LNT1_LPTX_PRE_OE BIT(14) -#define SW_LNT1_LPTX_OE BIT(15) -#define SW_LNT1_LPTX_P BIT(16) -#define SW_LNT1_LPTX_N BIT(17) -#define SW_LNT1_HSTX_PRE_OE BIT(18) -#define SW_LNT1_HSTX_OE BIT(19) -#define SW_LNT2_LPTX_PRE_OE 
BIT(20) -#define SW_LNT2_LPTX_OE BIT(21) -#define SW_LNT2_LPTX_P BIT(22) -#define SW_LNT2_LPTX_N BIT(23) -#define SW_LNT2_HSTX_PRE_OE BIT(24) -#define SW_LNT2_HSTX_OE BIT(25) - -struct mtk_mipitx_data { - const u32 mppll_preserve; -}; - -struct mtk_mipi_tx { - struct device *dev; - void __iomem *regs; - u32 data_rate; - const struct mtk_mipitx_data *driver_data; - struct clk_hw pll_hw; - struct clk *pll; -}; +#include "mtk_mipi_tx.h" -static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw) +inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw) { return container_of(hw, struct mtk_mipi_tx, pll_hw); } -static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 bits) +void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, + u32 bits) { u32 temp = readl(mipi_tx->regs + offset); writel(temp & ~bits, mipi_tx->regs + offset); } -static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 bits) +void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, + u32 bits) { u32 temp = readl(mipi_tx->regs + offset); writel(temp | bits, mipi_tx->regs + offset); } -static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 mask, u32 data) +void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, + u32 mask, u32 data) { u32 temp = readl(mipi_tx->regs + offset); writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset); } -static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - u8 txdiv, txdiv0, txdiv1; - u64 pcw; - - dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate); - - if (mipi_tx->data_rate >= 500000000) { - txdiv = 1; - txdiv0 = 0; - txdiv1 = 0; - } else if (mipi_tx->data_rate >= 250000000) { - txdiv = 2; - txdiv0 = 1; - txdiv1 = 0; - } else if (mipi_tx->data_rate >= 125000000) { - txdiv = 4; - txdiv0 = 2; - txdiv1 = 0; - } else if (mipi_tx->data_rate > 62000000) { - txdiv = 8; - txdiv0 = 2; - txdiv1 = 1; - } else if (mipi_tx->data_rate >= 50000000) { - txdiv = 16; - txdiv0 = 2; - txdiv1 = 2; - } else { - return -EINVAL; - } - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON, - RG_DSI_VOUT_MSK | - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN, - (4 << 20) | (4 << 17) | (4 << 14) | - (4 << 11) | (4 << 8) | (4 << 5) | - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); - - usleep_range(30, 100); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN, - (8 << 4) | RG_DSI_LNT_HS_BIAS_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON, - RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, - RG_DSI_MPPLL_SDM_PWR_ON | - RG_DSI_MPPLL_SDM_ISO_EN, - RG_DSI_MPPLL_SDM_PWR_ON); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_PLL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 | - RG_DSI_MPPLL_PREDIV, - (txdiv0 << 3) | (txdiv1 << 5)); - - /* - * PLL PCW config - * PCW bit 24~30 = integer part of pcw - * PCW bit 0~23 = fractional part of pcw - * pcw = data_Rate*4*txdiv/(Ref_clk*2); - * Post DIV =4, so need data_Rate*4 - * Ref_clk is 26MHz - */ - pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, - 26000000); - writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1, - RG_DSI_MPPLL_SDM_FRA_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN); - - 
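
The bit helpers made non-static above are plain read-modify-write primitives over the MIPI TX registers; the split-out per-SoC files are built entirely on them. Their semantics, sketched against an ordinary variable:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg;	/* stand-in for a readl/writel MMIO register */

static void clear_bits(uint32_t bits)	{ reg &= ~bits; }
static void set_bits(uint32_t bits)	{ reg |= bits; }
static void update_bits(uint32_t mask, uint32_t data)
{
	reg = (reg & ~mask) | (data & mask);
}

int main(void)
{
	set_bits(0xff);			/* reg = 0x000000ff */
	clear_bits(0x0f);		/* reg = 0x000000f0 */
	update_bits(0xf0, 0x30);	/* reg = 0x00000030 */
	printf("reg = 0x%08x\n", reg);
	return 0;
}
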
usleep_range(20, 100); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1, - RG_DSI_MPPLL_SDM_SSC_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, - RG_DSI_MPPLL_PRESERVE, - mipi_tx->driver_data->mppll_preserve); - - return 0; -} - -static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - - dev_dbg(mipi_tx->dev, "unprepare\n"); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_PLL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, - RG_DSI_MPPLL_PRESERVE, 0); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, - RG_DSI_MPPLL_SDM_ISO_EN | - RG_DSI_MPPLL_SDM_PWR_ON, - RG_DSI_MPPLL_SDM_ISO_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_LNT_HS_BIAS_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON, - RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON, - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_DIV_MSK); -} - -static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - return clamp_val(rate, 50000000, 1250000000); -} - -static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); @@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); return mipi_tx->data_rate; } -static const struct clk_ops mtk_mipi_tx_pll_ops = { - .prepare = mtk_mipi_tx_pll_prepare, - .unprepare = mtk_mipi_tx_pll_unprepare, - .round_rate = mtk_mipi_tx_pll_round_rate, - .set_rate = mtk_mipi_tx_pll_set_rate, - .recalc_rate = mtk_mipi_tx_pll_recalc_rate, -}; - -static int mtk_mipi_tx_power_on_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - u32 reg; - - for (reg = MIPITX_DSI_CLOCK_LANE; - reg <= MIPITX_DSI_DATA_LANE3; reg += 4) - mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_PAD_TIE_LOW_EN); - - return 0; -} - static int mtk_mipi_tx_power_on(struct phy *phy) { struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); @@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy) return ret; /* Enable DSI Lane LDO outputs, disable pad tie low */ - mtk_mipi_tx_power_on_signal(phy); - + mipi_tx->driver_data->mipi_tx_enable_signal(phy); return 0; } -static void mtk_mipi_tx_power_off_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - u32 reg; - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_PAD_TIE_LOW_EN); - - for (reg = MIPITX_DSI_CLOCK_LANE; - reg <= MIPITX_DSI_DATA_LANE3; reg += 4) - mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); -} - static int mtk_mipi_tx_power_off(struct phy *phy) { struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); /* Enable pad tie low, disable DSI Lane LDO outputs */ - mtk_mipi_tx_power_off_signal(phy); + mipi_tx->driver_data->mipi_tx_disable_signal(phy); /* Disable PLL and power down core */ clk_disable_unprepare(mipi_tx->pll); @@ 
-383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mtk_mipi_tx *mipi_tx; struct resource *mem; - struct clk *ref_clk; const char *ref_clk_name; + struct clk *ref_clk; struct clk_init_data clk_init = { - .ops = &mtk_mipi_tx_pll_ops, .num_parents = 1, .parent_names = (const char * const *)&ref_clk_name, .flags = CLK_SET_RATE_GATE, @@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return -ENOMEM; mipi_tx->driver_data = of_device_get_match_data(dev); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); mipi_tx->regs = devm_ioremap_resource(dev, mem); if (IS_ERR(mipi_tx->regs)) { @@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) dev_err(dev, "Failed to get reference clock: %d\n", ret); return ret; } + ref_clk_name = __clk_get_name(ref_clk); ret = of_property_read_string(dev->of_node, "clock-output-names", @@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return ret; } + clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops; + mipi_tx->pll_hw.init = &clk_init; mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw); if (IS_ERR(mipi_tx->pll)) { @@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev) return 0; } -static const struct mtk_mipitx_data mt2701_mipitx_data = { - .mppll_preserve = (3 << 8) -}; - -static const struct mtk_mipitx_data mt8173_mipitx_data = { - .mppll_preserve = (0 << 8) -}; - static const struct of_device_id mtk_mipi_tx_match[] = { { .compatible = "mediatek,mt2701-mipi-tx", .data = &mt2701_mipitx_data }, { .compatible = "mediatek,mt8173-mipi-tx", .data = &mt8173_mipitx_data }, - {}, + { .compatible = "mediatek,mt8183-mipi-tx", + .data = &mt8183_mipitx_data }, + { }, }; struct platform_driver mtk_mipi_tx_driver = { @@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = { .of_match_table = mtk_mipi_tx_match, }, }; + diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h new file mode 100644 index 000000000000..413f35d86219 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Jitao Shi <jitao.shi@mediatek.com> + */ + +#ifndef _MTK_MIPI_TX_H +#define _MTK_MIPI_TX_H + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> + +struct mtk_mipitx_data { + const u32 mppll_preserve; + const struct clk_ops *mipi_tx_clk_ops; + void (*mipi_tx_enable_signal)(struct phy *phy); + void (*mipi_tx_disable_signal)(struct phy *phy); +}; + +struct mtk_mipi_tx { + struct device *dev; + void __iomem *regs; + u32 data_rate; + const struct mtk_mipitx_data *driver_data; + struct clk_hw pll_hw; + struct clk *pll; +}; + +struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw); +void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits); +void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits); +void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask, + u32 data); +int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); +unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate); + +extern const struct mtk_mipitx_data mt2701_mipitx_data; +extern const struct mtk_mipitx_data mt8173_mipitx_data; +extern const struct mtk_mipitx_data mt8183_mipitx_data; + +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c new file mode 100644 index 000000000000..f18db14d8b63 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: jitao.shi <jitao.shi@mediatek.com> + */ + +#include "mtk_mipi_tx.h" + +#define MIPITX_DSI_CON 0x00 +#define RG_DSI_LDOCORE_EN BIT(0) +#define RG_DSI_CKG_LDOOUT_EN BIT(1) +#define RG_DSI_BCLK_SEL (3 << 2) +#define RG_DSI_LD_IDX_SEL (7 << 4) +#define RG_DSI_PHYCLK_SEL (2 << 8) +#define RG_DSI_DSICLK_FREQ_SEL BIT(10) +#define RG_DSI_LPTX_CLMP_EN BIT(11) + +#define MIPITX_DSI_CLOCK_LANE 0x04 +#define MIPITX_DSI_DATA_LANE0 0x08 +#define MIPITX_DSI_DATA_LANE1 0x0c +#define MIPITX_DSI_DATA_LANE2 0x10 +#define MIPITX_DSI_DATA_LANE3 0x14 +#define RG_DSI_LNTx_LDOOUT_EN BIT(0) +#define RG_DSI_LNTx_CKLANE_EN BIT(1) +#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2) +#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3) +#define RG_DSI_LNTx_LPTX_IMINUS BIT(4) +#define RG_DSI_LNTx_LPCD_IPLUS BIT(5) +#define RG_DSI_LNTx_LPCD_IMINUS BIT(6) +#define RG_DSI_LNTx_RT_CODE (0xf << 8) + +#define MIPITX_DSI_TOP_CON 0x40 +#define RG_DSI_LNT_INTR_EN BIT(0) +#define RG_DSI_LNT_HS_BIAS_EN BIT(1) +#define RG_DSI_LNT_IMP_CAL_EN BIT(2) +#define RG_DSI_LNT_TESTMODE_EN BIT(3) +#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4) +#define RG_DSI_LNT_AIO_SEL (7 << 8) +#define RG_DSI_PAD_TIE_LOW_EN BIT(11) +#define RG_DSI_DEBUG_INPUT_EN BIT(12) +#define RG_DSI_PRESERVE (7 << 13) + +#define MIPITX_DSI_BG_CON 0x44 +#define RG_DSI_BG_CORE_EN BIT(0) +#define RG_DSI_BG_CKEN BIT(1) +#define RG_DSI_BG_DIV (0x3 << 2) +#define RG_DSI_BG_FAST_CHARGE BIT(4) +#define RG_DSI_VOUT_MSK (0x3ffff << 5) +#define RG_DSI_V12_SEL (7 << 5) +#define RG_DSI_V10_SEL (7 << 8) +#define RG_DSI_V072_SEL (7 << 11) +#define RG_DSI_V04_SEL (7 << 14) +#define RG_DSI_V032_SEL (7 << 17) +#define RG_DSI_V02_SEL (7 << 20) +#define RG_DSI_BG_R1_TRIM (0xf << 24) +#define RG_DSI_BG_R2_TRIM (0xf << 28) + +#define MIPITX_DSI_PLL_CON0 0x50 +#define RG_DSI_MPPLL_PLL_EN BIT(0) +#define 
RG_DSI_MPPLL_DIV_MSK (0x1ff << 1) +#define RG_DSI_MPPLL_PREDIV (3 << 1) +#define RG_DSI_MPPLL_TXDIV0 (3 << 3) +#define RG_DSI_MPPLL_TXDIV1 (3 << 5) +#define RG_DSI_MPPLL_POSDIV (7 << 7) +#define RG_DSI_MPPLL_MONVC_EN BIT(10) +#define RG_DSI_MPPLL_MONREF_EN BIT(11) +#define RG_DSI_MPPLL_VOD_EN BIT(12) + +#define MIPITX_DSI_PLL_CON1 0x54 +#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0) +#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1) +#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2) +#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16) + +#define MIPITX_DSI_PLL_CON2 0x58 + +#define MIPITX_DSI_PLL_TOP 0x64 +#define RG_DSI_MPPLL_PRESERVE (0xff << 8) + +#define MIPITX_DSI_PLL_PWR 0x68 +#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0) +#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1) +#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8) + +#define MIPITX_DSI_SW_CTRL 0x80 +#define SW_CTRL_EN BIT(0) + +#define MIPITX_DSI_SW_CTRL_CON0 0x84 +#define SW_LNTC_LPTX_PRE_OE BIT(0) +#define SW_LNTC_LPTX_OE BIT(1) +#define SW_LNTC_LPTX_P BIT(2) +#define SW_LNTC_LPTX_N BIT(3) +#define SW_LNTC_HSTX_PRE_OE BIT(4) +#define SW_LNTC_HSTX_OE BIT(5) +#define SW_LNTC_HSTX_ZEROCLK BIT(6) +#define SW_LNT0_LPTX_PRE_OE BIT(7) +#define SW_LNT0_LPTX_OE BIT(8) +#define SW_LNT0_LPTX_P BIT(9) +#define SW_LNT0_LPTX_N BIT(10) +#define SW_LNT0_HSTX_PRE_OE BIT(11) +#define SW_LNT0_HSTX_OE BIT(12) +#define SW_LNT0_LPRX_EN BIT(13) +#define SW_LNT1_LPTX_PRE_OE BIT(14) +#define SW_LNT1_LPTX_OE BIT(15) +#define SW_LNT1_LPTX_P BIT(16) +#define SW_LNT1_LPTX_N BIT(17) +#define SW_LNT1_HSTX_PRE_OE BIT(18) +#define SW_LNT1_HSTX_OE BIT(19) +#define SW_LNT2_LPTX_PRE_OE BIT(20) +#define SW_LNT2_LPTX_OE BIT(21) +#define SW_LNT2_LPTX_P BIT(22) +#define SW_LNT2_LPTX_N BIT(23) +#define SW_LNT2_HSTX_PRE_OE BIT(24) +#define SW_LNT2_HSTX_OE BIT(25) + +static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + u8 txdiv, txdiv0, txdiv1; + u64 pcw; + + dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate); + + if (mipi_tx->data_rate >= 500000000) { + txdiv = 1; + txdiv0 = 0; + txdiv1 = 0; + } else if (mipi_tx->data_rate >= 250000000) { + txdiv = 2; + txdiv0 = 1; + txdiv1 = 0; + } else if (mipi_tx->data_rate >= 125000000) { + txdiv = 4; + txdiv0 = 2; + txdiv1 = 0; + } else if (mipi_tx->data_rate > 62000000) { + txdiv = 8; + txdiv0 = 2; + txdiv1 = 1; + } else if (mipi_tx->data_rate >= 50000000) { + txdiv = 16; + txdiv0 = 2; + txdiv1 = 2; + } else { + return -EINVAL; + } + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON, + RG_DSI_VOUT_MSK | + RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN, + (4 << 20) | (4 << 17) | (4 << 14) | + (4 << 11) | (4 << 8) | (4 << 5) | + RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); + + usleep_range(30, 100); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN, + (8 << 4) | RG_DSI_LNT_HS_BIAS_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON, + RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, + RG_DSI_MPPLL_SDM_PWR_ON | + RG_DSI_MPPLL_SDM_ISO_EN, + RG_DSI_MPPLL_SDM_PWR_ON); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_PLL_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 | + RG_DSI_MPPLL_PREDIV, + (txdiv0 << 3) | (txdiv1 << 5)); + + /* + * PLL PCW config + * PCW bit 24~30 = integer part of pcw + * PCW bit 0~23 = fractional part of pcw + * pcw = data_Rate*4*txdiv/(Ref_clk*2); + * Post DIV =4, so need data_Rate*4 + * Ref_clk is 26MHz + */ + pcw = 
div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, + 26000000); + writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1, + RG_DSI_MPPLL_SDM_FRA_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN); + + usleep_range(20, 100); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1, + RG_DSI_MPPLL_SDM_SSC_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, + RG_DSI_MPPLL_PRESERVE, + mipi_tx->driver_data->mppll_preserve); + + return 0; +} + +static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + + dev_dbg(mipi_tx->dev, "unprepare\n"); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_PLL_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, + RG_DSI_MPPLL_PRESERVE, 0); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, + RG_DSI_MPPLL_SDM_ISO_EN | + RG_DSI_MPPLL_SDM_PWR_ON, + RG_DSI_MPPLL_SDM_ISO_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_LNT_HS_BIAS_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON, + RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON, + RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_DIV_MSK); +} + +static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clamp_val(rate, 50000000, 1250000000); +} + +static const struct clk_ops mtk_mipi_tx_pll_ops = { + .prepare = mtk_mipi_tx_pll_prepare, + .unprepare = mtk_mipi_tx_pll_unprepare, + .round_rate = mtk_mipi_tx_pll_round_rate, + .set_rate = mtk_mipi_tx_pll_set_rate, + .recalc_rate = mtk_mipi_tx_pll_recalc_rate, +}; + +static void mtk_mipi_tx_power_on_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + u32 reg; + + for (reg = MIPITX_DSI_CLOCK_LANE; + reg <= MIPITX_DSI_DATA_LANE3; reg += 4) + mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_PAD_TIE_LOW_EN); +} + +static void mtk_mipi_tx_power_off_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + u32 reg; + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_PAD_TIE_LOW_EN); + + for (reg = MIPITX_DSI_CLOCK_LANE; + reg <= MIPITX_DSI_DATA_LANE3; reg += 4) + mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); +} + +const struct mtk_mipitx_data mt2701_mipitx_data = { + .mppll_preserve = (3 << 8), + .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, + .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, + .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, +}; + +const struct mtk_mipitx_data mt8173_mipitx_data = { + .mppll_preserve = (0 << 8), + .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, + .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, + .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c new file mode 100644 index 000000000000..91f08a351fd0 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
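A note on the PCW arithmetic above: the value written to MIPITX_DSI_PLL_CON2 is a fixed-point number with the integer part in bits 24..30 and the fraction in bits 0..23, i.e. pcw = data_rate * 2 * txdiv / 26 MHz, scaled by 2^24 (the MT8183 variant in the new file below computes data_rate * txdiv without the extra factor of two). A standalone sketch of the math, with plain 64-bit division standing in for div_u64() and the 26 MHz reference assumed from the comment:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 400 MHz falls in the ">= 250 MHz" bucket, so txdiv = 2 */
		uint64_t data_rate = 400000000;
		uint64_t txdiv = 2;
		uint64_t pcw = ((data_rate * 2 * txdiv) << 24) / 26000000;

		printf("integer:  %llu\n",
		       (unsigned long long)(pcw >> 24));	/* 61 */
		printf("fraction: 0x%06llx\n",
		       (unsigned long long)(pcw & 0xffffff));
		return 0;
	}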
+ * Author: jitao.shi <jitao.shi@mediatek.com> + */ + +#include "mtk_mipi_tx.h" + +#define MIPITX_LANE_CON 0x000c +#define RG_DSI_CPHY_T1DRV_EN BIT(0) +#define RG_DSI_ANA_CK_SEL BIT(1) +#define RG_DSI_PHY_CK_SEL BIT(2) +#define RG_DSI_CPHY_EN BIT(3) +#define RG_DSI_PHYCK_INV_EN BIT(4) +#define RG_DSI_PWR04_EN BIT(5) +#define RG_DSI_BG_LPF_EN BIT(6) +#define RG_DSI_BG_CORE_EN BIT(7) +#define RG_DSI_PAD_TIEL_SEL BIT(8) + +#define MIPITX_PLL_PWR 0x0028 +#define MIPITX_PLL_CON0 0x002c +#define MIPITX_PLL_CON1 0x0030 +#define MIPITX_PLL_CON2 0x0034 +#define MIPITX_PLL_CON3 0x0038 +#define MIPITX_PLL_CON4 0x003c +#define RG_DSI_PLL_IBIAS (3 << 10) + +#define MIPITX_D2_SW_CTL_EN 0x0144 +#define MIPITX_D0_SW_CTL_EN 0x0244 +#define MIPITX_CK_CKMODE_EN 0x0328 +#define DSI_CK_CKMODE_EN BIT(0) +#define MIPITX_CK_SW_CTL_EN 0x0344 +#define MIPITX_D1_SW_CTL_EN 0x0444 +#define MIPITX_D3_SW_CTL_EN 0x0544 +#define DSI_SW_CTL_EN BIT(0) +#define AD_DSI_PLL_SDM_PWR_ON BIT(0) +#define AD_DSI_PLL_SDM_ISO_EN BIT(1) + +#define RG_DSI_PLL_EN BIT(4) +#define RG_DSI_PLL_POSDIV (0x7 << 8) + +static int mtk_mipi_tx_pll_enable(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + unsigned int txdiv, txdiv0; + u64 pcw; + + dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate); + + if (mipi_tx->data_rate >= 2000000000) { + txdiv = 1; + txdiv0 = 0; + } else if (mipi_tx->data_rate >= 1000000000) { + txdiv = 2; + txdiv0 = 1; + } else if (mipi_tx->data_rate >= 500000000) { + txdiv = 4; + txdiv0 = 2; + } else if (mipi_tx->data_rate > 250000000) { + txdiv = 8; + txdiv0 = 3; + } else if (mipi_tx->data_rate >= 125000000) { + txdiv = 16; + txdiv0 = 4; + } else { + return -EINVAL; + } + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); + udelay(1); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN); + pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000); + writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0); + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV, + txdiv0 << 8); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); + + return 0; +} + +static void mtk_mipi_tx_pll_disable(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON); +} + +static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clamp_val(rate, 50000000, 1600000000); +} + +static const struct clk_ops mtk_mipi_tx_pll_ops = { + .enable = mtk_mipi_tx_pll_enable, + .disable = mtk_mipi_tx_pll_disable, + .round_rate = mtk_mipi_tx_pll_round_rate, + .set_rate = mtk_mipi_tx_pll_set_rate, + .recalc_rate = mtk_mipi_tx_pll_recalc_rate, +}; + +static void mtk_mipi_tx_power_on_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + + /* BG_LPF_EN / BG_CORE_EN */ + writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, + mipi_tx->regs + MIPITX_LANE_CON); + usleep_range(30, 100); + writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN, + mipi_tx->regs + MIPITX_LANE_CON); + + /* Switch OFF each Lane */ + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, 
MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN); +} + +static void mtk_mipi_tx_power_off_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + + /* Switch ON each Lane */ + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN); + + writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, + mipi_tx->regs + MIPITX_LANE_CON); + writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON); +} + +const struct mtk_mipitx_data mt8183_mipitx_data = { + .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, + .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, + .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, +}; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 022286dc6ab2..3bb7ffe5fc39 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -977,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24; dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709; + if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || + dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") || + dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi")) + dw_plat_data->use_drm_infoframe = true; + platform_set_drvdata(pdev, meson_dw_hdmi); meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 4f9df3b93598..397f8b0a9af8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -58,10 +58,7 @@ static void mga_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } -static const struct file_operations mgag200_driver_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(mgag200_driver_fops); static struct drm_driver driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e686331fa089..691c1a277d91 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, cxdbg = ioremap(res->start, resource_size(res)); if (cxdbg) { - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT, A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM, A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0); + 
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); } a6xx_state->debugbus = state_kcalloc(a6xx_state, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 663ff9f4fac9..1e7b1be25bb0 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -26,6 +26,8 @@ #include "dsi_cfg.h" #include "msm_kms.h" +#define DSI_RESET_TOGGLE_DELAY_MS 20 + static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) { u32 ver; @@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host) wmb(); /* clocks need to be enabled before reset */ dsi_write(msm_host, REG_DSI_RESET, 1); - wmb(); /* make sure reset happen */ + msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */ dsi_write(msm_host, REG_DSI_RESET, 0); } @@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) /* dsi controller can only be reset while clocks are running */ dsi_write(msm_host, REG_DSI_RESET, 1); - wmb(); /* make sure reset happen */ + msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */ dsi_write(msm_host, REG_DSI_RESET, 0); wmb(); /* controller out of reset */ dsi_write(msm_host, REG_DSI_CTRL, data0); diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 7f3dd3ffe2c9..0d9657cc70db 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -89,7 +89,6 @@ struct edp_ctrl { /* edid raw data */ struct edid *edid; - struct drm_dp_link dp_link; struct drm_dp_aux *drm_aux; /* dpcd raw data */ @@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl) u32 prate; u32 lrate; u32 bpp; - u8 max_lane = ctrl->dp_link.num_lanes; + u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd); u8 lane; prate = ctrl->pixel_rate; @@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl) * By default, use the maximum link rate and minimum lane count, * so that we can do rate down shift during link training. 
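The eDP rework that follows drops the drm_dp_link helpers in favour of reading the receiver-capability DPCD bytes directly. The two fields involved are tiny; a minimal sketch of what the replacement accessors boil down to (offsets are the standard DPCD ones, mirroring drm_dp_max_lane_count()):

	/* DPCD receiver capability fields (start of address 0x000) */
	#define DP_MAX_LINK_RATE	0x001	/* bw code, units of 270 Mbps */
	#define DP_MAX_LANE_COUNT	0x002	/* lane count in bits 4:0 */

	static inline uint8_t dpcd_max_lane_count(const uint8_t dpcd[])
	{
		return dpcd[DP_MAX_LANE_COUNT] & 0x1f;
	}

	/* 0x06 -> 1.62 Gbps, 0x0a -> 2.7 Gbps, 0x14 -> 5.4 Gbps per lane */
	static inline unsigned int dpcd_max_link_rate_khz(const uint8_t dpcd[])
	{
		return dpcd[DP_MAX_LINK_RATE] * 27000;
	}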
*/ - ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE]; prate *= bpp; prate /= 8; /* in kByte */ @@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl) data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1); - if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (drm_dp_enhanced_frame_cap(ctrl->dpcd)) data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING; depth = EDP_6BIT; @@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl) rate = ctrl->link_rate; lane = ctrl->lane_cnt; - max_lane = ctrl->dp_link.num_lanes; + max_lane = drm_dp_max_lane_count(ctrl->dpcd); bpp = ctrl->color_depth * 3; prate = ctrl->pixel_rate; @@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl) static int edp_do_link_train(struct edp_ctrl *ctrl) { + u8 values[2]; int ret; - struct drm_dp_link dp_link; DBG(""); /* * Set the current link rate and lane cnt to panel. They may have been * adjusted and the values are different from them in DPCD CAP */ - dp_link.num_lanes = ctrl->lane_cnt; - dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate); - dp_link.capabilities = ctrl->dp_link.capabilities; - if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0) + values[0] = ctrl->lane_cnt; + values[1] = ctrl->link_rate; + + if (drm_dp_enhanced_frame_cap(ctrl->dpcd)) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values, + sizeof(values)) < 0) return EDP_TRAIN_FAIL; ctrl->v_level = 0; /* start from default level */ @@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work) { struct edp_ctrl *ctrl = container_of( work, struct edp_ctrl, on_work); + u8 value; int ret; mutex_lock(&ctrl->dev_mutex); @@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work) edp_ctrl_link_enable(ctrl, 1); edp_ctrl_irq_enable(ctrl, 1); - ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link); - if (ret) - goto fail; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) { + ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value); + if (ret < 0) + goto fail; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value); + if (ret < 0) + goto fail; + + /* + * According to the DP 1.1 specification, a "Sink Device must + * exit the power saving state within 1 ms" (Section 2.5.3.1, + * Table 5-52, "Sink Control Field" (register 0x600). 
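The power-up sequence here and the power-down path a little further on open-code the same read-modify-write of DPCD register 0x600. A hypothetical helper consolidating the two (not part of the patch; drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() return a negative value on failure):

	static int edp_sink_set_power(struct drm_dp_aux *aux, u8 state)
	{
		u8 value;
		int ret;

		ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
		if (ret < 0)
			return ret;

		value &= ~DP_SET_POWER_MASK;
		value |= state;	/* DP_SET_POWER_D0 or DP_SET_POWER_D3 */

		return drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
	}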
+ */ + usleep_range(1000, 2000); + } ctrl->power_on = true; @@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work) edp_state_ctrl(ctrl, 0); - drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link); + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) { + u8 value; + int ret; + + ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value); + if (ret > 0) { + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value); + } + } edp_ctrl_irq_enable(ctrl, 0); @@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, edp_ctrl_irq_enable(ctrl, 1); } - ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link); - if (ret) { - pr_err("%s: read dpcd cap failed, %d\n", __func__, ret); - goto disable_ret; - } - /* Initialize link rate as panel max link rate */ - ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE]; ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc); if (!ctrl->edid) { diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 6be879578140..1c74381a4fc9 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file) struct msm_gpu_show_priv *show_priv = m->private; struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_gpu *gpu = priv->gpu; - int ret; - - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex); - if (ret) - return ret; + mutex_lock(&show_priv->dev->struct_mutex); gpu->funcs->gpu_state_put(show_priv->state); mutex_unlock(&show_priv->dev->struct_mutex); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 12421567af89..b69ace8bf526 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -95,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb) reg = readl(mxsfb->base + LCDC_CTRL); - if (mxsfb->connector.display_info.num_bus_formats) - bus_format = mxsfb->connector.display_info.bus_formats[0]; + if (mxsfb->connector->display_info.num_bus_formats) + bus_format = mxsfb->connector->display_info.bus_formats[0]; + + DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n", + bus_format); reg &= ~CTRL_BUS_WIDTH_MASK; switch (bus_format) { @@ -204,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb) static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) { + struct drm_device *drm = mxsfb->pipe.crtc.dev; struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; - const u32 bus_flags = mxsfb->connector.display_info.bus_flags; + u32 bus_flags = mxsfb->connector->display_info.bus_flags; u32 vdctrl0, vsync_pulse_len, hsync_pulse_len; int err; @@ -229,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) clk_set_rate(mxsfb->clk, m->crtc_clock * 1000); + if (mxsfb->bridge && mxsfb->bridge->timings) + bus_flags = mxsfb->bridge->timings->input_bus_flags; + + DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n", + m->crtc_clock, + (int)(clk_get_rate(mxsfb->clk) / 1000)); + DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n", + bus_flags); + DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags); + writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) | TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay), 
mxsfb->base + mxsfb->devdata->transfer_count); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index e8506335cd15..497cf443a9af 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -101,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { + struct drm_connector *connector; struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); struct drm_device *drm = pipe->plane.dev; + if (!mxsfb->connector) { + list_for_each_entry(connector, + &drm->mode_config.connector_list, + head) + if (connector->encoder == &mxsfb->pipe.encoder) { + mxsfb->connector = connector; + break; + } + } + + if (!mxsfb->connector) { + dev_warn(drm->dev, "No connector attached, using default\n"); + mxsfb->connector = &mxsfb->panel_connector; + } + pm_runtime_get_sync(drm->dev); drm_panel_prepare(mxsfb->panel); mxsfb_crtc_enable(mxsfb); @@ -129,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) drm_crtc_send_vblank_event(crtc, event); } spin_unlock_irq(&drm->event_lock); + + if (mxsfb->connector != &mxsfb->panel_connector) + mxsfb->connector = NULL; } static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, @@ -226,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags) ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs, mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL, - &mxsfb->connector); + mxsfb->connector); if (ret < 0) { dev_err(drm->dev, "Cannot setup simple display pipe\n"); goto err_vblank; } - ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector); - if (ret) { - dev_err(drm->dev, "Cannot connect panel\n"); - goto err_vblank; + /* + * Attach panel only if there is one. + * If there is no panel attach, it must be a bridge. In this case, we + * need a reference to its connector for a proper initialization. + * We will do this check in pipe->enable(), since the connector won't + * be attached to an encoder until then. 
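For the bridge case mxsfb has no connector object of its own, so pipe->enable() above walks the mode_config connector list for the connector the bridge registered against this encoder. The lookup reduces to the following (a hypothetical helper with the same logic; the driver falls back to its built-in panel_connector when nothing is found):

	static struct drm_connector *
	mxsfb_find_connector(struct drm_device *drm, struct drm_encoder *encoder)
	{
		struct drm_connector *connector;

		list_for_each_entry(connector,
				    &drm->mode_config.connector_list, head)
			if (connector->encoder == encoder)
				return connector;

		return NULL;
	}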
+ */ + + if (mxsfb->panel) { + ret = drm_panel_attach(mxsfb->panel, mxsfb->connector); + if (ret) { + dev_err(drm->dev, "Cannot connect panel: %d\n", ret); + goto err_vblank; + } + } else if (mxsfb->bridge) { + ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe, + mxsfb->bridge); + if (ret) { + dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); + goto err_vblank; + } } drm->mode_config.min_width = MXSFB_MIN_XRES; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h index d975300dca05..0b65b5194a9c 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h @@ -27,8 +27,10 @@ struct mxsfb_drm_private { struct clk *clk_disp_axi; struct drm_simple_display_pipe pipe; - struct drm_connector connector; + struct drm_connector panel_connector; + struct drm_connector *connector; struct drm_panel *panel; + struct drm_bridge *bridge; }; int mxsfb_setup_crtc(struct drm_device *dev); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c index be36f4d6cc96..4eb94744c526 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_out.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c @@ -21,7 +21,8 @@ static struct mxsfb_drm_private * drm_connector_to_mxsfb_drm_private(struct drm_connector *connector) { - return container_of(connector, struct mxsfb_drm_private, connector); + return container_of(connector, struct mxsfb_drm_private, + panel_connector); } static int mxsfb_panel_get_modes(struct drm_connector *connector) @@ -76,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = { int mxsfb_create_output(struct drm_device *drm) { struct mxsfb_drm_private *mxsfb = drm->dev_private; - struct drm_panel *panel; int ret; - ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL); + ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, + &mxsfb->panel, &mxsfb->bridge); if (ret) return ret; - mxsfb->connector.dpms = DRM_MODE_DPMS_OFF; - mxsfb->connector.polled = 0; - drm_connector_helper_add(&mxsfb->connector, - &mxsfb_panel_connector_helper_funcs); - ret = drm_connector_init(drm, &mxsfb->connector, - &mxsfb_panel_connector_funcs, - DRM_MODE_CONNECTOR_Unknown); - if (!ret) - mxsfb->panel = panel; + if (mxsfb->panel) { + mxsfb->connector = &mxsfb->panel_connector; + mxsfb->connector->dpms = DRM_MODE_DPMS_OFF; + mxsfb->connector->polled = 0; + drm_connector_helper_add(mxsfb->connector, + &mxsfb_panel_connector_helper_funcs); + ret = drm_connector_init(drm, mxsfb->connector, + &mxsfb_panel_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + } return ret; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index a13924ae1992..549486f1d937 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector, return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); } -static const struct drm_connector_helper_funcs -nv50_mstc_help = { - .get_modes = nv50_mstc_get_modes, - .mode_valid = nv50_mstc_mode_valid, - .best_encoder = nv50_mstc_best_encoder, - .atomic_best_encoder = nv50_mstc_atomic_best_encoder, - .atomic_check = nv50_mstc_atomic_check, -}; - -static enum drm_connector_status -nv50_mstc_detect(struct drm_connector *connector, bool force) +static int +nv50_mstc_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) { struct nv50_mstc *mstc = nv50_mstc(connector); - enum drm_connector_status conn_status; int 
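The nouveau MST change above is an instance of a wider API shift: detection that has to take modeset locks moves from .detect in drm_connector_funcs to .detect_ctx in drm_connector_helper_funcs, which receives the acquire context from the probe helper. The two hook shapes, for comparison:

	/* old hook, no locking context: */
	enum drm_connector_status (*detect)(struct drm_connector *connector,
					    bool force);

	/* new hook used above; returns a connector_status value or a
	 * negative error such as -EDEADLK for acquire-context backoff: */
	int (*detect_ctx)(struct drm_connector *connector,
			  struct drm_modeset_acquire_ctx *ctx, bool force);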
ret; if (drm_connector_is_unregistered(connector)) @@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) if (ret < 0 && ret != -EACCES) return connector_status_disconnected; - conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, - mstc->port); + ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr, + mstc->port); pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); - return conn_status; + return ret; } +static const struct drm_connector_helper_funcs +nv50_mstc_help = { + .get_modes = nv50_mstc_get_modes, + .mode_valid = nv50_mstc_mode_valid, + .best_encoder = nv50_mstc_best_encoder, + .atomic_best_encoder = nv50_mstc_atomic_best_encoder, + .atomic_check = nv50_mstc_atomic_check, + .detect_ctx = nv50_mstc_detect, +}; + static void nv50_mstc_destroy(struct drm_connector *connector) { @@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector) static const struct drm_connector_funcs nv50_mstc = { .reset = nouveau_conn_reset, - .detect = nv50_mstc_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = nv50_mstc_destroy, .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state, @@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm) } static void -nv50_mstm_init(struct nv50_mstm *mstm) +nv50_mstm_init(struct nv50_mstm *mstm, bool runtime) { int ret; if (!mstm || !mstm->mgr.mst_state) return; - ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime); if (ret == -1) { drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); drm_kms_helper_hotplug_event(mstm->mgr.dev); @@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - nv50_mstm_init(nv_encoder->dp.mstm); + nv50_mstm_init(nv_encoder->dp.mstm, runtime); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 3a5db17bc5c7..5b413588b823 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1130,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const char *name = connector->name; struct nouveau_encoder *nv_encoder; int ret; + bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); + + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { + NV_DEBUG(drm, "service %s\n", name); + drm_dp_cec_irq(&nv_connector->aux); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) + nv50_mstm_service(nv_encoder->dp.mstm); + + return NVIF_NOTIFY_KEEP; + } ret = pm_runtime_get(drm->dev->dev); if (ret == 0) { @@ -1150,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) return NVIF_NOTIFY_DROP; } - if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { - NV_DEBUG(drm, "service %s\n", name); - drm_dp_cec_irq(&nv_connector->aux); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) - nv50_mstm_service(nv_encoder->dp.mstm); - } else { - bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); - + if (!plugged) + drm_dp_cec_unset_edid(&nv_connector->aux); + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { if (!plugged) - drm_dp_cec_unset_edid(&nv_connector->aux); - NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", name); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { - if (!plugged) - nv50_mstm_remove(nv_encoder->dp.mstm); - } - - drm_helper_hpd_irq_event(connector->dev); + nv50_mstm_remove(nv_encoder->dp.mstm); } + drm_helper_hpd_irq_event(connector->dev); + pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6f038511a03a..53f9bceaf17a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) struct drm_connector_list_iter conn_iter; int ret; + /* + * Enable hotplug interrupts (done as early as possible, since we need + * them for MST) + */ + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + struct nouveau_connector *conn = nouveau_connector(connector); + nvif_notify_get(&conn->hpd); + } + drm_connector_list_iter_end(&conn_iter); + ret = disp->init(dev, resume, runtime); if (ret) return ret; @@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) */ drm_kms_helper_poll_enable(dev); - /* enable hotplug interrupts */ - drm_connector_list_iter_begin(dev, &conn_iter); - nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { - struct nouveau_connector *conn = nouveau_connector(connector); - nvif_notify_get(&conn->hpd); - } - drm_connector_list_iter_end(&conn_iter); - return ret; } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index b30fcaa2d0f5..da16ea095f13 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi) static void dsi_proto_timings(struct dsi_data *dsi) { - unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail; + unsigned int tlpx, tclk_zero, tclk_prepare; unsigned int tclk_pre, tclk_post; unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero; unsigned int ths_trail, ths_exit; @@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi) r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1); tlpx = FLD_GET(r, 20, 16) * 2; - tclk_trail = FLD_GET(r, 15, 8); tclk_zero = FLD_GET(r, 7, 0); r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index c4ffe96e28bc..ea5d5c228534 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -676,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, struct hdmi_audio_format audio_format; struct hdmi_audio_dma audio_dma; struct hdmi_core_audio_config acore; - int err, n, cts, channel_count; + int n, cts, channel_count; unsigned int fs_nr; bool word_length_16b = false; @@ -738,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, return -EINVAL; } - err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); + hdmi_compute_acr(pclk, fs_nr, &n, &cts); /* Audio clock regeneration settings */ acore.n = n; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 96f5cd17768c..ff4d35c8771f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -807,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, 
struct hdmi_wp_data *wp, struct hdmi_audio_format audio_format; struct hdmi_audio_dma audio_dma; struct hdmi_core_audio_config core_cfg; - int err, n, cts, channel_count; + int n, cts, channel_count; unsigned int fs_nr; bool word_length_16b = false; @@ -850,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, return -EINVAL; } - err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); + hdmi_compute_acr(pclk, fs_nr, &n, &cts); core_cfg.n = n; core_cfg.cts = cts; diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 835e6654fa82..43c1d096b021 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver; /* GEM bo flags -> tiler fmt */ static inline enum tiler_fmt gem2fmt(u32 flags) { - switch (flags & OMAP_BO_TILED) { + switch (flags & OMAP_BO_TILED_MASK) { case OMAP_BO_TILED_8: return TILFMT_8BIT; case OMAP_BO_TILED_16: diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 1b8b5108caf8..9aeab81dfb90 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb, bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb) { - return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED; + return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK; } /* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */ @@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); const struct drm_format_info *format = omap_fb->format; - struct plane *plane = &omap_fb->planes[0]; u32 x, y, orient = 0; info->fourcc = fb->format->format; @@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, x = state->src_x >> 16; y = state->src_y >> 16; - if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) { + if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) { u32 w = state->src_w >> 16; u32 h = state->src_h >> 16; @@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, info->screen_width /= format->cpp[0]; if (fb->format->format == DRM_FORMAT_NV12) { - plane = &omap_fb->planes[1]; - if (info->rotation_type == OMAP_DSS_ROT_TILER) { - WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED)); + WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK)); omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2, &info->p_uv_addr); } else { diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 08f539efddfb..e518d93ca6df 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -67,7 +67,7 @@ struct omap_gem_object { /** * # of users of dma_addr */ - u32 dma_addr_cnt; + refcount_t dma_addr_cnt; /** * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag @@ -196,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj) struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_drm_private *priv = obj->dev->dev_private; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { enum tiler_fmt fmt = gem2fmt(omap_obj->flags); int i; @@ -324,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) struct omap_gem_object *omap_obj = to_omap_bo(obj); size_t size = obj->size; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & 
OMAP_BO_TILED_MASK) { /* for tiled buffers, the virtual size has stride rounded up * to 4kb.. (to hide the fact that row n+1 might start 16kb or * 32kb later!). But we don't back the entire buffer with @@ -513,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf) * probably trigger put_pages()? */ - if (omap_obj->flags & OMAP_BO_TILED) + if (omap_obj->flags & OMAP_BO_TILED_MASK) ret = omap_gem_fault_2d(obj, vma, vmf); else ret = omap_gem_fault_1d(obj, vma, vmf); @@ -773,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) mutex_lock(&omap_obj->lock); if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) { - if (omap_obj->dma_addr_cnt == 0) { + if (refcount_read(&omap_obj->dma_addr_cnt) == 0) { u32 npages = obj->size >> PAGE_SHIFT; enum tiler_fmt fmt = gem2fmt(omap_obj->flags); struct tiler_block *block; BUG_ON(omap_obj->block); + refcount_set(&omap_obj->dma_addr_cnt, 1); + ret = omap_gem_attach_pages(obj); if (ret) goto fail; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height, 0); @@ -813,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) omap_obj->block = block; DBG("got dma address: %pad", &omap_obj->dma_addr); + } else { + refcount_inc(&omap_obj->dma_addr_cnt); } - omap_obj->dma_addr_cnt++; - - *dma_addr = omap_obj->dma_addr; + if (dma_addr) + *dma_addr = omap_obj->dma_addr; } else if (omap_gem_is_contiguous(omap_obj)) { - *dma_addr = omap_obj->dma_addr; + if (dma_addr) + *dma_addr = omap_obj->dma_addr; } else { ret = -EINVAL; goto fail; @@ -832,38 +836,46 @@ fail: } /** + * omap_gem_unpin_locked() - Unpin a GEM object from memory + * @obj: the GEM object + * + * omap_gem_unpin() without locking. + */ +static void omap_gem_unpin_locked(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + int ret; + + if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { + ret = tiler_unpin(omap_obj->block); + if (ret) { + dev_err(obj->dev->dev, + "could not unpin pages: %d\n", ret); + } + ret = tiler_release(omap_obj->block); + if (ret) { + dev_err(obj->dev->dev, + "could not release unmap: %d\n", ret); + } + omap_obj->dma_addr = 0; + omap_obj->block = NULL; + } +} + +/** * omap_gem_unpin() - Unpin a GEM object from memory * @obj: the GEM object * * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are - * reference-counted, the actualy unpin will only be performed when the number + * reference-counted, the actual unpin will only be performed when the number * of calls to this function matches the number of calls to omap_gem_pin(). 
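One subtlety in this refcount_t conversion: refcount_inc() from zero is treated as a potential use-after-free and WARNs, so the first pin must use refcount_set(&cnt, 1) and only subsequent pins use refcount_inc(), exactly as the hunk above does. A minimal model of the pairing (locking and error handling elided):

	if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
		refcount_set(&omap_obj->dma_addr_cnt, 1);
		/* ... attach pages, reserve and pin the TILER block ... */
	} else {
		refcount_inc(&omap_obj->dma_addr_cnt);
	}

	/* unpin: tear the mapping down only when the last pin goes away */
	if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
		/* ... tiler_unpin(), tiler_release() ... */
	}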
*/ void omap_gem_unpin(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret; mutex_lock(&omap_obj->lock); - - if (omap_obj->dma_addr_cnt > 0) { - omap_obj->dma_addr_cnt--; - if (omap_obj->dma_addr_cnt == 0) { - ret = tiler_unpin(omap_obj->block); - if (ret) { - dev_err(obj->dev->dev, - "could not unpin pages: %d\n", ret); - } - ret = tiler_release(omap_obj->block); - if (ret) { - dev_err(obj->dev->dev, - "could not release unmap: %d\n", ret); - } - omap_obj->dma_addr = 0; - omap_obj->block = NULL; - } - } - + omap_gem_unpin_locked(obj); mutex_unlock(&omap_obj->lock); } @@ -879,8 +891,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, mutex_lock(&omap_obj->lock); - if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block && - (omap_obj->flags & OMAP_BO_TILED)) { + if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block && + (omap_obj->flags & OMAP_BO_TILED_MASK)) { *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y); ret = 0; } @@ -895,7 +907,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient) { struct omap_gem_object *omap_obj = to_omap_bo(obj); int ret = -EINVAL; - if (omap_obj->flags & OMAP_BO_TILED) + if (omap_obj->flags & OMAP_BO_TILED_MASK) ret = tiler_stride(gem2fmt(omap_obj->flags), orient); return ret; } @@ -1030,10 +1042,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", omap_obj->flags, obj->name, kref_read(&obj->refcount), - off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt, + off, &omap_obj->dma_addr, + refcount_read(&omap_obj->dma_addr_cnt), omap_obj->vaddr, omap_obj->roll); - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height); if (omap_obj->block) { struct tcm_area *area = &omap_obj->block->area; @@ -1093,7 +1106,7 @@ void omap_gem_free_object(struct drm_gem_object *obj) mutex_lock(&omap_obj->lock); /* The object should not be pinned. */ - WARN_ON(omap_obj->dma_addr_cnt > 0); + WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0); if (omap_obj->pages) { if (omap_obj->flags & OMAP_BO_MEM_DMABUF) @@ -1120,6 +1133,38 @@ void omap_gem_free_object(struct drm_gem_object *obj) kfree(omap_obj); } +static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags) +{ + struct omap_drm_private *priv = dev->dev_private; + + switch (flags & OMAP_BO_CACHE_MASK) { + case OMAP_BO_CACHED: + case OMAP_BO_WC: + case OMAP_BO_CACHE_MASK: + break; + + default: + return false; + } + + if (flags & OMAP_BO_TILED_MASK) { + if (!priv->usergart) + return false; + + switch (flags & OMAP_BO_TILED_MASK) { + case OMAP_BO_TILED_8: + case OMAP_BO_TILED_16: + case OMAP_BO_TILED_32: + break; + + default: + return false; + } + } + + return true; +} + /* GEM buffer object constructor */ struct drm_gem_object *omap_gem_new(struct drm_device *dev, union omap_gem_size gsize, u32 flags) @@ -1131,18 +1176,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, size_t size; int ret; - /* Validate the flags and compute the memory and cache flags. */ - if (flags & OMAP_BO_TILED) { - if (!priv->usergart) { - dev_err(dev->dev, "Tiled buffers require DMM\n"); - return NULL; - } + if (!omap_gem_validate_flags(dev, flags)) + return NULL; + /* Validate the flags and compute the memory and cache flags. */ + if (flags & OMAP_BO_TILED_MASK) { /* * Tiled buffers are always shmem paged backed. When they are * scanned out, they are remapped into DMM/TILER. 
*/ - flags &= ~OMAP_BO_SCANOUT; flags |= OMAP_BO_MEM_SHMEM; /* @@ -1153,9 +1195,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, flags |= tiler_get_cpu_cache_flags(); } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { /* - * OMAP_BO_SCANOUT hints that the buffer doesn't need to be - * tiled. However, to lower the pressure on memory allocation, - * use contiguous memory only if no TILER is available. + * If we don't have DMM, we must allocate scanout buffers + * from contiguous DMA memory. */ flags |= OMAP_BO_MEM_DMA_API; } else if (!(flags & OMAP_BO_MEM_DMABUF)) { @@ -1174,7 +1215,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, omap_obj->flags = flags; mutex_init(&omap_obj->lock); - if (flags & OMAP_BO_TILED) { + if (flags & OMAP_BO_TILED_MASK) { /* * For tiled buffers align dimensions to slot boundaries and * calculate size based on aligned dimensions. diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index e8c3ae7ac77e..7344bb61936c 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -67,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, { struct drm_gem_object *obj = buffer->priv; struct page **pages; - if (omap_gem_flags(obj) & OMAP_BO_TILED) { + if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) { /* TODO we would need to pin at least part of the buffer to * get de-tiled view. For now just reject it. */ diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c index f984a5189fbf..7a1385e834f0 100644 --- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c +++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c @@ -219,9 +219,17 @@ static const struct of_device_id lb035q02_of_match[] = { MODULE_DEVICE_TABLE(of, lb035q02_of_match); +static const struct spi_device_id lb035q02_ids[] = { + { "lb035q02", 0 }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(spi, lb035q02_ids); + static struct spi_driver lb035q02_driver = { .probe = lb035q02_probe, .remove = lb035q02_remove, + .id_table = lb035q02_ids, .driver = { .name = "panel-lg-lb035q02", .of_match_table = lb035q02_of_match, @@ -230,7 +238,6 @@ static struct spi_driver lb035q02_driver = { module_spi_driver(lb035q02_driver); -MODULE_ALIAS("spi:lgphilips,lb035q02"); MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c index c4bc7fa85fad..fd593532ab23 100644 --- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c @@ -229,9 +229,17 @@ static const struct of_device_id nl8048_of_match[] = { MODULE_DEVICE_TABLE(of, nl8048_of_match); +static const struct spi_device_id nl8048_ids[] = { + { "nl8048hl11", 0 }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(spi, nl8048_ids); + static struct spi_driver nl8048_driver = { .probe = nl8048_probe, .remove = nl8048_remove, + .id_table = nl8048_ids, .driver = { .name = "panel-nec-nl8048hl11", .pm = &nl8048_pm_ops, @@ -241,7 +249,6 @@ static struct spi_driver nl8048_driver = { module_spi_driver(nl8048_driver); -MODULE_ALIAS("spi:nec,nl8048hl11"); MODULE_AUTHOR("Erik Gilling <konkers@android.com>"); MODULE_DESCRIPTION("NEC-NL8048HL11 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index 
6f5ce5867ff9..d6387d8f88a3 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -683,9 +683,17 @@ static const struct of_device_id acx565akm_of_match[] = { MODULE_DEVICE_TABLE(of, acx565akm_of_match); +static const struct spi_device_id acx565akm_ids[] = { + { "acx565akm", 0 }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(spi, acx565akm_ids); + static struct spi_driver acx565akm_driver = { .probe = acx565akm_probe, .remove = acx565akm_remove, + .id_table = acx565akm_ids, .driver = { .name = "panel-sony-acx565akm", .of_match_table = acx565akm_of_match, @@ -694,7 +702,6 @@ static struct spi_driver acx565akm_driver = { module_spi_driver(acx565akm_driver); -MODULE_ALIAS("spi:sony,acx565akm"); MODULE_AUTHOR("Nokia Corporation"); MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index 1d08d03f509d..c44d6a65c0aa 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -374,8 +374,7 @@ static const struct of_device_id td028ttec1_of_match[] = { MODULE_DEVICE_TABLE(of, td028ttec1_of_match); static const struct spi_device_id td028ttec1_ids[] = { - { "tpo,td028ttec1", 0}, - { "toppoly,td028ttec1", 0 }, + { "td028ttec1", 0 }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index 416aebd23b6a..621b65feec07 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -490,9 +490,17 @@ static const struct of_device_id td043mtea1_of_match[] = { MODULE_DEVICE_TABLE(of, td043mtea1_of_match); +static const struct spi_device_id td043mtea1_ids[] = { + { "td043mtea1", 0 }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(spi, td043mtea1_ids); + static struct spi_driver td043mtea1_driver = { .probe = td043mtea1_probe, .remove = td043mtea1_remove, + .id_table = td043mtea1_ids, .driver = { .name = "panel-tpo-td043mtea1", .pm = &td043mtea1_pm_ops, @@ -502,7 +510,6 @@ static struct spi_driver td043mtea1_driver = { module_spi_driver(td043mtea1_driver); -MODULE_ALIAS("spi:tpo,td043mtea1"); MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>"); MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO index 536a0d4f8d29..8c811a9e683b 100644 --- a/drivers/gpu/drm/panfrost/TODO +++ b/drivers/gpu/drm/panfrost/TODO @@ -10,3 +10,5 @@ - Compute job support. So called 'compute only' jobs need to be plumbed up to userspace. 
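The panel changes above all follow one pattern. For OF-instantiated SPI devices the core reports MODALIAS=spi:<part> with the vendor prefix stripped, so the old MODULE_ALIAS("spi:vendor,part") lines could never match and module autoloading was broken. Registering an spi_device_id table with the bare part name fixes autoloading and emits the correct alias automatically:

	static const struct spi_device_id example_ids[] = {
		{ "lb035q02", 0 },
		{ /* sentinel */ }
	};
	/* emits MODULE_ALIAS("spi:lb035q02") */
	MODULE_DEVICE_TABLE(spi, example_ids);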
+ +- Support core dump on job failure diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index c1eb8cfe6aeb..4c4e8a30a1ac 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -13,99 +13,42 @@ #include "panfrost_gpu.h" #include "panfrost_regs.h" -static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot); +static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev); static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - struct dev_pm_opp *opp; - unsigned long old_clk_rate = pfdev->devfreq.cur_freq; - unsigned long target_volt, target_rate; + struct panfrost_device *pfdev = dev_get_drvdata(dev); int err; - opp = devfreq_recommended_opp(dev, freq, flags); - if (IS_ERR(opp)) - return PTR_ERR(opp); - - target_rate = dev_pm_opp_get_freq(opp); - target_volt = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - if (old_clk_rate == target_rate) - return 0; - - /* - * If frequency scaling from low to high, adjust voltage first. - * If frequency scaling from high to low, adjust frequency first. - */ - if (old_clk_rate < target_rate) { - err = regulator_set_voltage(pfdev->regulator, target_volt, - target_volt); - if (err) { - dev_err(dev, "Cannot set voltage %lu uV\n", - target_volt); - return err; - } - } - - err = clk_set_rate(pfdev->clock, target_rate); - if (err) { - dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, - err); - if (pfdev->regulator) - regulator_set_voltage(pfdev->regulator, - pfdev->devfreq.cur_volt, - pfdev->devfreq.cur_volt); + err = dev_pm_opp_set_rate(dev, *freq); + if (err) return err; - } - if (old_clk_rate > target_rate) { - err = regulator_set_voltage(pfdev->regulator, target_volt, - target_volt); - if (err) - dev_err(dev, "Cannot set voltage %lu uV\n", target_volt); - } - - pfdev->devfreq.cur_freq = target_rate; - pfdev->devfreq.cur_volt = target_volt; + *freq = clk_get_rate(pfdev->clock); return 0; } static void panfrost_devfreq_reset(struct panfrost_device *pfdev) { - ktime_t now = ktime_get(); - int i; - - for (i = 0; i < NUM_JOB_SLOTS; i++) { - pfdev->devfreq.slot[i].busy_time = 0; - pfdev->devfreq.slot[i].idle_time = 0; - pfdev->devfreq.slot[i].time_last_update = now; - } + pfdev->devfreq.busy_time = 0; + pfdev->devfreq.idle_time = 0; + pfdev->devfreq.time_last_update = ktime_get(); } static int panfrost_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *status) { - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - int i; + struct panfrost_device *pfdev = dev_get_drvdata(dev); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - panfrost_devfreq_update_utilization(pfdev, i); - } + panfrost_devfreq_update_utilization(pfdev); status->current_frequency = clk_get_rate(pfdev->clock); - status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time, - pfdev->devfreq.slot[0].idle_time)); + status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time, + pfdev->devfreq.idle_time)); - status->busy_time = 0; - for (i = 0; i < NUM_JOB_SLOTS; i++) { - status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time); - } - - /* We're scheduling only to one core atm, so don't divide for now */ - /* status->busy_time /= NUM_JOB_SLOTS; */ + status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time); panfrost_devfreq_reset(pfdev); @@ -121,7 
+64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq { struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - *freq = pfdev->devfreq.cur_freq; + *freq = clk_get_rate(pfdev->clock); return 0; } @@ -137,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) { int ret; struct dev_pm_opp *opp; + unsigned long cur_freq; ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev); if (ret == -ENODEV) /* Optional, continue without devfreq */ @@ -146,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) panfrost_devfreq_reset(pfdev); - pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock); + cur_freq = clk_get_rate(pfdev->clock); - opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0); + opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0); if (IS_ERR(opp)) return PTR_ERR(opp); - panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq; + panfrost_devfreq_profile.initial_freq = cur_freq; dev_pm_opp_put(opp); pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev, @@ -176,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev) void panfrost_devfreq_resume(struct panfrost_device *pfdev) { - int i; - if (!pfdev->devfreq.devfreq) return; panfrost_devfreq_reset(pfdev); - for (i = 0; i < NUM_JOB_SLOTS; i++) - pfdev->devfreq.slot[i].busy = false; devfreq_resume_device(pfdev->devfreq.devfreq); } @@ -196,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev) devfreq_suspend_device(pfdev->devfreq.devfreq); } -static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot) +static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev) { - struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot]; ktime_t now; ktime_t last; @@ -206,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i return; now = ktime_get(); - last = pfdev->devfreq.slot[slot].time_last_update; + last = pfdev->devfreq.time_last_update; - /* If we last recorded a transition to busy, we have been idle since */ - if (devfreq_slot->busy) - pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last); + if (atomic_read(&pfdev->devfreq.busy_count) > 0) + pfdev->devfreq.busy_time += ktime_sub(now, last); else - pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last); + pfdev->devfreq.idle_time += ktime_sub(now, last); - pfdev->devfreq.slot[slot].time_last_update = now; + pfdev->devfreq.time_last_update = now; +} + +void panfrost_devfreq_record_busy(struct panfrost_device *pfdev) +{ + panfrost_devfreq_update_utilization(pfdev); + atomic_inc(&pfdev->devfreq.busy_count); } -/* The job scheduler is expected to call this at every transition busy <-> idle */ -void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot) +void panfrost_devfreq_record_idle(struct panfrost_device *pfdev) { - struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot]; + int count; - panfrost_devfreq_update_utilization(pfdev, slot); - devfreq_slot->busy = !devfreq_slot->busy; + panfrost_devfreq_update_utilization(pfdev); + count = atomic_dec_if_positive(&pfdev->devfreq.busy_count); + WARN_ON(count < 0); } diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h index e3bc63e82843..0611beffc8d0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h @@ -10,6 +10,7 @@ void 
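Two things worth noting in the panfrost devfreq rework above: the .target() callback now delegates the clk/regulator ordering entirely to dev_pm_opp_set_rate(), and the per-slot busy flags collapse into a single atomic busy_count, with elapsed time banked against the current state before every transition. A user-space model of that accounting, assuming record_busy()/record_idle() calls are strictly paired:

	#include <stdatomic.h>
	#include <stdint.h>

	static atomic_int busy_count;
	static int64_t busy_ns, idle_ns, last_ns;

	static void bank_time(int64_t now_ns)
	{
		/* attribute the elapsed slice to the state we were in */
		if (atomic_load(&busy_count) > 0)
			busy_ns += now_ns - last_ns;
		else
			idle_ns += now_ns - last_ns;
		last_ns = now_ns;
	}

	static void record_busy(int64_t now_ns)
	{
		bank_time(now_ns);
		atomic_fetch_add(&busy_count, 1);
	}

	static void record_idle(int64_t now_ns)
	{
		bank_time(now_ns);
		atomic_fetch_sub(&busy_count, 1);
	}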
panfrost_devfreq_fini(struct panfrost_device *pfdev); void panfrost_devfreq_resume(struct panfrost_device *pfdev); void panfrost_devfreq_suspend(struct panfrost_device *pfdev); -void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot); +void panfrost_devfreq_record_busy(struct panfrost_device *pfdev); +void panfrost_devfreq_record_idle(struct panfrost_device *pfdev); #endif /* __PANFROST_DEVFREQ_H__ */ diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 9c39b9794811..06713811b92c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -51,13 +51,6 @@ struct panfrost_features { unsigned long hw_issues[64 / BITS_PER_LONG]; }; -struct panfrost_devfreq_slot { - ktime_t busy_time; - ktime_t idle_time; - ktime_t time_last_update; - bool busy; -}; - struct panfrost_device { struct device *dev; struct drm_device *ddev; @@ -93,9 +86,10 @@ struct panfrost_device { struct { struct devfreq *devfreq; struct thermal_cooling_device *cooling; - unsigned long cur_freq; - unsigned long cur_volt; - struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS]; + ktime_t busy_time; + ktime_t idle_time; + ktime_t time_last_update; + atomic_t busy_count; } devfreq; }; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index bc2ddeb55f5d..9458dc6c750c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -470,7 +470,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), }; -DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops); +DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops); /* * Panfrost driver version: @@ -556,11 +556,11 @@ static int panfrost_probe(struct platform_device *pdev) return 0; err_out2: + pm_runtime_disable(pfdev->dev); panfrost_devfreq_fini(pfdev); err_out1: panfrost_device_fini(pfdev); err_out0: - pm_runtime_disable(pfdev->dev); drm_dev_put(ddev); return err; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index acb07fe06580..deca0c30bbd4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -112,7 +112,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /** diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index f67ed925c0ef..8822ec13a0d6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES); pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES); pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES); + pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS); + pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE); + pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE); pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES); for (i = 0; i < 4; i++) pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i)); diff --git 
a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index a58551668d9a..d411eb6c8eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) } cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu); - - panfrost_devfreq_record_transition(pfdev, js); + panfrost_devfreq_record_busy(pfdev); job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF); job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32); @@ -381,13 +380,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) job_read(pfdev, JS_TAIL_LO(js)), sched_job); - mutex_lock(&pfdev->reset_lock); + if (!mutex_trylock(&pfdev->reset_lock)) + return; - for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_stop(&pfdev->js->queue[i].sched, sched_job); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; + + drm_sched_stop(sched, sched_job); + if (js != i) + /* Ensure any timeouts on other slots have finished */ + cancel_delayed_work_sync(&sched->work_tdr); + } - if (sched_job) - drm_sched_increase_karma(sched_job); + drm_sched_increase_karma(sched_job); spin_lock_irqsave(&pfdev->js->job_lock, flags); for (i = 0; i < NUM_JOB_SLOTS; i++) { @@ -398,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) } spin_unlock_irqrestore(&pfdev->js->job_lock, flags); - /* panfrost_core_dump(pfdev); */ - - panfrost_devfreq_record_transition(pfdev, js); + panfrost_devfreq_record_idle(pfdev); panfrost_device_reset(pfdev); for (i = 0; i < NUM_JOB_SLOTS; i++) @@ -463,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) pfdev->jobs[j] = NULL; panfrost_mmu_as_put(pfdev, &job->file_priv->mmu); - panfrost_devfreq_record_transition(pfdev, j); + panfrost_devfreq_record_idle(pfdev); dma_fence_signal_locked(job->done_fence); pm_runtime_put_autosuspend(pfdev->dev); @@ -564,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev) struct panfrost_job_slot *js = pfdev->js; int i; + /* Check whether the hardware is idle */ + if (atomic_read(&pfdev->devfreq.busy_count)) + return false; + for (i = 0; i < NUM_JOB_SLOTS; i++) { /* If there are any jobs in the HW queue, we're not idle */ if (atomic_read(&js->queue[i].sched.hw_rq_count)) return false; - - /* Check whether the hardware is idle */ - if (pfdev->devfreq.slot[i].busy) - return false; } return true; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index bdd990568476..a3ed64a1f15e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size) return SZ_2M; } -void panfrost_mmu_flush_range(struct panfrost_device *pfdev, - struct panfrost_mmu *mmu, - u64 iova, size_t size) +static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, + struct panfrost_mmu *mmu, + u64 iova, size_t size) { if (mmu->as < 0) return; @@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) spin_lock(&pfdev->as_lock); list_for_each_entry(mmu, &pfdev->as_lru_list, list) { if (as == mmu->as) - break; + goto found_mmu; } - if (as != mmu->as) - goto out; + goto out; +found_mmu: priv = container_of(mmu, struct panfrost_file_priv, mmu); spin_lock(&priv->mm_lock); @@ -432,7 +432,8 @@ out: #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) -int panfrost_mmu_map_fault_addr(struct 
panfrost_device *pfdev, int as, u64 addr) +static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, + u64 addr) { int ret, i; struct panfrost_gem_object *bo; diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 83c57d325ca8..2dba192bf198 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -16,6 +16,7 @@ #include "panfrost_issues.h" #include "panfrost_job.h" #include "panfrost_mmu.h" +#include "panfrost_perfcnt.h" #include "panfrost_regs.h" #define COUNTERS_PER_BLOCK 64 diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 024771a4083e..703ddc803c55 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data) } static enum drm_mode_status -pl111_mode_valid(struct drm_crtc *crtc, +pl111_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { - struct drm_device *drm = crtc->dev; + struct drm_device *drm = pipe->crtc.dev; struct pl111_drm_dev_private *priv = drm->dev_private; u32 cpp = priv->variant->fb_bpp / 8; u64 bw; diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index d0d691b31f4a..ca3f51c2a8fe 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -4,6 +4,7 @@ config DRM_QXL depends on DRM && PCI && MMU select DRM_KMS_HELPER select DRM_TTM + select DRM_TTM_HELPER select CRC32 help QXL virtual GPU for Spice virtualization desktop integration. diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 483b4c57554a..1d601f57a6ba 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -150,15 +150,7 @@ qxl_pci_remove(struct pci_dev *pdev) drm_dev_put(dev); } -static const struct file_operations qxl_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .poll = drm_poll, - .read = drm_read, - .mmap = qxl_mmap, -}; +DEFINE_DRM_GEM_FOPS(qxl_fops); static int qxl_drm_freeze(struct drm_device *dev) { diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index d4051409ce64..27e45a2d6b52 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -355,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp, /* qxl ttm */ int qxl_ttm_init(struct qxl_device *qdev); void qxl_ttm_fini(struct qxl_device *qdev); -int qxl_mmap(struct file *filp, struct vm_area_struct *vma); +int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); /* qxl image */ diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index c013c516f561..ab72dc3476e9 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) { u32 c = 0; - u32 pflag = pinned ? 
TTM_PL_FLAG_NO_EVICT : 0; + u32 pflag = 0; unsigned int i; + if (pinned) + pflag |= TTM_PL_FLAG_NO_EVICT; + if (qbo->tbo.base.size <= PAGE_SIZE) + pflag |= TTM_PL_FLAG_TOPDOWN; + qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; if (domain == QXL_GEM_DOMAIN_VRAM) @@ -86,6 +91,7 @@ static const struct drm_gem_object_funcs qxl_object_funcs = { .get_sg_table = qxl_gem_prime_get_sg_table, .vmap = qxl_gem_prime_vmap, .vunmap = qxl_gem_prime_vunmap, + .mmap = drm_gem_ttm_mmap, .print_info = drm_gem_ttm_print_info, }; @@ -161,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset) { - struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; void *rptr; int ret; struct io_mapping *map; @@ -173,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, else goto fallback; - (void) ttm_mem_io_lock(man, false); - ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem); - ttm_mem_io_unlock(man); + ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem); return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); fallback: @@ -206,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo) void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *pmap) { - struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; - if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) && (bo->tbo.mem.mem_type != TTM_PL_PRIV)) goto fallback; io_mapping_unmap_atomic(pmap); - - (void) ttm_mem_io_lock(man, false); - ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); - ttm_mem_io_unlock(man); return; fallback: qxl_bo_kunmap(bo); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 312216caeea2..2feca734c7b1 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) return 0; ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, - !no_intr, NULL, true); + !no_intr, NULL); if (ret) return ret; @@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev, void qxl_release_fence_buffer_objects(struct qxl_release *release) { struct ttm_buffer_object *bo; - struct ttm_bo_global *glob; struct ttm_bo_device *bdev; struct ttm_validate_buffer *entry; struct qxl_device *qdev; @@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) release->id | 0xf0000000, release->base.seqno); trace_dma_fence_emit(&release->base); - glob = bdev->glob; - - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; dma_resv_add_shared_fence(bo->base.resv, &release->base); - ttm_bo_add_to_lru(bo); + ttm_bo_move_to_lru_tail(bo, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); ww_acquire_fini(&release->ticket); } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index cbc6c2ba8630..16a5e903533d 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -48,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) return qdev; } -static struct vm_operations_struct qxl_ttm_vm_ops; -static const struct vm_operations_struct *ttm_vm_ops; - -static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf) -{ - struct ttm_buffer_object *bo; - vm_fault_t ret; - - bo = (struct 
ttm_buffer_object *)vmf->vma->vm_private_data; - if (bo == NULL) - return VM_FAULT_NOPAGE; - ret = ttm_vm_ops->fault(vmf); - return ret; -} - -int qxl_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int r; - struct drm_file *file_priv = filp->private_data; - struct qxl_device *qdev = file_priv->minor->dev->dev_private; - - if (qdev == NULL) { - DRM_ERROR( - "filp->private_data->minor->dev->dev_private == NULL\n"); - return -EINVAL; - } - DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n", - filp->private_data, vma->vm_pgoff); - - r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev); - if (unlikely(r != 0)) - return r; - if (unlikely(ttm_vm_ops == NULL)) { - ttm_vm_ops = vma->vm_ops; - qxl_ttm_vm_ops = *ttm_vm_ops; - qxl_ttm_vm_ops.fault = &qxl_ttm_fault; - } - vma->vm_ops = &qxl_ttm_vm_ops; - return 0; -} - static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; @@ -151,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, *placement = qbo->placement; } -static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) -{ - struct qxl_bo *qbo = to_qxl_bo(bo); - - return drm_vma_node_verify_access(&qbo->tbo.base.vma_node, - filp->private_data); -} - -static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) +int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct qxl_device *qdev = qxl_get_qdev(bdev); @@ -310,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = { .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, - .verify_access = &qxl_verify_access, .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, .move_notify = &qxl_bo_move_notify, @@ -369,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - struct qxl_device *rdev = dev->dev_private; - struct ttm_bo_global *glob = rdev->mman.bdev.glob; struct drm_printer p = drm_seq_file_printer(m); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); drm_mm_print(mm, &p); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } #endif diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 62eab82a64f9..daff9a2af3be 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev) else actual_temp = temp & 0x1ff; - actual_temp = actual_temp * 1000; - - return actual_temp; + return actual_temp * 1000; } /* get temperature in millidegrees */ @@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev) else actual_temp = 0; - actual_temp = actual_temp * 1000; - - return actual_temp; + return actual_temp * 1000; } /* diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index b9aea5776d3d..72db2b41e96d 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) return; sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); if 
(radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2994f07fbad9..ee28f5b3785e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) return &radeon_connector->mst_encoder->base; } +static int +radeon_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct radeon_connector *radeon_connector = + to_radeon_connector(connector); + struct radeon_connector *master = radeon_connector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + radeon_connector->port); +} + static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = { .get_modes = radeon_dp_mst_get_modes, .mode_valid = radeon_dp_mst_mode_valid, .best_encoder = radeon_mst_best_encoder, + .detect_ctx = radeon_dp_mst_detect, }; -static enum drm_connector_status -radeon_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector *master = radeon_connector->mst_port; - - return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port); -} - static void radeon_dp_mst_connector_destroy(struct drm_connector *connector) { @@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector) static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { .dpms = drm_helper_connector_dpms, - .detect = radeon_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_dp_mst_connector_destroy, }; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 19590fff135c..41f08321a361 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -379,7 +379,9 @@ radeon_pci_remove(struct pci_dev *pdev) static void radeon_pci_shutdown(struct pci_dev *pdev) { +#ifdef CONFIG_PPC64 struct drm_device *ddev = pci_get_drvdata(pdev); +#endif /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown */ if (radeon_device_is_virtual()) radeon_pci_remove(pdev); +#ifdef CONFIG_PPC64 /* Some adapters need to be suspended before a - * shutdown occurs in order to prevent an error - * during kexec. - */ + * shutdown occurs in order to prevent an error + * during kexec. + * Make this power specific because it breaks + * some non-power boards.
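Since the comment above carries the whole rationale for this change, here is a condensed view of how radeon_pci_shutdown() reads once both hunks apply — an editorial sketch assembled from the diff itself, not a verbatim copy of the file:

static void radeon_pci_shutdown(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC64
	/* Guarded like its only use below, so non-PPC64 builds do not
	 * emit an unused-variable warning. */
	struct drm_device *ddev = pci_get_drvdata(pdev);
#endif

	/* In a VM, tear the device down fully on reboot/shutdown. */
	if (radeon_device_is_virtual())
		radeon_pci_remove(pdev);

#ifdef CONFIG_PPC64
	/* Suspend before shutdown to avoid errors during kexec, but only
	 * on PPC64: the same call is known to break some non-PPC boards. */
	radeon_suspend_kms(ddev, true, true, false);
#endif
}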
+ */ radeon_suspend_kms(ddev, true, true, false); +#endif } static int radeon_pmops_suspend(struct device *dev) diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index b2b076606f54..67298a0739cb 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, if (!vm_bos) return; - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) goto error_free; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 2abe1eab471f..140d94cc080d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); INIT_LIST_HEAD(&duplicates); - r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true); + r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); if (unlikely(r != 0)) { return r; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 460fd98e40a7..a0b382a637a6 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 9c93eb4fad8b..f266c17b907a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = { .dpll_mask = BIT(1), }; +static const struct rcar_du_device_info rcar_du_r8a774b1_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, + .channels_mask = BIT(3) | BIT(1) | BIT(0), + .routes = { + /* + * R8A774B1 has one RGB output, one LVDS output and one HDMI + * output. 
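Reading the route entries that follow: possible_crtcs is a bitmask over the DU channels enabled in channels_mask, and port is the output's DT port index. A small helper, purely for illustration (the rcar_du_output_routing layout is assumed from the driver's headers, which are not shown in this diff):

#include <linux/bits.h>

/* Illustrative decode only: true if DU channel 'ch' can drive 'route'. */
static bool du_route_uses_channel(const struct rcar_du_output_routing *route,
				  unsigned int ch)
{
	/* e.g. the HDMI0 entry below has possible_crtcs = BIT(1): DU1 only */
	return route->possible_crtcs & BIT(ch);
}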
+ */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(2), + .port = 0, + }, + [RCAR_DU_OUTPUT_HDMI0] = { + .possible_crtcs = BIT(1), + .port = 1, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .port = 2, + }, + }, + .num_lvds = 1, + .dpll_mask = BIT(1), +}; + static const struct rcar_du_device_info rcar_du_r8a774c0_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK @@ -416,6 +445,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info }, { .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info }, + { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info }, { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info }, { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 2dc9caee8767..0d59f390de19 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -585,7 +585,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu) vsps[j].crtcs_mask |= BIT(i); - /* Store the VSP pointer and pipe index in the CRTC. */ + /* + * Store the VSP pointer and pipe index in the CRTC. If the + * second cell of the 'vsps' specifier isn't present, default + * to 0 to remain compatible with older DT bindings. + */ rcdu->crtcs[i].vsp = &rcdu->vsps[j]; rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 3fc7e6899cab..8c6c172bbf2e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -16,6 +16,7 @@ #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/sys_soc.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds) return 0; } +static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = { + .gen = 2, + .quirks = RCAR_LVDS_QUIRK_LANES, + .pll_setup = rcar_lvds_pll_setup_gen2, +}; + +static const struct soc_device_attribute lvds_quirk_matches[] = { + { + .soc_id = "r8a7790", .revision = "ES1.*", + .data = &rcar_lvds_r8a7790es1_info, + }, + { /* sentinel */ } +}; + static int rcar_lvds_probe(struct platform_device *pdev) { + const struct soc_device_attribute *attr; struct rcar_lvds *lvds; struct resource *mem; int ret; @@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev) lvds->dev = &pdev->dev; lvds->info = of_device_get_match_data(&pdev->dev); + attr = soc_device_match(lvds_quirk_matches); + if (attr) + lvds->info = attr->data; + ret = rcar_lvds_parse_dt(lvds); if (ret < 0) return ret; @@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = { .pll_setup = rcar_lvds_pll_setup_gen2, }; -static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = { - .gen = 2, - .quirks = RCAR_LVDS_QUIRK_LANES, - .pll_setup = rcar_lvds_pll_setup_gen2, -}; - static const struct rcar_lvds_device_info rcar_lvds_gen3_info = { .gen = 3, .quirks = RCAR_LVDS_QUIRK_PWD, @@ -929,8 +943,9 @@ static const struct of_device_id rcar_lvds_of_table[] = { { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7744-lvds", .data = 
&rcar_lvds_gen2_info }, { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info }, + { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info }, - { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info }, + { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info }, diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index d505ea7d5384..eed594bd38d3 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -477,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp) cdn_dp_set_firmware_active(dp, false); cdn_dp_clk_disable(dp); dp->active = false; - dp->link.rate = 0; - dp->link.num_lanes = 0; + dp->max_lanes = 0; + dp->max_rate = 0; if (!dp->connected) { kfree(dp->edid); dp->edid = NULL; @@ -570,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) struct cdn_dp_port *port = cdn_dp_connected_port(dp); u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd); - if (!port || !dp->link.rate || !dp->link.num_lanes) + if (!port || !dp->max_rate || !dp->max_lanes) return false; if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status, @@ -952,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work) /* Enabled and connected with a sink, re-train if requested */ } else if (!cdn_dp_check_link_status(dp)) { - unsigned int rate = dp->link.rate; - unsigned int lanes = dp->link.num_lanes; + unsigned int rate = dp->max_rate; + unsigned int lanes = dp->max_lanes; struct drm_display_mode *mode = &dp->mode; DRM_DEV_INFO(dp->dev, "Connected with sink. 
Re-train link\n"); @@ -966,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work) /* If training result is changed, update the video config */ if (mode->clock && - (rate != dp->link.rate || lanes != dp->link.num_lanes)) { + (rate != dp->max_rate || lanes != dp->max_lanes)) { ret = cdn_dp_config_video(dp); if (ret) { dp->connected = false; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index b85ea89eb60b..83c4586665b4 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -92,9 +92,10 @@ struct cdn_dp_device { struct reset_control *core_rst; struct audio_info audio_info; struct video_info video_info; - struct drm_dp_link link; struct cdn_dp_port *port[MAX_PHY]; u8 ports; + u8 max_lanes; + u8 max_rate; u8 lanes; int active_port; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c index 077c87021908..7361c07cb4a7 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c @@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp) if (ret) goto err_get_training_status; - dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]); - dp->link.num_lanes = status[1]; + dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]); + dp->max_lanes = status[1]; err_get_training_status: if (ret) @@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp) return ret; } - DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate, - dp->link.num_lanes); + DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate, + dp->max_lanes); return ret; } @@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp) bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ? 
(video->color_depth * 2) : (video->color_depth * 3); - link_rate = dp->link.rate / 1000; + link_rate = dp->max_rate / 1000; ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE); if (ret) @@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp) do { tu_size_reg += 2; symbol = tu_size_reg * mode->clock * bit_per_pix; - do_div(symbol, dp->link.num_lanes * link_rate * 8); + do_div(symbol, dp->max_lanes * link_rate * 8); rem = do_div(symbol, 1000); if (tu_size_reg > 64) { ret = -EINVAL; DRM_DEV_ERROR(dp->dev, "tu error, clk:%d, lanes:%d, rate:%d\n", - mode->clock, dp->link.num_lanes, - link_rate); + mode->clock, dp->max_lanes, link_rate); goto err_config_video; } } while ((symbol <= 1) || (tu_size_reg - symbol < 4) || @@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp) /* set the FIFO Buffer size */ val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate; - val /= (dp->link.num_lanes * link_rate); + val /= (dp->max_lanes * link_rate); val = div_u64(8 * (symbol + 1), bit_per_pix) - val; val += 2; ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val); @@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp, u32 val; if (audio->channels == 2) { - if (dp->link.num_lanes == 1) + if (dp->max_lanes == 1) sub_pckt_num = 2; else sub_pckt_num = 4; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 906891b03a38..7f56d8c3491d 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -450,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = { .phy_ops = &rk3328_hdmi_phy_ops, .phy_name = "inno_dw_hdmi_phy2", .phy_force_vendor = true, + .use_drm_infoframe = true, }; static struct rockchip_hdmi_chip_data rk3399_chip_data = { @@ -464,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { .cur_ctr = rockchip_cur_ctr, .phy_config = rockchip_phy_config, .phy_data = &rk3399_chip_data, + .use_drm_infoframe = true, }; static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = { diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index 85fc5f01f761..cdb401f4283d 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -743,7 +743,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master, struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = data; struct rk3066_hdmi *hdmi; - struct resource *iores; int irq; int ret; @@ -753,12 +752,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master, hdmi->dev = dev; hdmi->drm_dev = drm; - - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iores) - return -ENXIO; - - hdmi->regs = devm_ioremap_resource(dev, iores); + hdmi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdmi->regs)) return PTR_ERR(hdmi->regs); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 291e89b4045f..7582d0e6a60a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) kfree(rk_obj); } -struct rockchip_gem_object * +static struct rockchip_gem_object * rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size) { struct rockchip_gem_object *rk_obj; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c 
b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 613404f86668..d04b3492bdac 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -139,6 +139,7 @@ struct vop { uint32_t *regsbak; void __iomem *regs; + void __iomem *lut_regs; /* physical map length of vop register */ uint32_t len; @@ -1040,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct vop *vop = to_vop(crtc); + unsigned long rate; - adjusted_mode->clock = - DIV_ROUND_UP(clk_round_rate(vop->dclk, - adjusted_mode->clock * 1000), 1000); + /* + * Clock craziness. + * + * Key points: + * + * - DRM works in kHz. + * - Clock framework works in Hz. + * - Rockchip's clock driver picks the clock rate that is the + * same as _OR LOWER THAN_ the one requested. + * + * Action plan: + * + * 1. When DRM gives us a mode, we should add 999 Hz to it. That way + * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to + * make 60000 kHz then the clock framework will actually give us + * the right clock. + * + * NOTE: if the PLL (maybe through a divider) could actually make + * a clock rate 999 Hz higher instead of the one we want then this + * could be a problem. Unfortunately there's not much we can do + * since it's baked into DRM to use kHz. It shouldn't matter in + * practice since Rockchip PLLs are controlled by tables and + * even if there is a divider in the middle I wouldn't expect PLL + * rates in the table that are just a few kHz different. + * + * 2. Get the clock framework to round the rate for us to tell us + * what it will actually make. + * + * 3. Store the rounded-up rate so that we don't need to worry about + * this in the actual clk_set_rate(). + */ + rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999); + adjusted_mode->clock = DIV_ROUND_UP(rate, 1000); return true; } +static bool vop_dsp_lut_is_enabled(struct vop *vop) +{ + return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en); +} + +static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc) +{ + struct drm_color_lut *lut = crtc->state->gamma_lut->data; + unsigned int i; + + for (i = 0; i < crtc->gamma_size; i++) { + u32 word; + + word = (drm_color_lut_extract(lut[i].red, 10) << 20) | + (drm_color_lut_extract(lut[i].green, 10) << 10) | + drm_color_lut_extract(lut[i].blue, 10); + writel(word, vop->lut_regs + i * 4); + } +} + +static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_crtc_state *state = crtc->state; + unsigned int idle; + int ret; + + if (!vop->lut_regs) + return; + /* + * To disable gamma (gamma_lut is null) or to write + * an update to the LUT, clear dsp_lut_en. + */ + spin_lock(&vop->reg_lock); + VOP_REG_SET(vop, common, dsp_lut_en, 0); + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); + + /* + * In order to write the LUT to the internal memory, + * we need to first make sure the dsp_lut_en bit is cleared.
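The wait just below this comment uses readx_poll_timeout() from <linux/iopoll.h>: it re-evaluates vop_dsp_lut_is_enabled(vop) into idle every ~5 µs until !idle holds, returning -ETIMEDOUT after 30 ms. The same pattern in isolation, as a hedged sketch with invented register names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MYDEV_STATUS		0x04	/* hypothetical register */
#define MYDEV_STATUS_BUSY	BIT(0)	/* hypothetical busy flag */

static unsigned int mydev_is_busy(void __iomem *regs)
{
	return readl(regs + MYDEV_STATUS) & MYDEV_STATUS_BUSY;
}

static int mydev_wait_idle(void __iomem *regs)
{
	unsigned int busy;

	/* Re-reads mydev_is_busy(regs) into 'busy' every ~5 us until
	 * '!busy' holds, giving up with -ETIMEDOUT after 30 ms. */
	return readx_poll_timeout(mydev_is_busy, regs, busy, !busy,
				  5, 30 * 1000);
}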
+ */ + ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop, + idle, !idle, 5, 30 * 1000); + if (ret) { + DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n"); + return; + } + + if (!state->gamma_lut) + return; + + spin_lock(&vop->reg_lock); + vop_crtc_write_gamma_lut(vop, crtc); + VOP_REG_SET(vop, common, dsp_lut_en, 1); + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); +} + +static void vop_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct vop *vop = to_vop(crtc); + + /* + * Only update GAMMA if the 'active' flag is not changed, + * otherwise it's updated by .atomic_enable. + */ + if (crtc->state->color_mgmt_changed && + !crtc->state->active_changed) + vop_crtc_gamma_set(vop, crtc, old_crtc_state); +} + static void vop_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -1075,6 +1180,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, return; } + /* + * If we have a GAMMA LUT in the state, then let's make sure + * it's updated. We might be coming out of suspend, + * which means the LUT internal memory needs to be re-written. + */ + if (crtc->state->gamma_lut) + vop_crtc_gamma_set(vop, crtc, old_state); + mutex_lock(&vop->vop_lock); WARN_ON(vop->event); @@ -1085,9 +1198,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret); return; } - - pin_pol = BIT(DCLK_INVERT); - pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ? + pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(HSYNC_POSITIVE) : 0; pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(VSYNC_POSITIVE) : 0; @@ -1096,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, switch (s->output_type) { case DRM_MODE_CONNECTOR_LVDS: - VOP_REG_SET(vop, output, rgb_en, 1); + VOP_REG_SET(vop, output, rgb_dclk_pol, 1); VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol); + VOP_REG_SET(vop, output, rgb_en, 1); break; case DRM_MODE_CONNECTOR_eDP: + VOP_REG_SET(vop, output, edp_dclk_pol, 1); VOP_REG_SET(vop, output, edp_pin_pol, pin_pol); VOP_REG_SET(vop, output, edp_en, 1); break; case DRM_MODE_CONNECTOR_HDMIA: + VOP_REG_SET(vop, output, hdmi_dclk_pol, 1); VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol); VOP_REG_SET(vop, output, hdmi_en, 1); break; case DRM_MODE_CONNECTOR_DSI: + VOP_REG_SET(vop, output, mipi_dclk_pol, 1); VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol); VOP_REG_SET(vop, output, mipi_en, 1); VOP_REG_SET(vop, output, mipi_dual_channel_en, !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL)); break; case DRM_MODE_CONNECTOR_DisplayPort: - pin_pol &= ~BIT(DCLK_INVERT); + VOP_REG_SET(vop, output, dp_dclk_pol, 0); VOP_REG_SET(vop, output, dp_pin_pol, pin_pol); VOP_REG_SET(vop, output, dp_en, 1); break; @@ -1191,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop) synchronize_irq(vop->irq); } +static int vop_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct vop *vop = to_vop(crtc); + + if (vop->lut_regs && crtc_state->color_mgmt_changed && + crtc_state->gamma_lut) { + unsigned int len; + + len = drm_color_lut_size(crtc_state->gamma_lut); + if (len != crtc->gamma_size) { + DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n", + len, crtc->gamma_size); + return -EINVAL; + } + } + + return 0; +} + static void vop_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -1243,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, static const 
struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { .mode_fixup = vop_crtc_mode_fixup, + .atomic_check = vop_crtc_atomic_check, + .atomic_begin = vop_crtc_atomic_begin, .atomic_flush = vop_crtc_atomic_flush, .atomic_enable = vop_crtc_atomic_enable, .atomic_disable = vop_crtc_atomic_disable, @@ -1361,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = { .disable_vblank = vop_crtc_disable_vblank, .set_crc_source = vop_crtc_set_crc_source, .verify_crc_source = vop_crtc_verify_crc_source, + .gamma_set = drm_atomic_helper_legacy_gamma_set, }; static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) @@ -1518,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop) goto err_cleanup_planes; drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); + if (vop->lut_regs) { + drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size); + drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size); + } /* * Create drm_planes for overlay windows with possible_crtcs restricted @@ -1822,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(vop->regs)) return PTR_ERR(vop->regs); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + if (!vop_data->lut_size) { + DRM_DEV_ERROR(dev, "no gamma LUT size defined\n"); + return -EINVAL; + } + vop->lut_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(vop->lut_regs)) + return PTR_ERR(vop->lut_regs); + } + vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); if (!vop->regsbak) return -ENOMEM; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 2149a889c29d..0b3d18c457b2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -46,10 +46,15 @@ struct vop_modeset { struct vop_output { struct vop_reg pin_pol; struct vop_reg dp_pin_pol; + struct vop_reg dp_dclk_pol; struct vop_reg edp_pin_pol; + struct vop_reg edp_dclk_pol; struct vop_reg hdmi_pin_pol; + struct vop_reg hdmi_dclk_pol; struct vop_reg mipi_pin_pol; + struct vop_reg mipi_dclk_pol; struct vop_reg rgb_pin_pol; + struct vop_reg rgb_dclk_pol; struct vop_reg dp_en; struct vop_reg edp_en; struct vop_reg hdmi_en; @@ -67,6 +72,7 @@ struct vop_common { struct vop_reg dither_down_mode; struct vop_reg dither_down_en; struct vop_reg dither_up; + struct vop_reg dsp_lut_en; struct vop_reg gate_en; struct vop_reg mmu_en; struct vop_reg out_mode; @@ -170,6 +176,7 @@ struct vop_data { const struct vop_win_yuv2yuv_data *win_yuv2yuv; const struct vop_win_data *win; unsigned int win_size; + unsigned int lut_size; #define VOP_FEATURE_OUTPUT_RGB10 BIT(0) #define VOP_FEATURE_INTERNAL_RGB BIT(1) @@ -294,8 +301,7 @@ enum dither_down_mode_sel { enum vop_pol { HSYNC_POSITIVE = 0, VSYNC_POSITIVE = 1, - DEN_NEGATIVE = 2, - DCLK_INVERT = 3 + DEN_NEGATIVE = 2 }; #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index d1494be14471..7a9d979c8d5d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -16,6 +16,7 @@ #include "rockchip_drm_vop.h" #include "rockchip_vop_reg.h" +#include "rockchip_drm_drv.h" #define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \ { \ @@ -214,9 +215,11 @@ static const struct vop_modeset px30_modeset = { }; static const struct vop_output px30_output = { - .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1), - .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 
25), + .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1), + .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2), .rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0), + .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25), + .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26), .mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24), }; @@ -598,6 +601,7 @@ static const struct vop_common rk3288_common = { .dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2), .pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1), .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6), + .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0), .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19), .dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18), .out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0), @@ -646,6 +650,7 @@ static const struct vop_data rk3288_vop = { .output = &rk3288_output, .win = rk3288_vop_win_data, .win_size = ARRAY_SIZE(rk3288_vop_win_data), + .lut_size = 1024, }; static const int rk3368_vop_intrs[] = { @@ -717,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = { }; static const struct vop_output rk3368_output = { - .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16), - .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20), - .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24), - .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28), + .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31), + .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28), .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13), .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14), @@ -764,11 +773,16 @@ static const struct vop_data rk3366_vop = { }; static const struct vop_output rk3399_output = { - .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), - .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16), - .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20), - .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24), - .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28), + .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19), + .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31), + .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16), + .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28), .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11), .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13), @@ -872,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = { }; static const struct vop_output rk3328_output = { + .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31), .rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13), .edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14), .mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15), - .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16), - .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20), - 
.edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24), - .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28), + .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28), }; static const struct vop_misc rk3328_misc = { diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 1a5153197fe9..461a7a8129f4 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -23,6 +23,7 @@ #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/completion.h> #include <drm/drm_print.h> #include <drm/gpu_scheduler.h> @@ -68,6 +69,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, if (!entity->rq_list) return -ENOMEM; + init_completion(&entity->entity_idle); + for (i = 0; i < num_rq_list; ++i) entity->rq_list[i] = rq_list[i]; @@ -286,11 +289,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) */ if (spsc_queue_count(&entity->job_queue)) { if (sched) { - /* Park the kernel for a moment to make sure it isn't processing - * our enity. + /* + * Wait for thread to idle to make sure it isn't processing + * this entity. */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); + wait_for_completion(&entity->entity_idle); + } if (entity->dependency) { dma_fence_remove_callback(entity->dependency, diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 54977408f574..8b45c3a1b84e 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -128,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f) dma_fence_put(&fence->scheduled); } -const struct dma_fence_ops drm_sched_fence_ops_scheduled = { +static const struct dma_fence_ops drm_sched_fence_ops_scheduled = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, .release = drm_sched_fence_release_scheduled, }; -const struct dma_fence_ops drm_sched_fence_ops_finished = { +static const struct dma_fence_ops drm_sched_fence_ops_finished = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, .release = drm_sched_fence_release_finished, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 9a0ee74d82dc..3c57e84222ca 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -47,6 +47,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/sched.h> +#include <linux/completion.h> #include <uapi/linux/sched/types.h> #include <drm/drm_print.h> @@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) list_for_each_entry_continue(entity, &rq->entities, list) { if (drm_sched_entity_is_ready(entity)) { rq->current_entity = entity; + reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); return entity; } @@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) if (drm_sched_entity_is_ready(entity)) { rq->current_entity = entity; + reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); return entity; } @@ -479,6 +482,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) struct drm_sched_job *s_job, *tmp; uint64_t guilty_context; bool found_guilty = false; + struct dma_fence *fence; list_for_each_entry_safe(s_job, tmp, 
&sched->ring_mirror_list, node) { struct drm_sched_fence *s_fence = s_job->s_fence; @@ -492,7 +496,18 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) dma_fence_set_error(&s_fence->finished, -ECANCELED); dma_fence_put(s_job->s_fence->parent); - s_job->s_fence->parent = sched->ops->run_job(s_job); + fence = sched->ops->run_job(s_job); + + if (IS_ERR_OR_NULL(fence)) { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + + s_job->s_fence->parent = NULL; + } else { + s_job->s_fence->parent = fence; + } + + } } EXPORT_SYMBOL(drm_sched_resubmit_jobs); @@ -622,43 +637,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) } /** - * drm_sched_cleanup_jobs - destroy finished jobs + * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed * * @sched: scheduler instance * - * Remove all finished jobs from the mirror list and destroy them. + * Returns the next finished job from the mirror list (if there is one) + * ready to be destroyed. */ -static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched) +static struct drm_sched_job * +drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) { + struct drm_sched_job *job; unsigned long flags; - /* Don't destroy jobs while the timeout worker is running */ - if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - !cancel_delayed_work(&sched->work_tdr)) - return; - + /* + * Don't destroy jobs while the timeout worker is running OR the thread + * is being parked and hence assumed not to touch ring_mirror_list + */ + if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && - !cancel_delayed_work(&sched->work_tdr)) || + __kthread_should_park(sched->thread)) + return NULL; - while (!list_empty(&sched->ring_mirror_list)) { - struct drm_sched_job *job; + spin_lock_irqsave(&sched->job_list_lock, flags); - job = list_first_entry(&sched->ring_mirror_list, + job = list_first_entry_or_null(&sched->ring_mirror_list, struct drm_sched_job, node); - if (!dma_fence_is_signaled(&job->s_fence->finished)) - break; - spin_lock_irqsave(&sched->job_list_lock, flags); + if (job && dma_fence_is_signaled(&job->s_fence->finished)) { /* remove job from ring_mirror_list */ list_del_init(&job->node); - spin_unlock_irqrestore(&sched->job_list_lock, flags); - - sched->ops->free_job(job); + } else { + job = NULL; + /* queue timeout for next job */ + drm_sched_start_timeout(sched); } - /* queue timeout for next job */ - spin_lock_irqsave(&sched->job_list_lock, flags); - drm_sched_start_timeout(sched); spin_unlock_irqrestore(&sched->job_list_lock, flags); + return job; } /** @@ -698,17 +715,27 @@ static int drm_sched_main(void *param) struct drm_sched_fence *s_fence; struct drm_sched_job *sched_job; struct dma_fence *fence; + struct drm_sched_job *cleanup_job = NULL; wait_event_interruptible(sched->wake_up_worker, - (drm_sched_cleanup_jobs(sched), + (cleanup_job = drm_sched_get_cleanup_job(sched)) || (!drm_sched_blocked(sched) && (entity = drm_sched_select_entity(sched))) || - kthread_should_stop())); + kthread_should_stop()); + + if (cleanup_job) { + sched->ops->free_job(cleanup_job); + /* queue timeout for next job */ + drm_sched_start_timeout(sched); + } if (!entity) continue; sched_job = drm_sched_entity_pop_job(entity); + + complete(&entity->entity_idle); + if (!sched_job) continue; @@ -720,7 +747,7 @@ static int drm_sched_main(void *param) fence = sched->ops->run_job(sched_job); drm_sched_fence_scheduled(s_fence); - if (fence) { + if (!IS_ERR_OR_NULL(fence)) { s_fence->parent = dma_fence_get(fence);
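/*
 * At this point run_job() produced a real hardware fence: the
 * IS_ERR_OR_NULL() check above filtered out NULL (nothing to wait on)
 * and ERR_PTR() values, which the else branch further down records on
 * the finished fence via dma_fence_set_error() before completing the
 * job immediately. The reference taken here is held until the
 * completion callback armed just below has fired.
 */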
r = dma_fence_add_callback(fence, &sched_job->cb, drm_sched_process_job); @@ -730,8 +757,12 @@ static int drm_sched_main(void *param) DRM_ERROR("fence add callback failed (%d)\n", r); dma_fence_put(fence); - } else + } else { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + drm_sched_process_job(NULL, &sched_job->cb); + } wake_up(&sched->job_scheduled); } diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index a44dca4b0219..e8a317d5ba19 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -226,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, sun8i_hdmi_phy_init(hdmi->phy); plat_data->mode_valid = hdmi->quirks->mode_valid; + plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe; sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data); platform_set_drvdata(pdev, hdmi); @@ -300,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = { static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = { .mode_valid = sun8i_dw_hdmi_mode_valid_h6, + .use_drm_infoframe = true, }; static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = { diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index d707c9171824..8e64945167e9 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -179,6 +179,7 @@ struct sun8i_dw_hdmi_quirks { enum drm_mode_status (*mode_valid)(struct drm_connector *connector, const struct drm_display_mode *mode); unsigned int set_rate : 1; + unsigned int use_drm_infoframe : 1; }; struct sun8i_dw_hdmi { diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 1d1269fde3c1..5043dcaf1cf9 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -9,7 +9,7 @@ config DRM_TEGRA select DRM_MIPI_DSI select DRM_PANEL select TEGRA_HOST1X - select IOMMU_IOVA if IOMMU_SUPPORT + select IOMMU_IOVA select CEC_CORE if CEC_NOTIFIER help Choose this option if you have an NVIDIA Tegra SoC. diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index 33c463e8d49f..d6cf202414f0 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile @@ -5,6 +5,7 @@ tegra-drm-y := \ drm.o \ gem.o \ fb.o \ + dp.o \ hub.o \ plane.o \ dc.o \ diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index fbf57bc3cdab..5b1f9ff97576 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, window.swap = state->swap; for (i = 0; i < fb->format->num_planes; i++) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, i); - - window.base[i] = bo->paddr + fb->offsets[i]; + window.base[i] = state->iova[i] + fb->offsets[i]; /* * Tegra uses a shared stride for UV planes. 
Framebuffers are @@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_plane_atomic_check, .atomic_disable = tegra_plane_atomic_disable, .atomic_update = tegra_plane_atomic_update, @@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, return; } - value |= (bo->paddr >> 10) & 0x3fffff; + value |= (bo->iova >> 10) & 0x3fffff; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - value = (bo->paddr >> 32) & 0x3; + value = (bo->iova >> 32) & 0x3; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); #endif @@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_cursor_atomic_check, .atomic_update = tegra_cursor_atomic_update, .atomic_disable = tegra_cursor_atomic_disable, @@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client) if (!dc->syncpt) dev_warn(dc->dev, "failed to allocate syncpoint\n"); - dc->group = host1x_client_iommu_attach(client, true); - if (IS_ERR(dc->group)) { - err = PTR_ERR(dc->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); return err; } @@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client) goto cleanup; } + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent device. + */ + client->dev->dma_parms = client->parent->dma_parms; + return 0; cleanup: @@ -2083,7 +2090,7 @@ cleanup: if (!IS_ERR(primary)) drm_plane_cleanup(primary); - host1x_client_iommu_detach(client, dc->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(dc->syncpt); return err; @@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client) if (!tegra_dc_has_window_groups(dc)) return 0; + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + devm_free_irq(dc->dev, dc->irq, dc); err = tegra_dc_rgb_exit(dc); @@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client) return err; } - host1x_client_iommu_detach(client, dc->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(dc->syncpt); return 0; diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 0c4d17851f47..3d8ddccd758f 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -90,8 +90,6 @@ struct tegra_dc { struct drm_info_list *debugfs_files; const struct tegra_dc_soc_info *soc; - - struct iommu_group *group; }; static inline struct tegra_dc * diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c new file mode 100644 index 000000000000..70dfb7d1dec5 --- /dev/null +++ b/drivers/gpu/drm/tegra/dp.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2019 NVIDIA Corporation + * Copyright (C) 2015 Rob Clark + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> + +#include "dp.h" + +static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 }; + +static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps) +{ + caps->enhanced_framing = false; + caps->tps3_supported = false; + caps->fast_training 
= false; + caps->channel_coding = false; + caps->alternate_scrambler_reset = false; +} + +void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest, + const struct drm_dp_link_caps *src) +{ + dest->enhanced_framing = src->enhanced_framing; + dest->tps3_supported = src->tps3_supported; + dest->fast_training = src->fast_training; + dest->channel_coding = src->channel_coding; + dest->alternate_scrambler_reset = src->alternate_scrambler_reset; +} + +static void drm_dp_link_reset(struct drm_dp_link *link) +{ + unsigned int i; + + if (!link) + return; + + link->revision = 0; + link->max_rate = 0; + link->max_lanes = 0; + + drm_dp_link_caps_reset(&link->caps); + link->aux_rd_interval.cr = 0; + link->aux_rd_interval.ce = 0; + link->edp = 0; + + link->rate = 0; + link->lanes = 0; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) + link->rates[i] = 0; + + link->num_rates = 0; +} + +/** + * drm_dp_link_add_rate() - add a rate to the list of supported rates + * @link: the link to add the rate to + * @rate: the rate to add + * + * Add a link rate to the list of supported link rates. + * + * Returns: + * 0 on success or one of the following negative error codes on failure: + * - ENOSPC if the maximum number of supported rates has been reached + * - EEXIST if the link already supports this rate + * + * See also: + * drm_dp_link_remove_rate() + */ +int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate) +{ + unsigned int i, pivot; + + if (link->num_rates == DP_MAX_SUPPORTED_RATES) + return -ENOSPC; + + for (pivot = 0; pivot < link->num_rates; pivot++) + if (rate <= link->rates[pivot]) + break; + + if (pivot != link->num_rates && rate == link->rates[pivot]) + return -EEXIST; + + for (i = link->num_rates; i > pivot; i--) + link->rates[i] = link->rates[i - 1]; + + link->rates[pivot] = rate; + link->num_rates++; + + return 0; +} + +/** + * drm_dp_link_remove_rate() - remove a rate from the list of supported rates + * @link: the link from which to remove the rate + * @rate: the rate to remove + * + * Removes a link rate from the list of supported link rates. + * + * Returns: + * 0 on success or one of the following negative error codes on failure: + * - EINVAL if the specified rate is not among the supported rates + * + * See also: + * drm_dp_link_add_rate() + */ +int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate) +{ + unsigned int i; + + for (i = 0; i < link->num_rates; i++) + if (rate == link->rates[i]) + break; + + if (i == link->num_rates) + return -EINVAL; + + link->num_rates--; + + while (i < link->num_rates) { + link->rates[i] = link->rates[i + 1]; + i++; + } + + return 0; +} + +/** + * drm_dp_link_update_rates() - normalize the supported link rates array + * @link: the link for which to normalize the supported link rates + * + * Users should call this function after they've manually modified the array + * of supported link rates. This function removes any stale entries, compacts + * the array and updates the supported link rate count. Note that calling the + * drm_dp_link_remove_rate() function already does this janitorial work.
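A short usage sketch of the contract just described, purely as an editorial illustration (the 270000 threshold is an arbitrary example value in DRM's kHz link-rate convention):

unsigned int i;

/* Prune unwanted rates in place by zeroing them out... */
for (i = 0; i < link->num_rates; i++)
	if (link->rates[i] > 270000)
		link->rates[i] = 0;

/* ...then compact the array and refresh num_rates. Rates removed via
 * drm_dp_link_remove_rate() don't need this extra step. */
drm_dp_link_update_rates(link);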
+ * + * See also: + * drm_dp_link_add_rate(), drm_dp_link_remove_rate() + */ +void drm_dp_link_update_rates(struct drm_dp_link *link) +{ + unsigned int i, count = 0; + + for (i = 0; i < link->num_rates; i++) { + if (link->rates[i] != 0) + link->rates[count++] = link->rates[i]; + } + + for (i = count; i < link->num_rates; i++) + link->rates[i] = 0; + + link->num_rates = count; +} + +/** + * drm_dp_link_probe() - probe a DisplayPort link for capabilities + * @aux: DisplayPort AUX channel + * @link: pointer to structure in which to return link capabilities + * + * The structure filled in by this function can usually be passed directly + * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and + * configure the link based on the link's capabilities. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 dpcd[DP_RECEIVER_CAP_SIZE], value; + unsigned int rd_interval; + int err; + + drm_dp_link_reset(link); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd)); + if (err < 0) + return err; + + link->revision = dpcd[DP_DPCD_REV]; + link->max_rate = drm_dp_max_link_rate(dpcd); + link->max_lanes = drm_dp_max_lane_count(dpcd); + + link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd); + link->caps.tps3_supported = drm_dp_tps3_supported(dpcd); + link->caps.fast_training = drm_dp_fast_training_cap(dpcd); + link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd); + + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) { + link->caps.alternate_scrambler_reset = true; + + err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value); + if (err < 0) + return err; + + if (value >= ARRAY_SIZE(drm_dp_edp_revisions)) + DRM_ERROR("unsupported eDP version: %02x\n", value); + else + link->edp = drm_dp_edp_revisions[value]; + } + + /* + * The DPCD stores the AUX read interval in units of 4 ms. There are + * two special cases: + * + * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery + * and channel equalization should use 100 us or 400 us AUX read + * intervals, respectively + * + * 2) for DP v1.4 and above, clock recovery should always use 100 us + * AUX read intervals + */ + rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) { + DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n", + rd_interval); + rd_interval = 4; + } + + rd_interval *= 4 * USEC_PER_MSEC; + + if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14) + link->aux_rd_interval.cr = 100; + + if (rd_interval == 0) + link->aux_rd_interval.ce = 400; + + link->rate = link->max_rate; + link->lanes = link->max_lanes; + + /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */ + if (link->edp >= 0x14) { + u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2]; + unsigned int i; + u16 rate; + + err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, + supported_rates, + sizeof(supported_rates)); + if (err < 0) + return err; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) { + rate = supported_rates[i * 2 + 1] << 8 | + supported_rates[i * 2 + 0]; + + drm_dp_link_add_rate(link, rate * 200); + } + } + + return 0; +} + +/** + * drm_dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
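+ * + * Illustrative bring-up sequence (a sketch only, error handling elided; + * all names are from this file's API):: + * + * err = drm_dp_link_probe(aux, &link); + * err = drm_dp_link_power_up(aux, &link); + * err = drm_dp_link_choose(&link, mode, info); + * err = drm_dp_link_configure(aux, &link);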
+ */ +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field", register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} + +/** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +/** + * drm_dp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[2], value; + int err; + + if (link->ops && link->ops->configure) { + err = link->ops->configure(link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + } + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->lanes; + + if (link->caps.enhanced_framing) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + if (link->caps.channel_coding) + value = DP_SET_ANSI_8B10B; + else + value = 0; + + err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value); + if (err < 0) + return err; + + if (link->caps.alternate_scrambler_reset) { + err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, + DP_ALTERNATE_SCRAMBLER_RESET_ENABLE); + if (err < 0) + return err; + } + + return 0; +} + +/** + * drm_dp_link_choose() - choose the lowest possible configuration for a mode + * @link: DRM DP link object + * @mode: DRM display mode + * @info: DRM display information + * + * According to the eDP specification, a source should select a configuration + * with the lowest number of lanes and the lowest possible link rate that can + * match the bitrate requirements of a video mode. However, it must ensure not + * to exceed the capabilities of the sink. + * + * Returns: 0 on success or a negative error code on failure.
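+ * + * Worked example (illustrative): a 1920x1080@60 mode with a 148500 kHz pixel + * clock at 8 bpc requires 148500 * 8 * 3 = 3564000 kbps. For a sink limited + * to 270000 (HBR), one lane yields at most 1 * 2700000 * 8 / 10 = 2160000 + * kbps, so the search advances to two lanes, where 2 * 2700000 * 8 / 10 = + * 4320000 kbps at 270000 is the first combination that fits.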
+ */ +int drm_dp_link_choose(struct drm_dp_link *link, + const struct drm_display_mode *mode, + const struct drm_display_info *info) +{ + /* available link symbol clock rates */ + static const unsigned int rates[3] = { 162000, 270000, 540000 }; + /* available number of lanes */ + static const unsigned int lanes[3] = { 1, 2, 4 }; + unsigned long requirement, capacity; + unsigned int rate = link->max_rate; + unsigned int i, j; + + /* bandwidth requirement */ + requirement = mode->clock * info->bpc * 3; + + for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) { + for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) { + /* + * Capacity for this combination of lanes and rate, + * factoring in the ANSI 8B/10B encoding. + * + * Link rates in the DRM DP helpers are really link + * symbol frequencies, so a tenth of the actual rate + * of the link. + */ + capacity = lanes[i] * (rates[j] * 10) * 8 / 10; + + if (capacity >= requirement) { + DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n", + lanes[i], rates[j], requirement, + capacity); + link->lanes = lanes[i]; + link->rate = rates[j]; + return 0; + } + } + } + + return -ERANGE; +} + +/** + * DOC: Link training + * + * These functions contain common logic and helpers to implement DisplayPort + * link training. + */ + +/** + * drm_dp_link_train_init() - initialize DisplayPort link training state + * @train: DisplayPort link training state + */ +void drm_dp_link_train_init(struct drm_dp_link_train *train) +{ + struct drm_dp_link_train_set *request = &train->request; + struct drm_dp_link_train_set *adjust = &train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) { + request->voltage_swing[i] = 0; + adjust->voltage_swing[i] = 0; + + request->pre_emphasis[i] = 0; + adjust->pre_emphasis[i] = 0; + + request->post_cursor[i] = 0; + adjust->post_cursor[i] = 0; + } + + train->pattern = DP_TRAINING_PATTERN_DISABLE; + train->clock_recovered = false; + train->channel_equalized = false; +} + +static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train) +{ + return train->clock_recovered && train->channel_equalized; +} + +static int drm_dp_link_apply_training(struct drm_dp_link *link) +{ + struct drm_dp_link_train_set *request = &link->train.request; + unsigned int lanes = link->lanes, *vs, *pe, *pc, i; + struct drm_dp_aux *aux = link->aux; + u8 values[4], pattern = 0; + int err; + + err = link->ops->apply_training(link); + if (err < 0) { + DRM_ERROR("failed to apply link training: %d\n", err); + return err; + } + + vs = request->voltage_swing; + pe = request->pre_emphasis; + pc = request->post_cursor; + + /* write currently selected voltage-swing and pre-emphasis levels */ + for (i = 0; i < lanes; i++) + values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) | + DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]); + + err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes); + if (err < 0) { + DRM_ERROR("failed to set training parameters: %d\n", err); + return err; + } + + /* write currently selected post-cursor level (if supported) */ + if (link->revision >= 0x12 && link->rate == 540000) { + values[0] = values[1] = 0; + + for (i = 0; i < lanes; i++) + values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]); + + err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values, + DIV_ROUND_UP(lanes, 2)); + if (err < 0) { + DRM_ERROR("failed to set post-cursor: %d\n", err); + return err; + } + } + + /* write link pattern */ + if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE) + pattern |= DP_LINK_SCRAMBLING_DISABLE; + + pattern |= 
link->train.pattern; + + err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern); + if (err < 0) { + DRM_ERROR("failed to set training pattern: %d\n", err); + return err; + } + + return 0; +} + +static void drm_dp_link_train_wait(struct drm_dp_link *link) +{ + unsigned long min = 0; + + switch (link->train.pattern) { + case DP_TRAINING_PATTERN_1: + min = link->aux_rd_interval.cr; + break; + + case DP_TRAINING_PATTERN_2: + case DP_TRAINING_PATTERN_3: + min = link->aux_rd_interval.ce; + break; + + default: + break; + } + + if (min > 0) + usleep_range(min, 2 * min); +} + +static void drm_dp_link_get_adjustments(struct drm_dp_link *link, + u8 status[DP_LINK_STATUS_SIZE]) +{ + struct drm_dp_link_train_set *adjust = &link->train.adjust; + unsigned int i; + + for (i = 0; i < link->lanes; i++) { + adjust->voltage_swing[i] = + drm_dp_get_adjust_request_voltage(status, i) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + + adjust->pre_emphasis[i] = + drm_dp_get_adjust_request_pre_emphasis(status, i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + adjust->post_cursor[i] = + drm_dp_get_adjust_request_post_cursor(status, i); + } +} + +static void drm_dp_link_train_adjust(struct drm_dp_link_train *train) +{ + struct drm_dp_link_train_set *request = &train->request; + struct drm_dp_link_train_set *adjust = &train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) + if (request->voltage_swing[i] != adjust->voltage_swing[i]) + request->voltage_swing[i] = adjust->voltage_swing[i]; + + for (i = 0; i < 4; i++) + if (request->pre_emphasis[i] != adjust->pre_emphasis[i]) + request->pre_emphasis[i] = adjust->pre_emphasis[i]; + + for (i = 0; i < 4; i++) + if (request->post_cursor[i] != adjust->post_cursor[i]) + request->post_cursor[i] = adjust->post_cursor[i]; +} + +static int drm_dp_link_recover_clock(struct drm_dp_link *link) +{ + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + err = drm_dp_link_apply_training(link); + if (err < 0) + return err; + + drm_dp_link_train_wait(link); + + err = drm_dp_dpcd_read_link_status(link->aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + return err; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) + drm_dp_link_get_adjustments(link, status); + else + link->train.clock_recovered = true; + + return 0; +} + +static int drm_dp_link_clock_recovery(struct drm_dp_link *link) +{ + unsigned int repeat; + int err; + + /* start clock recovery using training pattern 1 */ + link->train.pattern = DP_TRAINING_PATTERN_1; + + for (repeat = 1; repeat < 5; repeat++) { + err = drm_dp_link_recover_clock(link); + if (err < 0) { + DRM_ERROR("failed to recover clock: %d\n", err); + return err; + } + + if (link->train.clock_recovered) + break; + + drm_dp_link_train_adjust(&link->train); + } + + return 0; +} + +static int drm_dp_link_equalize_channel(struct drm_dp_link *link) +{ + struct drm_dp_aux *aux = link->aux; + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + err = drm_dp_link_apply_training(link); + if (err < 0) + return err; + + drm_dp_link_train_wait(link); + + err = drm_dp_dpcd_read_link_status(aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + return err; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + DRM_ERROR("clock recovery lost while equalizing channel\n"); + link->train.clock_recovered = false; + return 0; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) + drm_dp_link_get_adjustments(link, status); + else + link->train.channel_equalized = true; + + return 0; +} + +static int 
drm_dp_link_channel_equalization(struct drm_dp_link *link) +{ + unsigned int repeat; + int err; + + /* start channel equalization using pattern 2 or 3 */ + if (link->caps.tps3_supported) + link->train.pattern = DP_TRAINING_PATTERN_3; + else + link->train.pattern = DP_TRAINING_PATTERN_2; + + for (repeat = 1; repeat < 5; repeat++) { + err = drm_dp_link_equalize_channel(link); + if (err < 0) { + DRM_ERROR("failed to equalize channel: %d\n", err); + return err; + } + + if (link->train.channel_equalized) + break; + + drm_dp_link_train_adjust(&link->train); + } + + return 0; +} + +static int drm_dp_link_downgrade(struct drm_dp_link *link) +{ + switch (link->rate) { + case 162000: + return -EINVAL; + + case 270000: + link->rate = 162000; + break; + + case 540000: + link->rate = 270000; + return 0; + } + + return 0; +} + +static void drm_dp_link_train_disable(struct drm_dp_link *link) +{ + int err; + + link->train.pattern = DP_TRAINING_PATTERN_DISABLE; + + err = drm_dp_link_apply_training(link); + if (err < 0) + DRM_ERROR("failed to disable link training: %d\n", err); +} + +static int drm_dp_link_train_full(struct drm_dp_link *link) +{ + int err; + +retry: + DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? "s" : "", + link->rate / 100); + + err = drm_dp_link_configure(link->aux, link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + + err = drm_dp_link_clock_recovery(link); + if (err < 0) { + DRM_ERROR("clock recovery failed: %d\n", err); + goto out; + } + + if (!link->train.clock_recovered) { + DRM_ERROR("clock recovery failed, downgrading link\n"); + + err = drm_dp_link_downgrade(link); + if (err < 0) + goto out; + + goto retry; + } + + DRM_DEBUG_KMS("clock recovery succeeded\n"); + + err = drm_dp_link_channel_equalization(link); + if (err < 0) { + DRM_ERROR("channel equalization failed: %d\n", err); + goto out; + } + + if (!link->train.channel_equalized) { + DRM_ERROR("channel equalization failed, downgrading link\n"); + + err = drm_dp_link_downgrade(link); + if (err < 0) + goto out; + + goto retry; + } + + DRM_DEBUG_KMS("channel equalization succeeded\n"); + +out: + drm_dp_link_train_disable(link); + return err; +} + +static int drm_dp_link_train_fast(struct drm_dp_link *link) +{ + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? 
"s" : "", + link->rate / 100); + + err = drm_dp_link_configure(link->aux, link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + + /* transmit training pattern 1 for 500 microseconds */ + link->train.pattern = DP_TRAINING_PATTERN_1; + + err = drm_dp_link_apply_training(link); + if (err < 0) + goto out; + + usleep_range(500, 1000); + + /* transmit training pattern 2 or 3 for 500 microseconds */ + if (link->caps.tps3_supported) + link->train.pattern = DP_TRAINING_PATTERN_3; + else + link->train.pattern = DP_TRAINING_PATTERN_2; + + err = drm_dp_link_apply_training(link); + if (err < 0) + goto out; + + usleep_range(500, 1000); + + err = drm_dp_dpcd_read_link_status(link->aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + goto out; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + DRM_ERROR("clock recovery failed\n"); + err = -EIO; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) { + DRM_ERROR("channel equalization failed\n"); + err = -EIO; + } + +out: + drm_dp_link_train_disable(link); + return err; +} + +/** + * drm_dp_link_train() - perform DisplayPort link training + * @link: a DP link object + * + * Uses the context stored in the DP link object to perform link training. It + * is expected that drivers will call drm_dp_link_probe() to obtain the link + * capabilities before performing link training. + * + * If the sink supports fast link training (no AUX CH handshake) and valid + * training settings are available, this function will try to perform fast + * link training and fall back to full link training on failure. + * + * Returns: 0 on success or a negative error code on failure. + */ +int drm_dp_link_train(struct drm_dp_link *link) +{ + int err; + + drm_dp_link_train_init(&link->train); + + if (link->caps.fast_training) { + if (drm_dp_link_train_valid(&link->train)) { + err = drm_dp_link_train_fast(link); + if (err < 0) + DRM_ERROR("fast link training failed: %d\n", + err); + else + return 0; + } else { + DRM_DEBUG_KMS("training parameters not available\n"); + } + } else { + DRM_DEBUG_KMS("fast link training not supported\n"); + } + + err = drm_dp_link_train_full(link); + if (err < 0) + DRM_ERROR("full link training failed: %d\n", err); + + return err; +} diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h new file mode 100644 index 000000000000..cb12ed0c54e7 --- /dev/null +++ b/drivers/gpu/drm/tegra/dp.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2013-2019 NVIDIA Corporation. 
+ * Copyright (C) 2015 Rob Clark + */ + +#ifndef DRM_TEGRA_DP_H +#define DRM_TEGRA_DP_H 1 + +#include <linux/types.h> + +struct drm_display_info; +struct drm_display_mode; +struct drm_dp_aux; +struct drm_dp_link; + +/** + * struct drm_dp_link_caps - DP link capabilities + */ +struct drm_dp_link_caps { + /** + * @enhanced_framing: + * + * enhanced framing capability (mandatory as of DP 1.2) + */ + bool enhanced_framing; + + /** + * @tps3_supported: + * + * training pattern sequence 3 supported for equalization + */ + bool tps3_supported; + + /** + * @fast_training: + * + * AUX CH handshake not required for link training + */ + bool fast_training; + + /** + * @channel_coding: + * + * ANSI 8B/10B channel coding capability + */ + bool channel_coding; + + /** + * @alternate_scrambler_reset: + * + * eDP alternate scrambler reset capability + */ + bool alternate_scrambler_reset; +}; + +void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest, + const struct drm_dp_link_caps *src); + +/** + * struct drm_dp_link_ops - DP link operations + */ +struct drm_dp_link_ops { + /** + * @apply_training: + */ + int (*apply_training)(struct drm_dp_link *link); + + /** + * @configure: + */ + int (*configure)(struct drm_dp_link *link); +}; + +#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0) +#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3) +#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2)) + +/** + * struct drm_dp_link_train_set - link training settings + * @voltage_swing: per-lane voltage swing + * @pre_emphasis: per-lane pre-emphasis + * @post_cursor: per-lane post-cursor + */ +struct drm_dp_link_train_set { + unsigned int voltage_swing[4]; + unsigned int pre_emphasis[4]; + unsigned int post_cursor[4]; +}; + +/** + * struct drm_dp_link_train - link training state information + * @request: currently requested settings + * @adjust: adjustments requested by sink + * @pattern: currently requested training pattern + * @clock_recovered: flag to track if clock recovery has completed + * @channel_equalized: flag to track if channel equalization has completed + */ +struct drm_dp_link_train { + struct drm_dp_link_train_set request; + struct drm_dp_link_train_set adjust; + + unsigned int pattern; + + bool clock_recovered; + bool channel_equalized; +}; + +/** + * struct drm_dp_link - DP link capabilities and configuration + * @revision: DP specification revision supported on the link + * @max_rate: maximum clock rate supported on the link + * @max_lanes: maximum number of lanes supported on the link + * @caps: capabilities supported on the link (see &drm_dp_link_caps) + * @aux_rd_interval: AUX read interval to use for training (in microseconds) + * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate + * @lanes: currently configured number of lanes + * @rates: additional supported link rates in kHz (eDP 1.4) + * @num_rates: number of additional supported link rates (eDP 1.4) + */ +struct drm_dp_link { + unsigned char revision; + unsigned int max_rate; + unsigned int max_lanes; + + struct drm_dp_link_caps caps; + + /** + * @cr: clock recovery read interval + * @ce: channel equalization read interval + */ + struct { + unsigned int cr; + unsigned int ce; + } aux_rd_interval; + + unsigned char edp; + + unsigned int rate; + unsigned int lanes; + + unsigned long rates[DP_MAX_SUPPORTED_RATES]; + unsigned int num_rates; + + /** + * @ops: DP link operations + */ + const struct drm_dp_link_ops *ops; + + /** + * @aux: DP AUX channel + */ + struct drm_dp_aux *aux; + + /** + * @train: DP link training state + */ + struct drm_dp_link_train train; +}; + +int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate); +int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate); +void drm_dp_link_update_rates(struct drm_dp_link *link); + +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_choose(struct drm_dp_link *link, + const struct drm_display_mode *mode, + const struct drm_display_info *info); + +void drm_dp_link_train_init(struct drm_dp_link_train *train); +int drm_dp_link_train(struct drm_dp_link *link); + +#endif diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index a0f6f9b0d258..622cdf1ad246 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> @@ -22,6 +23,7 @@ #include <drm/drm_dp_helper.h> #include <drm/drm_panel.h> +#include "dp.h" #include "dpaux.h" #include "drm.h" #include "trace.h" @@ -29,10 +31,18 @@ static DEFINE_MUTEX(dpaux_lock); static LIST_HEAD(dpaux_list); +struct tegra_dpaux_soc { + unsigned int cmh; + unsigned int drvz; + unsigned int drvi; +}; + struct tegra_dpaux { struct drm_dp_aux aux; struct device *dev; + const struct tegra_dpaux_soc *soc; + void __iomem *regs; int irq; @@ -120,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, struct tegra_dpaux *dpaux = to_dpaux(aux); unsigned long status; ssize_t ret = 0; + u8 reply = 0; u32 value; /* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. 
*/ @@ -214,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) { case 0x00: - msg->reply = DP_AUX_NATIVE_REPLY_ACK; + reply = DP_AUX_NATIVE_REPLY_ACK; break; case 0x01: - msg->reply = DP_AUX_NATIVE_REPLY_NACK; + reply = DP_AUX_NATIVE_REPLY_NACK; break; case 0x02: - msg->reply = DP_AUX_NATIVE_REPLY_DEFER; + reply = DP_AUX_NATIVE_REPLY_DEFER; break; case 0x04: - msg->reply = DP_AUX_I2C_REPLY_NACK; + reply = DP_AUX_I2C_REPLY_NACK; break; case 0x08: - msg->reply = DP_AUX_I2C_REPLY_DEFER; + reply = DP_AUX_I2C_REPLY_DEFER; break; } @@ -238,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, if (msg->request & DP_AUX_I2C_READ) { size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK; - if (WARN_ON(count != msg->size)) - count = min_t(size_t, count, msg->size); + /* + * There might be a smarter way to do this, but since + * the DP helpers will already retry transactions for + * an -EBUSY return value, simply reuse that instead. + */ + if (count != msg->size) { + ret = -EBUSY; + goto out; + } tegra_dpaux_read_fifo(dpaux, msg->buffer, count); ret = count; } } + msg->reply = reply; + +out: return ret; } @@ -310,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) switch (function) { case DPAUX_PADCTL_FUNC_AUX: - value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) | DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV | DPAUX_HYBRID_PADCTL_MODE_AUX; break; @@ -320,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) case DPAUX_PADCTL_FUNC_I2C: value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV | DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | - DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) | DPAUX_HYBRID_PADCTL_MODE_I2C; break; @@ -436,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) if (!dpaux) return -ENOMEM; + dpaux->soc = of_device_get_match_data(&pdev->dev); INIT_WORK(&dpaux->work, tegra_dpaux_hotplug); init_completion(&dpaux->complete); INIT_LIST_HEAD(&dpaux->list); @@ -493,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return PTR_ERR(dpaux->vdd); } + + dpaux->vdd = NULL; } platform_set_drvdata(pdev, dpaux); @@ -641,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = { SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL) }; +static const struct tegra_dpaux_soc tegra124_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x18, +}; + +static const struct tegra_dpaux_soc tegra210_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x30, +}; + +static const struct tegra_dpaux_soc tegra194_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x2c, +}; + static const struct of_device_id tegra_dpaux_of_match[] = { - { .compatible = "nvidia,tegra194-dpaux", }, - { .compatible = "nvidia,tegra186-dpaux", }, - { .compatible = "nvidia,tegra210-dpaux", }, - { .compatible = "nvidia,tegra124-dpaux", }, + { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc }, + { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc }, + { .compatible = 
"nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc }, + { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc }, { }, }; MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match); @@ -686,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output) output->connector.polled = DRM_CONNECTOR_POLL_HPD; dpaux->output = output; - err = regulator_enable(dpaux->vdd); - if (err < 0) - return err; + if (output->panel) { + enum drm_connector_status status; - timeout = jiffies + msecs_to_jiffies(250); + if (dpaux->vdd) { + err = regulator_enable(dpaux->vdd); + if (err < 0) + return err; + } - while (time_before(jiffies, timeout)) { - enum drm_connector_status status; + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + status = drm_dp_aux_detect(aux); + + if (status == connector_status_connected) + break; - status = drm_dp_aux_detect(aux); - if (status == connector_status_connected) { - enable_irq(dpaux->irq); - return 0; + usleep_range(1000, 2000); } - usleep_range(1000, 2000); + if (status != connector_status_connected) + return -ETIMEDOUT; } - return -ETIMEDOUT; + enable_irq(dpaux->irq); + return 0; } int drm_dp_aux_detach(struct drm_dp_aux *aux) @@ -715,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux) disable_irq(dpaux->irq); - err = regulator_disable(dpaux->vdd); - if (err < 0) - return err; + if (dpaux->output->panel) { + enum drm_connector_status status; - timeout = jiffies + msecs_to_jiffies(250); + if (dpaux->vdd) { + err = regulator_disable(dpaux->vdd); + if (err < 0) + return err; + } - while (time_before(jiffies, timeout)) { - enum drm_connector_status status; + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + status = drm_dp_aux_detect(aux); + + if (status == connector_status_disconnected) + break; - status = drm_dp_aux_detect(aux); - if (status == connector_status_disconnected) { - dpaux->output = NULL; - return 0; + usleep_range(1000, 2000); } - usleep_range(1000, 2000); + if (status != connector_status_disconnected) + return -ETIMEDOUT; + + dpaux->output = NULL; } - return -ETIMEDOUT; + return 0; } enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux) @@ -764,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux) return 0; } - -int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding) -{ - int err; - - err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, - encoding); - if (err < 0) - return err; - - return 0; -} - -int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link, - u8 pattern) -{ - u8 tp = pattern & DP_TRAINING_PATTERN_MASK; - u8 status[DP_LINK_STATUS_SIZE], values[4]; - unsigned int i; - int err; - - err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern); - if (err < 0) - return err; - - if (tp == DP_TRAINING_PATTERN_DISABLE) - return 0; - - for (i = 0; i < link->num_lanes; i++) - values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | - DP_TRAIN_PRE_EMPH_LEVEL_0 | - DP_TRAIN_MAX_SWING_REACHED | - DP_TRAIN_VOLTAGE_SWING_LEVEL_0; - - err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, - link->num_lanes); - if (err < 0) - return err; - - usleep_range(500, 1000); - - err = drm_dp_dpcd_read_link_status(aux, status); - if (err < 0) - return err; - - switch (tp) { - case DP_TRAINING_PATTERN_1: - if (!drm_dp_clock_recovery_ok(status, link->num_lanes)) - return -EAGAIN; - - break; - - case DP_TRAINING_PATTERN_2: - if (!drm_dp_channel_eq_ok(status, link->num_lanes)) - return -EAGAIN; - - break; - - default: - 
dev_err(aux->dev, "unsupported training pattern %u\n", tp); - return -EINVAL; - } - - err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0); - if (err < 0) - return err; - - return 0; -} diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6fb7d74ff553..56e5e7a5c108 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -20,10 +20,6 @@ #include <drm/drm_prime.h> #include <drm/drm_vblank.h> -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) -#include <asm/dma-iommu.h> -#endif - #include "drm.h" #include "gem.h" @@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = { .atomic_commit_tail = tegra_atomic_commit_tail, }; -static int tegra_drm_load(struct drm_device *drm, unsigned long flags) -{ - struct host1x_device *device = to_host1x_device(drm->dev); - struct tegra_drm *tegra; - int err; - - tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); - if (!tegra) - return -ENOMEM; - - if (iommu_present(&platform_bus_type)) { - tegra->domain = iommu_domain_alloc(&platform_bus_type); - if (!tegra->domain) { - err = -ENOMEM; - goto free; - } - - err = iova_cache_get(); - if (err < 0) - goto domain; - } - - mutex_init(&tegra->clients_lock); - INIT_LIST_HEAD(&tegra->clients); - - drm->dev_private = tegra; - tegra->drm = drm; - - drm_mode_config_init(drm); - - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.allow_fb_modifiers = true; - - drm->mode_config.normalize_zpos = true; - - drm->mode_config.funcs = &tegra_drm_mode_config_funcs; - drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; - - err = tegra_drm_fb_prepare(drm); - if (err < 0) - goto config; - - drm_kms_helper_poll_init(drm); - - err = host1x_device_init(device); - if (err < 0) - goto fbdev; - - if (tegra->domain) { - u64 carveout_start, carveout_end, gem_start, gem_end; - u64 dma_mask = dma_get_mask(&device->dev); - dma_addr_t start, end; - unsigned long order; - - start = tegra->domain->geometry.aperture_start & dma_mask; - end = tegra->domain->geometry.aperture_end & dma_mask; - - gem_start = start; - gem_end = end - CARVEOUT_SZ; - carveout_start = gem_end + 1; - carveout_end = end; - - order = __ffs(tegra->domain->pgsize_bitmap); - init_iova_domain(&tegra->carveout.domain, 1UL << order, - carveout_start >> order); - - tegra->carveout.shift = iova_shift(&tegra->carveout.domain); - tegra->carveout.limit = carveout_end >> tegra->carveout.shift; - - drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); - mutex_init(&tegra->mm_lock); - - DRM_DEBUG("IOMMU apertures:\n"); - DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end); - DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start, - carveout_end); - } - - if (tegra->hub) { - err = tegra_display_hub_prepare(tegra->hub); - if (err < 0) - goto device; - } - - /* - * We don't use the drm_irq_install() helpers provided by the DRM - * core, so we need to set this manually in order to allow the - * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
- */ - drm->irq_enabled = true; - - /* syncpoints are used for full 32-bit hardware VBLANK counters */ - drm->max_vblank_count = 0xffffffff; - - err = drm_vblank_init(drm, drm->mode_config.num_crtc); - if (err < 0) - goto hub; - - drm_mode_config_reset(drm); - - err = tegra_drm_fb_init(drm); - if (err < 0) - goto hub; - - return 0; - -hub: - if (tegra->hub) - tegra_display_hub_cleanup(tegra->hub); -device: - host1x_device_exit(device); -fbdev: - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_free(drm); -config: - drm_mode_config_cleanup(drm); - - if (tegra->domain) { - mutex_destroy(&tegra->mm_lock); - drm_mm_takedown(&tegra->mm); - put_iova_domain(&tegra->carveout.domain); - iova_cache_put(); - } -domain: - if (tegra->domain) - iommu_domain_free(tegra->domain); -free: - kfree(tegra); - return err; -} - -static void tegra_drm_unload(struct drm_device *drm) -{ - struct host1x_device *device = to_host1x_device(drm->dev); - struct tegra_drm *tegra = drm->dev_private; - int err; - - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_exit(drm); - drm_atomic_helper_shutdown(drm); - drm_mode_config_cleanup(drm); - - err = host1x_device_exit(device); - if (err < 0) - return; - - if (tegra->domain) { - mutex_destroy(&tegra->mm_lock); - drm_mm_takedown(&tegra->mm); - put_iova_domain(&tegra->carveout.domain); - iova_cache_put(); - iommu_domain_free(tegra->domain); - } - - kfree(tegra); -} - static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) { struct tegra_drm_file *fpriv; @@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, if (err < 0) return err; + dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE; + dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf); if (!dest->cmdbuf.bo) return -ENOENT; @@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor) static struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, - .load = tegra_drm_load, - .unload = tegra_drm_unload, .open = tegra_drm_open, .postclose = tegra_drm_postclose, .lastclose = drm_fb_helper_lastclose, @@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra, return 0; } -struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, - bool shared) +int host1x_client_iommu_attach(struct host1x_client *client) { + struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev); struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_drm *tegra = drm->dev_private; struct iommu_group *group = NULL; int err; + /* + * If the host1x client is already attached to an IOMMU domain that is + * not the shared IOMMU domain, don't try to attach it to a different + * domain. This allows using the IOMMU-backed DMA API. 
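+ * (A common case, assuming a standard IOMMU-backed DMA setup: a client whose + * device tree node carries an iommus property will typically have been + * attached to its own DMA domain at probe time, and that attachment must be + * left untouched here.)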
+ */ + if (domain && domain != tegra->domain) + return 0; + if (tegra->domain) { group = iommu_group_get(client->dev); if (!group) { dev_err(client->dev, "failed to get IOMMU group\n"); - return ERR_PTR(-ENODEV); + return -ENODEV; } - if (!shared || (shared && (group != tegra->group))) { -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) - if (client->dev->archdata.mapping) { - struct dma_iommu_mapping *mapping = - to_dma_iommu_mapping(client->dev); - arm_iommu_detach_device(client->dev); - arm_iommu_release_mapping(mapping); - } -#endif + if (domain != tegra->domain) { err = iommu_attach_group(tegra->domain, group); if (err < 0) { iommu_group_put(group); - return ERR_PTR(err); + return err; } - - if (shared && !tegra->group) - tegra->group = group; } + + tegra->use_explicit_iommu = true; } - return group; + client->group = group; + + return 0; } -void host1x_client_iommu_detach(struct host1x_client *client, - struct iommu_group *group) +void host1x_client_iommu_detach(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_drm *tegra = drm->dev_private; + struct iommu_domain *domain; - if (group) { - if (group == tegra->group) { - iommu_detach_group(tegra->domain, group); - tegra->group = NULL; - } + if (client->group) { + /* + * Devices that are part of the same group may no longer be + * attached to a domain at this point because their group may + * have been detached by an earlier client. + */ + domain = iommu_get_domain_for_dev(client->dev); + if (domain) + iommu_detach_group(tegra->domain, client->group); - iommu_group_put(group); + iommu_group_put(client->group); + client->group = NULL; } } @@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, static int host1x_drm_probe(struct host1x_device *dev) { struct drm_driver *driver = &tegra_drm_driver; + struct iommu_domain *domain; + struct tegra_drm *tegra; struct drm_device *drm; int err; @@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev) if (IS_ERR(drm)) return PTR_ERR(drm); + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); + if (!tegra) { + err = -ENOMEM; + goto put; + } + + /* + * If the Tegra DRM clients are backed by an IOMMU, push buffers are + * likely to be allocated beyond the 32-bit boundary if sufficient + * system memory is available. This is problematic on earlier Tegra + * generations where host1x supports a maximum of 32 address bits in + * the GATHER opcode. In this case, unless host1x is behind an IOMMU + * as well it won't be able to process buffers allocated beyond the + * 32-bit boundary. + * + * The DMA API will use bounce buffers in this case, so that could + * perhaps still be made to work, even if less efficient, but there + * is another catch: in order to perform cache maintenance on pages + * allocated for discontiguous buffers we need to map and unmap the + * SG table representing these buffers. This is fine for something + * small like a push buffer, but it exhausts the bounce buffer pool + * (typically on the order of a few MiB) for framebuffers (many MiB + * for any modern resolution). + * + * Work around this by making sure that Tegra DRM clients only use + * an IOMMU if the parent host1x also uses an IOMMU. + * + * Note that there's still a small gap here that we don't cover: if + * the DMA API is backed by an IOMMU there's no way to control which + * device is attached to an IOMMU and which isn't, except via wiring + * up the device tree appropriately. 
This is considered a problem + * of integration, so care must be taken for the DT to be consistent. + */ + domain = iommu_get_domain_for_dev(drm->dev->parent); + + if (domain && iommu_present(&platform_bus_type)) { + tegra->domain = iommu_domain_alloc(&platform_bus_type); + if (!tegra->domain) { + err = -ENOMEM; + goto free; + } + + err = iova_cache_get(); + if (err < 0) + goto domain; + } + + mutex_init(&tegra->clients_lock); + INIT_LIST_HEAD(&tegra->clients); + dev_set_drvdata(&dev->dev, drm); + drm->dev_private = tegra; + tegra->drm = drm; + + drm_mode_config_init(drm); - err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false); + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + + drm->mode_config.allow_fb_modifiers = true; + + drm->mode_config.normalize_zpos = true; + + drm->mode_config.funcs = &tegra_drm_mode_config_funcs; + drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; + + err = tegra_drm_fb_prepare(drm); if (err < 0) - goto put; + goto config; + + drm_kms_helper_poll_init(drm); + + err = host1x_device_init(dev); + if (err < 0) + goto fbdev; + + if (tegra->use_explicit_iommu) { + u64 carveout_start, carveout_end, gem_start, gem_end; + u64 dma_mask = dma_get_mask(&dev->dev); + dma_addr_t start, end; + unsigned long order; + + start = tegra->domain->geometry.aperture_start & dma_mask; + end = tegra->domain->geometry.aperture_end & dma_mask; + + gem_start = start; + gem_end = end - CARVEOUT_SZ; + carveout_start = gem_end + 1; + carveout_end = end; + + order = __ffs(tegra->domain->pgsize_bitmap); + init_iova_domain(&tegra->carveout.domain, 1UL << order, + carveout_start >> order); + + tegra->carveout.shift = iova_shift(&tegra->carveout.domain); + tegra->carveout.limit = carveout_end >> tegra->carveout.shift; + + drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); + mutex_init(&tegra->mm_lock); + + DRM_DEBUG_DRIVER("IOMMU apertures:\n"); + DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end); + DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start, + carveout_end); + } else if (tegra->domain) { + iommu_domain_free(tegra->domain); + tegra->domain = NULL; + iova_cache_put(); + } + + if (tegra->hub) { + err = tegra_display_hub_prepare(tegra->hub); + if (err < 0) + goto device; + } + + /* + * We don't use the drm_irq_install() helpers provided by the DRM + * core, so we need to set this manually in order to allow the + * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */ + drm->irq_enabled = true; + + /* syncpoints are used for full 32-bit hardware VBLANK counters */ + drm->max_vblank_count = 0xffffffff; + + err = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (err < 0) + goto hub; + + drm_mode_config_reset(drm); + + err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", + false); + if (err < 0) + goto hub; + + err = tegra_drm_fb_init(drm); + if (err < 0) + goto hub; err = drm_dev_register(drm, 0); if (err < 0) - goto put; + goto fb; return 0; +fb: + tegra_drm_fb_exit(drm); +hub: + if (tegra->hub) + tegra_display_hub_cleanup(tegra->hub); +device: + if (tegra->domain) { + mutex_destroy(&tegra->mm_lock); + drm_mm_takedown(&tegra->mm); + put_iova_domain(&tegra->carveout.domain); + iova_cache_put(); + } + + host1x_device_exit(dev); +fbdev: + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_free(drm); +config: + drm_mode_config_cleanup(drm); +domain: + if (tegra->domain) + iommu_domain_free(tegra->domain); +free: + kfree(tegra); put: drm_dev_put(drm); return err; @@ -1229,8 +1233,29 @@ put: static int host1x_drm_remove(struct host1x_device *dev) { struct drm_device *drm = dev_get_drvdata(&dev->dev); + struct tegra_drm *tegra = drm->dev_private; + int err; drm_dev_unregister(drm); + + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_exit(drm); + drm_atomic_helper_shutdown(drm); + drm_mode_config_cleanup(drm); + + err = host1x_device_exit(dev); + if (err < 0) + dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err); + + if (tegra->domain) { + mutex_destroy(&tegra->mm_lock); + drm_mm_takedown(&tegra->mm); + put_iova_domain(&tegra->carveout.domain); + iova_cache_put(); + iommu_domain_free(tegra->domain); + } + + kfree(tegra); drm_dev_put(drm); return 0; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 29911eff9ceb..d941553f7a3d 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -36,7 +36,7 @@ struct tegra_drm { struct drm_device *drm; struct iommu_domain *domain; - struct iommu_group *group; + bool use_explicit_iommu; struct mutex mm_lock; struct drm_mm mm; @@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra, struct tegra_drm_client *client); int tegra_drm_unregister_client(struct tegra_drm *tegra, struct tegra_drm_client *client); -struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, - bool shared); -void host1x_client_iommu_detach(struct host1x_client *client, - struct iommu_group *group); +int host1x_client_iommu_attach(struct host1x_client *client); +void host1x_client_iommu_detach(struct host1x_client *client); int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); int tegra_drm_exit(struct tegra_drm *tegra); @@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector); void tegra_output_encoder_destroy(struct drm_encoder *encoder); /* from dpaux.c */ -struct drm_dp_link; - struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np); enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux); int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output); int drm_dp_aux_detach(struct drm_dp_aux *aux); int drm_dp_aux_enable(struct drm_dp_aux *aux); int drm_dp_aux_disable(struct drm_dp_aux *aux); -int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding); -int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link, - u8 pattern); /* from fb.c */ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, diff --git 
a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c index f49ad36e24db..56edef06c48e 100644 --- a/drivers/gpu/drm/tegra/falcon.c +++ b/drivers/gpu/drm/tegra/falcon.c @@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon, static void falcon_copy_firmware_image(struct falcon *falcon, const struct firmware *firmware) { - u32 *firmware_vaddr = falcon->firmware.vaddr; - dma_addr_t daddr; + u32 *virt = falcon->firmware.virt; size_t i; - int err; /* copy the whole thing taking into account endianness */ for (i = 0; i < firmware->size / sizeof(u32); i++) - firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]); - - /* ensure that caches are flushed and falcon can see the firmware */ - daddr = dma_map_single(falcon->dev, firmware_vaddr, - falcon->firmware.size, DMA_TO_DEVICE); - err = dma_mapping_error(falcon->dev, daddr); - if (err) { - dev_err(falcon->dev, "failed to map firmware: %d\n", err); - return; - } - dma_sync_single_for_device(falcon->dev, daddr, - falcon->firmware.size, DMA_TO_DEVICE); - dma_unmap_single(falcon->dev, daddr, falcon->firmware.size, - DMA_TO_DEVICE); + virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]); } static int falcon_parse_firmware_image(struct falcon *falcon) { - struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr; + struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt; struct falcon_fw_os_header_v1 *os; /* endian problems would show up right here */ @@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon) return -EINVAL; } - os = falcon->firmware.vaddr + bin->os_header_offset; + os = falcon->firmware.virt + bin->os_header_offset; falcon->firmware.bin_data.size = bin->os_size; falcon->firmware.bin_data.offset = bin->os_data_offset; @@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name) if (err < 0) return err; + falcon->firmware.size = falcon->firmware.firmware->size; + return 0; } @@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon) const struct firmware *firmware = falcon->firmware.firmware; int err; - falcon->firmware.size = firmware->size; - - /* allocate iova space for the firmware */ - falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size, - &falcon->firmware.paddr); - if (IS_ERR(falcon->firmware.vaddr)) { - dev_err(falcon->dev, "DMA memory mapping failed\n"); - return PTR_ERR(falcon->firmware.vaddr); - } - /* copy firmware image into local area. 
this also ensures endianness */ falcon_copy_firmware_image(falcon, firmware); @@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon) err = falcon_parse_firmware_image(falcon); if (err < 0) { dev_err(falcon->dev, "failed to parse firmware image\n"); - goto err_setup_firmware_image; + return err; } release_firmware(firmware); falcon->firmware.firmware = NULL; return 0; - -err_setup_firmware_image: - falcon->ops->free(falcon, falcon->firmware.size, - falcon->firmware.paddr, falcon->firmware.vaddr); - - return err; } int falcon_init(struct falcon *falcon) { - /* check mandatory ops */ - if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free) - return -EINVAL; - - falcon->firmware.vaddr = NULL; + falcon->firmware.virt = NULL; return 0; } void falcon_exit(struct falcon *falcon) { - if (falcon->firmware.firmware) { + if (falcon->firmware.firmware) release_firmware(falcon->firmware.firmware); - falcon->firmware.firmware = NULL; - } - - if (falcon->firmware.vaddr) { - falcon->ops->free(falcon, falcon->firmware.size, - falcon->firmware.paddr, - falcon->firmware.vaddr); - falcon->firmware.vaddr = NULL; - } } int falcon_boot(struct falcon *falcon) @@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon) u32 value; int err; - if (!falcon->firmware.vaddr) + if (!falcon->firmware.virt) return -EINVAL; err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value, @@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon) falcon_writel(falcon, 0, FALCON_DMACTL); /* setup the address of the binary data so Falcon can access it later */ - falcon_writel(falcon, (falcon->firmware.paddr + + falcon_writel(falcon, (falcon->firmware.iova + falcon->firmware.bin_data.offset) >> 8, FALCON_DMATRFBASE); diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h index 3d1243217410..c56ee32d92ee 100644 --- a/drivers/gpu/drm/tegra/falcon.h +++ b/drivers/gpu/drm/tegra/falcon.h @@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 { u32 data_size; }; -struct falcon; - -struct falcon_ops { - void *(*alloc)(struct falcon *falcon, size_t size, - dma_addr_t *paddr); - void (*free)(struct falcon *falcon, size_t size, - dma_addr_t paddr, void *vaddr); -}; - struct falcon_firmware_section { unsigned long offset; size_t size; @@ -93,8 +84,9 @@ struct falcon_firmware { const struct firmware *firmware; /* Raw firmware data */ - dma_addr_t paddr; - void *vaddr; + dma_addr_t iova; + dma_addr_t phys; + void *virt; size_t size; /* Parsed firmware information */ @@ -107,8 +99,6 @@ struct falcon { /* Set by falcon client */ struct device *dev; void __iomem *regs; - const struct falcon_ops *ops; - void *data; struct falcon_firmware firmware; }; diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index e34325c83d28..7cea89f29a5c 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, } } - drm->mode_config.fb_base = (resource_size_t)bo->paddr; + drm->mode_config.fb_base = (resource_size_t)bo->iova; info->screen_base = (void __iomem *)bo->vaddr + offset; info->screen_size = size; - info->fix.smem_start = (unsigned long)(bo->paddr + offset); + info->fix.smem_start = (unsigned long)(bo->iova + offset); info->fix.smem_len = size; return 0; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index fb7667c8dd4c..746dae32c484 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo) 
drm_gem_object_put_unlocked(&obj->gem); } -static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) +static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, + dma_addr_t *phys) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct sg_table *sgt; + int err; + + /* + * If we've manually mapped the buffer object through the IOMMU, make + * sure to return the IOVA address of our mapping. + */ + if (phys && obj->mm) { + *phys = obj->iova; + return NULL; + } + + /* + * If we don't have a mapping for this buffer yet, return an SG table + * so that host1x can do the mapping for us via the DMA API. + */ + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); - *sgt = obj->sgt; + if (obj->pages) { + err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages, + 0, obj->gem.size, GFP_KERNEL); + if (err < 0) + goto free; + } else { + err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova, + obj->gem.size); + if (err < 0) + goto free; + } - return obj->paddr; + return sgt; + +free: + kfree(sgt); + return ERR_PTR(err); } -static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) +static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt) { + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } } static void *tegra_bo_mmap(struct host1x_bo *bo) @@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) goto unlock; } - bo->paddr = bo->mm->start; + bo->iova = bo->mm->start; - bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, + bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl, bo->sgt->nents, prot); if (!bo->size) { dev_err(tegra->drm->dev, "failed to map buffer\n"); @@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) return 0; mutex_lock(&tegra->mm_lock); - iommu_unmap(tegra->domain, bo->paddr, bo->size); + iommu_unmap(tegra->domain, bo->iova, bo->size); drm_mm_remove_node(bo->mm); mutex_unlock(&tegra->mm_lock); @@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) sg_free_table(bo->sgt); kfree(bo->sgt); } else if (bo->vaddr) { - dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); + dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova); } } @@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) } else { size_t size = bo->gem.size; - bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr, + bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { dev_err(drm->dev, @@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm, goto detach; } - bo->paddr = sg_dma_address(bo->sgt->sgl); + bo->iova = sg_dma_address(bo->sgt->sgl); } bo->gem.import_attach = attach; @@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma) vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; - err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr, + err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova, gem->size); if (err < 0) { drm_gem_vm_close(vma); @@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, return NULL; if (bo->pages) { - struct scatterlist *sg; - unsigned int i; - - if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) - goto free; - - for_each_sg(sgt->sgl, sg, bo->num_pages, i) - sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); - - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if 
(sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages, + 0, gem->size, GFP_KERNEL) < 0) goto free; } else { - if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova, + gem->size) < 0) goto free; - - sg_dma_address(sgt->sgl) = bo->paddr; - sg_dma_len(sgt->sgl) = gem->size; } + if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + goto free; + return sgt; free: diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index 83ffb1e14ca3..fafb5724499b 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -31,7 +31,7 @@ struct tegra_bo { struct host1x_bo base; unsigned long flags; struct sg_table *sgt; - dma_addr_t paddr; + dma_addr_t iova; void *vaddr; struct drm_mm_node *mm; diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 641299cc85b8..1fc4e56c7cc5 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -17,7 +17,6 @@ struct gr2d_soc { }; struct gr2d { - struct iommu_group *group; struct tegra_drm_client client; struct host1x_channel *channel; struct clk *clk; @@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client) struct gr2d *gr2d = to_gr2d(drm); int err; - gr2d->channel = host1x_channel_request(client->dev); + gr2d->channel = host1x_channel_request(client); if (!gr2d->channel) return -ENOMEM; @@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client) goto put; } - gr2d->group = host1x_client_iommu_attach(client, false); - if (IS_ERR(gr2d->group)) { - err = PTR_ERR(gr2d->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); goto free; } @@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client) return 0; detach: - host1x_client_iommu_detach(client, gr2d->group); + host1x_client_iommu_detach(client); free: host1x_syncpt_free(client->syncpts[0]); put: @@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client) if (err < 0) return err; - host1x_client_iommu_detach(client, gr2d->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(gr2d->channel); diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 8b9a35b1cbb3..24fae0f64032 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -23,7 +23,6 @@ struct gr3d_soc { }; struct gr3d { - struct iommu_group *group; struct tegra_drm_client client; struct host1x_channel *channel; struct clk *clk_secondary; @@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client) struct gr3d *gr3d = to_gr3d(drm); int err; - gr3d->channel = host1x_channel_request(client->dev); + gr3d->channel = host1x_channel_request(client); if (!gr3d->channel) return -ENOMEM; @@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client) goto put; } - gr3d->group = host1x_client_iommu_attach(client, false); - if (IS_ERR(gr3d->group)) { - err = PTR_ERR(gr3d->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); goto free; } @@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client) return 0; detach: - host1x_client_iommu_detach(client, gr3d->group); + host1x_client_iommu_detach(client); free: host1x_syncpt_free(client->syncpts[0]); put: @@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client) if (err < 0) return err; - host1x_client_iommu_detach(client, gr3d->group); + host1x_client_iommu_detach(client); 
host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(gr3d->channel); diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 839b49c40e51..2b4082d0bc9e 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, unsigned int zpos = plane->state->normalized_zpos; struct drm_framebuffer *fb = plane->state->fb; struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_bo *bo; dma_addr_t base; u32 value; @@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, /* disable compression */ tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL); - bo = tegra_fb_get_plane(fb, 0); - base = bo->paddr; + base = state->iova[0] + fb->offsets[0]; tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH); tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS); @@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_shared_plane_atomic_check, .atomic_update = tegra_shared_plane_atomic_update, .atomic_disable = tegra_shared_plane_atomic_disable, diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index bdcaa4c7168c..34373734ff68 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force) void tegra_output_connector_destroy(struct drm_connector *connector) { + struct tegra_output *output = connector_to_output(connector); + + if (output->cec) + cec_notifier_conn_unregister(output->cec); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output) disable_irq(output->hpd_irq); } - output->cec = cec_notifier_get(output->dev); - if (!output->cec) - return -ENOMEM; - return 0; } void tegra_output_remove(struct tegra_output *output) { - if (output->cec) - cec_notifier_put(output->cec); - if (output->hpd_gpio) free_irq(output->hpd_irq, output); @@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output) int tegra_output_init(struct drm_device *drm, struct tegra_output *output) { + int connector_type; int err; if (output->panel) { @@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) if (output->hpd_gpio) enable_irq(output->hpd_irq); + connector_type = output->connector.connector_type; + /* + * Create a CEC notifier for HDMI connector. 
+ */ + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_HDMIB) { + struct cec_connector_info conn_info; + + cec_fill_conn_info_from_drm(&conn_info, &output->connector); + output->cec = cec_notifier_conn_register(output->dev, NULL, + &conn_info); + if (!output->cec) + return -ENOMEM; + } + return 0; } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 6bab71d6e81d..163b590be224 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include "dc.h" @@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane) { struct tegra_plane *p = to_tegra_plane(plane); struct tegra_plane_state *state; + unsigned int i; if (plane->state) __drm_atomic_helper_plane_destroy_state(plane->state); @@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane) plane->state->plane = plane; plane->state->zpos = p->index; plane->state->normalized_zpos = p->index; + + for (i = 0; i < 3; i++) + state->iova[i] = DMA_MAPPING_ERROR; } } @@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane) for (i = 0; i < 2; i++) copy->blending[i] = state->blending[i]; + for (i = 0; i < 3; i++) { + copy->iova[i] = DMA_MAPPING_ERROR; + copy->sgt[i] = NULL; + } + return ©->base; } @@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = { .format_mod_supported = tegra_plane_format_mod_supported, }; +static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) +{ + unsigned int i; + int err; + + for (i = 0; i < state->base.fb->format->num_planes; i++) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + + if (!dc->client.group) { + struct sg_table *sgt; + + sgt = host1x_bo_pin(dc->dev, &bo->base, NULL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + + err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + if (err == 0) { + err = -ENOMEM; + goto unpin; + } + + state->iova[i] = sg_dma_address(sgt->sgl); + state->sgt[i] = sgt; + } else { + state->iova[i] = bo->iova; + } + } + + return 0; + +unpin: + dev_err(dc->dev, "failed to map plane %u: %d\n", i, err); + + while (i--) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + struct sg_table *sgt = state->sgt[i]; + + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); + + state->iova[i] = DMA_MAPPING_ERROR; + state->sgt[i] = NULL; + } + + return err; +} + +static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) +{ + unsigned int i; + + for (i = 0; i < state->base.fb->format->num_planes; i++) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + + if (!dc->client.group) { + struct sg_table *sgt = state->sgt[i]; + + if (sgt) { + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); + } + } + + state->iova[i] = DMA_MAPPING_ERROR; + state->sgt[i] = NULL; + } +} + +int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_dc *dc = to_tegra_dc(state->crtc); + + if (!state->fb) + return 0; + + drm_gem_fb_prepare_fb(plane, state); + + return tegra_dc_pin(dc, to_tegra_plane_state(state)); +} + +void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_dc *dc = 
to_tegra_dc(state->crtc); + + if (dc) + tegra_dc_unpin(dc, to_tegra_plane_state(state)); +} + int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state) { diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h index 510c394e6d9a..a158a915109a 100644 --- a/drivers/gpu/drm/tegra/plane.h +++ b/drivers/gpu/drm/tegra/plane.h @@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state { struct tegra_plane_state { struct drm_plane_state base; + struct sg_table *sgt[3]; + dma_addr_t iova[3]; + struct tegra_bo_tiling tiling; u32 format; u32 swap; @@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state) extern const struct drm_plane_funcs tegra_plane_funcs; +int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state); +void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state); + int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index e1669ada0a40..615cb319fa8b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -25,6 +25,7 @@ #include <drm/drm_scdc_helper.h> #include "dc.h" +#include "dp.h" #include "drm.h" #include "hda.h" #include "sor.h" @@ -370,10 +371,11 @@ struct tegra_sor_regs { }; struct tegra_sor_soc { - bool supports_edp; bool supports_lvds; bool supports_hdmi; bool supports_dp; + bool supports_audio; + bool supports_hdcp; const struct tegra_sor_regs *regs; bool has_nvdisplay; @@ -382,6 +384,12 @@ struct tegra_sor_soc { unsigned int num_settings; const u8 *xbar_cfg; + const u8 *lane_map; + + const u8 (*voltage_swing)[4][4]; + const u8 (*pre_emphasis)[4][4]; + const u8 (*post_cursor)[4][4]; + const u8 (*tx_pu)[4][4]; }; struct tegra_sor; @@ -390,6 +398,8 @@ struct tegra_sor_ops { const char *name; int (*probe)(struct tegra_sor *sor); int (*remove)(struct tegra_sor *sor); + void (*audio_enable)(struct tegra_sor *sor); + void (*audio_disable)(struct tegra_sor *sor); }; struct tegra_sor { @@ -412,6 +422,7 @@ struct tegra_sor { u8 xbar_cfg[5]; + struct drm_dp_link link; struct drm_dp_aux *aux; struct drm_info_list *debugfs_files; @@ -514,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw) return container_of(hw, struct tegra_clk_sor_pad, hw); } -static const char * const tegra_clk_sor_pad_parents[] = { - "pll_d2_out0", "pll_dp" +static const char * const tegra_clk_sor_pad_parents[2][2] = { + { "pll_d_out0", "pll_dp" }, + { "pll_d2_out0", "pll_dp" }, }; +/* + * Implementing ->set_parent() here isn't really required because the parent + * will be explicitly selected in the driver code via the DP_CLK_SEL mux in + * the SOR_CLK_CNTRL register. This is primarily for compatibility with the + * Tegra186 and later SoC generations where the BPMP implements this clock + * and doesn't expose the mux via the common clock framework. 
+ */ + static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index) { struct tegra_clk_sor_pad *pad = to_pad(hw); @@ -586,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, init.name = name; init.flags = 0; - init.parent_names = tegra_clk_sor_pad_parents; - init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents); + init.parent_names = tegra_clk_sor_pad_parents[sor->index]; + init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]); init.ops = &tegra_clk_sor_pad_ops; pad->hw.init = &init; @@ -597,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, return clk; } -static int tegra_sor_dp_train_fast(struct tegra_sor *sor, - struct drm_dp_link *link) +static void tegra_sor_filter_rates(struct tegra_sor *sor) { + struct drm_dp_link *link = &sor->link; unsigned int i; - u8 pattern; + + /* Tegra only supports RBR, HBR and HBR2 */ + for (i = 0; i < link->num_rates; i++) { + switch (link->rates[i]) { + case 1620000: + case 2700000: + case 5400000: + break; + + default: + DRM_DEBUG_KMS("link rate %lu kHz not supported\n", + link->rates[i]); + link->rates[i] = 0; + break; + } + } + + drm_dp_link_update_rates(link); +} + +static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes) +{ + unsigned long timeout; u32 value; - int err; - /* setup lane parameters */ - value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE2(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE1(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE0(0x40); - tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0); + /* + * Clear or set the PD_TXD bit corresponding to each lane, depending + * on whether it is used or not. + */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) | - SOR_LANE_PREEMPHASIS_LANE2(0x0f) | - SOR_LANE_PREEMPHASIS_LANE1(0x0f) | - SOR_LANE_PREEMPHASIS_LANE0(0x0f); - tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0); + if (lanes <= 2) + value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2])); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]); - value = SOR_LANE_POSTCURSOR_LANE3(0x00) | - SOR_LANE_POSTCURSOR_LANE2(0x00) | - SOR_LANE_POSTCURSOR_LANE1(0x00) | - SOR_LANE_POSTCURSOR_LANE0(0x00); - tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0); + if (lanes <= 1) + value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]); - /* disable LVDS mode */ - tegra_sor_writel(sor, 0, SOR_LVDS); + if (lanes == 0) + value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]); + + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | + SOR_LANE_SEQ_CTL_POWER_STATE_UP; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(250, 1000); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + return 0; +} + +static int tegra_sor_power_down_lanes(struct tegra_sor *sor) +{ + unsigned long timeout; + u32 value; + + /* power down all lanes */ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_TX_PU_ENABLE; - 
value &= ~SOR_DP_PADCTL_TX_PU_MASK; - value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */ + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | + SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + return 0; +} + +static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes) +{ + u32 value; + + /* pre-charge all used lanes */ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | - SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0; + + if (lanes <= 2) + value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2])); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]); + + if (lanes <= 1) + value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]); + + if (lanes == 0) + value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]); + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - usleep_range(10, 100); + usleep_range(15, 100); value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0); tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); +} - err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B); - if (err < 0) - return err; +static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor) +{ + u32 mask = 0x08, adj = 0, value; + + /* enable pad calibration logic */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value &= ~SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value |= SOR_PLL1_TMDS_TERM; + tegra_sor_writel(sor, value, sor->soc->regs->pll1); + + while (mask) { + adj |= mask; + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_NONE | - SOR_DP_TPG_PATTERN_TRAIN1; - value = (value << 8) | lane; + usleep_range(100, 200); + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + if (value & SOR_PLL1_TERM_COMPOUT) + adj &= ~mask; + + mask >>= 1; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - pattern = DP_TRAINING_PATTERN_1; + /* disable pad calibration logic */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value |= SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); +} - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 
0) - return err; +static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link) +{ + struct tegra_sor *sor = container_of(link, struct tegra_sor, link); + u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0; + const struct tegra_sor_soc *soc = sor->soc; + u32 pattern = 0, tx_pu = 0, value; + unsigned int i; - value = tegra_sor_readl(sor, SOR_DP_SPARE0); - value |= SOR_DP_SPARE_SEQ_ENABLE; - value &= ~SOR_DP_SPARE_PANEL_INTERNAL; - value |= SOR_DP_SPARE_MACRO_SOR_CLK; - tegra_sor_writel(sor, value, SOR_DP_SPARE0); + for (value = 0, i = 0; i < link->lanes; i++) { + u8 vs = link->train.request.voltage_swing[i]; + u8 pe = link->train.request.pre_emphasis[i]; + u8 pc = link->train.request.post_cursor[i]; + u8 shift = sor->soc->lane_map[i] << 3; + + voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift; + pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift; + post_cursor |= soc->post_cursor[pc][vs][pe] << shift; + + if (sor->soc->tx_pu[pc][vs][pe] > tx_pu) + tx_pu = sor->soc->tx_pu[pc][vs][pe]; + + switch (link->train.pattern) { + case DP_TRAINING_PATTERN_DISABLE: + value = SOR_DP_TPG_SCRAMBLER_GALIOS | + SOR_DP_TPG_PATTERN_NONE; + break; + + case DP_TRAINING_PATTERN_1: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN1; + break; + + case DP_TRAINING_PATTERN_2: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN2; + break; + + case DP_TRAINING_PATTERN_3: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN3; + break; + + default: + return -EINVAL; + } + + if (link->caps.channel_coding) + value |= SOR_DP_TPG_CHANNEL_CODING; - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_NONE | - SOR_DP_TPG_PATTERN_TRAIN2; - value = (value << 8) | lane; + pattern = pattern << 8 | value; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0); + tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0); - pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2; + if (link->caps.tps3_supported) + tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0); - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) - return err; + tegra_sor_writel(sor, pattern, SOR_DP_TPG); - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value &= ~SOR_DP_PADCTL_TX_PU_MASK; + value |= SOR_DP_PADCTL_TX_PU_ENABLE; + value |= SOR_DP_PADCTL_TX_PU(tx_pu); + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_sor_dp_link_configure(struct drm_dp_link *link) +{ + struct tegra_sor *sor = container_of(link, struct tegra_sor, link); + unsigned int rate, lanes; + u32 value; + int err; + + rate = drm_dp_link_rate_to_bw_code(link->rate); + lanes = link->lanes; + + /* configure link speed and lane count */ + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; + value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); + + if (link->caps.enhanced_framing) + value |= SOR_DP_LINKCTL_ENHANCED_FRAME; + + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + usleep_range(400, 
1000); + + /* configure load pulse position adjustment */ + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_LOADADJ_MASK; + + switch (rate) { + case DP_LINK_BW_1_62: + value |= SOR_PLL1_LOADADJ(0x3); + break; + + case DP_LINK_BW_2_7: + value |= SOR_PLL1_LOADADJ(0x4); + break; + + case DP_LINK_BW_5_4: + value |= SOR_PLL1_LOADADJ(0x6); + break; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - pattern = DP_TRAINING_PATTERN_DISABLE; + /* use alternate scrambler reset for eDP */ + value = tegra_sor_readl(sor, SOR_DP_SPARE0); - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) + if (link->edp == 0) + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + else + value |= SOR_DP_SPARE_PANEL_INTERNAL; + + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + err = tegra_sor_power_down_lanes(sor); + if (err < 0) { + dev_err(sor->dev, "failed to power down lanes: %d\n", err); + return err; + } + + /* power up and pre-charge lanes */ + err = tegra_sor_power_up_lanes(sor, lanes); + if (err < 0) { + dev_err(sor->dev, "failed to power up %u lane%s: %d\n", + lanes, (lanes != 1) ? "s" : "", err); return err; + } + + tegra_sor_dp_precharge(sor, lanes); return 0; } +static const struct drm_dp_link_ops tegra_sor_dp_link_ops = { + .apply_training = tegra_sor_dp_link_apply_training, + .configure = tegra_sor_dp_link_configure, +}; + static void tegra_sor_super_update(struct tegra_sor *sor) { tegra_sor_writel(sor, 0, SOR_SUPER_STATE0); @@ -912,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, u32 num_syms_per_line; unsigned int i; - if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel) + if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel) return -EINVAL; - output = link_rate * 8 * link->num_lanes; input = pclk * config->bits_per_pixel; + output = link_rate * 8 * link->lanes; if (input >= output) return -ERANGE; @@ -959,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, watermark = div_u64(watermark + params.error, f); config->watermark = watermark + (config->bits_per_pixel / 8) + 2; num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) * - (link->num_lanes * 8); + (link->lanes * 8); if (config->watermark > 30) { config->watermark = 30; @@ -976,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, num = ((mode->htotal - mode->hdisplay) - 7) * link_rate; config->hblank_symbols = div_u64(num, pclk); - if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (link->caps.enhanced_framing) config->hblank_symbols -= 3; - config->hblank_symbols -= 12 / link->num_lanes; + config->hblank_symbols -= 12 / link->lanes; /* compute the number of symbols per vertical blanking interval */ num = (mode->hdisplay - 25) * link_rate; config->vblank_symbols = div_u64(num, pclk); - config->vblank_symbols -= 36 / link->num_lanes + 4; + config->vblank_symbols -= 36 / link->lanes + 4; dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols, config->vblank_symbols); @@ -1200,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor) return err; } - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | - SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - /* stop lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | - SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; - tegra_sor_writel(sor, 
value, SOR_LANE_SEQ_CTL); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) - return -ETIMEDOUT; - value = tegra_sor_readl(sor, sor->soc->regs->pll2); value |= SOR_PLL2_PORT_POWERDOWN; tegra_sor_writel(sor, value, sor->soc->regs->pll2); @@ -1584,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_sor_edp_disable(struct drm_encoder *encoder) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_sor *sor = to_sor(output); - u32 value; - int err; - - if (output->panel) - drm_panel_disable(output->panel); - - err = tegra_sor_detach(sor); - if (err < 0) - dev_err(sor->dev, "failed to detach SOR: %d\n", err); - - tegra_sor_writel(sor, 0, SOR_STATE1); - tegra_sor_update(sor); - - /* - * The following accesses registers of the display controller, so make - * sure it's only executed when the output is attached to one. - */ - if (dc) { - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value &= ~SOR_ENABLE(0); - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - } - - err = tegra_sor_power_down(sor); - if (err < 0) - dev_err(sor->dev, "failed to power down SOR: %d\n", err); - - if (sor->aux) { - err = drm_dp_aux_disable(sor->aux); - if (err < 0) - dev_err(sor->dev, "failed to disable DP: %d\n", err); - } - - err = tegra_io_pad_power_disable(sor->pad); - if (err < 0) - dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); - - if (output->panel) - drm_panel_unprepare(output->panel); - - pm_runtime_put(sor->dev); -} - -#if 0 -static int calc_h_ref_to_sync(const struct drm_display_mode *mode, - unsigned int *value) -{ - unsigned int hfp, hsw, hbp, a = 0, b; - - hfp = mode->hsync_start - mode->hdisplay; - hsw = mode->hsync_end - mode->hsync_start; - hbp = mode->htotal - mode->hsync_end; - - pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp); - - b = hfp - 1; - - pr_info("a: %u, b: %u\n", a, b); - pr_info("a + hsw + hbp = %u\n", a + hsw + hbp); - - if (a + hsw + hbp <= 11) { - a = 1 + 11 - hsw - hbp; - pr_info("a: %u\n", a); - } - - if (a > b) - return -EINVAL; - - if (hsw < 1) - return -EINVAL; - - if (mode->hdisplay < 16) - return -EINVAL; - - if (value) { - if (b > a && a % 2) - *value = a + 1; - else - *value = a; - } - - return 0; -} -#endif - -static void tegra_sor_edp_enable(struct drm_encoder *encoder) -{ - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_sor *sor = to_sor(output); - struct tegra_sor_config config; - struct tegra_sor_state *state; - struct drm_dp_link link; - u8 rate, lanes; - unsigned int i; - int err = 0; - u32 value; - - state = to_sor_state(output->connector.state); - - pm_runtime_get_sync(sor->dev); - - if (output->panel) - drm_panel_prepare(output->panel); - - err = drm_dp_aux_enable(sor->aux); - if (err < 0) - dev_err(sor->dev, "failed to enable DP: %d\n", err); - - err = drm_dp_link_probe(sor->aux, &link); - if (err < 0) { - dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - return; - } - - /* switch to safe parent clock */ - err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if 
(err < 0) - dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); - - memset(&config, 0, sizeof(config)); - config.bits_per_pixel = state->bpc * 3; - - err = tegra_sor_compute_config(sor, mode, &config, &link); - if (err < 0) - dev_err(sor->dev, "failed to compute configuration: %d\n", err); - - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; - value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - usleep_range(20, 100); - - value = tegra_sor_readl(sor, sor->soc->regs->pll3); - value |= SOR_PLL3_PLL_VDD_MODE_3V3; - tegra_sor_writel(sor, value, sor->soc->regs->pll3); - - value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST | - SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value |= SOR_PLL2_SEQ_PLLCAPPD; - value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; - value |= SOR_PLL2_LVDS_ENABLE; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM; - tegra_sor_writel(sor, value, sor->soc->regs->pll1); - - while (true) { - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0) - break; - - usleep_range(250, 1000); - } - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; - value &= ~SOR_PLL2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - /* - * power up - */ - - /* set safe link bandwidth (1.62 Gbps) */ - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - /* step 1 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN | - SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - value = tegra_sor_readl(sor, sor->soc->regs->pll0); - value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value &= ~SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - /* step 2 */ - err = tegra_io_pad_power_enable(sor->pad); - if (err < 0) - dev_err(sor->dev, "failed to power on I/O pad: %d\n", err); - - usleep_range(5, 100); - - /* step 3 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - usleep_range(20, 100); - - /* step 4 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll0); - value &= ~SOR_PLL0_VCOPD; - value &= ~SOR_PLL0_PWR; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - usleep_range(200, 1000); - - /* step 5 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - /* XXX not in TRM */ - for (value = 0, i = 0; i < 5; i++) - value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) | - SOR_XBAR_CTRL_LINK1_XSEL(i, i); - - tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); - 
tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - - /* switch to DP parent clock */ - err = tegra_sor_set_parent_clock(sor, sor->clk_dp); - if (err < 0) - dev_err(sor->dev, "failed to set parent clock: %d\n", err); - - /* power DP lanes */ - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - - if (link.num_lanes <= 2) - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2); - else - value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2; - - if (link.num_lanes <= 1) - value &= ~SOR_DP_PADCTL_PD_TXD_1; - else - value |= SOR_DP_PADCTL_PD_TXD_1; - - if (link.num_lanes == 0) - value &= ~SOR_DP_PADCTL_PD_TXD_0; - else - value |= SOR_DP_PADCTL_PD_TXD_0; - - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; - value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes); - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - /* start lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | - SOR_LANE_SEQ_CTL_POWER_STATE_UP; - tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); - - while (true) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(250, 1000); - } - - /* set link bandwidth */ - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= drm_dp_link_rate_to_bw_code(link.rate) << 2; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - tegra_sor_apply_config(sor, &config); - - /* enable link */ - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value |= SOR_DP_LINKCTL_ENABLE; - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - for (i = 0, value = 0; i < 4; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; - } - - tegra_sor_writel(sor, value, SOR_DP_TPG); - - /* enable pad calibration logic */ - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - err = drm_dp_link_probe(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - - err = drm_dp_link_power_up(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to power up eDP link: %d\n", err); - - err = drm_dp_link_configure(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to configure eDP link: %d\n", err); - - rate = drm_dp_link_rate_to_bw_code(link.rate); - lanes = link.num_lanes; - - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; - value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); - - if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - /* disable training pattern generator */ - - for (i = 0; i < link.num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; - } - - tegra_sor_writel(sor, value, SOR_DP_TPG); - - err = tegra_sor_dp_train_fast(sor, &link); - if (err < 0) - dev_err(sor->dev, "DP fast link training failed: %d\n", err); - - dev_dbg(sor->dev, "fast link training 
succeeded\n"); - - err = tegra_sor_power_up(sor, 250); - if (err < 0) - dev_err(sor->dev, "failed to power up SOR: %d\n", err); - - /* CSTM (LVDS, link A/B, upper) */ - value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | - SOR_CSTM_UPPER; - tegra_sor_writel(sor, value, SOR_CSTM); - - /* use DP-A protocol */ - value = tegra_sor_readl(sor, SOR_STATE1); - value &= ~SOR_STATE_ASY_PROTOCOL_MASK; - value |= SOR_STATE_ASY_PROTOCOL_DP_A; - tegra_sor_writel(sor, value, SOR_STATE1); - - tegra_sor_mode_set(sor, mode, state); - - /* PWM setup */ - err = tegra_sor_setup_pwm(sor, 250); - if (err < 0) - dev_err(sor->dev, "failed to setup PWM: %d\n", err); - - tegra_sor_update(sor); - - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value |= SOR_ENABLE(0); - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - - err = tegra_sor_attach(sor); - if (err < 0) - dev_err(sor->dev, "failed to attach SOR: %d\n", err); - - err = tegra_sor_wakeup(sor); - if (err < 0) - dev_err(sor->dev, "failed to enable DC: %d\n", err); - - if (output->panel) - drm_panel_enable(output->panel); -} - static int tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -2030,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, return 0; } -static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = { - .disable = tegra_sor_edp_disable, - .enable = tegra_sor_edp_enable, - .atomic_check = tegra_sor_encoder_atomic_check, -}; - static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size) { u32 value = 0; @@ -2160,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor) { u32 value; + /* + * Enable and unmask the HDA codec SCRATCH0 register interrupt. This + * is used for interoperability between the HDA codec driver and the + * HDMI/DP driver. 
+ */ + value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0; + tegra_sor_writel(sor, value, SOR_INT_ENABLE); + tegra_sor_writel(sor, value, SOR_INT_MASK); + tegra_sor_write_eld(sor); value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD; @@ -2169,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor) static void tegra_sor_audio_unprepare(struct tegra_sor *sor) { tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE); + tegra_sor_writel(sor, 0, SOR_INT_MASK); + tegra_sor_writel(sor, 0, SOR_INT_ENABLE); +} + +static void tegra_sor_audio_enable(struct tegra_sor *sor) +{ + u32 value; + + value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL); + + /* select HDA audio input */ + value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK); + value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA); + + /* inject null samples */ + if (sor->format.channels != 2) + value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL; + else + value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL; + + value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH; + + tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL); + + /* enable advertising HBR capability */ + tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE); } static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor) @@ -2206,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor) { u32 value; - value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL); - - /* select HDA audio input */ - value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK); - value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA); - - /* inject null samples */ - if (sor->format.channels != 2) - value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL; - else - value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL; - - value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH; - - tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL); - - /* enable advertising HBR capability */ - tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE); + tegra_sor_audio_enable(sor); tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL); @@ -2399,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); if (!sor->soc->has_nvdisplay) - value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1)); - else - value &= ~SOR_ENABLE(sor->index); + value &= ~SOR1_TIMING_CYA; + + value &= ~SOR_ENABLE(sor->index); tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); @@ -2559,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - /* switch to parent clock */ - err = clk_set_parent(sor->clk, sor->clk_parent); + /* + * Switch the pad clock to the DP clock. Note that we cannot actually + * do this because Tegra186 and later don't support clk_set_parent() + * on the sorX_pad_clkout clocks. We already do the equivalent above + * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register. 
+ */ +#if 0 + err = clk_set_parent(sor->clk_pad, sor->clk_dp); if (err < 0) { - dev_err(sor->dev, "failed to set parent clock: %d\n", err); + dev_err(sor->dev, "failed to select pad parent clock: %d\n", + err); return; } +#endif + /* switch the SOR clock to the pad clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_pad); if (err < 0) { - dev_err(sor->dev, "failed to set pad clock: %d\n", err); + dev_err(sor->dev, "failed to select SOR parent clock: %d\n", + err); + return; + } + + /* switch the output clock to the parent pixel clock */ + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select output parent clock: %d\n", + err); return; } @@ -2774,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); if (!sor->soc->has_nvdisplay) - value |= SOR_ENABLE(1) | SOR1_TIMING_CYA; - else - value |= SOR_ENABLE(sor->index); + value |= SOR1_TIMING_CYA; + + value |= SOR_ENABLE(sor->index); tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); @@ -2803,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = { .atomic_check = tegra_sor_encoder_atomic_check, }; +static void tegra_sor_dp_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + u32 value; + int err; + + if (output->panel) + drm_panel_disable(output->panel); + + /* + * Do not attempt to power down a DP link if we're not connected since + * the AUX transactions would just be timing out. + */ + if (output->connector.status != connector_status_disconnected) { + err = drm_dp_link_power_down(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to power down link: %d\n", + err); + } + + err = tegra_sor_detach(sor); + if (err < 0) + dev_err(sor->dev, "failed to detach SOR: %d\n", err); + + tegra_sor_writel(sor, 0, SOR_STATE1); + tegra_sor_update(sor); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~SOR_ENABLE(sor->index); + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + tegra_dc_commit(dc); + + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value &= ~SOR_STATE_ASY_SUBOWNER_MASK; + value &= ~SOR_STATE_ASY_OWNER_MASK; + tegra_sor_writel(sor, value, SOR_STATE1); + tegra_sor_update(sor); + + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe clock: %d\n", err); + + err = tegra_sor_power_down(sor); + if (err < 0) + dev_err(sor->dev, "failed to power down SOR: %d\n", err); + + err = tegra_io_pad_power_disable(sor->pad); + if (err < 0) + dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); + + err = drm_dp_aux_disable(sor->aux); + if (err < 0) + dev_err(sor->dev, "failed disable DPAUX: %d\n", err); + + if (output->panel) + drm_panel_unprepare(output->panel); + + pm_runtime_put(sor->dev); +} + +static void tegra_sor_dp_enable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + struct tegra_sor_config config; + struct tegra_sor_state *state; + struct drm_display_mode *mode; + struct drm_display_info *info; + unsigned int i; + u32 value; + int err; + + state = to_sor_state(output->connector.state); + mode = &encoder->crtc->state->adjusted_mode; + info = 
&output->connector.display_info; + + pm_runtime_get_sync(sor->dev); + + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + err = tegra_io_pad_power_enable(sor->pad); + if (err < 0) + dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err); + + usleep_range(20, 100); + + err = drm_dp_aux_enable(sor->aux); + if (err < 0) + dev_err(sor->dev, "failed to enable DPAUX: %d\n", err); + + err = drm_dp_link_probe(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to probe DP link: %d\n", err); + + tegra_sor_filter_rates(sor); + + err = drm_dp_link_choose(&sor->link, mode, info); + if (err < 0) + dev_err(sor->dev, "failed to choose link: %d\n", err); + + if (output->panel) + drm_panel_prepare(output->panel); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + usleep_range(20, 40); + + value = tegra_sor_readl(sor, sor->soc->regs->pll3); + value |= SOR_PLL3_PLL_VDD_MODE_3V3; + tegra_sor_writel(sor, value, sor->soc->regs->pll3); + + value = tegra_sor_readl(sor, sor->soc->regs->pll0); + value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR); + tegra_sor_writel(sor, value, sor->soc->regs->pll0); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + value |= SOR_PLL2_SEQ_PLLCAPPD; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; + value &= ~SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; + + if (output->panel) + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; + else + value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK; + + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, SOR_DP_SPARE0); + /* XXX not in TRM */ + if (output->panel) + value |= SOR_DP_SPARE_PANEL_INTERNAL; + else + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + + value |= SOR_DP_SPARE_SEQ_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + /* XXX not in TRM */ + tegra_sor_writel(sor, 0, SOR_LVDS); + + value = tegra_sor_readl(sor, sor->soc->regs->pll0); + value &= ~SOR_PLL0_ICHPMP_MASK; + value &= ~SOR_PLL0_VCOCAP_MASK; + value |= SOR_PLL0_ICHPMP(0x1); + value |= SOR_PLL0_VCOCAP(0x3); + value |= SOR_PLL0_RESISTOR_EXT; + tegra_sor_writel(sor, value, sor->soc->regs->pll0); + + /* XXX not in TRM */ + for (value = 0, i = 0; i < 5; i++) + value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) | + SOR_XBAR_CTRL_LINK1_XSEL(i, i); + + tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); + tegra_sor_writel(sor, value, SOR_XBAR_CTRL); + + /* + * Switch the pad clock to the DP clock. Note that we cannot actually + * do this because Tegra186 and later don't support clk_set_parent() + * on the sorX_pad_clkout clocks. We already do the equivalent above + * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register. 
+ */ +#if 0 + err = clk_set_parent(sor->clk_pad, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select pad parent clock: %d\n", + err); + return; + } +#endif + + /* switch the SOR clock to the pad clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_pad); + if (err < 0) { + dev_err(sor->dev, "failed to select SOR parent clock: %d\n", + err); + return; + } + + /* switch the output clock to the parent pixel clock */ + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select output parent clock: %d\n", + err); + return; + } + + /* use DP-A protocol */ + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value |= SOR_STATE_ASY_PROTOCOL_DP_A; + tegra_sor_writel(sor, value, SOR_STATE1); + + /* enable port */ + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value |= SOR_DP_LINKCTL_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + tegra_sor_dp_term_calibrate(sor); + + err = drm_dp_link_train(&sor->link); + if (err < 0) + dev_err(sor->dev, "link training failed: %d\n", err); + else + dev_dbg(sor->dev, "link training succeeded\n"); + + err = drm_dp_link_power_up(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to power up DP link: %d\n", err); + + /* compute configuration */ + memset(&config, 0, sizeof(config)); + config.bits_per_pixel = state->bpc * 3; + + err = tegra_sor_compute_config(sor, mode, &config, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to compute configuration: %d\n", err); + + tegra_sor_apply_config(sor, &config); + tegra_sor_mode_set(sor, mode, state); + + if (output->panel) { + /* CSTM (LVDS, link A/B, upper) */ + value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | + SOR_CSTM_UPPER; + tegra_sor_writel(sor, value, SOR_CSTM); + + /* PWM setup */ + err = tegra_sor_setup_pwm(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to setup PWM: %d\n", err); + } + + tegra_sor_update(sor); + + err = tegra_sor_power_up(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to power up SOR: %d\n", err); + + /* attach and wake up */ + err = tegra_sor_attach(sor); + if (err < 0) + dev_err(sor->dev, "failed to attach SOR: %d\n", err); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= SOR_ENABLE(sor->index); + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + err = tegra_sor_wakeup(sor); + if (err < 0) + dev_err(sor->dev, "failed to wakeup SOR: %d\n", err); + + if (output->panel) + drm_panel_enable(output->panel); +} + +static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = { + .disable = tegra_sor_dp_disable, + .enable = tegra_sor_dp_enable, + .atomic_check = tegra_sor_encoder_atomic_check, +}; + +static int tegra_sor_hdmi_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); + if (IS_ERR(sor->avdd_io_supply)) { + dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", + PTR_ERR(sor->avdd_io_supply)); + return PTR_ERR(sor->avdd_io_supply); + } + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", + err); + return err; + } + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); + if (IS_ERR(sor->vdd_pll_supply)) { + dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", + PTR_ERR(sor->vdd_pll_supply)); + return PTR_ERR(sor->vdd_pll_supply); + } + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) { + dev_err(sor->dev, "failed 
to enable VDD PLL supply: %d\n", + err); + return err; + } + + sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); + if (IS_ERR(sor->hdmi_supply)) { + dev_err(sor->dev, "cannot get HDMI supply: %ld\n", + PTR_ERR(sor->hdmi_supply)); + return PTR_ERR(sor->hdmi_supply); + } + + err = regulator_enable(sor->hdmi_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); + return err; + } + + INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work); + + return 0; +} + +static int tegra_sor_hdmi_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->hdmi_supply); + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_hdmi_ops = { + .name = "HDMI", + .probe = tegra_sor_hdmi_probe, + .remove = tegra_sor_hdmi_remove, + .audio_enable = tegra_sor_hdmi_audio_enable, + .audio_disable = tegra_sor_hdmi_audio_disable, +}; + +static int tegra_sor_dp_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp"); + if (IS_ERR(sor->avdd_io_supply)) + return PTR_ERR(sor->avdd_io_supply); + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) + return err; + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll"); + if (IS_ERR(sor->vdd_pll_supply)) + return PTR_ERR(sor->vdd_pll_supply); + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) + return err; + + return 0; +} + +static int tegra_sor_dp_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_dp_ops = { + .name = "DP", + .probe = tegra_sor_dp_probe, + .remove = tegra_sor_dp_remove, +}; + static int tegra_sor_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); @@ -2810,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int connector = DRM_MODE_CONNECTOR_Unknown; int encoder = DRM_MODE_ENCODER_NONE; - u32 value; int err; if (!sor->aux) { - if (sor->soc->supports_hdmi) { + if (sor->ops == &tegra_sor_hdmi_ops) { connector = DRM_MODE_CONNECTOR_HDMIA; encoder = DRM_MODE_ENCODER_TMDS; helpers = &tegra_sor_hdmi_helpers; @@ -2823,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client) encoder = DRM_MODE_ENCODER_LVDS; } } else { - if (sor->soc->supports_edp) { + if (sor->output.panel) { connector = DRM_MODE_CONNECTOR_eDP; encoder = DRM_MODE_ENCODER_TMDS; - helpers = &tegra_sor_edp_helpers; - } else if (sor->soc->supports_dp) { + helpers = &tegra_sor_dp_helpers; + } else { connector = DRM_MODE_CONNECTOR_DisplayPort; encoder = DRM_MODE_ENCODER_TMDS; + helpers = &tegra_sor_dp_helpers; } + + sor->link.ops = &tegra_sor_dp_link_ops; + sor->link.aux = sor->aux; } sor->output.dev = sor->dev; @@ -2913,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) return err; - /* - * Enable and unmask the HDA codec SCRATCH0 register interrupt. This - * is used for interoperability between the HDA codec driver and the - * HDMI/DP driver. 
- */ - value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0; - tegra_sor_writel(sor, value, SOR_INT_ENABLE); - tegra_sor_writel(sor, value, SOR_INT_MASK); - return 0; } @@ -2930,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int err; - tegra_sor_writel(sor, 0, SOR_INT_MASK); - tegra_sor_writel(sor, 0, SOR_INT_ENABLE); - tegra_output_exit(&sor->output); if (sor->aux) { @@ -2955,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = { .exit = tegra_sor_exit, }; -static const struct tegra_sor_ops tegra_sor_edp_ops = { - .name = "eDP", -}; - -static int tegra_sor_hdmi_probe(struct tegra_sor *sor) -{ - int err; - - sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); - if (IS_ERR(sor->avdd_io_supply)) { - dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", - PTR_ERR(sor->avdd_io_supply)); - return PTR_ERR(sor->avdd_io_supply); - } - - err = regulator_enable(sor->avdd_io_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", - err); - return err; - } - - sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); - if (IS_ERR(sor->vdd_pll_supply)) { - dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", - PTR_ERR(sor->vdd_pll_supply)); - return PTR_ERR(sor->vdd_pll_supply); - } - - err = regulator_enable(sor->vdd_pll_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", - err); - return err; - } - - sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); - if (IS_ERR(sor->hdmi_supply)) { - dev_err(sor->dev, "cannot get HDMI supply: %ld\n", - PTR_ERR(sor->hdmi_supply)); - return PTR_ERR(sor->hdmi_supply); - } - - err = regulator_enable(sor->hdmi_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); - return err; - } - - INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work); - - return 0; -} - -static int tegra_sor_hdmi_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->hdmi_supply); - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - -static const struct tegra_sor_ops tegra_sor_hdmi_ops = { - .name = "HDMI", - .probe = tegra_sor_hdmi_probe, - .remove = tegra_sor_hdmi_remove, -}; - static const u8 tegra124_sor_xbar_cfg[5] = { 0, 1, 2, 3, 4 }; @@ -3043,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = { .dp_padctl2 = 0x73, }; +/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. 
*/ +static const u8 tegra124_sor_lane_map[4] = { + 2, 1, 0, 3, +}; + +static const u8 tegra124_sor_voltage_swing[4][4][4] = { + { + { 0x13, 0x19, 0x1e, 0x28 }, + { 0x1e, 0x25, 0x2d, }, + { 0x28, 0x32, }, + { 0x3c, }, + }, { + { 0x12, 0x17, 0x1b, 0x25 }, + { 0x1c, 0x23, 0x2a, }, + { 0x25, 0x2f, }, + { 0x39, } + }, { + { 0x12, 0x16, 0x1a, 0x22 }, + { 0x1b, 0x20, 0x27, }, + { 0x24, 0x2d, }, + { 0x36, }, + }, { + { 0x11, 0x14, 0x17, 0x1f }, + { 0x19, 0x1e, 0x24, }, + { 0x22, 0x2a, }, + { 0x32, }, + }, +}; + +static const u8 tegra124_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x09, 0x13, 0x25 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00 }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, +}; + +static const u8 tegra124_sor_post_cursor[4][4][4] = { + { + { 0x00, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, }, + { 0x00, 0x00, }, + { 0x00, }, + }, { + { 0x02, 0x02, 0x04, 0x05 }, + { 0x02, 0x04, 0x05, }, + { 0x04, 0x05, }, + { 0x05, }, + }, { + { 0x04, 0x05, 0x08, 0x0b }, + { 0x05, 0x09, 0x0b, }, + { 0x08, 0x0a, }, + { 0x0b, }, + }, { + { 0x05, 0x09, 0x0b, 0x12 }, + { 0x09, 0x0d, 0x12, }, + { 0x0b, 0x0f, }, + { 0x12, }, + }, +}; + +static const u8 tegra124_sor_tx_pu[4][4][4] = { + { + { 0x20, 0x30, 0x40, 0x60 }, + { 0x30, 0x40, 0x60, }, + { 0x40, 0x60, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x30, 0x50 }, + { 0x30, 0x40, 0x50, }, + { 0x40, 0x50, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x30, 0x40, }, + { 0x30, 0x30, 0x40, }, + { 0x40, 0x50, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x20, 0x40, }, + { 0x30, 0x30, 0x40, }, + { 0x40, 0x40, }, + { 0x60, }, + }, +}; + static const struct tegra_sor_soc tegra124_sor = { - .supports_edp = true, .supports_lvds = true, .supports_hdmi = false, - .supports_dp = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, .regs = &tegra124_sor_regs, .has_nvdisplay = false, .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, +}; + +static const u8 tegra132_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x01, 0x0e, 0x1d, }, + { 0x01, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00 }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, +}; + +static const struct tegra_sor_soc tegra132_sor = { + .supports_lvds = true, + .supports_hdmi = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, + .regs = &tegra124_sor_regs, + .has_nvdisplay = false, + .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra132_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra210_sor_regs = { @@ -3068,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = { .dp_padctl2 = 0x73, }; +static const u8 tegra210_sor_xbar_cfg[5] = { + 2, 1, 0, 3, 4 +}; + +static const u8 tegra210_sor_lane_map[4] = { + 0, 1, 2, 3, +}; + 
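/*
 * Illustrative sketch, not part of this patch: the per-SoC tables above
 * are indexed as [post-cursor][voltage-swing][pre-emphasis], exactly as
 * tegra_sor_dp_link_apply_training() reads them, while lane_map[] remaps
 * logical lanes to pad lanes. A hypothetical helper showing how one
 * 8-bit value per logical lane is packed into the 32-bit SOR_LANE_*
 * registers, using the same shift (lane_map[i] << 3) as the driver:
 */
static u32 example_pack_lane_bytes(const u8 lane_map[4], const u8 values[4])
{
	u32 packed = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		packed |= (u32)values[i] << (lane_map[i] << 3);

	return packed;
}
/*
 * With tegra124_sor_lane_map[] = { 2, 1, 0, 3 }, the value for logical
 * lane 0 lands in bits 23:16 of the register, matching the swapped pads
 * on Tegra124 and Tegra132.
 */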
static const struct tegra_sor_soc tegra210_sor = { - .supports_edp = true, .supports_lvds = false, .supports_hdmi = false, - .supports_dp = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, + .regs = &tegra210_sor_regs, .has_nvdisplay = false, - .xbar_cfg = tegra124_sor_xbar_cfg, -}; -static const u8 tegra210_sor_xbar_cfg[5] = { - 2, 1, 0, 3, 4 + .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra210_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_soc tegra210_sor1 = { - .supports_edp = false, .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra210_sor_regs, .has_nvdisplay = false, .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults), .settings = tegra210_sor_hdmi_defaults, - .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra210_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra186_sor_regs = { @@ -3112,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = { .dp_padctl2 = 0x16a, }; -static const struct tegra_sor_soc tegra186_sor = { - .supports_edp = false, - .supports_lvds = false, - .supports_hdmi = false, - .supports_dp = true, - - .regs = &tegra186_sor_regs, - .has_nvdisplay = true, +static const u8 tegra186_sor_voltage_swing[4][4][4] = { + { + { 0x13, 0x19, 0x1e, 0x28 }, + { 0x1e, 0x25, 0x2d, }, + { 0x28, 0x32, }, + { 0x39, }, + }, { + { 0x12, 0x16, 0x1b, 0x25 }, + { 0x1c, 0x23, 0x2a, }, + { 0x25, 0x2f, }, + { 0x37, } + }, { + { 0x12, 0x16, 0x1a, 0x22 }, + { 0x1b, 0x20, 0x27, }, + { 0x24, 0x2d, }, + { 0x35, }, + }, { + { 0x11, 0x14, 0x17, 0x1f }, + { 0x19, 0x1e, 0x24, }, + { 0x22, 0x2a, }, + { 0x32, }, + }, +}; - .xbar_cfg = tegra124_sor_xbar_cfg, +static const u8 tegra186_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x01, 0x0e, 0x1d, }, + { 0x01, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00 }, + }, { + { 0x00, 0x08, 0x14, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, }; -static const struct tegra_sor_soc tegra186_sor1 = { - .supports_edp = false, +static const struct tegra_sor_soc tegra186_sor = { .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra186_sor_regs, .has_nvdisplay = true, .num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults), .settings = tegra186_sor_hdmi_defaults, - .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra186_sor_voltage_swing, + .pre_emphasis = tegra186_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra194_sor_regs = { @@ -3155,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = { }; static const struct tegra_sor_soc tegra194_sor = { - .supports_edp = true, .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra194_sor_regs, .has_nvdisplay = true, @@ -3167,14 +3543,19 @@ static const 
struct tegra_sor_soc tegra194_sor = { .settings = tegra194_sor_hdmi_defaults, .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra186_sor_voltage_swing, + .pre_emphasis = tegra186_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct of_device_id tegra_sor_of_match[] = { { .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor }, - { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 }, { .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor }, { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 }, { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor }, + { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor }, { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor }, { }, }; @@ -3200,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor) * earlier */ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index; + } else { + if (!sor->soc->supports_audio) + sor->index = 0; + else + sor->index = 1; } err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5); @@ -3234,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data) tegra_hda_parse_format(format, &sor->format); - tegra_sor_hdmi_audio_enable(sor); + if (sor->ops->audio_enable) + sor->ops->audio_enable(sor); } else { - tegra_sor_hdmi_audio_disable(sor); + if (sor->ops->audio_disable) + sor->ops->audio_disable(sor); } } @@ -3273,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->aux) return -EPROBE_DEFER; + + sor->output.ddc = &sor->aux->ddc; } if (!sor->aux) { @@ -3287,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev) return -ENODEV; } } else { - if (sor->soc->supports_edp) { - sor->ops = &tegra_sor_edp_ops; - sor->pad = TEGRA_IO_PAD_LVDS; - } else if (sor->soc->supports_dp) { - dev_err(&pdev->dev, "DisplayPort not supported yet\n"); - return -ENODEV; - } else { - dev_err(&pdev->dev, "unknown (DP) support\n"); - return -ENODEV; - } + np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0); + /* + * No need to keep this around since we only use it as a check + * to see if a panel is connected (eDP) or not (DP). + */ + of_node_put(np); + + sor->ops = &tegra_sor_dp_ops; + sor->pad = TEGRA_IO_PAD_LVDS; } err = tegra_sor_parse_dt(sor); @@ -3451,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev) * pad output clock. 
*/ if (!sor->clk_pad) { + char *name; + err = pm_runtime_get_sync(&pdev->dev); if (err < 0) { dev_err(&pdev->dev, "failed to get runtime PM: %d\n", @@ -3458,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } - sor->clk_pad = tegra_clk_sor_pad_register(sor, - "sor1_pad_clkout"); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index); + if (!name) { + err = -ENOMEM; + goto remove; + } + + sor->clk_pad = tegra_clk_sor_pad_register(sor, name); pm_runtime_put(&pdev->dev); } diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h index f8efd8be4b7c..00e09d5dca30 100644 --- a/drivers/gpu/drm/tegra/sor.h +++ b/drivers/gpu/drm/tegra/sor.h @@ -39,6 +39,7 @@ #define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6) #define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6) #define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6) +#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4) #define SOR_STATE_ASY_OWNER_MASK 0xf #define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0) @@ -283,10 +284,12 @@ #define SOR_DP_PADCTL_CM_TXD_2 (1 << 6) #define SOR_DP_PADCTL_CM_TXD_1 (1 << 5) #define SOR_DP_PADCTL_CM_TXD_0 (1 << 4) +#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x))) #define SOR_DP_PADCTL_PD_TXD_3 (1 << 3) #define SOR_DP_PADCTL_PD_TXD_0 (1 << 2) #define SOR_DP_PADCTL_PD_TXD_1 (1 << 1) #define SOR_DP_PADCTL_PD_TXD_2 (1 << 0) +#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x))) #define SOR_DP_PADCTL1 0x5d diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index cd0399fd8c63..9444ba183990 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -34,7 +34,6 @@ struct vic { void __iomem *regs; struct tegra_drm_client client; struct host1x_channel *channel; - struct iommu_domain *domain; struct device *dev; struct clk *clk; struct reset_control *rst; @@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev) static int vic_boot(struct vic *vic) { +#ifdef CONFIG_IOMMU_API + struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); +#endif u32 fce_ucode_size, fce_bin_data_offset; void *hdr; int err = 0; @@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic) return 0; #ifdef CONFIG_IOMMU_API - if (vic->config->supports_sid) { - struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); + if (vic->config->supports_sid && spec) { u32 value; value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW); vic_writel(vic, value, VIC_TFBIF_TRANSCFG); - if (spec && spec->num_ids > 0) { + if (spec->num_ids > 0) { value = spec->ids[0] & 0xffff; vic_writel(vic, value, VIC_THI_STREAMID0); @@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic) if (err < 0) return err; - hdr = vic->falcon.firmware.vaddr; + hdr = vic->falcon.firmware.virt; fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET); - hdr = vic->falcon.firmware.vaddr + + hdr = vic->falcon.firmware.virt + *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET); fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET); @@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic) falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE, fce_ucode_size); falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET, - (vic->falcon.firmware.paddr + fce_bin_data_offset) + (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8); err = falcon_wait_idle(&vic->falcon); @@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic) return 0; } -static void *vic_falcon_alloc(struct falcon *falcon, size_t size, - dma_addr_t *iova) -{ - struct tegra_drm *tegra = falcon->data; - 
- return tegra_drm_alloc(tegra, size, iova); -} - -static void vic_falcon_free(struct falcon *falcon, size_t size, - dma_addr_t iova, void *va) -{ - struct tegra_drm *tegra = falcon->data; - - return tegra_drm_free(tegra, size, va, iova); -} - -static const struct falcon_ops vic_falcon_ops = { - .alloc = vic_falcon_alloc, - .free = vic_falcon_free -}; - static int vic_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct iommu_group *group = iommu_group_get(client->dev); struct drm_device *dev = dev_get_drvdata(client->parent); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; - if (group && tegra->domain) { - err = iommu_attach_group(tegra->domain, group); - if (err < 0) { - dev_err(vic->dev, "failed to attach to domain: %d\n", - err); - return err; - } - - vic->domain = tegra->domain; + err = host1x_client_iommu_attach(client); + if (err < 0) { + dev_err(vic->dev, "failed to attach to domain: %d\n", err); + return err; } - vic->channel = host1x_channel_request(client->dev); + vic->channel = host1x_channel_request(client); if (!vic->channel) { err = -ENOMEM; goto detach; @@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client) if (err < 0) goto free_syncpt; + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent device. + */ + client->dev->dma_parms = client->parent->dma_parms; + return 0; free_syncpt: @@ -221,8 +201,7 @@ free_syncpt: free_channel: host1x_channel_put(vic->channel); detach: - if (group && tegra->domain) - iommu_detach_group(tegra->domain, group); + host1x_client_iommu_detach(client); return err; } @@ -230,22 +209,32 @@ detach: static int vic_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct iommu_group *group = iommu_group_get(client->dev); struct drm_device *dev = dev_get_drvdata(client->parent); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + err = tegra_drm_unregister_client(tegra, drm); if (err < 0) return err; host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(vic->channel); - - if (vic->domain) { - iommu_detach_group(vic->domain, group); - vic->domain = NULL; + host1x_client_iommu_detach(client); + + if (client->group) { + dma_unmap_single(vic->dev, vic->falcon.firmware.phys, + vic->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, vic->falcon.firmware.size, + vic->falcon.firmware.virt, + vic->falcon.firmware.iova); + } else { + dma_free_coherent(vic->dev, vic->falcon.firmware.size, + vic->falcon.firmware.virt, + vic->falcon.firmware.iova); } return 0; @@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = { static int vic_load_firmware(struct vic *vic) { + struct host1x_client *client = &vic->client.base; + struct tegra_drm *tegra = vic->client.drm; + dma_addr_t iova; + size_t size; + void *virt; int err; - if (vic->falcon.data) + if (vic->falcon.firmware.virt) return 0; - vic->falcon.data = vic->client.drm; - err = falcon_read_firmware(&vic->falcon, vic->config->firmware); if (err < 0) - goto cleanup; + return err; + + size = vic->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL); + + err = dma_mapping_error(vic->dev, iova); + if (err < 0) + return err; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + } + + vic->falcon.firmware.virt = 
virt; + vic->falcon.firmware.iova = iova; err = falcon_load_firmware(&vic->falcon); if (err < 0) goto cleanup; + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. + */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(vic->dev, phys); + if (err < 0) + goto cleanup; + + vic->falcon.firmware.phys = phys; + } + return 0; cleanup: - vic->falcon.data = NULL; + if (!client->group) + dma_free_coherent(vic->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + return err; } @@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev) struct vic *vic; int err; + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL); if (!vic) return -ENOMEM; @@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev) vic->falcon.dev = dev; vic->falcon.regs = vic->regs; - vic->falcon.ops = &vic_falcon_ops; err = falcon_init(&vic->falcon); if (err < 0) diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 03d0e2df6774..94fb1f593564 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -649,7 +649,7 @@ static void gm12u320_driver_release(struct drm_device *dev) kfree(gm12u320); } -DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops); +DEFINE_DRM_GEM_FOPS(gm12u320_fops); static struct drm_driver gm12u320_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 01fc670ce7a2..caea2a099496 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,8 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \ - ttm_page_alloc_dma.o + ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o +ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index ea4d59eb8966..6050dc846894 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -51,7 +51,7 @@ struct ttm_agp_backend { static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); - struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page; + struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem; int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c7f86499165f..8d91b0428af1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj); DEFINE_MUTEX(ttm_global_mutex); unsigned ttm_bo_glob_use_count; struct ttm_bo_global ttm_bo_glob; +EXPORT_SYMBOL(ttm_bo_glob); static struct attribute ttm_bo_count = { .name = "bo_count", @@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref) { 
struct ttm_buffer_object *bo = container_of(list_kref, struct ttm_buffer_object, list_kref); - struct ttm_bo_device *bdev = bo->bdev; size_t acc_size = bo->acc_size; BUG_ON(kref_read(&bo->list_kref)); BUG_ON(kref_read(&bo->kref)); - BUG_ON(atomic_read(&bo->cpu_writers)); BUG_ON(bo->mem.mm_node != NULL); BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->ddestroy)); ttm_tt_destroy(bo->ttm); - atomic_dec(&bo->bdev->glob->bo_count); + atomic_dec(&ttm_bo_glob.bo_count); dma_fence_put(bo->moving); if (!ttm_bo_uses_embedded_gem_object(bo)) dma_resv_fini(&bo->base._resv); mutex_destroy(&bo->wu_mutex); bo->destroy(bo); - ttm_mem_global_free(bdev->glob->mem_glob, acc_size); + ttm_mem_global_free(&ttm_mem_glob, acc_size); } static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, @@ -185,25 +184,20 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, list_add_tail(&bo->lru, &man->lru[bo->priority]); kref_get(&bo->list_kref); - if (bo->ttm && !(bo->ttm->page_flags & - (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { - list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]); + if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm && + !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | + TTM_PAGE_FLAG_SWAPPED))) { + list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); kref_get(&bo->list_kref); } } -void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) -{ - ttm_bo_add_mem_to_lru(bo, &bo->mem); -} -EXPORT_SYMBOL(ttm_bo_add_to_lru); - static void ttm_bo_ref_bug(struct kref *list_kref) { BUG(); } -void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; bool notify = false; @@ -223,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) bdev->driver->del_from_lru_notify(bo); } -void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) -{ - struct ttm_bo_global *glob = bo->bdev->glob; - - spin_lock(&glob->lru_lock); - ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); -} -EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); - static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, struct ttm_buffer_object *bo) { @@ -247,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, dma_resv_assert_held(bo->base.resv); ttm_bo_del_from_lru(bo); - ttm_bo_add_to_lru(bo); + ttm_bo_add_mem_to_lru(bo, &bo->mem); if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { switch (bo->mem.mem_type) { @@ -310,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->last->base.resv); - lru = &pos->first->bdev->glob->swap_lru[i]; + lru = &ttm_bo_glob.swap_lru[i]; list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); } } @@ -474,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bdev->glob; int ret; ret = ttm_bo_individualize_resv(bo); @@ -484,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) */ dma_resv_wait_timeout_rcu(bo->base.resv, true, false, 30 * HZ); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); goto error; } - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ret = dma_resv_trylock(bo->base.resv) ? 
0 : -EBUSY; if (!ret) { if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); if (bo->base.resv != &bo->base._resv) dma_resv_unlock(&bo->base._resv); @@ -511,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) */ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; - ttm_bo_add_to_lru(bo); + ttm_bo_move_to_lru_tail(bo, NULL); } dma_resv_unlock(bo->base.resv); @@ -522,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) error: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); schedule_delayed_work(&bdev->wq, ((HZ / 100) < 1) ? 1 : HZ / 100); @@ -545,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu, bool unlock_resv) { - struct ttm_bo_global *glob = bo->bdev->glob; struct dma_resv *resv; int ret; @@ -564,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (unlock_resv) dma_resv_unlock(bo->base.resv); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); lret = dma_resv_wait_timeout_rcu(resv, true, interruptible, @@ -575,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, else if (lret == 0) return -EBUSY; - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { /* * We raced, and lost, someone else holds the reservation now, @@ -585,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, * delayed destruction would succeed, so just return success * here. */ - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } ret = 0; @@ -594,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (ret || unlikely(list_empty(&bo->ddestroy))) { if (unlock_resv) dma_resv_unlock(bo->base.resv); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return ret; } @@ -602,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, list_del_init(&bo->ddestroy); kref_put(&bo->list_kref, ttm_bo_ref_bug); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); ttm_bo_cleanup_memtype_use(bo); if (unlock_resv) @@ -617,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, */ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { - struct ttm_bo_global *glob = bdev->glob; + struct ttm_bo_global *glob = &ttm_bo_glob; struct list_head removed; bool empty; @@ -841,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, struct ww_acquire_ctx *ticket) { struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; - struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; bool locked = false; unsigned i; int ret; - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { list_for_each_entry(bo, &man->lru[i], lru) { bool busy; @@ -878,11 +859,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, if (!bo) { if (busy_bo) - ttm_bo_get(busy_bo); - spin_unlock(&glob->lru_lock); + kref_get(&busy_bo->list_kref); + spin_unlock(&ttm_bo_glob.lru_lock); ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket); if (busy_bo) - ttm_bo_put(busy_bo); + kref_put(&busy_bo->list_kref, ttm_bo_release_list); return ret; } @@ 
-895,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } - ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); ret = ttm_bo_evict(bo, ctx); - if (locked) { + if (locked) ttm_bo_unreserve(bo); - } else { - spin_lock(&glob->lru_lock); - ttm_bo_add_to_lru(bo); - spin_unlock(&glob->lru_lock); - } kref_put(&bo->list_kref, ttm_bo_release_list); return ret; @@ -925,7 +900,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *mem, + bool no_wait_gpu) { struct dma_fence *fence; int ret; @@ -934,19 +910,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); - if (fence) { - dma_resv_add_shared_fence(bo->base.resv, fence); + if (!fence) + return 0; - ret = dma_resv_reserve_shared(bo->base.resv, 1); - if (unlikely(ret)) { - dma_fence_put(fence); - return ret; - } + if (no_wait_gpu) + return -EBUSY; + + dma_resv_add_shared_fence(bo->base.resv, fence); - dma_fence_put(bo->moving); - bo->moving = fence; + ret = dma_resv_reserve_shared(bo->base.resv, 1); + if (unlikely(ret)) { + dma_fence_put(fence); + return ret; } + dma_fence_put(bo->moving); + bo->moving = fence; return 0; } @@ -977,7 +956,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; } while (1); - return ttm_bo_add_move_fence(bo, man, mem); + return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, @@ -1067,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, mem->mem_type = mem_type; mem->placement = cur_flags; - if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) { - spin_lock(&bo->bdev->glob->lru_lock); - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, mem); - spin_unlock(&bo->bdev->glob->lru_lock); - } + spin_lock(&ttm_bo_glob.lru_lock); + ttm_bo_del_from_lru(bo); + ttm_bo_add_mem_to_lru(bo, mem); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } @@ -1119,14 +1096,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (unlikely(ret)) goto error; - if (mem->mm_node) { - ret = ttm_bo_add_move_fence(bo, man, mem); - if (unlikely(ret)) { - (*man->func->put_node)(man, mem); - goto error; - } - return 0; + if (!mem->mm_node) + continue; + + ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); + if (unlikely(ret)) { + (*man->func->put_node)(man, mem); + if (ret == -EBUSY) + continue; + + goto error; } + return 0; } for (i = 0; i < placement->num_busy_placement; ++i) { @@ -1159,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, error: if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { - spin_lock(&bo->bdev->glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&bo->bdev->glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); } return ret; @@ -1285,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)) { + struct ttm_mem_global *mem_glob = &ttm_mem_glob; int ret = 0; unsigned long num_pages; - struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; bool locked; ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx); @@ -1314,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, kref_init(&bo->kref); kref_init(&bo->list_kref); - 
atomic_set(&bo->cpu_writers, 0); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); @@ -1348,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, dma_resv_init(&bo->base._resv); drm_vma_node_reset(&bo->base.vma_node); } - atomic_inc(&bo->bdev->glob->bo_count); + atomic_inc(&ttm_bo_glob.bo_count); /* * For ttm_bo_type_device buffers, allocate @@ -1378,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, return ret; } - if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { - spin_lock(&bdev->glob->lru_lock); - ttm_bo_add_to_lru(bo); - spin_unlock(&bdev->glob->lru_lock); - } + spin_lock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail(bo, NULL); + spin_unlock(&ttm_bo_glob.lru_lock); return ret; } @@ -1480,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, .flags = TTM_OPT_FLAG_FORCE_ALLOC }; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct ttm_bo_global *glob = bdev->glob; + struct ttm_bo_global *glob = &ttm_bo_glob; struct dma_fence *fence; int ret; unsigned i; @@ -1649,8 +1627,6 @@ static int ttm_bo_global_init(void) goto out; spin_lock_init(&glob->lru_lock); - glob->mem_glob = &ttm_mem_glob; - glob->mem_glob->bo_glob = glob; glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); if (unlikely(glob->dummy_read_page == NULL)) { @@ -1674,10 +1650,10 @@ out: int ttm_bo_device_release(struct ttm_bo_device *bdev) { + struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; unsigned i = TTM_NUM_MEM_TYPES; struct ttm_mem_type_manager *man; - struct ttm_bo_global *glob = bdev->glob; while (i--) { man = &bdev->man[i]; @@ -1746,7 +1722,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = mapping; - bdev->glob = glob; bdev->need_dma32 = need_dma32; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); @@ -1826,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_wait); -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) -{ - int ret = 0; - - /* - * Using ttm_bo_reserve makes sure the lru lists are updated. - */ - - ret = ttm_bo_reserve(bo, true, no_wait, NULL); - if (unlikely(ret != 0)) - return ret; - ret = ttm_bo_wait(bo, true, no_wait); - if (likely(ret == 0)) - atomic_inc(&bo->cpu_writers); - ttm_bo_unreserve(bo); - return ret; -} -EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); - -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) -{ - atomic_dec(&bo->cpu_writers); -} -EXPORT_SYMBOL(ttm_bo_synccpu_write_release); - /** * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list. 
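/*
 * Illustrative sketch, not part of the patch: the conversions in this file
 * are mechanical. Every bo->bdev->glob indirection becomes the now-exported
 * ttm_bo_glob singleton, and the removed ttm_bo_add_to_lru()/
 * ttm_bo_del_from_lru() pairs collapse into ttm_bo_move_to_lru_tail(),
 * which asserts that the caller holds the buffer's reservation. The
 * resulting calling convention, with a placeholder function name:
 */
static void example_touch_lru(struct ttm_buffer_object *bo)
{
	/* before this series: spin_lock(&bo->bdev->glob->lru_lock); */
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
}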
@@ -1950,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev) .no_wait_gpu = false }; - while (ttm_bo_swapout(bdev->glob, &ctx) == 0) - ; + while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); } EXPORT_SYMBOL(ttm_bo_swapout_all); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index fe81c565e7ef..6b0883a1776e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) mutex_lock(&man->io_reserve_mutex); return 0; } -EXPORT_SYMBOL(ttm_mem_io_lock); void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) { @@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) mutex_unlock(&man->io_reserve_mutex); } -EXPORT_SYMBOL(ttm_mem_io_unlock); static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) { @@ -153,7 +151,6 @@ retry: } return ret; } -EXPORT_SYMBOL(ttm_mem_io_reserve); void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) @@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, bdev->driver->io_mem_free(bdev, mem); } -EXPORT_SYMBOL(ttm_mem_io_free); int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) { @@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ - atomic_inc(&bo->bdev->glob->bo_count); + atomic_inc(&ttm_bo_glob.bo_count); INIT_LIST_HEAD(&fbo->base.ddestroy); INIT_LIST_HEAD(&fbo->base.lru); INIT_LIST_HEAD(&fbo->base.swap); @@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, mutex_init(&fbo->base.wu_mutex); fbo->base.moving = NULL; drm_vma_node_reset(&fbo->base.base.vma_node); - atomic_set(&fbo->base.cpu_writers, 0); kref_init(&fbo->base.list_kref); kref_init(&fbo->base.kref); fbo->base.destroy = &ttm_transfered_destroy; fbo->base.acc_size = 0; - fbo->base.base.resv = &fbo->base.base._resv; - dma_resv_init(fbo->base.base.resv); - ret = dma_resv_trylock(fbo->base.base.resv); + if (bo->base.resv == &bo->base._resv) + fbo->base.base.resv = &fbo->base.base._resv; + + dma_resv_init(&fbo->base.base._resv); + ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); *new_obj = &fbo->base; @@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (ret) return ret; - dma_resv_add_excl_fence(ghost_obj->base.resv, fence); + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); /** * If we're not moving to fixed memory, the TTM object @@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, else bo->ttm = NULL; - ttm_bo_unreserve(ghost_obj); + dma_resv_unlock(&ghost_obj->base._resv); ttm_bo_put(ghost_obj); } @@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, if (ret) return ret; - dma_resv_add_excl_fence(ghost_obj->base.resv, fence); + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); /** * If we're not moving to fixed memory, the TTM object @@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, else bo->ttm = NULL; - ttm_bo_unreserve(ghost_obj); + dma_resv_unlock(&ghost_obj->base._resv); ttm_bo_put(ghost_obj); } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { @@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) if (ret) return ret; - ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv); + ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv); /* Last resort, wait for the BO to be idle when we are 
OOM */ if (ret) ttm_bo_wait(bo, false, false); @@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) bo->mem.mem_type = TTM_PL_SYSTEM; bo->ttm = NULL; - ttm_bo_unreserve(ghost); + dma_resv_unlock(&ghost->base._resv); ttm_bo_put(ghost); return 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4aa007edffb0..4b34a278d65b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) } if (bo->moving != moving) { - spin_lock(&bdev->glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&bdev->glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); } dma_fence_put(moving); } @@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) else ret = vmf_insert_pfn(&cvma, address, pfn); - /* - * Somebody beat us to this PTE or prefaulting to - * an already populated PTE, or prefaulting error. - */ - - if (unlikely((ret == VM_FAULT_NOPAGE && i > 0))) - break; - else if (unlikely(ret & VM_FAULT_ERROR)) - goto out_io_unlock; + /* Never error on prefaulted PTEs */ + if (unlikely((ret & VM_FAULT_ERROR))) { + if (i == 0) + goto out_io_unlock; + else + break; + } address += PAGE_SIZE; if (unlikely(++page_offset >= page_last)) @@ -426,6 +424,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, return bo; } +static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma) +{ + vma->vm_ops = &ttm_bo_vm_ops; + + /* + * Note: We're transferring the bo reference to + * vma->vm_private_data here. + */ + + vma->vm_private_data = bo; + + /* + * We'd like to use VM_PFNMAP on shared mappings, where + * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, + * but for some reason VM_PFNMAP + x86 PAT + write-combine is very + * bad for performance. Until that has been sorted out, use + * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 + */ + vma->vm_flags |= VM_MIXEDMAP; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; +} + int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) { @@ -449,24 +469,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, if (unlikely(ret != 0)) goto out_unref; - vma->vm_ops = &ttm_bo_vm_ops; - - /* - * Note: We're transferring the bo reference to - * vma->vm_private_data here. - */ - - vma->vm_private_data = bo; - - /* - * We'd like to use VM_PFNMAP on shared mappings, where - * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, - * but for some reason VM_PFNMAP + x86 PAT + write-combine is very - * bad for performance. Until that has been sorted out, use - * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 - */ - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + ttm_bo_mmap_vma_setup(bo, vma); return 0; out_unref: ttm_bo_put(bo); @@ -474,17 +477,17 @@ out_unref: } EXPORT_SYMBOL(ttm_bo_mmap); -int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) +int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { - if (vma->vm_pgoff != 0) - return -EACCES; - ttm_bo_get(bo); - vma->vm_ops = &ttm_bo_vm_ops; - vma->vm_private_data = bo; - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_flags |= VM_IO | VM_DONTEXPAND; + /* + * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset + * removed. Add it back here until the rest of TTM works without it. 
+ */ + vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node); + + ttm_bo_mmap_vma_setup(bo, vma); return 0; } -EXPORT_SYMBOL(ttm_fbdev_mmap); +EXPORT_SYMBOL(ttm_bo_mmap_obj); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 131dae8f4170..1797f04c0534 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list, } } -static void ttm_eu_del_from_lru_locked(struct list_head *list) -{ - struct ttm_validate_buffer *entry; - - list_for_each_entry(entry, list, head) { - struct ttm_buffer_object *bo = entry->bo; - ttm_bo_del_from_lru(bo); - } -} - void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, struct list_head *list) { struct ttm_validate_buffer *entry; - struct ttm_bo_global *glob; if (list_empty(list)) return; - entry = list_first_entry(list, struct ttm_validate_buffer, head); - glob = entry->bo->bdev->glob; - - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - if (list_empty(&bo->lru)) - ttm_bo_add_to_lru(bo); + ttm_bo_move_to_lru_tail(bo, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); if (ticket) ww_acquire_fini(ticket); @@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct list_head *list, bool intr, - struct list_head *dups, bool del_lru) + struct list_head *dups) { - struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; int ret; if (list_empty(list)) return 0; - entry = list_first_entry(list, struct ttm_validate_buffer, head); - glob = entry->bo->bdev->glob; - if (ticket) ww_acquire_init(ticket, &reservation_ww_class); @@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct ttm_buffer_object *bo = entry->bo; ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); - if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { - dma_resv_unlock(bo->base.resv); - - ret = -EBUSY; - - } else if (ret == -EALREADY && dups) { + if (ret == -EALREADY && dups) { struct ttm_validate_buffer *safe = entry; entry = list_prev_entry(entry, head); list_del(&safe->head); @@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, list_add(&entry->head, list); } - if (del_lru) { - spin_lock(&glob->lru_lock); - ttm_eu_del_from_lru_locked(list); - spin_unlock(&glob->lru_lock); - } return 0; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); @@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, struct dma_fence *fence) { struct ttm_validate_buffer *entry; - struct ttm_buffer_object *bo; - struct ttm_bo_global *glob; if (list_empty(list)) return; - bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; - glob = bo->bdev->glob; - - spin_lock(&glob->lru_lock); - + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(entry, list, head) { - bo = entry->bo; + struct ttm_buffer_object *bo = entry->bo; + if (entry->num_shared) dma_resv_add_shared_fence(bo->base.resv, fence); else dma_resv_add_excl_fence(bo->base.resv, fence); - if (list_empty(&bo->lru)) - ttm_bo_add_to_lru(bo); - else - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); if (ticket) ww_acquire_fini(ticket); } diff 
--git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 8617958b7ae6..acd63b70d814 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, while (ttm_zones_above_swap_target(glob, from_wq, extra)) { spin_unlock(&glob->lock); - ret = ttm_bo_swapout(glob->bo_glob, ctx); + ret = ttm_bo_swapout(&ttm_bo_glob, ctx); spin_lock(&glob->lock); if (unlikely(ret != 0)) break; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 627f8dc91d0e..b40a4678c296 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void) static void ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) { - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; + struct ttm_mem_global *mem_glob = &ttm_mem_glob; unsigned i; if (mem_count_update == 0) @@ -1049,7 +1049,7 @@ put_pages: int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; + struct ttm_mem_global *mem_glob = &ttm_mem_glob; unsigned i; int ret; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 7d78e6deac89..bf876faea592 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -33,7 +33,6 @@ * when freed). */ -#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) #define pr_fmt(fmt) "[TTM] " fmt #include <linux/dma-mapping.h> @@ -886,8 +885,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, struct ttm_operation_ctx *ctx) { + struct ttm_mem_global *mem_glob = &ttm_mem_glob; struct ttm_tt *ttm = &ttm_dma->ttm; - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; unsigned long num_pages = ttm->num_pages; struct dma_pool *pool; struct dma_page *d_page; @@ -991,8 +990,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate); /* Put all pages in pages list to correct pool to wait for reuse */ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) { + struct ttm_mem_global *mem_glob = &ttm_mem_glob; struct ttm_tt *ttm = &ttm_dma->ttm; - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; struct dma_pool *pool; struct dma_page *d_page, *next; enum pool_type type; @@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); - -#endif diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a22b75a3a533..edd299ab53d8 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /* gem_create_object function for allocating a BO struct and doing diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index e94bf75368be..1a07462b4528 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -172,7 +172,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) kfree(v3d_priv); } -DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops); +DEFINE_DRM_GEM_FOPS(v3d_drm_fops); /* DRM_AUTH is required 
on SUBMIT_CL for now, while we don't have GMP * protection between clients. Note that render nodes would be be diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 4c4b59ae2c81..549dde83408b 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -560,14 +560,17 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (args->bcl_start != args->bcl_end) { bin = kcalloc(1, sizeof(*bin), GFP_KERNEL); - if (!bin) + if (!bin) { + v3d_job_put(&render->base); return -ENOMEM; + } ret = v3d_job_init(v3d, file_priv, &bin->base, v3d_job_free, args->in_sync_bcl); if (ret) { kfree(bin); v3d_job_put(&render->base); + kfree(bin); return ret; } diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile index 55d798c76b21..f2e968b5ffa6 100644 --- a/drivers/gpu/drm/vboxvideo/Makefile +++ b/drivers/gpu/drm/vboxvideo/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \ - vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ + vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ vbox_mode.o vbox_ttm.o obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 862db495d111..8512d970a09f 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = { }; MODULE_DEVICE_TABLE(pci, pciidlist); -static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = { - .fb_probe = vboxfb_create, -}; - static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct vbox_private *vbox; @@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_mode_fini; - ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper, - &vbox_fb_helper_funcs, 32, - vbox->num_crtcs); + ret = drm_fbdev_generic_setup(&vbox->ddev, 32); if (ret) goto err_irq_fini; ret = drm_dev_register(&vbox->ddev, 0); if (ret) - goto err_fbdev_fini; + goto err_irq_fini; return 0; -err_fbdev_fini: - vbox_fbdev_fini(vbox); err_irq_fini: vbox_irq_fini(vbox); err_mode_fini: @@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev) struct vbox_private *vbox = pci_get_drvdata(pdev); drm_dev_unregister(&vbox->ddev); - vbox_fbdev_fini(vbox); vbox_irq_fini(vbox); vbox_mode_fini(vbox); vbox_mm_fini(vbox); @@ -189,10 +181,7 @@ static struct pci_driver vbox_pci_driver = { #endif }; -static const struct file_operations vbox_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(vbox_fops); static struct drm_driver driver = { .driver_features = diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h index fb436ec760ea..87421903816c 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.h +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h @@ -16,7 +16,6 @@ #include <linux/string.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_vram_helper.h> @@ -46,16 +45,9 @@ sizeof(struct hgsmi_host_flags)) #define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE -struct vbox_framebuffer { - struct drm_framebuffer base; - struct drm_gem_object *obj; -}; - struct vbox_private { /* Must be first; or we must define our 
own release callback */ struct drm_device ddev; - struct drm_fb_helper fb_helper; - struct vbox_framebuffer afb; u8 __iomem *guest_heap; u8 __iomem *vbva_buffers; @@ -135,7 +127,6 @@ struct vbox_encoder { #define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base) #define to_vbox_connector(x) container_of(x, struct vbox_connector, base) #define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base) -#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base) bool vbox_check_supported(u16 id); int vbox_hw_init(struct vbox_private *vbox); @@ -146,25 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox); void vbox_report_caps(struct vbox_private *vbox); -void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, - struct drm_clip_rect *rects, - unsigned int num_rects); - -int vbox_framebuffer_init(struct vbox_private *vbox, - struct vbox_framebuffer *vbox_fb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj); - -int vboxfb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); -void vbox_fbdev_fini(struct vbox_private *vbox); - int vbox_mm_init(struct vbox_private *vbox); void vbox_mm_fini(struct vbox_private *vbox); -int vbox_gem_create(struct vbox_private *vbox, - u32 size, bool iskernel, struct drm_gem_object **obj); - /* vbox_irq.c */ int vbox_irq_init(struct vbox_private *vbox); void vbox_irq_fini(struct vbox_private *vbox); diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c deleted file mode 100644 index 8f74bcffc034..000000000000 --- a/drivers/gpu/drm/vboxvideo/vbox_fb.c +++ /dev/null @@ -1,149 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright (C) 2013-2017 Oracle Corporation - * This file is based on ast_fb.c - * Copyright 2012 Red Hat Inc. 
- * Authors: Dave Airlie <airlied@redhat.com> - * Michael Thayer <michael.thayer@oracle.com, - */ -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/fb.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/string.h> -#include <linux/sysrq.h> -#include <linux/tty.h> - -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> - -#include "vbox_drv.h" -#include "vboxvideo.h" - -#ifdef CONFIG_DRM_KMS_FB_HELPER -static struct fb_deferred_io vbox_defio = { - .delay = HZ / 30, - .deferred_io = drm_fb_helper_deferred_io, -}; -#endif - -static struct fb_ops vboxfb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_fillrect = drm_fb_helper_sys_fillrect, - .fb_copyarea = drm_fb_helper_sys_copyarea, - .fb_imageblit = drm_fb_helper_sys_imageblit, -}; - -int vboxfb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct vbox_private *vbox = - container_of(helper, struct vbox_private, fb_helper); - struct pci_dev *pdev = vbox->ddev.pdev; - struct drm_mode_fb_cmd2 mode_cmd; - struct drm_framebuffer *fb; - struct fb_info *info; - struct drm_gem_object *gobj; - struct drm_gem_vram_object *gbo; - int size, ret; - s64 gpu_addr; - u32 pitch; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - mode_cmd.pitches[0] = pitch; - - size = pitch * mode_cmd.height; - - ret = vbox_gem_create(vbox, size, true, &gobj); - if (ret) { - DRM_ERROR("failed to create fbcon backing object %d\n", ret); - return ret; - } - - ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj); - if (ret) - return ret; - - gbo = drm_gem_vram_of_gem(gobj); - - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) - return ret; - - info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) - return PTR_ERR(info); - - info->screen_size = size; - info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL); - if (IS_ERR(info->screen_base)) - return PTR_ERR(info->screen_base); - - fb = &vbox->afb.base; - helper->fb = fb; - - info->fbops = &vboxfb_ops; - - /* - * This seems to be done for safety checking that the framebuffer - * is not registered twice by different drivers. 
- */ - info->apertures->ranges[0].base = pci_resource_start(pdev, 0); - info->apertures->ranges[0].size = pci_resource_len(pdev, 0); - - drm_fb_helper_fill_info(info, helper, sizes); - - gpu_addr = drm_gem_vram_offset(gbo); - if (gpu_addr < 0) - return (int)gpu_addr; - info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr; - info->fix.smem_len = vbox->available_vram_size - gpu_addr; - -#ifdef CONFIG_DRM_KMS_FB_HELPER - info->fbdefio = &vbox_defio; - fb_deferred_io_init(info); -#endif - - info->pixmap.flags = FB_PIXMAP_SYSTEM; - - DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); - - return 0; -} - -void vbox_fbdev_fini(struct vbox_private *vbox) -{ - struct vbox_framebuffer *afb = &vbox->afb; - -#ifdef CONFIG_DRM_KMS_FB_HELPER - if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio) - fb_deferred_io_cleanup(vbox->fb_helper.fbdev); -#endif - - drm_fb_helper_unregister_fbi(&vbox->fb_helper); - - if (afb->obj) { - struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj); - - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); - - drm_gem_object_put_unlocked(afb->obj); - afb->obj = NULL; - } - drm_fb_helper_fini(&vbox->fb_helper); - - drm_framebuffer_unregister_private(&afb->base); - drm_framebuffer_cleanup(&afb->base); -} diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index 02fa8277ff1e..9dcab115a261 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -11,22 +11,12 @@ #include <linux/vbox_err.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_damage_helper.h> #include "vbox_drv.h" #include "vboxvideo_guest.h" #include "vboxvideo_vbe.h" -static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb) -{ - struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb); - - if (vbox_fb->obj) - drm_gem_object_put_unlocked(vbox_fb->obj); - - drm_framebuffer_cleanup(fb); - kfree(fb); -} - void vbox_report_caps(struct vbox_private *vbox) { u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | @@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox) hgsmi_send_caps_info(vbox->guest_pool, caps); } -/* Send information about dirty rectangles to VBVA. 
*/ -void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, - struct drm_clip_rect *rects, - unsigned int num_rects) -{ - struct vbox_private *vbox = fb->dev->dev_private; - struct drm_display_mode *mode; - struct drm_crtc *crtc; - int crtc_x, crtc_y; - unsigned int i; - - mutex_lock(&vbox->hw_mutex); - list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) { - if (crtc->primary->state->fb != fb) - continue; - - mode = &crtc->state->mode; - crtc_x = crtc->primary->state->src_x >> 16; - crtc_y = crtc->primary->state->src_y >> 16; - - for (i = 0; i < num_rects; ++i) { - struct vbva_cmd_hdr cmd_hdr; - unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; - - if (rects[i].x1 > crtc_x + mode->hdisplay || - rects[i].y1 > crtc_y + mode->vdisplay || - rects[i].x2 < crtc_x || - rects[i].y2 < crtc_y) - continue; - - cmd_hdr.x = (s16)rects[i].x1; - cmd_hdr.y = (s16)rects[i].y1; - cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1; - cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1; - - if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], - vbox->guest_pool)) - continue; - - vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, - &cmd_hdr, sizeof(cmd_hdr)); - vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); - } - } - mutex_unlock(&vbox->hw_mutex); -} - -static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int flags, unsigned int color, - struct drm_clip_rect *rects, - unsigned int num_rects) -{ - vbox_framebuffer_dirty_rectangles(fb, rects, num_rects); - - return 0; -} - -static const struct drm_framebuffer_funcs vbox_fb_funcs = { - .destroy = vbox_user_framebuffer_destroy, - .dirty = vbox_user_framebuffer_dirty, -}; - -int vbox_framebuffer_init(struct vbox_private *vbox, - struct vbox_framebuffer *vbox_fb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj) -{ - int ret; - - drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd); - vbox_fb->obj = obj; - ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs); - if (ret) { - DRM_ERROR("framebuffer init failed %d\n", ret); - return ret; - } - - return 0; -} - static int vbox_accel_init(struct vbox_private *vbox) { struct vbva_buffer *vbva; @@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox) gen_pool_destroy(vbox->guest_pool); pci_iounmap(vbox->ddev.pdev, vbox->guest_heap); } - -int vbox_gem_create(struct vbox_private *vbox, - u32 size, bool iskernel, struct drm_gem_object **obj) -{ - struct drm_gem_vram_object *gbo; - int ret; - - *obj = NULL; - - size = roundup(size, PAGE_SIZE); - if (size == 0) - return -EINVAL; - - gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev, - size, 0, false); - if (IS_ERR(gbo)) { - ret = PTR_ERR(gbo); - if (ret != -ERESTARTSYS) - DRM_ERROR("failed to allocate GEM object\n"); - return ret; - } - - *obj = &gbo->bo.base; - - return 0; -} diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index e1e48ba919eb..19612132c8a3 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -13,7 +13,9 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox) if (!fb1) { fb1 = fb; - if (to_vbox_framebuffer(fb1) == &vbox->afb) + 
if (fb1 == vbox->ddev.fb_helper->fb) break; } else if (fb != fb1) { single_framebuffer = false; @@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) { - struct drm_gem_vram_object *gbo = - drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj); + struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]); struct vbox_private *vbox = crtc->dev->dev_private; struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state); @@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane, { struct drm_crtc *crtc = plane->state->crtc; struct drm_framebuffer *fb = plane->state->fb; + struct vbox_private *vbox = fb->dev->dev_private; + struct drm_mode_rect *clips; + uint32_t num_clips, i; vbox_crtc_set_base_and_mode(crtc, fb, plane->state->src_x >> 16, plane->state->src_y >> 16); + + /* Send information about dirty rectangles to VBVA. */ + + clips = drm_plane_get_damage_clips(plane->state); + num_clips = drm_plane_get_damage_clips_count(plane->state); + + if (!num_clips) + return; + + mutex_lock(&vbox->hw_mutex); + + for (i = 0; i < num_clips; ++i, ++clips) { + struct vbva_cmd_hdr cmd_hdr; + unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; + + cmd_hdr.x = (s16)clips->x1; + cmd_hdr.y = (s16)clips->y1; + cmd_hdr.w = (u16)clips->x2 - clips->x1; + cmd_hdr.h = (u16)clips->y2 - clips->y1; + + if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], + vbox->guest_pool)) + continue; + + vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, + &cmd_hdr, sizeof(cmd_hdr)); + vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); + } + + mutex_unlock(&vbox->hw_mutex); } static void vbox_primary_atomic_disable(struct drm_plane *plane, @@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane, old_state->src_y >> 16); } -static int vbox_primary_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_gem_vram_object *gbo; - int ret; - - if (!new_state->fb) - return 0; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj); - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) - DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret); - - return ret; -} - -static void vbox_primary_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_gem_vram_object *gbo; - - if (!old_state->fb) - return; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj); - drm_gem_vram_unpin(gbo); -} - static int vbox_cursor_atomic_check(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, container_of(plane->dev, struct vbox_private, ddev); struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc); struct drm_framebuffer *fb = plane->state->fb; - struct drm_gem_vram_object *gbo = - drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj); + struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]); u32 width = plane->state->crtc_w; u32 height = plane->state->crtc_h; size_t data_size, mask_size; @@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane, mutex_unlock(&vbox->hw_mutex); } -static int vbox_cursor_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_gem_vram_object *gbo; - - if (!new_state->fb) - return 0; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj); - 
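
Note on the hunk above: with the legacy framebuffer `->dirty` hook deleted, dirty reporting moves into the primary plane's `atomic_update` via DRM's damage-clip accessors. A minimal sketch of the pattern, with a hypothetical `send_dirty_rect()` standing in for the VBVA buffer calls:

```c
#include <drm/drm_plane.h>

/* Sketch: forward per-plane damage clips to a host dirty-rect protocol.
 * send_dirty_rect() is a hypothetical stand-in for the VBVA sequence. */
static void forward_damage(struct drm_plane_state *state)
{
	struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
	uint32_t num_clips = drm_plane_get_damage_clips_count(state);
	uint32_t i;

	for (i = 0; i < num_clips; ++i, ++clips)
		/* clips are x1/y1 (inclusive) .. x2/y2 (exclusive) */
		send_dirty_rect(clips->x1, clips->y1,
				clips->x2 - clips->x1,
				clips->y2 - clips->y1);
}
```

Userspace only populates these clips when it uses the DIRTYFB ioctl or atomic damage properties, which is why the update bails out early when `num_clips` is zero.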
return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); -} - -static void vbox_cursor_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_gem_vram_object *gbo; - - if (!plane->state->fb) - return; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj); - drm_gem_vram_unpin(gbo); -} - static const u32 vbox_cursor_plane_formats[] = { DRM_FORMAT_ARGB8888, }; @@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = { .atomic_check = vbox_cursor_atomic_check, .atomic_update = vbox_cursor_atomic_update, .atomic_disable = vbox_cursor_atomic_disable, - .prepare_fb = vbox_cursor_prepare_fb, - .cleanup_fb = vbox_cursor_cleanup_fb, + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, }; static const struct drm_plane_funcs vbox_cursor_plane_funcs = { @@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = { .atomic_check = vbox_primary_atomic_check, .atomic_update = vbox_primary_atomic_update, .atomic_disable = vbox_primary_atomic_disable, - .prepare_fb = vbox_primary_prepare_fb, - .cleanup_fb = vbox_primary_cleanup_fb, + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, }; static const struct drm_plane_funcs vbox_primary_plane_funcs = { @@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev, return 0; } -static struct drm_framebuffer *vbox_user_framebuffer_create( - struct drm_device *dev, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct vbox_private *vbox = - container_of(dev, struct vbox_private, ddev); - struct drm_gem_object *obj; - struct vbox_framebuffer *vbox_fb; - int ret = -ENOMEM; - - obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); - if (!obj) - return ERR_PTR(-ENOENT); - - vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL); - if (!vbox_fb) - goto err_unref_obj; - - ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj); - if (ret) - goto err_free_vbox_fb; - - return &vbox_fb->base; - -err_free_vbox_fb: - kfree(vbox_fb); -err_unref_obj: - drm_gem_object_put_unlocked(obj); - return ERR_PTR(ret); -} - static const struct drm_mode_config_funcs vbox_mode_funcs = { - .fb_create = vbox_user_framebuffer_create, + .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 0853b980bcb3..1c62c6c9244b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -398,10 +398,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); - frame.avi.right_bar = cstate->tv.margins.right; - frame.avi.left_bar = cstate->tv.margins.left; - frame.avi.top_bar = cstate->tv.margins.top; - frame.avi.bottom_bar = cstate->tv.margins.bottom; + drm_hdmi_avi_infoframe_bars(&frame.avi, cstate); vc4_hdmi_write_infoframe(encoder, &frame); } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index a5cb58754f7d..8dee698c90ff 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -184,7 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -DEFINE_DRM_GEM_SHMEM_FOPS(virtio_gpu_driver_fops); 
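
The net effect of the deletions above is that vboxvideo stops carrying its own framebuffer struct, pin/unpin hooks, and `fb_create` path. A sketch of the resulting wiring, using only the core helpers named in the diff:

```c
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>

/* VRAM-backed planes can use the stock pin/unpin helpers directly;
 * only the atomic_check/update/disable callbacks stay driver-specific. */
static const struct drm_plane_helper_funcs plane_helpers_sketch = {
	.prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
	.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};

/* Userspace framebuffers come from the GEM helper; the _with_dirty
 * variant also wires up the DIRTYFB path needed for damage clips. */
static const struct drm_mode_config_funcs mode_funcs_sketch = {
	.fb_create = drm_gem_fb_create_with_dirty,
};
```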
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 314e02f94d9c..0b56ba005e25 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -267,8 +267,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, uint32_t resource_id); void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 0b3cdb0d83b0..2f5773e43557 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -155,16 +155,15 @@ int virtio_gpu_init(struct drm_device *dev) #ifdef __LITTLE_ENDIAN if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) vgdev->has_virgl_3d = true; - DRM_INFO("virgl 3d acceleration %s\n", - vgdev->has_virgl_3d ? "enabled" : "not supported by host"); -#else - DRM_INFO("virgl 3d acceleration not supported by guest\n"); #endif if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { vgdev->has_edid = true; - DRM_INFO("EDID support available.\n"); } + DRM_INFO("features: %cvirgl %cedid\n", + vgdev->has_virgl_3d ? '+' : '-', + vgdev->has_edid ? '+' : '-'); + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 69a3d310ff70..017a9e0fc3bb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -86,7 +86,7 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = &drm_gem_shmem_mmap, }; struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index f4b7360282ce..390524143139 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -132,10 +132,10 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d (vgdev, 0, - cpu_to_le32(plane->state->src_w >> 16), - cpu_to_le32(plane->state->src_h >> 16), - cpu_to_le32(plane->state->src_x >> 16), - cpu_to_le32(plane->state->src_y >> 16), + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16, objs, NULL); } } else { @@ -234,8 +234,8 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d (vgdev, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), + plane->state->crtc_w, + plane->state->crtc_h, 0, 0, objs, vgfb->fence); dma_fence_wait(&vgfb->fence->f, true); dma_fence_put(&vgfb->fence->f); diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 80176f379ad5..74ad3bc3ebe8 
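
On the virtio-gpu fops change above: mapping now goes through the GEM object's own `->mmap` callback, so the driver-level `file_operations` no longer need a shmem-specific variant. Sketch, with a hypothetical driver name:

```c
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

/* Per-object mmap replaces the shmem-specific fops macro. */
static const struct drm_gem_object_funcs gem_funcs_sketch = {
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap         = drm_gem_shmem_vmap,
	.vunmap       = drm_gem_shmem_vunmap,
	.mmap         = drm_gem_shmem_mmap,
};

/* Generic fops: mmap is routed to the object funcs above. */
DEFINE_DRM_GEM_FOPS(sketch_driver_fops);
```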
100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -549,8 +549,8 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { @@ -571,10 +571,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); cmd_p->offset = cpu_to_le64(offset); - cmd_p->r.width = width; - cmd_p->r.height = height; - cmd_p->r.x = x; - cmd_p->r.y = y; + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); } diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 80524a22412a..d1fe144aa289 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -11,13 +11,14 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <drm/drm_gem.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> -#include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_probe_helper.h> @@ -103,6 +104,8 @@ static struct drm_driver vkms_driver = { .gem_vm_ops = &vkms_gem_vm_ops, .gem_free_object_unlocked = vkms_gem_free_object, .get_vblank_timestamp = vkms_get_vblank_timestamp, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import_sg_table = vkms_prime_import_sg_table, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -157,6 +160,14 @@ static int __init vkms_init(void) if (ret) goto out_unregister; + ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, + DMA_BIT_MASK(64)); + + if (ret) { + DRM_ERROR("Could not initialize DMA support\n"); + goto out_fini; + } + vkms_device->drm.irq_enabled = true; ret = drm_vblank_init(&vkms_device->drm, 1); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 5a95100fa18b..7d52e24564db 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -137,6 +137,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj); void vkms_gem_vunmap(struct drm_gem_object *obj); +/* Prime */ +struct drm_gem_object * +vkms_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg); + /* CRC Support */ const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, size_t *count); diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 6489bfe0a149..2e01186fb943 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0+ +#include <linux/dma-buf.h> #include <linux/shmem_fs.h> #include <linux/vmalloc.h> +#include <drm/drm_prime.h> #include "vkms_drv.h" @@ -218,3 +220,28 @@ out: mutex_unlock(&vkms_obj->pages_lock); return ret; } + +struct drm_gem_object * +vkms_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg) +{ + struct vkms_gem_object *obj; + int npages; + + obj = __vkms_gem_create(dev, 
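
The virtio-gpu hunks above move every `cpu_to_le32()` into the one function that serializes the wire command, so all internal APIs stay in CPU byte order. A minimal sketch of that boundary, assuming a generic wire struct:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_rect {              /* device ABI: little-endian on the wire */
	__le32 x, y, w, h;
};

/* Callers pass native uint32_t; conversion happens exactly once,
 * at the point the command hits the wire. */
static void emit_rect(struct wire_rect *r,
		      uint32_t x, uint32_t y, uint32_t w, uint32_t h)
{
	r->x = cpu_to_le32(x);
	r->y = cpu_to_le32(y);
	r->w = cpu_to_le32(w);
	r->h = cpu_to_le32(h);
}
```

Keeping `__le32` out of function prototypes also lets sparse catch any caller that forgets (or double-applies) the conversion.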
attach->dmabuf->size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE; + DRM_DEBUG_PRIME("Importing %d pages\n", npages); + + obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); + if (!obj->pages) { + vkms_gem_free_object(&obj->gem); + return ERR_PTR(-ENOMEM); + } + + drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); + return &obj->gem; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index aad8d8140259..74016a08d118 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -566,7 +566,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, switch (ref_type) { case TTM_REF_SYNCCPU_WRITE: - ttm_bo_synccpu_write_release(&user_bo->vbo.base); + atomic_dec(&user_bo->vbo.cpu_writers); break; default: WARN_ONCE(true, "Undefined buffer object reference release.\n"); @@ -682,12 +682,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, struct ttm_object_file *tfile, uint32_t flags) { + bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); struct ttm_buffer_object *bo = &user_bo->vbo.base; bool existed; int ret; if (flags & drm_vmw_synccpu_allow_cs) { - bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); long lret; lret = dma_resv_wait_timeout_rcu @@ -700,15 +700,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, return 0; } - ret = ttm_bo_synccpu_write_grab - (bo, !!(flags & drm_vmw_synccpu_dontblock)); + ret = ttm_bo_reserve(bo, true, nonblock, NULL); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_bo_wait(bo, true, nonblock); + if (likely(ret == 0)) + atomic_inc(&user_bo->vbo.cpu_writers); + + ttm_bo_unreserve(bo); if (unlikely(ret != 0)) return ret; ret = ttm_ref_object_add(tfile, &user_bo->prime.base, TTM_REF_SYNCCPU_WRITE, &existed, false); if (ret != 0 || existed) - ttm_bo_synccpu_write_release(&user_bo->vbo.base); + atomic_dec(&user_bo->vbo.cpu_writers); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 81a95651643f..e962048f65d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -576,8 +576,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) else dev_priv->map_mode = vmw_dma_map_populate; - /* No TTM coherent page pool? FIXME: Ask TTM instead! */ - if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && + if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) && (dev_priv->map_mode == vmw_dma_alloc_coherent)) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 29f60e027a38..b18842f73081 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -102,6 +102,8 @@ struct vmw_fpriv { * @base: The TTM buffer object * @res_list: List of resources using this buffer object as a backing MOB * @pin_count: pin depth + * @cpu_writers: Number of synccpu write grabs. Protected by reservation when + * increased. May be decreased without reservation. * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB * @map: Kmap object for semi-persistent mappings * @res_prios: Eviction priority counts for attached resources @@ -110,6 +112,7 @@ struct vmw_buffer_object { struct ttm_buffer_object base; struct list_head res_list; s32 pin_count; + atomic_t cpu_writers; /* Not ref-counted. 
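
The vkms import path above is the standard recipe for drivers that track their own page arrays; condensed, with the helper names taken from the hunk:

```c
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <drm/drm_prime.h>

/* Condensed from vkms_prime_import_sg_table(): size the page array
 * from the dma-buf, then let the PRIME helper fill it from the sgt. */
static struct page **pages_from_sgt(struct dma_buf_attachment *attach,
				    struct sg_table *sg)
{
	int npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
	struct page **pages;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	drm_prime_sg_to_page_addr_arrays(sg, pages, NULL, npages);
	return pages;
}
```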
Protected by binding_mutex */ struct vmw_resource *dx_query_ctx; /* Protected by reservation */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5581a7826b4c..6dfe36fb817c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, val_buf->bo = &res->backup->base; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL, - true); + ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); if (unlikely(ret != 0)) goto out_no_reserve; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 29d8794f0421..de0530b4dc1b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -336,7 +336,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; void *cmd; if (res->func->destroy == vmw_gb_surface_destroy) { @@ -360,7 +359,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) */ mutex_lock(&dev_priv->cmdbuf_mutex); - srf = vmw_res_to_srf(res); dev_priv->used_memory_size -= res->backup_size; mutex_unlock(&dev_priv->cmdbuf_mutex); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f611b2290a1b..7bff3628fc54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -521,6 +521,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, }; int ret; + if (atomic_read(&vbo->cpu_writers)) + return -EBUSY; + if (vbo->pin_count > 0) return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index 0e063743dd86..71ce4b318850 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h @@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx, bool intr) { return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr, - NULL, true); + NULL); } /** diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index ba1828acd8c9..4be49c1aef51 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -718,17 +718,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev, struct device *dev = &xb_dev->dev; int ret; - /* - * The device is not spawn from a device tree, so arch_setup_dma_ops - * is not called, thus leaving the device with dummy DMA ops. - * This makes the device return error on PRIME buffer import, which - * is not correct: to fix this call of_dma_configure() with a NULL - * node to set default DMA ops. 
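
On the vmwgfx synccpu rework spread over the last two files: the driver now tracks CPU write grabs itself in an atomic counter. The grab path may only increment it while the BO is reserved; validation then refuses to move any BO with writers outstanding. Condensed:

```c
#include <linux/atomic.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Condensed from vmw_user_bo_synccpu_grab() and
 * vmw_validation_bo_validate_single() above. */
static int synccpu_grab_sketch(struct ttm_buffer_object *bo,
			       atomic_t *cpu_writers, bool nonblock)
{
	int ret = ttm_bo_reserve(bo, true, nonblock, NULL);

	if (ret)
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (ret == 0)
		atomic_inc(cpu_writers);  /* only under reservation, per the
					   * rule documented in vmwgfx_drv.h */
	ttm_bo_unreserve(bo);
	return ret;
}

static int validate_check_sketch(atomic_t *cpu_writers)
{
	/* CPU holds a write grab: refuse to evict/move the buffer. */
	return atomic_read(cpu_writers) ? -EBUSY : 0;
}
```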
- */ - dev->coherent_dma_mask = DMA_BIT_MASK(32); - ret = of_dma_configure(dev, NULL, true); + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret < 0) { - DRM_ERROR("Cannot setup DMA ops, ret %d", ret); + DRM_ERROR("Cannot setup DMA mask, ret %d", ret); return ret; } diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 21ad1c359b61..ff506bc99414 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe, } static enum drm_mode_status -display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +display_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode) { struct xen_drm_front_drm_pipeline *pipeline = - container_of(crtc, struct xen_drm_front_drm_pipeline, - pipe.crtc); + container_of(pipe, struct xen_drm_front_drm_pipeline, + pipe); if (mode->hdisplay != pipeline->width) return MODE_ERROR; diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig index cf987a317a55..6dab94adf25e 100644 --- a/drivers/gpu/host1x/Kconfig +++ b/drivers/gpu/host1x/Kconfig @@ -2,7 +2,7 @@ config TEGRA_HOST1X tristate "NVIDIA Tegra host1x driver" depends on ARCH_TEGRA || (ARM && COMPILE_TEST) - select IOMMU_IOVA if IOMMU_SUPPORT + select IOMMU_IOVA help Driver for the NVIDIA Tegra host1x hardware. diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 742aa9ff21b8..2c8559ff3481 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x, of_dma_configure(&device->dev, host1x->dev->of_node, true); device->dev.dma_parms = &device->dma_parms; - dma_set_max_seg_size(&device->dev, SZ_4M); + dma_set_max_seg_size(&device->dev, UINT_MAX); err = host1x_device_parse_dt(device, driver); if (err < 0) { diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c index 48c84c48299c..e8d3fda91d8a 100644 --- a/drivers/gpu/host1x/cdma.c +++ b/drivers/gpu/host1x/cdma.c @@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma, * * Must be called with the cdma lock held. */ -int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x, - struct host1x_cdma *cdma, - unsigned int needed) +static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x, + struct host1x_cdma *cdma, + unsigned int needed) { while (true) { struct push_buffer *pb = &cdma->push_buffer; diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index 1436295aa450..4cd212bb570d 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host) /** * host1x_channel_request() - Allocate a channel - * @device: Host1x unit this channel will be used to send commands to + * @client: Host1x client this channel will be used to send commands to * - * Allocates a new host1x channel for @device. May return NULL if CDMA + * Allocates a new host1x channel for @client. May return NULL if CDMA * initialization fails. 
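
The xen-front hunk above tracks a drm_simple_display_pipe interface change: `mode_valid` now receives the pipe itself, so drivers recover their wrapper with a single `container_of()` instead of reaching through the embedded CRTC. Sketch, with a hypothetical wrapper struct:

```c
#include <linux/kernel.h>
#include <drm/drm_modes.h>
#include <drm/drm_simple_kms_helper.h>

struct my_pipeline {                      /* hypothetical wrapper */
	struct drm_simple_display_pipe pipe;
	int width, height;
};

static enum drm_mode_status
my_mode_valid(struct drm_simple_display_pipe *pipe,
	      const struct drm_display_mode *mode)
{
	struct my_pipeline *p = container_of(pipe, struct my_pipeline, pipe);

	/* Only the fixed frontend resolution is acceptable. */
	if (mode->hdisplay != p->width || mode->vdisplay != p->height)
		return MODE_ERROR;

	return MODE_OK;
}
```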
*/ -struct host1x_channel *host1x_channel_request(struct device *dev) +struct host1x_channel *host1x_channel_request(struct host1x_client *client) { - struct host1x *host = dev_get_drvdata(dev->parent); + struct host1x *host = dev_get_drvdata(client->dev->parent); struct host1x_channel_list *chlist = &host->channel_list; struct host1x_channel *channel; int err; @@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev) kref_init(&channel->refcount); mutex_init(&channel->submitlock); - channel->dev = dev; + channel->client = client; + channel->dev = client->dev; err = host1x_hw_channel_init(host, channel, channel->id); if (err < 0) @@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev) fail: clear_bit(channel->id, chlist->allocated_channels); - dev_err(dev, "failed to initialize channel\n"); + dev_err(client->dev, "failed to initialize channel\n"); return NULL; } diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h index 4fd694834f74..39044ff6c3aa 100644 --- a/drivers/gpu/host1x/channel.h +++ b/drivers/gpu/host1x/channel.h @@ -26,6 +26,7 @@ struct host1x_channel { unsigned int id; struct mutex submitlock; void __iomem *regs; + struct host1x_client *client; struct device *dev; struct host1x_cdma cdma; }; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 5a3f797240d4..a738ea55e407 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -18,10 +18,6 @@ #include <trace/events/host1x.h> #undef CREATE_TRACE_POINTS -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) -#include <asm/dma-iommu.h> -#endif - #include "bus.h" #include "channel.h" #include "debug.h" @@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = { .init = host1x01_init, .sync_offset = 0x3000, .dma_mask = DMA_BIT_MASK(32), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_info host1x02_info = { @@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = { .init = host1x02_init, .sync_offset = 0x3000, .dma_mask = DMA_BIT_MASK(32), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_info host1x04_info = { @@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = { .init = host1x04_init, .sync_offset = 0x2100, .dma_mask = DMA_BIT_MASK(34), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_info host1x05_info = { @@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = { .init = host1x05_init, .sync_offset = 0x2100, .dma_mask = DMA_BIT_MASK(34), + .has_wide_gather = false, + .has_hypervisor = false, + .num_sid_entries = 0, + .sid_table = NULL, }; static const struct host1x_sid_entry tegra186_sid_table[] = { @@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = { .init = host1x06_init, .sync_offset = 0x0, .dma_mask = DMA_BIT_MASK(40), + .has_wide_gather = true, .has_hypervisor = true, .num_sid_entries = ARRAY_SIZE(tegra186_sid_table), .sid_table = tegra186_sid_table, @@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = { .init = host1x07_init, .sync_offset = 0x0, .dma_mask = DMA_BIT_MASK(40), + .has_wide_gather = true, .has_hypervisor = true, .num_sid_entries = ARRAY_SIZE(tegra194_sid_table), .sid_table = tegra194_sid_table, @@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host) } } +static struct 
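
With `host1x_channel_request()` taking a `struct host1x_client` instead of a bare device, the core can record which client owns each channel; `pin_job()` later uses that to pick the right DMA device for mappings. A sketch of the new call-site shape:

```c
#include <linux/host1x.h>

/* Sketch: a host1x client asking for a channel under the new API. */
static int my_client_init(struct host1x_client *client)
{
	struct host1x_channel *channel;

	channel = host1x_channel_request(client);
	if (!channel)
		return -EBUSY;  /* no free channel, or CDMA init failed */

	/* channel->client and channel->dev are now both populated */
	return 0;
}
```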
iommu_domain *host1x_iommu_attach(struct host1x *host) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); + int err; + + /* + * If the host1x firewall is enabled, there's no need to enable IOMMU + * support. Similarly, if host1x is already attached to an IOMMU (via + * the DMA API), don't try to attach again. + */ + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain) + return domain; + + host->group = iommu_group_get(host->dev); + if (host->group) { + struct iommu_domain_geometry *geometry; + dma_addr_t start, end; + unsigned long order; + + err = iova_cache_get(); + if (err < 0) + goto put_group; + + host->domain = iommu_domain_alloc(&platform_bus_type); + if (!host->domain) { + err = -ENOMEM; + goto put_cache; + } + + err = iommu_attach_group(host->domain, host->group); + if (err) { + if (err == -ENODEV) + err = 0; + + goto free_domain; + } + + geometry = &host->domain->geometry; + start = geometry->aperture_start & host->info->dma_mask; + end = geometry->aperture_end & host->info->dma_mask; + + order = __ffs(host->domain->pgsize_bitmap); + init_iova_domain(&host->iova, 1UL << order, start >> order); + host->iova_end = end; + + domain = host->domain; + } + + return domain; + +free_domain: + iommu_domain_free(host->domain); + host->domain = NULL; +put_cache: + iova_cache_put(); +put_group: + iommu_group_put(host->group); + host->group = NULL; + + return ERR_PTR(err); +} + +static int host1x_iommu_init(struct host1x *host) +{ + u64 mask = host->info->dma_mask; + struct iommu_domain *domain; + int err; + + domain = host1x_iommu_attach(host); + if (IS_ERR(domain)) { + err = PTR_ERR(domain); + dev_err(host->dev, "failed to attach to IOMMU: %d\n", err); + return err; + } + + /* + * If we're not behind an IOMMU make sure we don't get push buffers + * that are allocated outside of the range addressable by the GATHER + * opcode. + * + * Newer generations of Tegra (Tegra186 and later) support a wide + * variant of the GATHER opcode that allows addressing more bits. 
+ */ + if (!domain && !host->info->has_wide_gather) + mask = DMA_BIT_MASK(32); + + err = dma_coerce_mask_and_coherent(host->dev, mask); + if (err < 0) { + dev_err(host->dev, "failed to set DMA mask: %d\n", err); + return err; + } + + return 0; +} + +static void host1x_iommu_exit(struct host1x *host) +{ + if (host->domain) { + put_iova_domain(&host->iova); + iommu_detach_group(host->domain, host->group); + + iommu_domain_free(host->domain); + host->domain = NULL; + + iova_cache_put(); + + iommu_group_put(host->group); + host->group = NULL; + } +} + static int host1x_probe(struct platform_device *pdev) { struct host1x *host; @@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev) return PTR_ERR(host->hv_regs); } - dma_set_mask_and_coherent(host->dev, host->info->dma_mask); + host->dev->dma_parms = &host->dma_parms; + dma_set_max_seg_size(host->dev, UINT_MAX); if (host->info->init) { err = host->info->init(host); @@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get reset: %d\n", err); return err; } -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) - if (host->dev->archdata.mapping) { - struct dma_iommu_mapping *mapping = - to_dma_iommu_mapping(host->dev); - arm_iommu_detach_device(host->dev); - arm_iommu_release_mapping(mapping); - } -#endif - if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) - goto skip_iommu; - - host->group = iommu_group_get(&pdev->dev); - if (host->group) { - struct iommu_domain_geometry *geometry; - u64 mask = dma_get_mask(host->dev); - dma_addr_t start, end; - unsigned long order; - - err = iova_cache_get(); - if (err < 0) - goto put_group; - - host->domain = iommu_domain_alloc(&platform_bus_type); - if (!host->domain) { - err = -ENOMEM; - goto put_cache; - } - err = iommu_attach_group(host->domain, host->group); - if (err) { - if (err == -ENODEV) { - iommu_domain_free(host->domain); - host->domain = NULL; - iova_cache_put(); - iommu_group_put(host->group); - host->group = NULL; - goto skip_iommu; - } - - goto fail_free_domain; - } - - geometry = &host->domain->geometry; - start = geometry->aperture_start & mask; - end = geometry->aperture_end & mask; - - order = __ffs(host->domain->pgsize_bitmap); - init_iova_domain(&host->iova, 1UL << order, start >> order); - host->iova_end = end; + err = host1x_iommu_init(host); + if (err < 0) { + dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err); + return err; } -skip_iommu: err = host1x_channel_list_init(&host->channel_list, host->info->nb_channels); if (err) { dev_err(&pdev->dev, "failed to initialize channel list\n"); - goto fail_detach_device; + goto iommu_exit; } err = clk_prepare_enable(host->clk); if (err < 0) { dev_err(&pdev->dev, "failed to enable clock\n"); - goto fail_free_channels; + goto free_channels; } err = reset_control_deassert(host->rst); if (err < 0) { dev_err(&pdev->dev, "failed to deassert reset: %d\n", err); - goto fail_unprepare_disable; + goto unprepare_disable; } err = host1x_syncpt_init(host); if (err) { dev_err(&pdev->dev, "failed to initialize syncpts\n"); - goto fail_reset_assert; + goto reset_assert; } err = host1x_intr_init(host, syncpt_irq); if (err) { dev_err(&pdev->dev, "failed to initialize interrupts\n"); - goto fail_deinit_syncpt; + goto deinit_syncpt; } host1x_debug_init(host); @@ -351,33 +432,22 @@ skip_iommu: err = host1x_register(host); if (err < 0) - goto fail_deinit_intr; + goto deinit_intr; return 0; -fail_deinit_intr: +deinit_intr: host1x_intr_deinit(host); -fail_deinit_syncpt: +deinit_syncpt: 
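
`host1x_iommu_init()` centralizes the policy spelled out in the comment above: when no IOMMU translation is active and the hardware lacks the wide GATHER opcode, the DMA mask must be clamped so push buffers stay addressable. Condensed from the diff (`host1x_iommu_attach()` and `has_wide_gather` are the names introduced there):

```c
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

/* Condensed from host1x_iommu_init() above. */
static int iommu_init_sketch(struct host1x *host)
{
	struct iommu_domain *domain = host1x_iommu_attach(host);
	u64 mask = host->info->dma_mask;

	if (IS_ERR(domain))
		return PTR_ERR(domain);

	/* No translation and only the 32-bit GATHER opcode: clamp. */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	return dma_coerce_mask_and_coherent(host->dev, mask);
}
```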
host1x_syncpt_deinit(host); -fail_reset_assert: +reset_assert: reset_control_assert(host->rst); -fail_unprepare_disable: +unprepare_disable: clk_disable_unprepare(host->clk); -fail_free_channels: +free_channels: host1x_channel_list_free(&host->channel_list); -fail_detach_device: - if (host->group && host->domain) { - put_iova_domain(&host->iova); - iommu_detach_group(host->domain, host->group); - } -fail_free_domain: - if (host->domain) - iommu_domain_free(host->domain); -put_cache: - if (host->group) - iova_cache_put(); -put_group: - iommu_group_put(host->group); +iommu_exit: + host1x_iommu_exit(host); return err; } @@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev) struct host1x *host = platform_get_drvdata(pdev); host1x_unregister(host); + host1x_debug_deinit(host); host1x_intr_deinit(host); host1x_syncpt_deinit(host); reset_control_assert(host->rst); clk_disable_unprepare(host->clk); - - if (host->domain) { - put_iova_domain(&host->iova); - iommu_detach_group(host->domain, host->group); - iommu_domain_free(host->domain); - iova_cache_put(); - iommu_group_put(host->group); - } + host1x_iommu_exit(host); return 0; } diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index ff56f5e23a02..f781a9b0f39d 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -97,6 +97,7 @@ struct host1x_info { int (*init)(struct host1x *host1x); /* initialize per SoC ops */ unsigned int sync_offset; /* offset of syncpoint registers */ u64 dma_mask; /* mask of addressable memory */ + bool has_wide_gather; /* supports GATHER_W opcode */ bool has_hypervisor; /* has hypervisor registers */ unsigned int num_sid_entries; const struct host1x_sid_entry *sid_table; @@ -140,6 +141,8 @@ struct host1x { struct list_head devices; struct list_head list; + + struct device_dma_parameters dma_parms; }; void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v); diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c index 26f3c741d085..9245add23b5d 100644 --- a/drivers/gpu/host1x/intr.c +++ b/drivers/gpu/host1x/intr.c @@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter) /* Add nr_completed to trace */ trace_host1x_channel_submit_complete(dev_name(channel->dev), waiter->count, waiter->thresh); - } static void action_wakeup(struct host1x_waitlist *waiter) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index eaa5c3352c13..25ca54de8fc5 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather); static unsigned int pin_job(struct host1x *host, struct host1x_job *job) { + struct host1x_client *client = job->client; + struct device *dev = client->dev; unsigned int i; int err; @@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) for (i = 0; i < job->num_relocs; i++) { struct host1x_reloc *reloc = &job->relocs[i]; + dma_addr_t phys_addr, *phys; struct sg_table *sgt; - dma_addr_t phys_addr; reloc->target.bo = host1x_bo_get(reloc->target.bo); if (!reloc->target.bo) { @@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - phys_addr = host1x_bo_pin(reloc->target.bo, &sgt); + if (client->group) + phys = &phys_addr; + else + phys = NULL; + + sgt = host1x_bo_pin(dev, reloc->target.bo, phys); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + + if (sgt) { + unsigned long mask = HOST1X_RELOC_READ | + HOST1X_RELOC_WRITE; + enum dma_data_direction dir; 
+ + switch (reloc->flags & mask) { + case HOST1X_RELOC_READ: + dir = DMA_TO_DEVICE; + break; + + case HOST1X_RELOC_WRITE: + dir = DMA_FROM_DEVICE; + break; + + case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE: + dir = DMA_BIDIRECTIONAL; + break; + + default: + err = -EINVAL; + goto unpin; + } + + err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); + if (!err) { + err = -ENOMEM; + goto unpin; + } + + job->unpins[job->num_unpins].dev = dev; + job->unpins[job->num_unpins].dir = dir; + phys_addr = sg_dma_address(sgt->sgl); + } job->addr_phys[job->num_unpins] = phys_addr; job->unpins[job->num_unpins].bo = reloc->target.bo; @@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - phys_addr = host1x_bo_pin(g->bo, &sgt); + sgt = host1x_bo_pin(host->dev, g->bo, NULL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) { for_each_sg(sgt->sgl, sg, sgt->nents, j) @@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - job->addr_phys[job->num_unpins] = - iova_dma_addr(&host->iova, alloc); job->unpins[job->num_unpins].size = gather_size; + phys_addr = iova_dma_addr(&host->iova, alloc); } else { - job->addr_phys[job->num_unpins] = phys_addr; + err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + if (!err) { + err = -ENOMEM; + goto unpin; + } + + job->unpins[job->num_unpins].dev = host->dev; + phys_addr = sg_dma_address(sgt->sgl); } - job->gather_addr_phys[i] = job->addr_phys[job->num_unpins]; + job->addr_phys[job->num_unpins] = phys_addr; + job->gather_addr_phys[i] = phys_addr; + job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].bo = g->bo; job->unpins[job->num_unpins].sgt = sgt; job->num_unpins++; @@ -436,7 +494,8 @@ out: return err; } -static inline int copy_gathers(struct host1x_job *job, struct device *dev) +static inline int copy_gathers(struct device *host, struct host1x_job *job, + struct device *dev) { struct host1x_firewall fw; size_t size = 0; @@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev) * Try a non-blocking allocation from a higher priority pools first, * as awaiting for the allocation here is a major performance hit. 
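
`pin_job()` now maps each relocation with an explicit DMA direction derived from the reloc flags rather than relying on the BO's own mapping. The flag-to-direction translation from the hunk above, isolated as a small helper:

```c
#include <linux/dma-direction.h>
#include <linux/host1x.h>

/* HOST1X_RELOC_READ/WRITE are the reloc flags used in the diff. */
static int reloc_flags_to_dir(unsigned long flags,
			      enum dma_data_direction *dir)
{
	switch (flags & (HOST1X_RELOC_READ | HOST1X_RELOC_WRITE)) {
	case HOST1X_RELOC_READ:
		*dir = DMA_TO_DEVICE;      /* engine reads the buffer */
		break;
	case HOST1X_RELOC_WRITE:
		*dir = DMA_FROM_DEVICE;    /* engine writes the buffer */
		break;
	case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
		*dir = DMA_BIDIRECTIONAL;
		break;
	default:
		return -EINVAL;            /* neither flag set */
	}
	return 0;
}
```

Recording the direction in `host1x_job_unpin_data` (see the job.h hunk below) is what lets `host1x_job_unpin()` call `dma_unmap_sg()` with matching arguments.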
*/ - job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_NOWAIT); /* the higher priority allocation failed, try the generic-blocking */ if (!job->gather_copy_mapped) - job->gather_copy_mapped = dma_alloc_wc(dev, size, + job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy, GFP_KERNEL); if (!job->gather_copy_mapped) @@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev) goto out; if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) { - err = copy_gathers(job, dev); + err = copy_gathers(host->dev, job, dev); if (err) goto out; } @@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job) for (i = 0; i < job->num_unpins; i++) { struct host1x_job_unpin_data *unpin = &job->unpins[i]; + struct device *dev = unpin->dev ?: host->dev; + struct sg_table *sgt = unpin->sgt; if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && unpin->size && host->domain) { @@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job) iova_pfn(&host->iova, job->addr_phys[i])); } - host1x_bo_unpin(unpin->bo, unpin->sgt); + if (unpin->dev && sgt) + dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents, + unpin->dir); + + host1x_bo_unpin(dev, unpin->bo, sgt); host1x_bo_put(unpin->bo); } job->num_unpins = 0; if (job->gather_copy_size) - dma_free_wc(job->channel->dev, job->gather_copy_size, + dma_free_wc(host->dev, job->gather_copy_size, job->gather_copy_mapped, job->gather_copy); } EXPORT_SYMBOL(host1x_job_unpin); diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h index 62b8805e6b35..94bc2e4ae241 100644 --- a/drivers/gpu/host1x/job.h +++ b/drivers/gpu/host1x/job.h @@ -8,6 +8,8 @@ #ifndef __HOST1X_JOB_H #define __HOST1X_JOB_H +#include <linux/dma-direction.h> + struct host1x_job_gather { unsigned int words; dma_addr_t base; @@ -19,7 +21,9 @@ struct host1x_job_gather { struct host1x_job_unpin_data { struct host1x_bo *bo; struct sg_table *sgt; + struct device *dev; size_t size; + enum dma_data_direction dir; }; /* diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index 6654c1550e2e..fbe4e16ab029 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -63,13 +63,20 @@ static int axff_init(struct hid_device *hid) { struct axff_device *axff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int field_count = 0; int i, j; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3eaee2c37931..63fdbf09b044 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1139,6 +1139,7 @@ int hid_open_report(struct hid_device *device) __u8 *start; __u8 *buf; __u8 *end; + __u8 *next; int ret; static int (*dispatch_type[])(struct hid_parser *parser, struct hid_item *item) = { @@ -1192,7 +1193,8 @@ int hid_open_report(struct hid_device *device) device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; ret = -EINVAL; - while ((start = fetch_item(start, end, &item)) != NULL) { + while ((next = fetch_item(start, end, &item)) != NULL) { + 
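
The HID force-feedback fixes that follow all close the same hole: the `*ff_init()` functions dereferenced `hid->inputs.next` before checking that any input had been registered. The shared guard pattern, as one helper:

```c
#include <linux/hid.h>
#include <linux/list.h>

/* The guard applied across the *ff_init() hunks below: bail out
 * before list_first_entry() can touch an empty list. */
static int ff_get_input(struct hid_device *hid, struct input_dev **dev)
{
	struct hid_input *hidinput;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}

	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	*dev = hidinput->input;
	return 0;
}
```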
start = next; if (item.format != HID_ITEM_FORMAT_SHORT) { hid_err(device, "unexpected long global item\n"); @@ -1230,7 +1232,8 @@ int hid_open_report(struct hid_device *device) } } - hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); + hid_err(device, "item fetching failed at offset %u/%u\n", + size - (unsigned int)(end - start), size); err: kfree(parser->collection_stack); alloc_err: diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c index 17e17f9a597b..947f19f8685f 100644 --- a/drivers/hid/hid-dr.c +++ b/drivers/hid/hid-dr.c @@ -75,13 +75,19 @@ static int drff_init(struct hid_device *hid) { struct drff_device *drff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c index 7cd5651872d3..c34f2e5a049f 100644 --- a/drivers/hid/hid-emsff.c +++ b/drivers/hid/hid-emsff.c @@ -47,13 +47,19 @@ static int emsff_init(struct hid_device *hid) { struct emsff_device *emsff; struct hid_report *report; - struct hid_input *hidinput = list_first_entry(&hid->inputs, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c index 0f95c96b70f8..ecbd3995a4eb 100644 --- a/drivers/hid/hid-gaff.c +++ b/drivers/hid/hid-gaff.c @@ -64,14 +64,20 @@ static int gaff_init(struct hid_device *hid) { struct gaff_device *gaff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr = report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 84f8c127ebdc..d86a9189e88f 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -470,6 +470,10 @@ static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) }, + { 
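
One more note on the hid-core hunk above: the old loop assigned `fetch_item()`'s return value straight to `start`, so on failure `start` was NULL and the offset printed from `end - start` was garbage. Advancing through a temporary keeps the cursor valid for the diagnostic. Condensed:

```c
/* Condensed from the hid_open_report() hunk above. */
__u8 *next;

while ((next = fetch_item(start, end, &item)) != NULL) {
	start = next;                  /* advance only on success */
	/* ... dispatch on item.format and item.type ... */
}

/* start still points at the item that failed to fetch */
hid_err(device, "item fetching failed at offset %u/%u\n",
	size - (unsigned int)(end - start), size);
```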
HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) }, diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c index 10a720558830..8619b80c834c 100644 --- a/drivers/hid/hid-holtekff.c +++ b/drivers/hid/hid-holtekff.c @@ -124,13 +124,19 @@ static int holtekff_init(struct hid_device *hid) { struct holtekff_device *holtekff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); + struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev = hidinput->input; + struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (list_empty(report_list)) { hid_err(hid, "no output report found\n"); return -ENODEV; diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index cc5b09b87ab0..79a28fc91521 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -314,60 +314,24 @@ static void mousevsc_on_receive(struct hv_device *device, static void mousevsc_on_channel_callback(void *context) { - const int packet_size = 0x100; - int ret; struct hv_device *device = context; - u32 bytes_recvd; - u64 req_id; struct vmpacket_descriptor *desc; - unsigned char *buffer; - int bufferlen = packet_size; - - buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - do { - ret = vmbus_recvpacket_raw(device->channel, buffer, - bufferlen, &bytes_recvd, &req_id); - - switch (ret) { - case 0: - if (bytes_recvd <= 0) { - kfree(buffer); - return; - } - desc = (struct vmpacket_descriptor *)buffer; - - switch (desc->type) { - case VM_PKT_COMP: - break; - - case VM_PKT_DATA_INBAND: - mousevsc_on_receive(device, desc); - break; - - default: - pr_err("unhandled packet type %d, tid %llx len %d\n", - desc->type, req_id, bytes_recvd); - break; - } + foreach_vmbus_pkt(desc, device->channel) { + switch (desc->type) { + case VM_PKT_COMP: break; - case -ENOBUFS: - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - - if (!buffer) - return; + case VM_PKT_DATA_INBAND: + mousevsc_on_receive(device, desc); + break; + default: + pr_err("Unhandled packet type %d, tid %llx len %d\n", + desc->type, desc->trans_id, desc->len8 * 8); break; } - } while (1); - + } } static int mousevsc_connect_to_vsp(struct hv_device *device) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 76969a22b0f2..447e8db21174 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -476,6 +476,8 @@ #define USB_DEVICE_ID_GOOGLE_STAFF 0x502b #define USB_DEVICE_ID_GOOGLE_WAND 0x502d #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030 +#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c +#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c index dd1a6c3a7de6..73d07e35f12a 100644 --- a/drivers/hid/hid-lg2ff.c +++ b/drivers/hid/hid-lg2ff.c @@ -50,11 +50,17 @@ int lg2ff_init(struct hid_device *hid) { struct lg2ff_device *lg2ff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; 
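
The hid-hyperv rewrite above replaces the manual `vmbus_recvpacket_raw()` loop, with its grow-the-buffer `-ENOBUFS` retry, by the `foreach_vmbus_pkt()` iterator, which walks descriptors in place in the ring buffer and needs no allocation at all. Sketch:

```c
#include <linux/hyperv.h>

static void handle_inband(struct hv_device *dev,
			  struct vmpacket_descriptor *desc); /* hypothetical */

/* Sketch of the iterator-based receive path from the hunk above. */
static void channel_callback_sketch(struct hv_device *device)
{
	struct vmpacket_descriptor *desc;

	foreach_vmbus_pkt(desc, device->channel) {
		switch (desc->type) {
		case VM_PKT_COMP:
			break;                   /* completion: nothing to do */
		case VM_PKT_DATA_INBAND:
			handle_inband(device, desc);
			break;
		default:
			pr_err("Unhandled packet type %d, tid %llx len %d\n",
			       desc->type, desc->trans_id, desc->len8 * 8);
			break;
		}
	}
}
```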
+ struct input_dev *dev; int error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7); if (!report) diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c index 9ecb6fd06203..b7e1949f3cf7 100644 --- a/drivers/hid/hid-lg3ff.c +++ b/drivers/hid/hid-lg3ff.c @@ -117,12 +117,19 @@ static const signed short ff3_joystick_ac[] = { int lg3ff_init(struct hid_device *hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const signed short *ff_bits = ff3_joystick_ac; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 03f0220062ca..5e6a0cef2a06 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -1253,8 +1253,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc int lg4ff_init(struct hid_device *hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); @@ -1266,6 +1266,13 @@ int lg4ff_init(struct hid_device *hid) int mmode_ret, mmode_idx = -1; u16 real_product_id; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -1; diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c index c79a6ec43745..aed4ddc397a9 100644 --- a/drivers/hid/hid-lgff.c +++ b/drivers/hid/hid-lgff.c @@ -115,12 +115,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude) int lgff_init(struct hid_device* hid) { - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const signed short *ff_bits = ff_joystick; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -ENODEV; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 0179f7ed77e5..8e91e2f06cb4 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -1669,6 +1669,7 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev, #define HIDPP_FF_EFFECTID_NONE -1 #define HIDPP_FF_EFFECTID_AUTOCENTER -2 +#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18 
#define HIDPP_FF_MAX_PARAMS 20 #define HIDPP_FF_RESERVED_SLOTS 1 @@ -2009,7 +2010,7 @@ static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id) static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct hidpp_ff_private_data *data = dev->ff->private; - u8 params[18]; + u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH]; dbg_hid("Setting autocenter to %d.\n", magnitude); @@ -2077,23 +2078,34 @@ static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp static void hidpp_ff_destroy(struct ff_device *ff) { struct hidpp_ff_private_data *data = ff->private; + struct hid_device *hid = data->hidpp->hid_dev; + hid_info(hid, "Unloading HID++ force feedback.\n"); + + device_remove_file(&hid->dev, &dev_attr_range); + destroy_workqueue(data->wq); kfree(data->effect_ids); } -static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) +static int hidpp_ff_init(struct hidpp_device *hidpp, + struct hidpp_ff_private_data *data) { struct hid_device *hid = hidpp->hid_dev; - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice); struct ff_device *ff; - struct hidpp_report response; - struct hidpp_ff_private_data *data; - int error, j, num_slots; + int error, j, num_slots = data->num_effects; u8 version; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + if (!dev) { hid_err(hid, "Struct input_dev not set!\n"); return -EINVAL; @@ -2109,27 +2121,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++) set_bit(hidpp_ff_effects_v2[j], dev->ffbit); - /* Read number of slots available in device */ - error = hidpp_send_fap_command_sync(hidpp, feature_index, - HIDPP_FF_GET_INFO, NULL, 0, &response); - if (error) { - if (error < 0) - return error; - hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", - __func__, error); - return -EPROTO; - } - - num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS; - error = input_ff_create(dev, num_slots); if (error) { hid_err(dev, "Failed to create FF device!\n"); return error; } - - data = kzalloc(sizeof(*data), GFP_KERNEL); + /* + * Create a copy of passed data, so we can transfer memory + * ownership to FF core + */ + data = kmemdup(data, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL); @@ -2145,10 +2147,7 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) } data->hidpp = hidpp; - data->feature_index = feature_index; data->version = version; - data->slot_autocenter = 0; - data->num_effects = num_slots; for (j = 0; j < num_slots; j++) data->effect_ids[j] = -1; @@ -2162,68 +2161,20 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) ff->set_autocenter = hidpp_ff_set_autocenter; ff->destroy = hidpp_ff_destroy; - - /* reset all forces */ - error = hidpp_send_fap_command_sync(hidpp, feature_index, - HIDPP_FF_RESET_ALL, NULL, 0, &response); - - /* Read current Range */ - error = hidpp_send_fap_command_sync(hidpp, feature_index, - HIDPP_FF_GET_APERTURE, NULL, 0, &response); - if (error) - hid_warn(hidpp->hid_dev, "Failed to read range from 
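
The `hidpp_ff_init()` rework above separates configuration, done at probe time into a stack-allocated `hidpp_ff_private_data`, from FF registration; `kmemdup()` then hands the FF core a heap copy it owns and frees in its `->destroy()` callback. The idiom in isolation, with a stand-in struct:

```c
#include <linux/slab.h>
#include <linux/string.h>

struct ff_cfg {                 /* stand-in for hidpp_ff_private_data */
	u8 feature_index;
	int num_effects;
	u16 range, gain;
};

/* Probe fills `tmp` on the stack; the FF layer adopts a heap copy so
 * its ->destroy() callback can kfree() it later. */
static struct ff_cfg *adopt_cfg(const struct ff_cfg *tmp)
{
	return kmemdup(tmp, sizeof(*tmp), GFP_KERNEL);  /* NULL on OOM */
}
```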
device!\n"); - data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]); - /* Create sysfs interface */ error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range); if (error) hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error); - /* Read the current gain values */ - error = hidpp_send_fap_command_sync(hidpp, feature_index, - HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response); - if (error) - hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n"); - data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]); - /* ignore boost value at response.fap.params[2] */ - /* init the hardware command queue */ atomic_set(&data->workqueue_size, 0); - /* initialize with zero autocenter to get wheel in usable state */ - hidpp_ff_set_autocenter(dev, 0); - hid_info(hid, "Force feedback support loaded (firmware release %d).\n", version); return 0; } -static int hidpp_ff_deinit(struct hid_device *hid) -{ - struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct input_dev *dev = hidinput->input; - struct hidpp_ff_private_data *data; - - if (!dev) { - hid_err(hid, "Struct input_dev not found!\n"); - return -EINVAL; - } - - hid_info(hid, "Unloading HID++ force feedback.\n"); - data = dev->ff->private; - if (!data) { - hid_err(hid, "Private data not found!\n"); - return -EINVAL; - } - - destroy_workqueue(data->wq); - device_remove_file(&hid->dev, &dev_attr_range); - - return 0; -} - - /* ************************************************************************** */ /* */ /* Device Support */ @@ -2725,24 +2676,93 @@ static int k400_connect(struct hid_device *hdev, bool connected) #define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123 -static int g920_get_config(struct hidpp_device *hidpp) +static int g920_ff_set_autocenter(struct hidpp_device *hidpp, + struct hidpp_ff_private_data *data) { + struct hidpp_report response; + u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = { + [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART, + }; + int ret; + + /* initialize with zero autocenter to get wheel in usable state */ + + dbg_hid("Setting autocenter to 0.\n"); + ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, + HIDPP_FF_DOWNLOAD_EFFECT, + params, ARRAY_SIZE(params), + &response); + if (ret) + hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n"); + else + data->slot_autocenter = response.fap.params[0]; + + return ret; +} + +static int g920_get_config(struct hidpp_device *hidpp, + struct hidpp_ff_private_data *data) +{ + struct hidpp_report response; u8 feature_type; - u8 feature_index; int ret; + memset(data, 0, sizeof(*data)); + /* Find feature and store for later use */ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK, - &feature_index, &feature_type); + &data->feature_index, &feature_type); if (ret) return ret; - ret = hidpp_ff_init(hidpp, feature_index); + /* Read number of slots available in device */ + ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, + HIDPP_FF_GET_INFO, + NULL, 0, + &response); + if (ret) { + if (ret < 0) + return ret; + hid_err(hidpp->hid_dev, + "%s: received protocol error 0x%02x\n", __func__, ret); + return -EPROTO; + } + + data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS; + + /* reset all forces */ + ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, + HIDPP_FF_RESET_ALL, + NULL, 0, + &response); if (ret) - hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n", - 
ret); + hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n"); - return 0; + ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, + HIDPP_FF_GET_APERTURE, + NULL, 0, + &response); + if (ret) { + hid_warn(hidpp->hid_dev, + "Failed to read range from device!\n"); + } + data->range = ret ? + 900 : get_unaligned_be16(&response.fap.params[0]); + + /* Read the current gain values */ + ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, + HIDPP_FF_GET_GLOBAL_GAINS, + NULL, 0, + &response); + if (ret) + hid_warn(hidpp->hid_dev, + "Failed to read gain values from device!\n"); + data->gain = ret ? + 0xffff : get_unaligned_be16(&response.fap.params[0]); + + /* ignore boost value at response.fap.params[2] */ + + return g920_ff_set_autocenter(hidpp, data); } /* -------------------------------------------------------------------------- */ @@ -3458,34 +3478,45 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id) return report->field[0]->report_count + 1; } -static bool hidpp_validate_report(struct hid_device *hdev, int id, - int expected_length, bool optional) +static bool hidpp_validate_device(struct hid_device *hdev) { - int report_length; + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + int id, report_length, supported_reports = 0; - if (id >= HID_MAX_IDS || id < 0) { - hid_err(hdev, "invalid HID report id %u\n", id); - return false; + id = REPORT_ID_HIDPP_SHORT; + report_length = hidpp_get_report_length(hdev, id); + if (report_length) { + if (report_length < HIDPP_REPORT_SHORT_LENGTH) + goto bad_device; + + supported_reports++; } + id = REPORT_ID_HIDPP_LONG; report_length = hidpp_get_report_length(hdev, id); - if (!report_length) - return optional; + if (report_length) { + if (report_length < HIDPP_REPORT_LONG_LENGTH) + goto bad_device; - if (report_length < expected_length) { - hid_warn(hdev, "not enough values in hidpp report %d\n", id); - return false; + supported_reports++; } - return true; -} + id = REPORT_ID_HIDPP_VERY_LONG; + report_length = hidpp_get_report_length(hdev, id); + if (report_length) { + if (report_length < HIDPP_REPORT_LONG_LENGTH || + report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) + goto bad_device; -static bool hidpp_validate_device(struct hid_device *hdev) -{ - return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT, - HIDPP_REPORT_SHORT_LENGTH, false) && - hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG, - HIDPP_REPORT_LONG_LENGTH, true); + supported_reports++; + hidpp->very_long_report_length = report_length; + } + + return supported_reports; + +bad_device: + hid_warn(hdev, "not enough values in hidpp report %d\n", id); + return false; } static bool hidpp_application_equals(struct hid_device *hdev, @@ -3505,6 +3536,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) int ret; bool connected; unsigned int connect_mask = HID_CONNECT_DEFAULT; + struct hidpp_ff_private_data data; /* report_fixup needs drvdata to be set before we call hid_parse */ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); @@ -3531,11 +3563,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) return hid_hw_start(hdev, HID_CONNECT_DEFAULT); } - hidpp->very_long_report_length = - hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG); - if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) - hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH; - if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE) hidpp->quirks |= HIDPP_QUIRK_UNIFYING; @@ -3614,7 
+3641,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret) goto hid_hw_init_fail; } else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) { - ret = g920_get_config(hidpp); + ret = g920_get_config(hidpp, &data); if (ret) goto hid_hw_init_fail; } @@ -3636,6 +3663,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) goto hid_hw_start_fail; } + if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { + ret = hidpp_ff_init(hidpp, &data); + if (ret) + hid_warn(hidpp->hid_dev, + "Unable to initialize force feedback support, errno %d\n", + ret); + } + return ret; hid_hw_init_fail: @@ -3658,9 +3693,6 @@ static void hidpp_remove(struct hid_device *hdev) sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group); - if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) - hidpp_ff_deinit(hdev); - hid_hw_stop(hdev); cancel_work_sync(&hidpp->work); mutex_destroy(&hidpp->send_mutex); diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 2cf83856f2e4..2d8b589201a4 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -328,11 +328,17 @@ static int ms_play_effect(struct input_dev *dev, void *data, static int ms_init_ff(struct hid_device *hdev) { - struct hid_input *hidinput = list_entry(hdev->inputs.next, - struct hid_input, list); - struct input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; struct ms_data *ms = hid_get_drvdata(hdev); + if (list_empty(&hdev->inputs)) { + hid_err(hdev, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hdev->inputs.next, struct hid_input, list); + input_dev = hidinput->input; + if (!(ms->quirks & MS_QUIRK_FF)) return 0; diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 5a3b3d974d84..2666af02d5c1 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -516,7 +516,7 @@ static void pcmidi_setup_extra_keys( MY PICTURES => KEY_WORDPROCESSOR MY MUSIC=> KEY_SPREADSHEET */ - unsigned int keys[] = { + static const unsigned int keys[] = { KEY_FN, KEY_MESSENGER, KEY_CALENDAR, KEY_ADDRESSBOOK, KEY_DOCUMENTS, @@ -532,7 +532,7 @@ static void pcmidi_setup_extra_keys( 0 }; - unsigned int *pkeys = &keys[0]; + const unsigned int *pkeys = &keys[0]; unsigned short i; if (pm->ifnum != 1) /* only set up ONCE for interace 1 */ diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 73c0f7a95e2d..4c6ed6ef31f1 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -2254,9 +2254,15 @@ static int sony_play_effect(struct input_dev *dev, void *data, static int sony_init_ff(struct sony_sc *sc) { - struct hid_input *hidinput = list_entry(sc->hdev->inputs.next, - struct hid_input, list); - struct input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; + + if (list_empty(&sc->hdev->inputs)) { + hid_err(sc->hdev, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list); + input_dev = hidinput->input; input_set_capability(input_dev, EV_FF, FF_RUMBLE); return input_ff_create_memless(input_dev, NULL, sony_play_effect); diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index bdfc5ff3b2c5..90acef304536 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -124,12 +124,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits) struct tmff_device *tmff; struct hid_report *report; struct list_head *report_list; - struct 
hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *input_dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *input_dev; int error; int i; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + input_dev = hidinput->input; + tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL); if (!tmff) return -ENOMEM; diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c index f90959e94028..3abaca045869 100644 --- a/drivers/hid/hid-zpff.c +++ b/drivers/hid/hid-zpff.c @@ -54,11 +54,17 @@ static int zpff_init(struct hid_device *hid) { struct zpff_device *zpff; struct hid_report *report; - struct hid_input *hidinput = list_entry(hid->inputs.next, - struct hid_input, list); - struct input_dev *dev = hidinput->input; + struct hid_input *hidinput; + struct input_dev *dev; int i, error; + if (list_empty(&hid->inputs)) { + hid_err(hid, "no inputs found\n"); + return -ENODEV; + } + hidinput = list_entry(hid->inputs.next, struct hid_input, list); + dev = hidinput->input; + for (i = 0; i < 4; i++) { report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); if (!report) diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 2a7c6e33bb1c..04c088131e04 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -26,7 +26,6 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/pm.h> -#include <linux/pm_runtime.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/err.h> @@ -48,8 +47,6 @@ /* quirks to control the device */ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) -#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2) -#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3) #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) /* flags */ @@ -172,14 +169,7 @@ static const struct i2c_hid_quirks { { USB_VENDOR_ID_WEIDA, HID_ANY_ID, I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, - I2C_HID_QUIRK_NO_IRQ_AFTER_RESET | - I2C_HID_QUIRK_NO_RUNTIME_PM }, - { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33, - I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, - { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, - I2C_HID_QUIRK_NO_RUNTIME_PM }, - { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0, - I2C_HID_QUIRK_NO_RUNTIME_PM }, + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, I2C_HID_QUIRK_BOGUS_IRQ }, { 0, 0 } @@ -397,7 +387,6 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) { struct i2c_hid *ihid = i2c_get_clientdata(client); int ret; - unsigned long now, delay; i2c_hid_dbg(ihid, "%s\n", __func__); @@ -415,22 +404,9 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) goto set_pwr_exit; } - if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && - power_state == I2C_HID_PWR_ON) { - now = jiffies; - if (time_after(ihid->sleep_delay, now)) { - delay = jiffies_to_usecs(ihid->sleep_delay - now); - usleep_range(delay, delay + 1); - } - } - ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state, 0, NULL, 0, NULL, 0); - if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP && - power_state == I2C_HID_PWR_SLEEP) - ihid->sleep_delay = jiffies + msecs_to_jiffies(20); - if (ret) dev_err(&client->dev, "failed to change power setting.\n"); @@ -471,8 +447,12 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) { dev_err(&client->dev, 
"failed to reset device.\n"); i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); + goto out_unlock; } + /* At least some SIS devices need this after reset */ + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); + out_unlock: mutex_unlock(&ihid->reset_lock); return ret; @@ -791,11 +771,6 @@ static int i2c_hid_open(struct hid_device *hid) { struct i2c_client *client = hid->driver_data; struct i2c_hid *ihid = i2c_get_clientdata(client); - int ret = 0; - - ret = pm_runtime_get_sync(&client->dev); - if (ret < 0) - return ret; set_bit(I2C_HID_STARTED, &ihid->flags); return 0; @@ -807,27 +782,6 @@ static void i2c_hid_close(struct hid_device *hid) struct i2c_hid *ihid = i2c_get_clientdata(client); clear_bit(I2C_HID_STARTED, &ihid->flags); - - /* Save some power */ - pm_runtime_put(&client->dev); -} - -static int i2c_hid_power(struct hid_device *hid, int lvl) -{ - struct i2c_client *client = hid->driver_data; - struct i2c_hid *ihid = i2c_get_clientdata(client); - - i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl); - - switch (lvl) { - case PM_HINT_FULLON: - pm_runtime_get_sync(&client->dev); - break; - case PM_HINT_NORMAL: - pm_runtime_put(&client->dev); - break; - } - return 0; } struct hid_ll_driver i2c_hid_ll_driver = { @@ -836,7 +790,6 @@ struct hid_ll_driver i2c_hid_ll_driver = { .stop = i2c_hid_stop, .open = i2c_hid_open, .close = i2c_hid_close, - .power = i2c_hid_power, .output_report = i2c_hid_output_report, .raw_request = i2c_hid_raw_request, }; @@ -1104,9 +1057,6 @@ static int i2c_hid_probe(struct i2c_client *client, i2c_hid_acpi_fix_up_power(&client->dev); - pm_runtime_get_noresume(&client->dev); - pm_runtime_set_active(&client->dev); - pm_runtime_enable(&client->dev); device_enable_async_suspend(&client->dev); /* Make sure there is something at this address */ @@ -1114,16 +1064,16 @@ static int i2c_hid_probe(struct i2c_client *client, if (ret < 0) { dev_dbg(&client->dev, "nothing at this address: %d\n", ret); ret = -ENXIO; - goto err_pm; + goto err_regulator; } ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) - goto err_pm; + goto err_regulator; ret = i2c_hid_init_irq(client); if (ret < 0) - goto err_pm; + goto err_regulator; hid = hid_allocate_device(); if (IS_ERR(hid)) { @@ -1154,9 +1104,6 @@ static int i2c_hid_probe(struct i2c_client *client, goto err_mem_free; } - if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM)) - pm_runtime_put(&client->dev); - return 0; err_mem_free: @@ -1165,10 +1112,6 @@ err_mem_free: err_irq: free_irq(client->irq, ihid); -err_pm: - pm_runtime_put_noidle(&client->dev); - pm_runtime_disable(&client->dev); - err_regulator: regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies), ihid->pdata.supplies); @@ -1181,12 +1124,6 @@ static int i2c_hid_remove(struct i2c_client *client) struct i2c_hid *ihid = i2c_get_clientdata(client); struct hid_device *hid; - if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM)) - pm_runtime_get_sync(&client->dev); - pm_runtime_disable(&client->dev); - pm_runtime_set_suspended(&client->dev); - pm_runtime_put_noidle(&client->dev); - hid = ihid->hid; hid_destroy_device(hid); @@ -1219,25 +1156,15 @@ static int i2c_hid_suspend(struct device *dev) int wake_status; if (hid->driver && hid->driver->suspend) { - /* - * Wake up the device so that IO issues in - * HID driver's suspend code can succeed. 
- */ - ret = pm_runtime_resume(dev); - if (ret < 0) - return ret; - ret = hid->driver->suspend(hid, PMSG_SUSPEND); if (ret < 0) return ret; } - if (!pm_runtime_suspended(dev)) { - /* Save some power */ - i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); + /* Save some power */ + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); - disable_irq(client->irq); - } + disable_irq(client->irq); if (device_may_wakeup(&client->dev)) { wake_status = enable_irq_wake(client->irq); @@ -1279,11 +1206,6 @@ static int i2c_hid_resume(struct device *dev) wake_status); } - /* We'll resume to full power */ - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - enable_irq(client->irq); /* Instead of resetting device, simply powers the device on. This @@ -1304,30 +1226,8 @@ static int i2c_hid_resume(struct device *dev) } #endif -#ifdef CONFIG_PM -static int i2c_hid_runtime_suspend(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - - i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); - disable_irq(client->irq); - return 0; -} - -static int i2c_hid_runtime_resume(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - - enable_irq(client->irq); - i2c_hid_set_power(client, I2C_HID_PWR_ON); - return 0; -} -#endif - static const struct dev_pm_ops i2c_hid_pm = { SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume) - SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume, - NULL) }; static const struct i2c_device_id i2c_hid_id_table[] = { diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index 75078c83be1a..d31ea82b84c1 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -323,6 +323,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { .driver_data = (void *)&sipodev_desc }, { + /* + * There are at least 2 Primebook C11B versions, the older + * version has a product-name of "Primebook C11B", and a + * bios version / release / firmware revision of: + * V2.1.2 / 05/03/2018 / 18.2 + * The new version has "PRIMEBOOK C11B" as product-name and a + * bios version / release / firmware revision of: + * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2 + * Only the older version needs this quirk, note the newer + * version will not match as it has a different product-name. + */ + .ident = "Trekstor Primebook C11B", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"), + }, + .driver_data = (void *)&sipodev_desc + }, + { .ident = "Direkt-Tek DTLAPY116-2", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"), diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c index 1b0a0cc605e7..513d7a4a1b8a 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c +++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c @@ -84,7 +84,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl) return 0; out: dev_err(&cl->device->dev, "error in allocating Tx pool\n"); - ishtp_cl_free_rx_ring(cl); + ishtp_cl_free_tx_ring(cl); return -ENOMEM; } diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 4a7f8d363220..203d27d198b8 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h @@ -202,6 +202,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac, } } +/* + * Convert a signed 32-bit integer to an unsigned n-bit integer. 
Undoes + * the normally-helpful work of 'hid_snto32' for fields that use signed + * ranges for questionable reasons. + */ +static inline __u32 wacom_s32tou(s32 value, __u8 n) +{ + switch (n) { + case 8: return ((__u8)value); + case 16: return ((__u16)value); + case 32: return ((__u32)value); + } + return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value; +} + extern const struct hid_device_id wacom_ids[]; void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 2b0a5b8ca6e6..ccb74529bc78 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2303,7 +2303,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field case HID_DG_TOOLSERIALNUMBER: if (value) { wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); - wacom_wac->serial[0] |= (__u32)value; + wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size); } return; case HID_DG_TWIST: @@ -2319,15 +2319,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field return; case WACOM_HID_WD_SERIALHI: if (value) { + __u32 raw_value = wacom_s32tou(value, field->report_size); + wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); - wacom_wac->serial[0] |= ((__u64)value) << 32; + wacom_wac->serial[0] |= ((__u64)raw_value) << 32; /* * Non-USI EMR devices may contain additional tool type * information here. See WACOM_HID_WD_TOOLTYPE case for * more details. */ if (value >> 20 == 1) { - wacom_wac->id[0] |= value & 0xFFFFF; + wacom_wac->id[0] |= raw_value & 0xFFFFF; } } return; @@ -2339,7 +2341,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field * bitwise OR so the complete value can be built * up over time :( */ - wacom_wac->id[0] |= value; + wacom_wac->id[0] |= wacom_s32tou(value, field->report_size); return; case WACOM_HID_WD_OFFSETLEFT: if (features->offset_left && value != features->offset_left) diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 391f0b225c9a..53a60c81e220 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -912,6 +912,7 @@ static void vmbus_shutdown(struct device *child_device) drv->shutdown(dev); } +#ifdef CONFIG_PM_SLEEP /* * vmbus_suspend - Suspend a vmbus device */ @@ -949,6 +950,7 @@ static int vmbus_resume(struct device *child_device) return drv->resume(dev); } +#endif /* CONFIG_PM_SLEEP */ /* * vmbus_device_release - Final callback release of the vmbus child device @@ -1070,6 +1072,7 @@ msg_handled: vmbus_signal_eom(msg, message_type); } +#ifdef CONFIG_PM_SLEEP /* * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for * hibernation, because hv_sock connections can not persist across hibernation. 
@@ -1105,6 +1108,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel) vmbus_connection.work_queue, &ctx->work); } +#endif /* CONFIG_PM_SLEEP */ /* * Direct callback for channels using other deferred processing @@ -2125,6 +2129,7 @@ acpi_walk_err: return ret_val; } +#ifdef CONFIG_PM_SLEEP static int vmbus_bus_suspend(struct device *dev) { struct vmbus_channel *channel, *sc; @@ -2247,6 +2252,7 @@ static int vmbus_bus_resume(struct device *dev) return 0; } +#endif /* CONFIG_PM_SLEEP */ static const struct acpi_device_id vmbus_acpi_device_ids[] = { {"VMBUS", 0}, diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index 0037e2bdacd6..8a51dcf055ea 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -170,7 +170,7 @@ static inline int ina3221_wait_for_data(struct ina3221_data *ina) /* Polling the CVRF bit to make sure read data is ready */ return regmap_field_read_poll_timeout(ina->fields[F_CVRF], - cvrf, cvrf, wait, 100000); + cvrf, cvrf, wait, wait * 2); } static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg, diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 95b447cfa24c..281c81edabc6 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -82,6 +82,10 @@ #define FANCTL1_FMR_REG 0x00 /* Bank 3; 1 reg per channel */ #define FANCTL1_OUT_REG 0x10 /* Bank 3; 1 reg per channel */ +#define VOLT_MONITOR_MODE 0x0 +#define THERMAL_DIODE_MODE 0x1 +#define THERMISTOR_MODE 0x3 + #define ENABLE_TSI BIT(1) static const unsigned short normal_i2c[] = { @@ -99,6 +103,8 @@ struct nct7904_data { u8 enable_dts; u8 has_dts; u8 temp_mode; /* 0: TR mode, 1: TD mode */ + u8 fan_alarm[2]; + u8 vsen_alarm[3]; }; /* Access functions */ @@ -214,7 +220,15 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, SMI_STS5_REG + (channel >> 3)); if (ret < 0) return ret; - *val = (ret >> (channel & 0x07)) & 1; + if (!data->fan_alarm[channel >> 3]) + data->fan_alarm[channel >> 3] = ret & 0xff; + else + /* If there is new alarm showing up */ + data->fan_alarm[channel >> 3] |= (ret & 0xff); + *val = (data->fan_alarm[channel >> 3] >> (channel & 0x07)) & 1; + /* Needs to clean the alarm if alarm existing */ + if (*val) + data->fan_alarm[channel >> 3] ^= 1 << (channel & 0x07); return 0; default: return -EOPNOTSUPP; @@ -298,7 +312,15 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel, SMI_STS1_REG + (index >> 3)); if (ret < 0) return ret; - *val = (ret >> (index & 0x07)) & 1; + if (!data->vsen_alarm[index >> 3]) + data->vsen_alarm[index >> 3] = ret & 0xff; + else + /* If there is new alarm showing up */ + data->vsen_alarm[index >> 3] |= (ret & 0xff); + *val = (data->vsen_alarm[index >> 3] >> (index & 0x07)) & 1; + /* Needs to clean the alarm if alarm existing */ + if (*val) + data->vsen_alarm[index >> 3] ^= 1 << (index & 0x07); return 0; default: return -EOPNOTSUPP; @@ -915,12 +937,20 @@ static int nct7904_probe(struct i2c_client *client, data->temp_mode = 0; for (i = 0; i < 4; i++) { - val = (ret & (0x03 << i)) >> (i * 2); + val = (ret >> (i * 2)) & 0x03; bit = (1 << i); - if (val == 0) + if (val == VOLT_MONITOR_MODE) { data->tcpu_mask &= ~bit; - else if (val == 0x1 || val == 0x2) + } else if (val == THERMAL_DIODE_MODE && i < 2) { data->temp_mode |= bit; + data->vsen_mask &= ~(0x06 << (i * 2)); + } else if (val == THERMISTOR_MODE) { + data->vsen_mask &= ~(0x02 << (i * 2)); + } else { + /* Reserved */ + data->tcpu_mask &= ~bit; + data->vsen_mask &= ~(0x06 << (i * 2)); + } } /* PECI */ diff 
--git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index fa9d34af87ac..f72803a02391 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -626,6 +626,9 @@ static void intel_th_gth_switch(struct intel_th_device *thdev, if (!count) dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n"); + /* De-assert the trigger */ + iowrite32(0, gth->base + REG_CTS_CTL); + intel_th_gth_stop(gth, output, false); intel_th_gth_start(gth, output); } diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index fc9f15f36ad4..6d240dfae9d9 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -164,7 +164,7 @@ struct msc { }; static LIST_HEAD(msu_buffer_list); -static struct mutex msu_buffer_mutex; +static DEFINE_MUTEX(msu_buffer_mutex); /** * struct msu_buffer_entry - internal MSU buffer bookkeeping @@ -327,7 +327,7 @@ static size_t msc_win_total_sz(struct msc_window *win) struct msc_block_desc *bdesc = sg_virt(sg); if (msc_block_wrapped(bdesc)) - return win->nr_blocks << PAGE_SHIFT; + return (size_t)win->nr_blocks << PAGE_SHIFT; size += msc_total_sz(bdesc); if (msc_block_last_written(bdesc)) @@ -1848,9 +1848,14 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf, len = cp - buf; mode = kstrndup(buf, len, GFP_KERNEL); + if (!mode) + return -ENOMEM; + i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode); - if (i >= 0) + if (i >= 0) { + kfree(mode); goto found; + } /* Buffer sinks only work with a usable IRQ */ if (!msc->do_irq) { diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 91dfeba62485..03ca5b1bef9f 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -200,6 +200,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Comet Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Ice Lake NNPI */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -209,6 +214,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Jasper Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index fa66951b05d0..7b098ff5f5dd 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -108,6 +108,12 @@ #define ASPEED_I2CD_S_TX_CMD BIT(2) #define ASPEED_I2CD_M_TX_CMD BIT(1) #define ASPEED_I2CD_M_START_CMD BIT(0) +#define ASPEED_I2CD_MASTER_CMDS_MASK \ + (ASPEED_I2CD_M_STOP_CMD | \ + ASPEED_I2CD_M_S_RX_CMD_LAST | \ + ASPEED_I2CD_M_RX_CMD | \ + ASPEED_I2CD_M_TX_CMD | \ + ASPEED_I2CD_M_START_CMD) /* 0x18 : I2CD Slave Device Address Register */ #define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0) @@ -336,18 +342,19 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) struct i2c_msg *msg = &bus->msgs[bus->msgs_index]; u8 slave_addr = i2c_8bit_addr_from_msg(msg); - bus->master_state = ASPEED_I2C_MASTER_START; - #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * If it's requested in the middle of a slave session, set the master * state to 'pending' then H/W will continue handling this master * command when the bus comes back to the idle state. 
*/ - if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) + if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) { bus->master_state = ASPEED_I2C_MASTER_PENDING; + return; + } #endif /* CONFIG_I2C_SLAVE */ + bus->master_state = ASPEED_I2C_MASTER_START; bus->buf_index = 0; if (msg->flags & I2C_M_RD) { @@ -422,20 +429,6 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) } } -#if IS_ENABLED(CONFIG_I2C_SLAVE) - /* - * A pending master command will be started by H/W when the bus comes - * back to idle state after completing a slave operation so change the - * master state from 'pending' to 'start' at here if slave is inactive. - */ - if (bus->master_state == ASPEED_I2C_MASTER_PENDING) { - if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) - goto out_no_complete; - - bus->master_state = ASPEED_I2C_MASTER_START; - } -#endif /* CONFIG_I2C_SLAVE */ - /* Master is not currently active, irq was for someone else. */ if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE || bus->master_state == ASPEED_I2C_MASTER_PENDING) @@ -462,11 +455,15 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * If a peer master starts a xfer immediately after it queues a - * master command, change its state to 'pending' then H/W will - * continue the queued master xfer just after completing the - * slave mode session. + * master command, clear the queued master command and change + * its state to 'pending'. To simplify handling of pending + * cases, it uses S/W solution instead of H/W command queue + * handling. */ if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) { + writel(readl(bus->base + ASPEED_I2C_CMD_REG) & + ~ASPEED_I2CD_MASTER_CMDS_MASK, + bus->base + ASPEED_I2C_CMD_REG); bus->master_state = ASPEED_I2C_MASTER_PENDING; dev_dbg(bus->dev, "master goes pending due to a slave start\n"); @@ -629,6 +626,14 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) irq_handled |= aspeed_i2c_master_irq(bus, irq_remaining); } + + /* + * Start a pending master command at here if a slave operation is + * completed. + */ + if (bus->master_state == ASPEED_I2C_MASTER_PENDING && + bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) + aspeed_i2c_do_start(bus); #else irq_handled = aspeed_i2c_master_irq(bus, irq_remaining); #endif /* CONFIG_I2C_SLAVE */ @@ -691,6 +696,15 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap, ASPEED_I2CD_BUS_BUSY_STS)) aspeed_i2c_recover_bus(bus); + /* + * If timed out and the state is still pending, drop the pending + * master command. 
+ */ + spin_lock_irqsave(&bus->lock, flags); + if (bus->master_state == ASPEED_I2C_MASTER_PENDING) + bus->master_state = ASPEED_I2C_MASTER_INACTIVE; + spin_unlock_irqrestore(&bus->lock, flags); + return -ETIMEDOUT; } diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 29eae1bf4f86..2152ec5f535c 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -875,7 +875,7 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id) static u32 mtk_i2c_functionality(struct i2c_adapter *adap) { - if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN) + if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN)) return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); else diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index d36cf08461f7..b24e7b937f21 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -305,7 +305,7 @@ struct stm32f7_i2c_dev { struct regmap *regmap; }; -/** +/* * All these values are coming from I2C Specification, Version 6.0, 4th of * April 2014. * @@ -1192,6 +1192,8 @@ static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev) STM32F7_I2C_CR1_TXIE; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask); + /* Write 1st data byte */ + writel_relaxed(value, base + STM32F7_I2C_TXDR); } else { /* Notify i2c slave that new write transfer is starting */ i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); @@ -1501,7 +1503,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) void __iomem *base = i2c_dev->base; struct device *dev = i2c_dev->dev; struct stm32_i2c_dma *dma = i2c_dev->dma; - u32 mask, status; + u32 status; status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR); @@ -1526,12 +1528,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) f7_msg->result = -EINVAL; } - /* Disable interrupts */ - if (stm32f7_i2c_is_slave_registered(i2c_dev)) - mask = STM32F7_I2C_XFER_IRQ_MASK; - else - mask = STM32F7_I2C_ALL_IRQ_MASK; - stm32f7_i2c_disable_irq(i2c_dev, mask); + if (!i2c_dev->slave_running) { + u32 mask; + /* Disable interrupts */ + if (stm32f7_i2c_is_slave_registered(i2c_dev)) + mask = STM32F7_I2C_XFER_IRQ_MASK; + else + mask = STM32F7_I2C_ALL_IRQ_MASK; + stm32f7_i2c_disable_irq(i2c_dev, mask); + } /* Disable dma */ if (i2c_dev->use_dma) { diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index 055227cb3d43..67b8817995c0 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -474,12 +474,17 @@ static int adxl372_configure_fifo(struct adxl372_state *st) if (ret < 0) return ret; - fifo_samples = st->watermark & 0xFF; + /* + * watermark stores the number of sets; we need to write the FIFO + * registers with the number of samples + */ + fifo_samples = (st->watermark * st->fifo_set_size); fifo_ctl = ADXL372_FIFO_CTL_FORMAT_MODE(st->fifo_format) | ADXL372_FIFO_CTL_MODE_MODE(st->fifo_mode) | - ADXL372_FIFO_CTL_SAMPLES_MODE(st->watermark); + ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples); - ret = regmap_write(st->regmap, ADXL372_FIFO_SAMPLES, fifo_samples); + ret = regmap_write(st->regmap, + ADXL372_FIFO_SAMPLES, fifo_samples & 0xFF); if (ret < 0) return ret; @@ -548,8 +553,7 @@ static irqreturn_t adxl372_trigger_handler(int irq, void *p) goto err; /* Each sample is 2 bytes */ - for (i = 0; i < fifo_entries * sizeof(u16); - i += st->fifo_set_size * sizeof(u16)) + for (i = 0; i < fifo_entries; i += st->fifo_set_size) iio_push_to_buffers(indio_dev, &st->fifo_buf[i]); } err: @@ -571,6 +575,14 @@ 
static int adxl372_setup(struct adxl372_state *st) return -ENODEV; } + /* + * Perform a software reset to make sure the device is in a consistent + * state after start up. + */ + ret = regmap_write(st->regmap, ADXL372_RESET, ADXL372_RESET_CODE); + if (ret < 0) + return ret; + ret = adxl372_set_op_mode(st, ADXL372_STANDBY); if (ret < 0) return ret; diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index cf6c0e3a83d3..121b4e89f038 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -117,7 +117,7 @@ #define BMC150_ACCEL_SLEEP_1_SEC 0x0F #define BMC150_ACCEL_REG_TEMP 0x08 -#define BMC150_ACCEL_TEMP_CENTER_VAL 24 +#define BMC150_ACCEL_TEMP_CENTER_VAL 23 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2)) #define BMC150_AUTO_SUSPEND_DELAY_MS 2000 diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index 5a3ca5904ded..f658012baad8 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -810,10 +810,10 @@ static int ad799x_probe(struct i2c_client *client, ret = ad799x_write_config(st, st->chip_config->default_config); if (ret < 0) - goto error_disable_reg; + goto error_disable_vref; ret = ad799x_read_config(st); if (ret < 0) - goto error_disable_reg; + goto error_disable_vref; st->config = ret; ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index adc9cf7a075d..8ea2aed6d6f5 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -7,6 +7,7 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#include <linux/dmi.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> @@ -25,6 +26,11 @@ #define AXP288_ADC_EN_MASK 0xF0 #define AXP288_ADC_TS_ENABLE 0x01 +#define AXP288_ADC_TS_BIAS_MASK GENMASK(5, 4) +#define AXP288_ADC_TS_BIAS_20UA (0 << 4) +#define AXP288_ADC_TS_BIAS_40UA (1 << 4) +#define AXP288_ADC_TS_BIAS_60UA (2 << 4) +#define AXP288_ADC_TS_BIAS_80UA (3 << 4) #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) #define AXP288_ADC_TS_CURRENT_OFF (0 << 0) #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) @@ -177,10 +183,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } +/* + * We rely on the machine's firmware to correctly setup the TS pin bias current + * at boot. This lists systems with broken fw where we need to set it ourselves. + */ +static const struct dmi_system_id axp288_adc_ts_bias_override[] = { + { + /* Lenovo Ideapad 100S (11 inch) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"), + }, + .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, + }, + {} +}; + static int axp288_adc_initialize(struct axp288_adc_info *info) { + const struct dmi_system_id *bias_override; int ret, adc_enable_val; + bias_override = dmi_first_match(axp288_adc_ts_bias_override); + if (bias_override) { + ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, + AXP288_ADC_TS_BIAS_MASK, + (uintptr_t)bias_override->driver_data); + if (ret) + return ret; + } + /* * Determine if the TS pin is enabled and set the TS current-source * accordingly. 
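The axp288_adc change above uses a common quirk pattern: a dmi_system_id table whose .driver_data carries the override value, applied only when dmi_first_match() identifies a machine with broken firmware. The following stripped-down sketch shows the same table-match-then-override flow with hypothetical types and a hard-coded machine identity standing in for the kernel's DMI structures and tables.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct dmi_system_id: each entry names a
 * machine and carries the value to force, like .driver_data above. */
struct quirk_entry {
	const char *sys_vendor;
	const char *product;
	uint8_t ts_bias;	/* bits for AXP288_ADC_TS_BIAS_MASK */
};

static const struct quirk_entry bias_overrides[] = {
	/* Lenovo Ideapad 100S (11 inch): firmware sets the wrong bias. */
	{ "LENOVO", "Lenovo ideapad 100S-11IBY", 3 << 4 /* 80 uA */ },
	{ NULL, NULL, 0 }	/* sentinel, like the kernel's {} */
};

static const struct quirk_entry *first_match(const char *vendor,
					     const char *product)
{
	const struct quirk_entry *e;

	for (e = bias_overrides; e->sys_vendor; e++)
		if (!strcmp(e->sys_vendor, vendor) &&
		    !strcmp(e->product, product))
			return e;
	return NULL;
}

int main(void)
{
	/* In the kernel these strings come from the firmware DMI tables. */
	const struct quirk_entry *m =
		first_match("LENOVO", "Lenovo ideapad 100S-11IBY");

	if (m)
		printf("override TS bias bits to 0x%02x\n", m->ts_bias);
	else
		printf("trust the firmware default\n");
	return 0;
}

Keeping the override value inside the table entry means supporting another broken model is a one-entry addition, with no new branches in the probe path.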
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c index 88c7fe15003b..62e6c8badd22 100644 --- a/drivers/iio/adc/hx711.c +++ b/drivers/iio/adc/hx711.c @@ -100,14 +100,14 @@ struct hx711_data { static int hx711_cycle(struct hx711_data *hx711_data) { - int val; + unsigned long flags; /* * if preempted for more then 60us while PD_SCK is high: * hx711 is going in reset * ==> measuring is false */ - preempt_disable(); + local_irq_save(flags); gpiod_set_value(hx711_data->gpiod_pd_sck, 1); /* @@ -117,7 +117,6 @@ static int hx711_cycle(struct hx711_data *hx711_data) */ ndelay(hx711_data->data_ready_delay_ns); - val = gpiod_get_value(hx711_data->gpiod_dout); /* * here we are not waiting for 0.2 us as suggested by the datasheet, * because the oscilloscope showed in a test scenario @@ -125,7 +124,7 @@ static int hx711_cycle(struct hx711_data *hx711_data) * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz */ gpiod_set_value(hx711_data->gpiod_pd_sck, 0); - preempt_enable(); + local_irq_restore(flags); /* * make it a square wave for addressing cases with capacitance on @@ -133,7 +132,8 @@ static int hx711_cycle(struct hx711_data *hx711_data) */ ndelay(hx711_data->data_ready_delay_ns); - return val; + /* sample as late as possible */ + return gpiod_get_value(hx711_data->gpiod_dout); } static int hx711_read(struct hx711_data *hx711_data) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 7b28d045d271..7b27306330a3 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, + priv->param->regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!irq) return -EINVAL; @@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (ret) return ret; - priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, - priv->param->regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); - priv->clkin = devm_clk_get(&pdev->dev, "clkin"); if (IS_ERR(priv->clkin)) { dev_err(&pdev->dev, "failed to get clkin\n"); diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 9b85fefc0a96..93a096a91f8c 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -24,33 +24,6 @@ #include "stm32-adc-core.h" -/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */ -#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) -#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04) - -/* STM32F4_ADC_CSR - bit fields */ -#define STM32F4_EOC3 BIT(17) -#define STM32F4_EOC2 BIT(9) -#define STM32F4_EOC1 BIT(1) - -/* STM32F4_ADC_CCR - bit fields */ -#define STM32F4_ADC_ADCPRE_SHIFT 16 -#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16) - -/* STM32H7 - common registers for all ADC instances */ -#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) -#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08) - -/* STM32H7_ADC_CSR - bit fields */ -#define STM32H7_EOC_SLV BIT(18) -#define STM32H7_EOC_MST BIT(2) - -/* STM32H7_ADC_CCR - bit fields */ -#define STM32H7_PRESC_SHIFT 18 -#define STM32H7_PRESC_MASK GENMASK(21, 18) -#define STM32H7_CKMODE_SHIFT 16 -#define STM32H7_CKMODE_MASK GENMASK(17, 16) - #define STM32_ADC_CORE_SLEEP_DELAY_MS 2000 /* SYSCFG registers */ @@ -71,6 +44,8 @@ * @eoc1: adc1 end of conversion flag in @csr * 
@eoc2: adc2 end of conversion flag in @csr * @eoc3: adc3 end of conversion flag in @csr + * @ier: interrupt enable register offset for each adc + * @eocie_msk: end of conversion interrupt enable mask in @ier */ struct stm32_adc_common_regs { u32 csr; @@ -78,6 +53,8 @@ struct stm32_adc_common_regs { u32 eoc1_msk; u32 eoc2_msk; u32 eoc3_msk; + u32 ier; + u32 eocie_msk; }; struct stm32_adc_priv; @@ -303,6 +280,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = { .eoc1_msk = STM32F4_EOC1, .eoc2_msk = STM32F4_EOC2, .eoc3_msk = STM32F4_EOC3, + .ier = STM32F4_ADC_CR1, + .eocie_msk = STM32F4_EOCIE, }; /* STM32H7 common registers definitions */ @@ -311,8 +290,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = { .ccr = STM32H7_ADC_CCR, .eoc1_msk = STM32H7_EOC_MST, .eoc2_msk = STM32H7_EOC_SLV, + .ier = STM32H7_ADC_IER, + .eocie_msk = STM32H7_EOCIE, +}; + +static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = { + 0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2, }; +static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv, + unsigned int adc) +{ + u32 ier, offset = stm32_adc_offset[adc]; + + ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier); + + return ier & priv->cfg->regs->eocie_msk; +} + /* ADC common interrupt for all instances */ static void stm32_adc_irq_handler(struct irq_desc *desc) { @@ -323,13 +318,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); status = readl_relaxed(priv->common.base + priv->cfg->regs->csr); - if (status & priv->cfg->regs->eoc1_msk) + /* + * End of conversion may be handled by using IRQ or DMA. There may be a + * race here when two conversions complete at the same time on several + * ADCs. EOC may be read 'set' for several ADCs, with: + * - an ADC configured to use DMA (EOC triggers the DMA request, and + * is then automatically cleared by DR read in hardware) + * - an ADC configured to use IRQs (EOCIE bit is set. The handler must + * be called in this case) + * So both EOC status bit in CSR and EOCIE control bit must be checked + * before invoking the interrupt handler (e.g. call ISR only for + * IRQ-enabled ADCs). 
+ */ + if (status & priv->cfg->regs->eoc1_msk && + stm32_adc_eoc_enabled(priv, 0)) generic_handle_irq(irq_find_mapping(priv->domain, 0)); - if (status & priv->cfg->regs->eoc2_msk) + if (status & priv->cfg->regs->eoc2_msk && + stm32_adc_eoc_enabled(priv, 1)) generic_handle_irq(irq_find_mapping(priv->domain, 1)); - if (status & priv->cfg->regs->eoc3_msk) + if (status & priv->cfg->regs->eoc3_msk && + stm32_adc_eoc_enabled(priv, 2)) generic_handle_irq(irq_find_mapping(priv->domain, 2)); chained_irq_exit(chip, desc); diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h index 8af507b3f32d..2579d514c2a3 100644 --- a/drivers/iio/adc/stm32-adc-core.h +++ b/drivers/iio/adc/stm32-adc-core.h @@ -25,8 +25,145 @@ * -------------------------------------------------------- */ #define STM32_ADC_MAX_ADCS 3 +#define STM32_ADC_OFFSET 0x100 #define STM32_ADCX_COMN_OFFSET 0x300 +/* STM32F4 - Registers for each ADC instance */ +#define STM32F4_ADC_SR 0x00 +#define STM32F4_ADC_CR1 0x04 +#define STM32F4_ADC_CR2 0x08 +#define STM32F4_ADC_SMPR1 0x0C +#define STM32F4_ADC_SMPR2 0x10 +#define STM32F4_ADC_HTR 0x24 +#define STM32F4_ADC_LTR 0x28 +#define STM32F4_ADC_SQR1 0x2C +#define STM32F4_ADC_SQR2 0x30 +#define STM32F4_ADC_SQR3 0x34 +#define STM32F4_ADC_JSQR 0x38 +#define STM32F4_ADC_JDR1 0x3C +#define STM32F4_ADC_JDR2 0x40 +#define STM32F4_ADC_JDR3 0x44 +#define STM32F4_ADC_JDR4 0x48 +#define STM32F4_ADC_DR 0x4C + +/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */ +#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) +#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04) + +/* STM32F4_ADC_SR - bit fields */ +#define STM32F4_STRT BIT(4) +#define STM32F4_EOC BIT(1) + +/* STM32F4_ADC_CR1 - bit fields */ +#define STM32F4_RES_SHIFT 24 +#define STM32F4_RES_MASK GENMASK(25, 24) +#define STM32F4_SCAN BIT(8) +#define STM32F4_EOCIE BIT(5) + +/* STM32F4_ADC_CR2 - bit fields */ +#define STM32F4_SWSTART BIT(30) +#define STM32F4_EXTEN_SHIFT 28 +#define STM32F4_EXTEN_MASK GENMASK(29, 28) +#define STM32F4_EXTSEL_SHIFT 24 +#define STM32F4_EXTSEL_MASK GENMASK(27, 24) +#define STM32F4_EOCS BIT(10) +#define STM32F4_DDS BIT(9) +#define STM32F4_DMA BIT(8) +#define STM32F4_ADON BIT(0) + +/* STM32F4_ADC_CSR - bit fields */ +#define STM32F4_EOC3 BIT(17) +#define STM32F4_EOC2 BIT(9) +#define STM32F4_EOC1 BIT(1) + +/* STM32F4_ADC_CCR - bit fields */ +#define STM32F4_ADC_ADCPRE_SHIFT 16 +#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16) + +/* STM32H7 - Registers for each ADC instance */ +#define STM32H7_ADC_ISR 0x00 +#define STM32H7_ADC_IER 0x04 +#define STM32H7_ADC_CR 0x08 +#define STM32H7_ADC_CFGR 0x0C +#define STM32H7_ADC_SMPR1 0x14 +#define STM32H7_ADC_SMPR2 0x18 +#define STM32H7_ADC_PCSEL 0x1C +#define STM32H7_ADC_SQR1 0x30 +#define STM32H7_ADC_SQR2 0x34 +#define STM32H7_ADC_SQR3 0x38 +#define STM32H7_ADC_SQR4 0x3C +#define STM32H7_ADC_DR 0x40 +#define STM32H7_ADC_DIFSEL 0xC0 +#define STM32H7_ADC_CALFACT 0xC4 +#define STM32H7_ADC_CALFACT2 0xC8 + +/* STM32H7 - common registers for all ADC instances */ +#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00) +#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08) + +/* STM32H7_ADC_ISR - bit fields */ +#define STM32MP1_VREGREADY BIT(12) +#define STM32H7_EOC BIT(2) +#define STM32H7_ADRDY BIT(0) + +/* STM32H7_ADC_IER - bit fields */ +#define STM32H7_EOCIE STM32H7_EOC + +/* STM32H7_ADC_CR - bit fields */ +#define STM32H7_ADCAL BIT(31) +#define STM32H7_ADCALDIF BIT(30) +#define STM32H7_DEEPPWD BIT(29) +#define STM32H7_ADVREGEN BIT(28) +#define 
STM32H7_LINCALRDYW6 BIT(27) +#define STM32H7_LINCALRDYW5 BIT(26) +#define STM32H7_LINCALRDYW4 BIT(25) +#define STM32H7_LINCALRDYW3 BIT(24) +#define STM32H7_LINCALRDYW2 BIT(23) +#define STM32H7_LINCALRDYW1 BIT(22) +#define STM32H7_ADCALLIN BIT(16) +#define STM32H7_BOOST BIT(8) +#define STM32H7_ADSTP BIT(4) +#define STM32H7_ADSTART BIT(2) +#define STM32H7_ADDIS BIT(1) +#define STM32H7_ADEN BIT(0) + +/* STM32H7_ADC_CFGR bit fields */ +#define STM32H7_EXTEN_SHIFT 10 +#define STM32H7_EXTEN_MASK GENMASK(11, 10) +#define STM32H7_EXTSEL_SHIFT 5 +#define STM32H7_EXTSEL_MASK GENMASK(9, 5) +#define STM32H7_RES_SHIFT 2 +#define STM32H7_RES_MASK GENMASK(4, 2) +#define STM32H7_DMNGT_SHIFT 0 +#define STM32H7_DMNGT_MASK GENMASK(1, 0) + +enum stm32h7_adc_dmngt { + STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */ + STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */ + STM32H7_DMNGT_DFSDM, /* DFSDM mode */ + STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */ +}; + +/* STM32H7_ADC_CALFACT - bit fields */ +#define STM32H7_CALFACT_D_SHIFT 16 +#define STM32H7_CALFACT_D_MASK GENMASK(26, 16) +#define STM32H7_CALFACT_S_SHIFT 0 +#define STM32H7_CALFACT_S_MASK GENMASK(10, 0) + +/* STM32H7_ADC_CALFACT2 - bit fields */ +#define STM32H7_LINCALFACT_SHIFT 0 +#define STM32H7_LINCALFACT_MASK GENMASK(29, 0) + +/* STM32H7_ADC_CSR - bit fields */ +#define STM32H7_EOC_SLV BIT(18) +#define STM32H7_EOC_MST BIT(2) + +/* STM32H7_ADC_CCR - bit fields */ +#define STM32H7_PRESC_SHIFT 18 +#define STM32H7_PRESC_MASK GENMASK(21, 18) +#define STM32H7_CKMODE_SHIFT 16 +#define STM32H7_CKMODE_MASK GENMASK(17, 16) + /** * struct stm32_adc_common - stm32 ADC driver common data (for all instances) * @base: control registers base cpu addr diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 6a7dd08b1e0b..73aee5949b6b 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -28,115 +28,6 @@ #include "stm32-adc-core.h" -/* STM32F4 - Registers for each ADC instance */ -#define STM32F4_ADC_SR 0x00 -#define STM32F4_ADC_CR1 0x04 -#define STM32F4_ADC_CR2 0x08 -#define STM32F4_ADC_SMPR1 0x0C -#define STM32F4_ADC_SMPR2 0x10 -#define STM32F4_ADC_HTR 0x24 -#define STM32F4_ADC_LTR 0x28 -#define STM32F4_ADC_SQR1 0x2C -#define STM32F4_ADC_SQR2 0x30 -#define STM32F4_ADC_SQR3 0x34 -#define STM32F4_ADC_JSQR 0x38 -#define STM32F4_ADC_JDR1 0x3C -#define STM32F4_ADC_JDR2 0x40 -#define STM32F4_ADC_JDR3 0x44 -#define STM32F4_ADC_JDR4 0x48 -#define STM32F4_ADC_DR 0x4C - -/* STM32F4_ADC_SR - bit fields */ -#define STM32F4_STRT BIT(4) -#define STM32F4_EOC BIT(1) - -/* STM32F4_ADC_CR1 - bit fields */ -#define STM32F4_RES_SHIFT 24 -#define STM32F4_RES_MASK GENMASK(25, 24) -#define STM32F4_SCAN BIT(8) -#define STM32F4_EOCIE BIT(5) - -/* STM32F4_ADC_CR2 - bit fields */ -#define STM32F4_SWSTART BIT(30) -#define STM32F4_EXTEN_SHIFT 28 -#define STM32F4_EXTEN_MASK GENMASK(29, 28) -#define STM32F4_EXTSEL_SHIFT 24 -#define STM32F4_EXTSEL_MASK GENMASK(27, 24) -#define STM32F4_EOCS BIT(10) -#define STM32F4_DDS BIT(9) -#define STM32F4_DMA BIT(8) -#define STM32F4_ADON BIT(0) - -/* STM32H7 - Registers for each ADC instance */ -#define STM32H7_ADC_ISR 0x00 -#define STM32H7_ADC_IER 0x04 -#define STM32H7_ADC_CR 0x08 -#define STM32H7_ADC_CFGR 0x0C -#define STM32H7_ADC_SMPR1 0x14 -#define STM32H7_ADC_SMPR2 0x18 -#define STM32H7_ADC_PCSEL 0x1C -#define STM32H7_ADC_SQR1 0x30 -#define STM32H7_ADC_SQR2 0x34 -#define STM32H7_ADC_SQR3 0x38 -#define STM32H7_ADC_SQR4 0x3C -#define STM32H7_ADC_DR 0x40 -#define STM32H7_ADC_DIFSEL 0xC0 -#define 
STM32H7_ADC_CALFACT 0xC4 -#define STM32H7_ADC_CALFACT2 0xC8 - -/* STM32H7_ADC_ISR - bit fields */ -#define STM32MP1_VREGREADY BIT(12) -#define STM32H7_EOC BIT(2) -#define STM32H7_ADRDY BIT(0) - -/* STM32H7_ADC_IER - bit fields */ -#define STM32H7_EOCIE STM32H7_EOC - -/* STM32H7_ADC_CR - bit fields */ -#define STM32H7_ADCAL BIT(31) -#define STM32H7_ADCALDIF BIT(30) -#define STM32H7_DEEPPWD BIT(29) -#define STM32H7_ADVREGEN BIT(28) -#define STM32H7_LINCALRDYW6 BIT(27) -#define STM32H7_LINCALRDYW5 BIT(26) -#define STM32H7_LINCALRDYW4 BIT(25) -#define STM32H7_LINCALRDYW3 BIT(24) -#define STM32H7_LINCALRDYW2 BIT(23) -#define STM32H7_LINCALRDYW1 BIT(22) -#define STM32H7_ADCALLIN BIT(16) -#define STM32H7_BOOST BIT(8) -#define STM32H7_ADSTP BIT(4) -#define STM32H7_ADSTART BIT(2) -#define STM32H7_ADDIS BIT(1) -#define STM32H7_ADEN BIT(0) - -/* STM32H7_ADC_CFGR bit fields */ -#define STM32H7_EXTEN_SHIFT 10 -#define STM32H7_EXTEN_MASK GENMASK(11, 10) -#define STM32H7_EXTSEL_SHIFT 5 -#define STM32H7_EXTSEL_MASK GENMASK(9, 5) -#define STM32H7_RES_SHIFT 2 -#define STM32H7_RES_MASK GENMASK(4, 2) -#define STM32H7_DMNGT_SHIFT 0 -#define STM32H7_DMNGT_MASK GENMASK(1, 0) - -enum stm32h7_adc_dmngt { - STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */ - STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */ - STM32H7_DMNGT_DFSDM, /* DFSDM mode */ - STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */ -}; - -/* STM32H7_ADC_CALFACT - bit fields */ -#define STM32H7_CALFACT_D_SHIFT 16 -#define STM32H7_CALFACT_D_MASK GENMASK(26, 16) -#define STM32H7_CALFACT_S_SHIFT 0 -#define STM32H7_CALFACT_S_MASK GENMASK(10, 0) - -/* STM32H7_ADC_CALFACT2 - bit fields */ -#define STM32H7_LINCALFACT_SHIFT 0 -#define STM32H7_LINCALFACT_MASK GENMASK(29, 0) - /* Number of linear calibration shadow registers / LINCALRDYW control bits */ #define STM32H7_LINCALFACT_NUM 6 @@ -1508,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev) cookie = dmaengine_submit(desc); ret = dma_submit_error(cookie); if (ret) { - dmaengine_terminate_all(adc->dma_chan); + dmaengine_terminate_sync(adc->dma_chan); return ret; } @@ -1586,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev) stm32_adc_conv_irq_disable(adc); if (adc->dma_chan) - dmaengine_terminate_all(adc->dma_chan); + dmaengine_terminate_sync(adc->dma_chan); if (stm32_adc_set_trig(indio_dev, NULL)) dev_err(&indio_dev->dev, "Can't clear trigger\n"); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index b99d73887c9f..8743b2f376e2 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2) struct adis16480 *st = iio_priv(indio_dev); unsigned int t, reg; + if (val < 0 || val2 < 0) + return -EINVAL; + t = val * 1000 + val2 / 1000; - if (t <= 0) + if (t == 0) return -EINVAL; /* diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 9ac8356d9a95..4998a89d083d 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -35,8 +35,11 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); - if (!adis->buffer) + if (!adis->buffer) { + kfree(adis->xfer); + adis->xfer = NULL; return -ENOMEM; + } tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(adis->burst->reg_cmd); @@ -78,8 +81,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = 
kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL); - if (!adis->buffer) + if (!adis->buffer) { + kfree(adis->xfer); + adis->xfer = NULL; return -ENOMEM; + } rx = adis->buffer; tx = rx + scan_count; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index b17f060b52fc..868281b8adb0 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = { .name = "MPU6050", .reg = ®_set_6050, .config = &chip_config_6050, + .fifo_size = 1024, }, { .whoami = INV_MPU6500_WHOAMI_VALUE, .name = "MPU6500", .reg = ®_set_6500, .config = &chip_config_6050, + .fifo_size = 512, }, { .whoami = INV_MPU6515_WHOAMI_VALUE, .name = "MPU6515", .reg = ®_set_6500, .config = &chip_config_6050, + .fifo_size = 512, }, { .whoami = INV_MPU6000_WHOAMI_VALUE, .name = "MPU6000", .reg = ®_set_6050, .config = &chip_config_6050, + .fifo_size = 1024, }, { .whoami = INV_MPU9150_WHOAMI_VALUE, .name = "MPU9150", .reg = ®_set_6050, .config = &chip_config_6050, + .fifo_size = 1024, }, { .whoami = INV_MPU9250_WHOAMI_VALUE, .name = "MPU9250", .reg = ®_set_6500, .config = &chip_config_6050, + .fifo_size = 512, }, { .whoami = INV_MPU9255_WHOAMI_VALUE, .name = "MPU9255", .reg = ®_set_6500, .config = &chip_config_6050, + .fifo_size = 512, }, { .whoami = INV_ICM20608_WHOAMI_VALUE, .name = "ICM20608", .reg = ®_set_6500, .config = &chip_config_6050, + .fifo_size = 512, }, { .whoami = INV_ICM20602_WHOAMI_VALUE, .name = "ICM20602", .reg = ®_set_icm20602, .config = &chip_config_6050, + .fifo_size = 1008, }, }; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index db1c6904388b..51235677c534 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config { * @name: name of the chip. * @reg: register map of the chip. * @config: configuration of the chip. + * @fifo_size: size of the FIFO in bytes. */ struct inv_mpu6050_hw { u8 whoami; u8 *name; const struct inv_mpu6050_reg_map *reg; const struct inv_mpu6050_chip_config *config; + size_t fifo_size; }; /* diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 5f9a5de0bab4..72d8c5790076 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) "failed to ack interrupt\n"); goto flush_fifo; } - /* handle fifo overflow by reseting fifo */ - if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT) - goto flush_fifo; if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) { dev_warn(regmap_get_device(st->map), "spurious interrupt with status 0x%x\n", int_status); @@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) if (result) goto end_session; fifo_count = get_unaligned_be16(&data[0]); + + /* + * Handle fifo overflow by resetting fifo. + * Reset if there is only 3 data set free remaining to mitigate + * possible delay between reading fifo count and fifo data. 
+ */ + nb = 3 * bytes_per_datum; + if (fifo_count >= st->hw->fifo_size - nb) { + dev_warn(regmap_get_device(st->map), "fifo overflow reset\n"); + goto flush_fifo; + } + /* compute and process all complete datum */ nb = fifo_count / bytes_per_datum; inv_mpu6050_update_period(st, pf->timestamp, nb); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index 80e42c7dbcbe..0fe6999b8257 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -99,7 +99,9 @@ struct st_lsm6dsx_fs { #define ST_LSM6DSX_FS_LIST_SIZE 4 struct st_lsm6dsx_fs_table_entry { struct st_lsm6dsx_reg reg; + struct st_lsm6dsx_fs fs_avl[ST_LSM6DSX_FS_LIST_SIZE]; + int fs_len; }; /** diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 2d3495560136..fd5ebe1e1594 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -145,6 +145,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -154,6 +155,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 }, .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 }, + .fs_len = 3, }, }, }, @@ -215,6 +217,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -225,6 +228,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .decimator = { @@ -327,6 +331,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -337,6 +342,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .decimator = { @@ -448,6 +454,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -458,6 +465,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .decimator = { @@ -563,6 +571,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -573,6 +582,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { 
IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .batch = { @@ -693,6 +703,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -703,6 +714,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .batch = { @@ -800,6 +812,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -810,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .batch = { @@ -933,11 +947,12 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor, int i, err; fs_table = &sensor->hw->settings->fs_table[sensor->id]; - for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) + for (i = 0; i < fs_table->fs_len; i++) { if (fs_table->fs_avl[i].gain == gain) break; + } - if (i == ST_LSM6DSX_FS_LIST_SIZE) + if (i == fs_table->fs_len) return -EINVAL; data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val, @@ -1196,18 +1211,13 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev, { struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev)); const struct st_lsm6dsx_fs_table_entry *fs_table; - enum st_lsm6dsx_sensor_id id = sensor->id; struct st_lsm6dsx_hw *hw = sensor->hw; int i, len = 0; - fs_table = &hw->settings->fs_table[id]; - for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) { - if (!fs_table->fs_avl[i].gain) - break; - + fs_table = &hw->settings->fs_table[sensor->id]; + for (i = 0; i < fs_table->fs_len; i++) len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", fs_table->fs_avl[i].gain); - } buf[len - 1] = '\n'; return len; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c index 66fbcd94642d..ea472cf6db7b 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c @@ -61,6 +61,7 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = { .gain = 1500, .val = 0x0, }, /* 1500 uG/LSB */ + .fs_len = 1, }, .temp_comp = { .addr = 0x60, @@ -92,9 +93,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = { static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw) { struct st_lsm6dsx_sensor *sensor; + u16 odr; sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]); - msleep((2000U / sensor->odr) + 1); + odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? 
sensor->odr : 13; + msleep((2000U / odr) + 1); } /** @@ -555,13 +558,9 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev, int i, len = 0; settings = sensor->ext_info.settings; - for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) { - u16 val = settings->fs_table.fs_avl[i].gain; - - if (val > 0) - len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", - val); - } + for (i = 0; i < settings->fs_table.fs_len; i++) + len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", + settings->fs_table.fs_avl[i].gain); buf[len - 1] = '\n'; return len; diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 08d7e1ef2186..4a1a883dc061 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -314,6 +314,7 @@ config MAX44009 config NOA1305 tristate "ON Semiconductor NOA1305 ambient light sensor" depends on I2C + select REGMAP_I2C help Say Y here if you want to build support for the ON Semiconductor NOA1305 ambient light sensor. diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index e666879007d2..92004a2563ea 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -686,6 +686,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) struct iio_dev *iio = _iio; struct opt3001 *opt = iio_priv(iio); int ret; + bool wake_result_ready_queue = false; if (!opt->ok_to_ignore_lock) mutex_lock(&opt->lock); @@ -720,13 +721,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) } opt->result = ret; opt->result_ready = true; - wake_up(&opt->result_ready_queue); + wake_result_ready_queue = true; } out: if (!opt->ok_to_ignore_lock) mutex_unlock(&opt->lock); + if (wake_result_ready_queue) + wake_up(&opt->result_ready_queue); + return IRQ_HANDLED; } diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c index 51421ac32517..16dacea9eadf 100644 --- a/drivers/iio/light/vcnl4000.c +++ b/drivers/iio/light/vcnl4000.c @@ -398,19 +398,23 @@ static int vcnl4000_probe(struct i2c_client *client, static const struct of_device_id vcnl_4000_of_match[] = { { .compatible = "vishay,vcnl4000", - .data = "VCNL4000", + .data = (void *)VCNL4000, }, { .compatible = "vishay,vcnl4010", - .data = "VCNL4010", + .data = (void *)VCNL4010, }, { - .compatible = "vishay,vcnl4010", - .data = "VCNL4020", + .compatible = "vishay,vcnl4020", + .data = (void *)VCNL4010, + }, + { + .compatible = "vishay,vcnl4040", + .data = (void *)VCNL4040, }, { .compatible = "vishay,vcnl4200", - .data = "VCNL4200", + .data = (void *)VCNL4200, }, {}, }; diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c index 8b50d56b0a03..01eb8cc63076 100644 --- a/drivers/iio/proximity/srf04.c +++ b/drivers/iio/proximity/srf04.c @@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data) udelay(data->cfg->trigger_pulse_us); gpiod_set_value(data->gpiod_trig, 0); - /* it cannot take more than 20 ms */ + /* it should not take more than 20 ms until echo is rising */ ret = wait_for_completion_killable_timeout(&data->rising, HZ/50); if (ret < 0) { mutex_unlock(&data->lock); @@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data) return -ETIMEDOUT; } - ret = wait_for_completion_killable_timeout(&data->falling, HZ/50); + /* it cannot take more than 50 ms until echo is falling */ + ret = wait_for_completion_killable_timeout(&data->falling, HZ/20); if (ret < 0) { mutex_unlock(&data->lock); return ret; @@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data) dt_ns = ktime_to_ns(ktime_dt); /* - * measuring more than 3 meters is beyond the 
capabilities of - * the sensor + * measuring more than 6,45 meters is beyond the capabilities of + * the supported sensors * ==> filter out invalid results for not measuring echoes of * another us sensor * * formula: - * time = distance / speed = 3 m / (319 m/s) = 9404389 ns + * time = distance / speed = (6,45 * 2) m / (319 m/s) = 40438871 ns * * using a minimum speed at -20 °C of 319 m/s */ - if (dt_ns > 9404389) + if (dt_ns > 40438871) return -EIO; time_ns = dt_ns; @@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data) * with Temp in °C * and speed in m/s * - * use 343 m/s as ultrasonic speed at 20 °C here in absence of the + * use 343,5 m/s as ultrasonic speed at 20 °C here in absence of the * temperature * * therefore: - * distance = (time / 10^6) * (343 / 2) + * distance = (time / 10^6) * (343,5 / 2) = (time * 106) / 617176 * with time in ns * and distance in mm (one way) * - * because we limit to 3 meters the multiplication with 343 just + * because we limit to 6,45 meters the multiplication with 106 just * fits into 32 bit */ - distance_mm = time_ns * 343 / 2000000; + distance_mm = time_ns * 106 / 617176; return distance_mm; } diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index da10e6ccb43c..5920c0085d35 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -4399,6 +4399,7 @@ error2: error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; + kfree(port); while (--i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; @@ -4407,6 +4408,7 @@ error1: ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); + kfree(port); } free: kfree(cm_dev); @@ -4460,6 +4462,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) spin_unlock_irq(&cm.state_lock); ib_unregister_mad_agent(cur_mad_agent); cm_remove_port_fs(port); + kfree(port); } kfree(cm_dev); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 0e3cf3461999..d78f67623f24 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2396,9 +2396,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, conn_id->cm_id.iw = NULL; cma_exch(conn_id, RDMA_CM_DESTROYING); mutex_unlock(&conn_id->handler_mutex); + mutex_unlock(&listen_id->handler_mutex); cma_deref_id(conn_id); rdma_destroy_id(&conn_id->id); - goto out; + return ret; } mutex_unlock(&conn_id->handler_mutex); diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 3a8b0911c3bc..9d07378b5b42 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -199,6 +199,7 @@ void ib_mad_cleanup(void); int ib_sa_init(void); void ib_sa_cleanup(void); +void rdma_nl_init(void); void rdma_nl_exit(void); int ib_nl_handle_resolve_resp(struct sk_buff *skb, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 99c4a55545cf..50a92442c4f7 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1987,8 +1987,6 @@ static int iw_query_port(struct ib_device *device, if (!netdev) return -ENODEV; - dev_put(netdev); - port_attr->max_mtu = IB_MTU_4096; port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu); @@ -1996,19 +1994,22 @@ static int iw_query_port(struct ib_device *device, port_attr->state = IB_PORT_DOWN; port_attr->phys_state =
IB_PORT_PHYS_STATE_DISABLED; } else { - inetdev = in_dev_get(netdev); + rcu_read_lock(); + inetdev = __in_dev_get_rcu(netdev); if (inetdev && inetdev->ifa_list) { port_attr->state = IB_PORT_ACTIVE; port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; - in_dev_put(inetdev); } else { port_attr->state = IB_PORT_INIT; port_attr->phys_state = IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING; } + + rcu_read_unlock(); } + dev_put(netdev); err = device->ops.query_port(device, port_num, port_attr); if (err) return err; @@ -2715,6 +2716,8 @@ static int __init ib_core_init(void) goto err_comp_unbound; } + rdma_nl_init(); + ret = addr_init(); if (ret) { pr_warn("Could't init IB address resolution\n"); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 72141c5b7c95..ade71823370f 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect); static void destroy_cm_id(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; + struct ib_qp *qp; unsigned long flags; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); @@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags); spin_lock_irqsave(&cm_id_priv->lock, flags); + qp = cm_id_priv->qp; + cm_id_priv->qp = NULL; + switch (cm_id_priv->state) { case IW_CM_STATE_LISTEN: cm_id_priv->state = IW_CM_STATE_DESTROYING; @@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) cm_id_priv->state = IW_CM_STATE_DESTROYING; spin_unlock_irqrestore(&cm_id_priv->lock, flags); /* Abrupt close of the connection */ - (void)iwcm_modify_qp_err(cm_id_priv->qp); + (void)iwcm_modify_qp_err(qp); spin_lock_irqsave(&cm_id_priv->lock, flags); break; case IW_CM_STATE_IDLE: @@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) BUG(); break; } - if (cm_id_priv->qp) { - cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp); - cm_id_priv->qp = NULL; - } spin_unlock_irqrestore(&cm_id_priv->lock, flags); + if (qp) + cm_id_priv->id.device->ops.iw_rem_ref(qp); if (cm_id->mapped) { iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr); @@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id, BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV); cm_id_priv->state = IW_CM_STATE_IDLE; spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id_priv->qp) { - cm_id->device->ops.iw_rem_ref(qp); - cm_id_priv->qp = NULL; - } + qp = cm_id_priv->qp; + cm_id_priv->qp = NULL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); + if (qp) + cm_id->device->ops.iw_rem_ref(qp); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); } @@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) struct iwcm_id_private *cm_id_priv; int ret; unsigned long flags; - struct ib_qp *qp; + struct ib_qp *qp = NULL; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); @@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) return 0; /* success */ spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id_priv->qp) { - cm_id->device->ops.iw_rem_ref(qp); - cm_id_priv->qp = NULL; - } + qp = cm_id_priv->qp; + cm_id_priv->qp = NULL; cm_id_priv->state = IW_CM_STATE_IDLE; err: spin_unlock_irqrestore(&cm_id_priv->lock, flags); + if (qp) + cm_id->device->ops.iw_rem_ref(qp); clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); wake_up_all(&cm_id_priv->connect_wait); return ret; @@ -878,6 +880,7 @@ 
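Several of the iwcm hunks above converge on one rule: snapshot and clear cm_id_priv->qp while the spinlock is held, drop the lock, and only then call iw_rem_ref(), which must not run under the lock. A minimal userspace sketch of that detach-then-release shape, with a pthread mutex standing in for the irq spinlock and every name illustrative:

#include <pthread.h>

struct qp { int refs; };

struct conn {
        pthread_mutex_t lock;   /* stand-in for cm_id_priv->lock */
        struct qp *qp;
};

static void qp_put(struct qp *qp)
{
        /* stand-in for ops->iw_rem_ref(); in the real driver this
         * must never be called with the spinlock held */
        (void)qp;
}

static void conn_teardown(struct conn *c)
{
        struct qp *qp;

        pthread_mutex_lock(&c->lock);
        qp = c->qp;      /* snapshot the reference...     */
        c->qp = NULL;    /* ...and detach it while locked */
        pthread_mutex_unlock(&c->lock);

        if (qp)
                qp_put(qp);     /* release with no lock held */
}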
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv, static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { + struct ib_qp *qp = NULL; unsigned long flags; int ret; @@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, cm_id_priv->state = IW_CM_STATE_ESTABLISHED; } else { /* REJECTED or RESET */ - cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp); + qp = cm_id_priv->qp; cm_id_priv->qp = NULL; cm_id_priv->state = IW_CM_STATE_IDLE; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); + if (qp) + cm_id_priv->id.device->ops.iw_rem_ref(qp); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); if (iw_event->private_data_len) @@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv, static int cm_close_handler(struct iwcm_id_private *cm_id_priv, struct iw_cm_event *iw_event) { + struct ib_qp *qp; unsigned long flags; - int ret = 0; + int ret = 0, notify_event = 0; spin_lock_irqsave(&cm_id_priv->lock, flags); + qp = cm_id_priv->qp; + cm_id_priv->qp = NULL; - if (cm_id_priv->qp) { - cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp); - cm_id_priv->qp = NULL; - } switch (cm_id_priv->state) { case IW_CM_STATE_ESTABLISHED: case IW_CM_STATE_CLOSING: cm_id_priv->state = IW_CM_STATE_IDLE; - spin_unlock_irqrestore(&cm_id_priv->lock, flags); - ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); - spin_lock_irqsave(&cm_id_priv->lock, flags); + notify_event = 1; break; case IW_CM_STATE_DESTROYING: break; @@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv, } spin_unlock_irqrestore(&cm_id_priv->lock, flags); + if (qp) + cm_id_priv->id.device->ops.iw_rem_ref(qp); + if (notify_event) + ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event); return ret; } diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 81dbd5f41bed..8cd31ef25eff 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -42,9 +42,12 @@ #include <linux/module.h> #include "core_priv.h" -static DEFINE_MUTEX(rdma_nl_mutex); static struct { - const struct rdma_nl_cbs *cb_table; + const struct rdma_nl_cbs *cb_table; + /* Synchronizes between ongoing netlink commands and netlink client + * unregistration. + */ + struct rw_semaphore sem; } rdma_nl_types[RDMA_NL_NUM_CLIENTS]; bool rdma_nl_chk_listeners(unsigned int group) @@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op) return (op < max_num_ops[type]) ? true : false; } -static bool -is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op) +static const struct rdma_nl_cbs * +get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op) { const struct rdma_nl_cbs *cb_table; - if (!is_nl_msg_valid(type, op)) - return false; - /* * Currently only NLDEV client is supporting netlink commands in * non init_net net namespace. */ if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV) - return false; + return NULL; - if (!rdma_nl_types[type].cb_table) { - mutex_unlock(&rdma_nl_mutex); - request_module("rdma-netlink-subsys-%d", type); - mutex_lock(&rdma_nl_mutex); - } + cb_table = READ_ONCE(rdma_nl_types[type].cb_table); + if (!cb_table) { + /* + * Didn't get valid reference of the table, attempt module + * load once. 
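The code that follows this comment is a small pattern of its own: request_module() can sleep, and the freshly loaded module registers itself against this very table, so the read side has to be dropped around the load and the table re-checked afterwards. A minimal userspace sketch of the same shape, with pthread primitives standing in for the kernel's rw_semaphore and READ_ONCE(); every name is illustrative:

#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *cb_table;  /* published by the provider under the write lock */

static void try_load_provider(void)
{
        /* stand-in for request_module(); the real provider takes
         * table_lock for writing and fills in cb_table */
}

/* always returns with the read lock held; the caller unlocks */
static void *get_table(void)
{
        void *t;

        pthread_rwlock_rdlock(&table_lock);
        t = __atomic_load_n(&cb_table, __ATOMIC_ACQUIRE);
        if (!t) {
                /* cannot load while read-held: the loader needs the
                 * write side to publish. Drop, load, retake, re-check. */
                pthread_rwlock_unlock(&table_lock);
                try_load_provider();
                pthread_rwlock_rdlock(&table_lock);
                t = __atomic_load_n(&cb_table, __ATOMIC_ACQUIRE);
        }
        return t;
}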
+ */ + up_read(&rdma_nl_types[type].sem); - cb_table = rdma_nl_types[type].cb_table; + request_module("rdma-netlink-subsys-%d", type); + down_read(&rdma_nl_types[type].sem); + cb_table = READ_ONCE(rdma_nl_types[type].cb_table); + } if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit)) - return false; - return true; + return NULL; + return cb_table; } void rdma_nl_register(unsigned int index, const struct rdma_nl_cbs cb_table[]) { - mutex_lock(&rdma_nl_mutex); - if (!is_nl_msg_valid(index, 0)) { - /* - * All clients are not interesting in success/failure of - * this call. They want to see the print to error log and - * continue their initialization. Print warning for them, - * because it is programmer's error to be here. - */ - mutex_unlock(&rdma_nl_mutex); - WARN(true, - "The not-valid %u index was supplied to RDMA netlink\n", - index); + if (WARN_ON(!is_nl_msg_valid(index, 0)) || + WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table))) return; - } - - if (rdma_nl_types[index].cb_table) { - mutex_unlock(&rdma_nl_mutex); - WARN(true, - "The %u index is already registered in RDMA netlink\n", - index); - return; - } - rdma_nl_types[index].cb_table = cb_table; - mutex_unlock(&rdma_nl_mutex); + /* Pairs with the READ_ONCE in is_nl_valid() */ + smp_store_release(&rdma_nl_types[index].cb_table, cb_table); } EXPORT_SYMBOL(rdma_nl_register); void rdma_nl_unregister(unsigned int index) { - mutex_lock(&rdma_nl_mutex); + down_write(&rdma_nl_types[index].sem); rdma_nl_types[index].cb_table = NULL; - mutex_unlock(&rdma_nl_mutex); + up_write(&rdma_nl_types[index].sem); } EXPORT_SYMBOL(rdma_nl_unregister); @@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, unsigned int index = RDMA_NL_GET_CLIENT(type); unsigned int op = RDMA_NL_GET_OP(type); const struct rdma_nl_cbs *cb_table; + int err = -EINVAL; - if (!is_nl_valid(skb, index, op)) + if (!is_nl_msg_valid(index, op)) return -EINVAL; - cb_table = rdma_nl_types[index].cb_table; + down_read(&rdma_nl_types[index].sem); + cb_table = get_cb_table(skb, index, op); + if (!cb_table) + goto done; if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) && - !netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; + !netlink_capable(skb, CAP_NET_ADMIN)) { + err = -EPERM; + goto done; + } /* * LS responses overload the 0x100 (NLM_F_ROOT) flag. 
Don't @@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, */ if (index == RDMA_NL_LS) { if (cb_table[op].doit) - return cb_table[op].doit(skb, nlh, extack); - return -EINVAL; + err = cb_table[op].doit(skb, nlh, extack); + goto done; } /* FIXME: Convert IWCM to properly handle doit callbacks */ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) { @@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, .dump = cb_table[op].dump, }; if (c.dump) - return netlink_dump_start(skb->sk, skb, nlh, &c); - return -EINVAL; + err = netlink_dump_start(skb->sk, skb, nlh, &c); + goto done; } if (cb_table[op].doit) - return cb_table[op].doit(skb, nlh, extack); - - return 0; + err = cb_table[op].doit(skb, nlh, extack); +done: + up_read(&rdma_nl_types[index].sem); + return err; } /* @@ -263,9 +256,7 @@ skip: static void rdma_nl_rcv(struct sk_buff *skb) { - mutex_lock(&rdma_nl_mutex); rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg); - mutex_unlock(&rdma_nl_mutex); } int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid) @@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb, } EXPORT_SYMBOL(rdma_nl_multicast); +void rdma_nl_init(void) +{ + int idx; + + for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) + init_rwsem(&rdma_nl_types[idx].sem); +} + void rdma_nl_exit(void) { int idx; diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 7a7474000100..c03af08b80e7 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, container_of(res, struct rdma_counter, res); if (port && port != counter->port) - return 0; + return -EAGAIN; /* Dump it even query failed */ rdma_counter_query_stats(counter); @@ -1230,7 +1230,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto err; + goto err_get; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, @@ -1787,10 +1787,6 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); - ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); - if (ret) - goto err_unbind; - if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || @@ -1799,13 +1795,15 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, goto err_fill; } + ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); + if (ret) + goto err_fill; + nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_fill: - rdma_counter_bind_qpn(device, port, qpn, cntn); -err_unbind: nlmsg_free(msg); err: ib_device_put(device); diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 1ab423b19f77..6eb6d2717ca5 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -426,7 +426,7 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) int ret; rdma_for_each_port (dev, i) { - is_ib = rdma_protocol_ib(dev, i++); + is_ib = rdma_protocol_ib(dev, i); if (is_ib) break; } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index f67a30fda1ed..163ff7ba92b7 
100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -451,8 +451,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp) * that the hardware will not attempt to access the MR any more. */ if (!umem_odp->is_implicit_odp) { + mutex_lock(&umem_odp->umem_mutex); ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp)); + mutex_unlock(&umem_odp->umem_mutex); kvfree(umem_odp->dma_list); kvfree(umem_odp->page_list); } @@ -719,6 +721,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, u64 addr; struct ib_device *dev = umem_odp->umem.ibdev; + lockdep_assert_held(&umem_odp->umem_mutex); + virt = max_t(u64, virt, ib_umem_start(umem_odp)); bound = min_t(u64, bound, ib_umem_end(umem_odp)); /* Note that during the run of this function, the @@ -726,7 +730,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, * faults from completion. We might be racing with other * invalidations, so we must make sure we free each page only * once. */ - mutex_lock(&umem_odp->umem_mutex); for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) { idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; if (umem_odp->page_list[idx]) { @@ -757,7 +760,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, umem_odp->npages--; } } - mutex_unlock(&umem_odp->umem_mutex); } EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 1e5aeb39f774..63f7f7db5902 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata, struct ib_uverbs_device { atomic_t refcount; - int num_comp_vectors; + u32 num_comp_vectors; struct completion comp; struct device dev; /* First group for device attributes, NULL terminated array */ diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index f974b6854224..35c2841a569e 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid, void *context) { struct find_gid_index_context *ctx = context; + u16 vlan_id = 0xffff; + int ret; if (ctx->gid_type != gid_attr->gid_type) return false; - if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) || - (is_vlan_dev(gid_attr->ndev) && - vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)) + ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL); + if (ret) return false; - return true; + return ctx->vlan_id == vlan_id; } static const struct ib_gid_attr * diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index e87fc0408470..347dc242fb88 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); release_ep_resources(ep); - kfree_skb(skb); return 0; } @@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); c4iw_put_ep(&ep->parent_ep->com); release_ep_resources(ep); - kfree_skb(skb); return 0; } @@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; pr_debug("ep %p tid %u\n", ep, ep->hwtid); - - skb_get(skb); - rpl = 
cplhdr(skb); - if (!is_t4(adapter_type)) { - skb_trim(skb, roundup(sizeof(*rpl5), 16)); - rpl5 = (void *)rpl; - INIT_TP_WR(rpl5, ep->hwtid); - } else { - skb_trim(skb, sizeof(*rpl)); - INIT_TP_WR(rpl, ep->hwtid); - } - OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, - ep->hwtid)); - cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, enable_tcp_timestamps && req->tcpopt.tstamp, (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); @@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); } + + skb_get(skb); + rpl = cplhdr(skb); + if (!is_t4(adapter_type)) { + skb_trim(skb, roundup(sizeof(*rpl5), 16)); + rpl5 = (void *)rpl; + INIT_TP_WR(rpl5, ep->hwtid); + } else { + skb_trim(skb, sizeof(*rpl)); + INIT_TP_WR(rpl, ep->hwtid); + } + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, + ep->hwtid)); + if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { u32 isn = (prandom_u32() & ~7UL) - 1; opt2 |= T5_OPT_2_VALID_F; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index a8b9548bd1a2..599340c1f0b8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -242,10 +242,13 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep, } } -static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd) +static int dump_qp(unsigned long id, struct c4iw_qp *qp, + struct c4iw_debugfs_data *qpd) { int space; int cc; + if (id != qp->wq.sq.qid) + return 0; space = qpd->bufsize - qpd->pos - 1; if (space == 0) @@ -350,7 +353,7 @@ static int qp_open(struct inode *inode, struct file *file) xa_lock_irq(&qpd->devp->qps); xa_for_each(&qpd->devp->qps, index, qp) - dump_qp(qp, qpd); + dump_qp(index, qp, qpd); xa_unlock_irq(&qpd->devp->qps); qpd->buf[qpd->pos++] = 0; diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index aa772ee0706f..35c284af574d 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp) { int err; - struct fw_ri_tpte tpt; + struct fw_ri_tpte *tpt; u32 stag_idx; static atomic_t key; if (c4iw_fatal_error(rdev)) return -EIO; + tpt = kmalloc(sizeof(*tpt), GFP_KERNEL); + if (!tpt) + return -ENOMEM; + stag_state = stag_state > 0; stag_idx = (*stag) >> 8; @@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, mutex_lock(&rdev->stats.lock); rdev->stats.stag.fail++; mutex_unlock(&rdev->stats.lock); + kfree(tpt); return -ENOMEM; } mutex_lock(&rdev->stats.lock); @@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, /* write TPT entry */ if (reset_tpt_entry) - memset(&tpt, 0, sizeof(tpt)); + memset(tpt, 0, sizeof(*tpt)); else { - tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | + tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) | FW_RI_TPTE_STAGSTATE_V(stag_state) | FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid)); - tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | + tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) | FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| FW_RI_TPTE_PS_V(page_size)); - tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32( + tpt->nosnoop_pbladdr = !pbl_size ? 
0 : cpu_to_be32( FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3)); - tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); - tpt.va_hi = cpu_to_be32((u32)(to >> 32)); - tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); - tpt.dca_mwbcnt_pstag = cpu_to_be32(0); - tpt.len_hi = cpu_to_be32((u32)(len >> 32)); + tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); + tpt->va_hi = cpu_to_be32((u32)(to >> 32)); + tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); + tpt->dca_mwbcnt_pstag = cpu_to_be32(0); + tpt->len_hi = cpu_to_be32((u32)(len >> 32)); } err = write_adapter_mem(rdev, stag_idx + (rdev->lldi.vr->stag.start >> 5), - sizeof(tpt), &tpt, skb, wr_waitp); + sizeof(*tpt), tpt, skb, wr_waitp); if (reset_tpt_entry) { c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); @@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, rdev->stats.stag.cur -= 32; mutex_unlock(&rdev->stats.lock); } + kfree(tpt); return err; } diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index eb9368be28c1..bbcac539777a 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2737,15 +2737,11 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs, if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6) srq->flags = T4_SRQ_LIMIT_SUPPORT; - ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL); - if (ret) - goto err_free_queue; - if (udata) { srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL); if (!srq_key_mm) { ret = -ENOMEM; - goto err_remove_handle; + goto err_free_queue; } srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL); if (!srq_db_key_mm) { @@ -2789,8 +2785,6 @@ err_free_srq_db_key_mm: kfree(srq_db_key_mm); err_free_srq_key_mm: kfree(srq_key_mm); -err_remove_handle: - xa_erase_irq(&rhp->qps, srq->wq.qid); err_free_queue: free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, srq->wr_waitp); @@ -2813,8 +2807,6 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) rhp = srq->rhp; pr_debug("%s id %d\n", __func__, srq->wq.qid); - - xa_erase_irq(&rhp->qps, srq->wq.qid); ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ibucontext); free_srq_queue(srq, ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx, diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 2395fd4233a7..c61b6022575e 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -65,6 +65,7 @@ #define SDMA_DESCQ_CNT 2048 #define SDMA_DESC_INTR 64 #define INVALID_TAIL 0xffff +#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32)) static uint sdma_descq_cnt = SDMA_DESCQ_CNT; module_param(sdma_descq_cnt, uint, S_IRUGO); @@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) struct sdma_engine *sde; if (dd->sdma_pad_dma) { - dma_free_coherent(&dd->pcidev->dev, 4, + dma_free_coherent(&dd->pcidev->dev, SDMA_PAD, (void *)dd->sdma_pad_dma, dd->sdma_pad_phys); dd->sdma_pad_dma = NULL; @@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) } /* Allocate memory for pad */ - dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32), + dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD, &dd->sdma_pad_phys, GFP_KERNEL); if (!dd->sdma_pad_dma) { dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); @@ -1526,8 +1527,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) } ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); - if (ret < 0) + if (ret < 0) { + kfree(tmp_sdma_rht); goto bail; + } + dd->sdma_rht = tmp_sdma_rht; dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index b4dcc4d29f84..f21fca3617d5 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, diff = cmp_psn(psn, flow->flow_state.r_next_psn); if (diff > 0) { - if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) - restart_tid_rdma_read_req(rcd, - qp, - wqe); - /* Drop the packet.*/ goto s_unlock; } else if (diff < 0) { diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 7bff0a1e713d..089e201d7550 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp, /* Length of buffer to create verbs txreq cache name */ #define TXREQ_NAME_LEN 24 -/* 16B trailing buffer */ -static const u8 trail_buf[MAX_16B_PADDING]; - static uint wss_threshold = 80; module_param(wss_threshold, uint, S_IRUGO); MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy"); @@ -820,8 +817,8 @@ static int build_verbs_tx_desc( /* add icrc, lt byte, and padding to flit */ if (extra_bytes) - ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq, - (void *)trail_buf, extra_bytes); + ret = sdma_txadd_daddr(sde->dd, &tx->txreq, + sde->dd->sdma_pad_phys, extra_bytes); bail_txadd: return ret; @@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, } /* add icrc, lt byte, and padding to flit */ if (extra_bytes) - seg_pio_copy_mid(pbuf, trail_buf, extra_bytes); + seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma, + extra_bytes); seg_pio_copy_end(pbuf); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 7a89d669f8bf..e82567fcdeb7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev, return; } - if (eq->buf_list) - dma_free_coherent(hr_dev->dev, buf_chk_sz, - 
eq->buf_list->buf, eq->buf_list->map); + dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf, + eq->buf_list->map); + kfree(eq->buf_list); } static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev, diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 8056930bbe2c..cd9ee1664a69 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2773,6 +2773,10 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev) return -ENOMEM; iwibdev = iwdev->iwibdev; rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group); + ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1); + if (ret) + goto error; + ret = ib_register_device(&iwibdev->ibdev, "i40iw%d"); if (ret) goto error; diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 59022b744144..d609f4659afb 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1298,29 +1298,6 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev, return 0; } -static void devx_free_indirect_mkey(struct rcu_head *rcu) -{ - kfree(container_of(rcu, struct devx_obj, devx_mr.rcu)); -} - -/* This function to delete from the radix tree needs to be called before - * destroying the underlying mkey. Otherwise a race might occur in case that - * other thread will get the same mkey before this one will be deleted, - * in that case it will fail via inserting to the tree its own data. - * - * Note: - * An error in the destroy is not expected unless there is some other indirect - * mkey which points to this one. In a kernel cleanup flow it will be just - * destroyed in the iterative destruction call. In a user flow, in case - * the application didn't close in the expected order it's its own problem, - * the mkey won't be part of the tree, in both cases the kernel is safe. - */ -static void devx_cleanup_mkey(struct devx_obj *obj) -{ - xa_erase(&obj->ib_dev->mdev->priv.mkey_table, - mlx5_base_mkey(obj->devx_mr.mmkey.key)); -} - static void devx_cleanup_subscription(struct mlx5_ib_dev *dev, struct devx_event_subscription *sub) { @@ -1362,8 +1339,16 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, int ret; dev = mlx5_udata_to_mdev(&attrs->driver_udata); - if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) - devx_cleanup_mkey(obj); + if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { + /* + * The pagefault_single_data_segment() path issues commands
 + * against the mmkey; we must wait for that to stop before
 + * freeing the mkey, as another allocation could get the same mkey #.
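The ordering this comment asks for, unpublish the key from the lookup structure first, wait out any readers, and only then free, is a generic publish/retire pattern. A minimal sketch with userspace RCU (liburcu) standing in for the kernel's SRCU; all names are illustrative:

#include <urcu.h>       /* liburcu, linked with -lurcu */
#include <stdlib.h>

struct mkey { unsigned int key; };
static struct mkey *published_mkey;  /* the lookup path dereferences this */

static void destroy_mkey(struct mkey *mk)
{
        /* 1. Unpublish: new lookups can no longer find the key. */
        rcu_assign_pointer(published_mkey, NULL);

        /* 2. Wait for readers that may still hold the old pointer
         *    (the page-fault path in the real driver). */
        synchronize_rcu();

        /* 3. Only now free it: a fresh allocation that happens to
         *    reuse the same key number cannot be confused with us. */
        free(mk);
}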
+ */ + xa_erase(&obj->ib_dev->mdev->priv.mkey_table, + mlx5_base_mkey(obj->devx_mr.mmkey.key)); + synchronize_srcu(&dev->mr_srcu); + } if (obj->flags & DEVX_OBJ_FLAGS_DCT) ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct); @@ -1382,12 +1367,6 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, devx_cleanup_subscription(dev, sub_entry); mutex_unlock(&devx_event_table->event_xa_lock); - if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { - call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu, - devx_free_indirect_mkey); - return ret; - } - kfree(obj); return ret; } @@ -1491,26 +1470,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( &obj_id); WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); - if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { - err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out); - if (err) - goto obj_destroy; - } - err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); if (err) - goto err_copy; + goto obj_destroy; if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT) obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type); - obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); + if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { + err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out); + if (err) + goto obj_destroy; + } return 0; -err_copy: - if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) - devx_cleanup_mkey(obj); obj_destroy: if (obj->flags & DEVX_OBJ_FLAGS_DCT) mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 2ceaef3ea3fb..1a98ee2e01c4 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -606,7 +606,7 @@ struct mlx5_ib_mr { struct mlx5_ib_dev *dev; u32 out[MLX5_ST_SZ_DW(create_mkey_out)]; struct mlx5_core_sig_ctx *sig; - int live; + unsigned int live; void *descs_alloc; int access_flags; /* Needed for rereg MR */ @@ -639,7 +639,6 @@ struct mlx5_ib_mw { struct mlx5_ib_devx_mr { struct mlx5_core_mkey mmkey; int ndescs; - struct rcu_head rcu; }; struct mlx5_ib_umr_context { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 1eff031ef048..7019c12005f4 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -84,32 +84,6 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); } -static void update_odp_mr(struct mlx5_ib_mr *mr) -{ - if (is_odp_mr(mr)) { - /* - * This barrier prevents the compiler from moving the - * setting of umem->odp_data->private to point to our - * MR, before reg_umr finished, to ensure that the MR - * initialization have finished before starting to - * handle invalidations. - */ - smp_wmb(); - to_ib_umem_odp(mr->umem)->private = mr; - /* - * Make sure we will see the new - * umem->odp_data->private value in the invalidation - * routines, before we can get page faults on the - * MR. Page faults can happen once we put the MR in - * the tree, below this line. Without the barrier, - * there can be a fault handling and an invalidation - * before umem->odp_data->private == mr is visible to - * the invalidation handler. 
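The barrier commentary being deleted here is superseded by the smp_store_release(&mr->live, 1) / smp_load_acquire(&mr->live) pairing that the mr.c and odp.c hunks below introduce. The shape of that pairing, as a self-contained C11 sketch with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

struct mr {
        int pages_mapped;       /* ordinary fields, initialised first */
        atomic_bool live;       /* publication flag, stored last      */
};

static void publish_mr(struct mr *mr)
{
        mr->pages_mapped = 1;   /* ...all other initialisation... */
        atomic_store_explicit(&mr->live, true, memory_order_release);
}

static bool fault_path_may_use(struct mr *mr)
{
        /* pairs with the release store: observing live == true
         * guarantees the earlier initialisation is visible too */
        return atomic_load_explicit(&mr->live, memory_order_acquire);
}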
- */ - smp_wmb(); - } -} - static void reg_mr_callback(int status, struct mlx5_async_work *context) { struct mlx5_ib_mr *mr = @@ -1346,8 +1320,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->umem = umem; set_mr_fields(dev, mr, npages, length, access_flags); - update_odp_mr(mr); - if (use_umr) { int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; @@ -1363,10 +1335,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, } } - if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { - mr->live = 1; + if (is_odp_mr(mr)) { + to_ib_umem_odp(mr->umem)->private = mr; atomic_set(&mr->num_pending_prefetch, 0); } + if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) + smp_store_release(&mr->live, 1); return &mr->ibmr; error: @@ -1441,6 +1415,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (!mr->umem) return -EINVAL; + if (is_odp_mr(mr)) + return -EOPNOTSUPP; + if (flags & IB_MR_REREG_TRANS) { addr = virt_addr; len = length; @@ -1486,8 +1463,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, } mr->allocated_from_cache = 0; - if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) - mr->live = 1; } else { /* * Send a UMR WQE @@ -1516,7 +1491,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, set_mr_fields(dev, mr, npages, len, access_flags); - update_odp_mr(mr); return 0; err: @@ -1607,15 +1581,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) /* Prevent new page faults and * prefetch requests from succeeding */ - mr->live = 0; + WRITE_ONCE(mr->live, 0); + + /* Wait for all running page-fault handlers to finish. */ + synchronize_srcu(&dev->mr_srcu); /* dequeue pending prefetch requests for the mr */ if (atomic_read(&mr->num_pending_prefetch)) flush_workqueue(system_unbound_wq); WARN_ON(atomic_read(&mr->num_pending_prefetch)); - /* Wait for all running page-fault handlers to finish. */ - synchronize_srcu(&dev->mr_srcu); /* Destroy all page mappings */ if (!umem_odp->is_implicit_odp) mlx5_ib_invalidate_range(umem_odp, @@ -1987,14 +1962,25 @@ free: int mlx5_ib_dealloc_mw(struct ib_mw *mw) { + struct mlx5_ib_dev *dev = to_mdev(mw->device); struct mlx5_ib_mw *mmw = to_mmw(mw); int err; - err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev, - &mmw->mmkey); - if (!err) - kfree(mmw); - return err; + if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { + xa_erase_irq(&dev->mdev->priv.mkey_table, + mlx5_base_mkey(mmw->mmkey.key)); + /* + * pagefault_single_data_segment() may be accessing mmw under + * SRCU if the user bound an ODP MR to this MW. + */ + synchronize_srcu(&dev->mr_srcu); + } + + err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); + if (err) + return err; + kfree(mmw); + return 0; } int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 2e9b43061797..3f9478d19376 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -178,6 +178,29 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset, return; } + /* + * The locking here is pretty subtle. Ideally the implicit children + * list would be protected by the umem_mutex, however that is not + * possible. 
Instead this uses a weaker update-then-lock pattern: + * + * srcu_read_lock() + * <change children list> + * mutex_lock(umem_mutex) + * mlx5_ib_update_xlt() + * mutex_unlock(umem_mutex) + * destroy lkey + * + * i.e. any change to the children list must be followed by the locked + * update_xlt before destroying. + * + * The umem_mutex provides the acquire/release semantic needed to make + * the children list visible to a racing thread. While SRCU is not + * technically required, using it gives consistent use of the SRCU + * locking around the children list. + */ + lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex); + lockdep_assert_held(&mr->dev->mr_srcu); + odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE, nentries * MLX5_IMR_MTT_SIZE, mr); @@ -202,15 +225,22 @@ static void mr_leaf_free_action(struct work_struct *work) struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work); int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT; struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent; + struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem); + int srcu_key; mr->parent = NULL; synchronize_srcu(&mr->dev->mr_srcu); - ib_umem_odp_release(odp); - if (imr->live) + if (smp_load_acquire(&imr->live)) { + srcu_key = srcu_read_lock(&mr->dev->mr_srcu); + mutex_lock(&odp_imr->umem_mutex); mlx5_ib_update_xlt(imr, idx, 1, 0, MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC); + mutex_unlock(&odp_imr->umem_mutex); + srcu_read_unlock(&mr->dev->mr_srcu, srcu_key); + } + ib_umem_odp_release(odp); mlx5_mr_cache_free(mr->dev, mr); if (atomic_dec_and_test(&imr->num_leaf_free)) @@ -278,7 +308,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); - mutex_unlock(&umem_odp->umem_mutex); /* * We are now sure that the device will not access the * memory.
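A second pattern worth isolating from these odp.c hunks: the dying check-and-set now happens entirely under umem_mutex, so two racing invalidations cannot both queue the leaf-free work. A minimal check-then-act sketch, with a pthread mutex standing in for umem_mutex and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>

struct leaf {
        pthread_mutex_t lock;   /* stand-in for umem_mutex */
        bool dying;
};

static void queue_leaf_free(struct leaf *l)
{
        /* stand-in for schedule_work(); must run exactly once */
        (void)l;
}

static void invalidate_leaf(struct leaf *l)
{
        pthread_mutex_lock(&l->lock);
        /* the check and the set are atomic only while locked */
        if (!l->dying) {
                l->dying = true;
                queue_leaf_free(l);     /* exactly one racer gets here */
        }
        pthread_mutex_unlock(&l->lock);
}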
We can safely unmap it, and mark it as dirty if @@ -289,10 +318,12 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, if (unlikely(!umem_odp->npages && mr->parent && !umem_odp->dying)) { - WRITE_ONCE(umem_odp->dying, 1); + WRITE_ONCE(mr->live, 0); + umem_odp->dying = 1; atomic_inc(&mr->parent->num_leaf_free); schedule_work(&umem_odp->work); } + mutex_unlock(&umem_odp->umem_mutex); } void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) @@ -429,8 +460,6 @@ static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd, mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; - mr->live = 1; - mlx5_ib_dbg(dev, "key %x dev %p mr %p\n", mr->mmkey.key, dev->mdev, mr); @@ -484,6 +513,8 @@ next_mr: mtt->parent = mr; INIT_WORK(&odp->work, mr_leaf_free_action); + smp_store_release(&mtt->live, 1); + if (!nentries) start_idx = addr >> MLX5_IMR_MTT_SHIFT; nentries++; @@ -536,6 +567,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, init_waitqueue_head(&imr->q_leaf_free); atomic_set(&imr->num_leaf_free, 0); atomic_set(&imr->num_pending_prefetch, 0); + smp_store_release(&imr->live, 1); return imr; } @@ -555,15 +587,19 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) if (mr->parent != imr) continue; + mutex_lock(&umem_odp->umem_mutex); ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp)); - if (umem_odp->dying) + if (umem_odp->dying) { + mutex_unlock(&umem_odp->umem_mutex); continue; + } - WRITE_ONCE(umem_odp->dying, 1); + umem_odp->dying = 1; atomic_inc(&imr->num_leaf_free); schedule_work(&umem_odp->work); + mutex_unlock(&umem_odp->umem_mutex); } up_read(&per_mm->umem_rwsem); @@ -773,7 +809,7 @@ next_mr: switch (mmkey->type) { case MLX5_MKEY_MR: mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); - if (!mr->live || !mr->ibmr.pd) { + if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) { mlx5_ib_dbg(dev, "got dead MR\n"); ret = -EFAULT; goto srcu_unlock; @@ -1641,12 +1677,12 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd, mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); - if (mr->ibmr.pd != pd) { + if (!smp_load_acquire(&mr->live)) { ret = false; break; } - if (!mr->live) { + if (mr->ibmr.pd != pd) { ret = false; break; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 8937d72ddcf6..5fd071c05944 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq( } /* Only remove the old rate after new rate was set */ - if ((old_rl.rate && - !mlx5_rl_are_equal(&old_rl, &new_rl)) || - (new_state != MLX5_SQC_STATE_RDY)) + if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) || + (new_state != MLX5_SQC_STATE_RDY)) { mlx5_rl_remove_rate(dev, &old_rl); + if (new_state != MLX5_SQC_STATE_RDY) + memset(&new_rl, 0, sizeof(new_rl)); + } ibqp->rl = new_rl; sq->state = new_state; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 5136b835e1ba..dc71b6e16a07 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str) struct qedr_dev *qedr = get_qedr_dev(ibdev); u32 fw_ver = (u32)qedr->attr.fw_ver; - snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. 
%d", + snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d", (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF, (fw_ver >> 8) & 0xFF, fw_ver & 0xFF); } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 6cac0c88cf39..36cdfbdbd325 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c @@ -230,8 +230,6 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq) pvrdma_page_dir_cleanup(dev, &srq->pdir); - kfree(srq); - atomic_dec(&dev->num_srqs); } diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 430314c8abd9..b4317480cee7 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -182,12 +182,19 @@ void siw_qp_llp_close(struct siw_qp *qp) */ void siw_qp_llp_write_space(struct sock *sk) { - struct siw_cep *cep = sk_to_cep(sk); + struct siw_cep *cep; - cep->sk_write_space(sk); + read_lock(&sk->sk_callback_lock); + + cep = sk_to_cep(sk); + if (cep) { + cep->sk_write_space(sk); - if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) - (void)siw_sq_start(cep->qp); + if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) + (void)siw_sq_start(cep->qp); + } + + read_unlock(&sk->sk_callback_lock); } static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size) @@ -1305,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp) void siw_free_qp(struct kref *ref) { struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref); + struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp); struct siw_device *sdev = qp->sdev; unsigned long flags; @@ -1327,4 +1335,5 @@ void siw_free_qp(struct kref *ref) atomic_dec(&sdev->num_qp); siw_dbg_qp(qp, "free QP\n"); kfree_rcu(qp, rcu); + kfree(siw_base_qp); } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 869e02b69a01..b18a677832e1 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -604,7 +604,6 @@ out: int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata) { struct siw_qp *qp = to_siw_qp(base_qp); - struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp); struct siw_ucontext *uctx = rdma_udata_to_drv_context(udata, struct siw_ucontext, base_ucontext); @@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata) qp->scq = qp->rcq = NULL; siw_qp_put(qp); - kfree(siw_base_qp); return 0; } diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c index dace8577fa43..79851923ee57 100644 --- a/drivers/input/misc/da9063_onkey.c +++ b/drivers/input/misc/da9063_onkey.c @@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev) onkey->input->phys = onkey->phys; onkey->input->dev.parent = &pdev->dev; - if (onkey->key_power) - input_set_capability(onkey->input, EV_KEY, KEY_POWER); - - input_set_capability(onkey->input, EV_KEY, KEY_SLEEP); + input_set_capability(onkey->input, EV_KEY, KEY_POWER); INIT_DELAYED_WORK(&onkey->work, da9063_poll_on); diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 97e3639e99d0..08520b3a18b8 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -92,11 +92,18 @@ soc_button_device_create(struct platform_device *pdev, continue; gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index); - if (gpio < 0 && gpio != -ENOENT) { - error = gpio; - goto 
err_free_mem; - } else if (!gpio_is_valid(gpio)) { - /* Skip GPIO if not present */ + if (!gpio_is_valid(gpio)) { + /* + * Skip GPIO if not present. Note we deliberately + * ignore -EPROBE_DEFER errors here. On some devices + * Intel is using so-called virtual GPIOs which are not + * GPIOs at all but some way for AML code to check some + * random status bits without needing a custom opregion. + * In some cases the resources table we parse points to + * such a virtual GPIO; since these are not real GPIOs + * we do not have a driver for them and they will never + * show up, so we ignore -EPROBE_DEFER. + */ continue; } diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 04fe43440a3c..2d8434b7b623 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse, leave_breadcrumbs); } -static bool elantech_use_host_notify(struct psmouse *psmouse, - struct elantech_device_info *info) -{ - if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version)) - return true; - - switch (info->bus) { - case ETP_BUS_PS2_ONLY: - /* expected case */ - break; - case ETP_BUS_SMB_HST_NTFY_ONLY: - case ETP_BUS_PS2_SMB_HST_NTFY: - /* SMbus implementation is stable since 2018 */ - if (dmi_get_bios_year() >= 2018) - return true; - /* fall through */ - default: - psmouse_dbg(psmouse, - "Ignoring SMBus bus provider %d\n", info->bus); - break; - } - - return false; -} - /** * elantech_setup_smbus - called once the PS/2 devices are enumerated * and decides to instantiate a SMBus InterTouch device. @@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse, * i2c_blacklist_pnp_ids. * Old ICs are up to the user to decide. */ - if (!elantech_use_host_notify(psmouse, info) || + if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) || psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids)) return -ENXIO; } @@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse, return 0; } +static bool elantech_use_host_notify(struct psmouse *psmouse, + struct elantech_device_info *info) +{ + if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version)) + return true; + + switch (info->bus) { + case ETP_BUS_PS2_ONLY: + /* expected case */ + break; + case ETP_BUS_SMB_ALERT_ONLY: + /* fall-through */ + case ETP_BUS_PS2_SMB_ALERT: + psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n"); + break; + case ETP_BUS_SMB_HST_NTFY_ONLY: + /* fall-through */ + case ETP_BUS_PS2_SMB_HST_NTFY: + return true; + default: + psmouse_dbg(psmouse, + "Ignoring SMBus bus provider %d.\n", + info->bus); + } + + return false; +} + int elantech_init_smbus(struct psmouse *psmouse) { struct elantech_device_info info; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 772493b1f665..190b9974526b 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) } mutex_lock(&data->irq_mutex); - bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask, + bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits, data->irq_count); /* * At this point, irq_status has all bits that are set in the @@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev, bitmap_copy(data->current_irq_mask, data->new_irq_mask, data->num_of_irq_regs); + bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count); + error_unlock:
mutex_unlock(&data->irq_mutex); return error; @@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev, struct device *dev = &rmi_dev->dev; mutex_lock(&data->irq_mutex); + bitmap_andnot(data->fn_irq_bits, + data->fn_irq_bits, mask, data->irq_count); bitmap_andnot(data->new_irq_mask, data->current_irq_mask, mask, data->irq_count); diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 5178ea8b5f30..fb43aa708660 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -53,6 +53,7 @@ struct goodix_ts_data { const char *cfg_name; struct completion firmware_loading_complete; unsigned long irq_flags; + unsigned int contact_size; }; #define GOODIX_GPIO_INT_NAME "irq" @@ -62,6 +63,7 @@ struct goodix_ts_data { #define GOODIX_MAX_WIDTH 4096 #define GOODIX_INT_TRIGGER 1 #define GOODIX_CONTACT_SIZE 8 +#define GOODIX_MAX_CONTACT_SIZE 9 #define GOODIX_MAX_CONTACTS 10 #define GOODIX_CONFIG_MAX_LENGTH 240 @@ -144,6 +146,19 @@ static const struct dmi_system_id rotated_screen[] = { {} }; +static const struct dmi_system_id nine_bytes_report[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "Lenovo YogaBook", + /* YB1-X91L/F and YB1-X90L/F */ + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9") + } + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. * @@ -249,7 +264,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT); do { error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, - data, GOODIX_CONTACT_SIZE + 1); + data, ts->contact_size + 1); if (error) { dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); @@ -262,12 +277,12 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) return -EPROTO; if (touch_num > 1) { - data += 1 + GOODIX_CONTACT_SIZE; + data += 1 + ts->contact_size; error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR + - 1 + GOODIX_CONTACT_SIZE, + 1 + ts->contact_size, data, - GOODIX_CONTACT_SIZE * + ts->contact_size * (touch_num - 1)); if (error) return error; @@ -286,7 +301,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) return 0; } -static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) +static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data) { int id = coor_data[0] & 0x0F; int input_x = get_unaligned_le16(&coor_data[1]); @@ -301,6 +316,21 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w); } +static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data) +{ + int id = coor_data[1] & 0x0F; + int input_x = get_unaligned_le16(&coor_data[3]); + int input_y = get_unaligned_le16(&coor_data[5]); + int input_w = get_unaligned_le16(&coor_data[7]); + + input_mt_slot(ts->input_dev, id); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); + touchscreen_report_pos(ts->input_dev, &ts->prop, + input_x, input_y, true); + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w); +} + /** * goodix_process_events - Process incoming events * @@ -311,7 +341,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) */ static void goodix_process_events(struct goodix_ts_data *ts) { - u8 point_data[1 + 
GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS]; + u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS]; int touch_num; int i; @@ -326,8 +356,12 @@ static void goodix_process_events(struct goodix_ts_data *ts) input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4)); for (i = 0; i < touch_num; i++) - goodix_ts_report_touch(ts, - &point_data[1 + GOODIX_CONTACT_SIZE * i]); + if (ts->contact_size == 9) + goodix_ts_report_touch_9b(ts, + &point_data[1 + ts->contact_size * i]); + else + goodix_ts_report_touch_8b(ts, + &point_data[1 + ts->contact_size * i]); input_mt_sync_frame(ts->input_dev); input_sync(ts->input_dev); @@ -730,6 +764,13 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) "Applying '180 degrees rotated screen' quirk\n"); } + if (dmi_check_system(nine_bytes_report)) { + ts->contact_size = 9; + + dev_dbg(&ts->client->dev, + "Non-standard 9-bytes report format quirk\n"); + } + error = input_mt_init_slots(ts->input_dev, ts->max_touch_num, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { @@ -810,6 +851,7 @@ static int goodix_ts_probe(struct i2c_client *client, ts->client = client; i2c_set_clientdata(client, ts); init_completion(&ts->firmware_loading_complete); + ts->contact_size = GOODIX_CONTACT_SIZE; error = goodix_get_gpio_config(ts); if (error) diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index 34923399ece4..1139714e72e2 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts) for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) { finger[i].is_valid = buf[i + y] >> 7; if (finger[i].is_valid) { - finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1]; - finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2]; + finger[i].x = ((buf[i + y] & 0x0070) << 4) | + buf[i + y + 1]; + finger[i].y = ((buf[i + y] & 0x0007) << 8) | + buf[i + y + 2]; /* st1232 includes a z-axis / touch strength */ if (ts->chip_info->have_z) diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 7b971228df38..c498796adc07 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -405,8 +405,12 @@ void icc_set_tag(struct icc_path *path, u32 tag) if (!path) return; + mutex_lock(&icc_lock); + for (i = 0; i < path->num_nodes; i++) path->reqs[i].tag = tag; + + mutex_unlock(&icc_lock); } EXPORT_SYMBOL_GPL(icc_set_tag); diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index 910081d6ddc0..b4966d8f3348 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -433,7 +433,8 @@ static int qnoc_probe(struct platform_device *pdev) if (!qp) return -ENOMEM; - data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL); + data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), + GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c index 57955596bb59..502a6c22b41e 100644 --- a/drivers/interconnect/qcom/sdm845.c +++ b/drivers/interconnect/qcom/sdm845.c @@ -790,7 +790,8 @@ static int qnoc_probe(struct platform_device *pdev) if (!qp) return -ENOMEM; - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); + data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), + GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 2369b8af81f3..dd555078258c 100644 --- 
a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) retry: type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; - pasid = PPR_PASID(*(u64 *)&event[0]); + pasid = (event[0] & EVENT_DOMID_MASK_HI) | + (event[1] & EVENT_DOMID_MASK_LO); flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; address = (u64)(((u64)event[3]) << 32) | event[2]; @@ -616,7 +617,7 @@ retry: address, flags); break; case EVENT_TYPE_PAGE_TAB_ERR: - dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n", + dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n", PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; @@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain) * to 64 bits. */ static bool increase_address_space(struct protection_domain *domain, + unsigned long address, gfp_t gfp) { unsigned long flags; @@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain, spin_lock_irqsave(&domain->lock, flags); - if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) - /* address space already 64 bit large */ + if (address <= PM_LEVEL_SIZE(domain->mode) || + WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) goto out; pte = (void *)get_zeroed_page(gfp); @@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain, BUG_ON(!is_power_of_2(page_size)); while (address > PM_LEVEL_SIZE(domain->mode)) - *updated = increase_address_space(domain, gfp) || *updated; + *updated = increase_address_space(domain, address, gfp) || *updated; level = domain->mode - 1; pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c index c235f79b7a20..5120ce4fdce3 100644 --- a/drivers/iommu/amd_iommu_quirks.c +++ b/drivers/iommu/amd_iommu_quirks.c @@ -74,6 +74,19 @@ static const struct dmi_system_id ivrs_quirks[] __initconst = { .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], }, { + /* + * Acer Aspire A315-41 requires the very same workaround as + * Dell Latitude 5495 + */ + .callback = ivrs_ioapic_quirk_cb, + .ident = "Acer Aspire A315-41", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { .callback = ivrs_ioapic_quirk_cb, .ident = "Lenovo ideapad 330S-15ARR", .matches = { diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index c9c1612d52e0..17bd5a349119 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -130,8 +130,8 @@ #define EVENT_TYPE_INV_PPR_REQ 0x9 #define EVENT_DEVID_MASK 0xffff #define EVENT_DEVID_SHIFT 0 -#define EVENT_DOMID_MASK 0xffff -#define EVENT_DOMID_SHIFT 0 +#define EVENT_DOMID_MASK_LO 0xffff +#define EVENT_DOMID_MASK_HI 0xf0000 #define EVENT_FLAGS_MASK 0xfff #define EVENT_FLAGS_SHIFT 0x10 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b18aac4c105e..7c503a6bc585 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, return 0; out_clear_smmu: + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); smmu_domain->smmu = NULL; out_unlock: 
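/*
 * Editor's aside, not part of the patch above: the one-line arm-smmu fix
 * restores the usual "undo in reverse order" unwind — a context-bank index
 * taken from a bitmap must be returned to it on any later failure, or it
 * leaks on every failed attach. A self-contained sketch of that shape,
 * using the generic bitmap-region API; every "demo_" name is hypothetical.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>

struct demo_smmu {
	unsigned long *context_map;
	unsigned int num_context_banks;
};

/* Hypothetical hardware setup; a real driver would program registers here. */
static int demo_configure_hw(struct demo_smmu *smmu, int idx)
{
	return 0;
}

static int demo_init_context(struct demo_smmu *smmu)
{
	int idx, ret;

	idx = bitmap_find_free_region(smmu->context_map,
				      smmu->num_context_banks, 0);
	if (idx < 0)
		return idx;

	ret = demo_configure_hw(smmu, idx);
	if (ret)
		goto out_release;

	return idx;

out_release:
	/* Without this, the context bank would stay marked busy forever. */
	bitmap_release_region(smmu->context_map, idx, 0);
	return ret;
}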
mutex_unlock(&smmu_domain->init_mutex); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 3f974919d3bd..6db6d969e31c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2794,7 +2794,7 @@ static int identity_mapping(struct device *dev) struct device_domain_info *info; info = dev->archdata.iommu; - if (info && info != DUMMY_DEVICE_DOMAIN_INFO) + if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO) return (info->domain == si_domain); return 0; @@ -3471,7 +3471,7 @@ static bool iommu_need_mapping(struct device *dev) if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask) dma_mask = dev->coherent_dma_mask; - if (dma_mask >= dma_get_required_mask(dev)) + if (dma_mask >= dma_direct_get_required_mask(dev)) return false; /* @@ -3775,6 +3775,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele return nelems; } +static u64 intel_get_required_mask(struct device *dev) +{ + if (!iommu_need_mapping(dev)) + return dma_direct_get_required_mask(dev); + return DMA_BIT_MASK(32); +} + static const struct dma_map_ops intel_dma_ops = { .alloc = intel_alloc_coherent, .free = intel_free_coherent, @@ -3787,6 +3794,7 @@ static const struct dma_map_ops intel_dma_ops = { .dma_supported = dma_direct_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, + .get_required_mask = intel_get_required_mask, }; static void diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 4c91359057c5..ca51036aa53c 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -166,6 +166,9 @@ #define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2) #define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4) +#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL +#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL + /* IOPTE accessors */ #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) @@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) static struct io_pgtable * arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) { - struct io_pgtable *iop; + struct arm_lpae_io_pgtable *data; - if (cfg->ias != 48 || cfg->oas > 40) + /* No quirks for Mali (hopefully) */ + if (cfg->quirks) + return NULL; + + if (cfg->ias > 48 || cfg->oas > 40) return NULL; cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); - iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); - if (iop) { - u64 mair, ttbr; - /* Copy values as union fields overlap */ - mair = cfg->arm_lpae_s1_cfg.mair[0]; - ttbr = cfg->arm_lpae_s1_cfg.ttbr[0]; + data = arm_lpae_alloc_pgtable(cfg); + if (!data) + return NULL; - cfg->arm_mali_lpae_cfg.memattr = mair; - cfg->arm_mali_lpae_cfg.transtab = ttbr | - ARM_MALI_LPAE_TTBR_READ_INNER | - ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; + /* Mali seems to need a full 4-level table regardless of IAS */ + if (data->levels < ARM_LPAE_MAX_LEVELS) { + data->levels = ARM_LPAE_MAX_LEVELS; + data->pgd_size = sizeof(arm_lpae_iopte); } + /* + * MEMATTR: Mali has no actual notion of a non-cacheable type, so the + * best we can do is mimic the out-of-tree driver and hope that the + * "implementation-defined caching policy" is good enough. Similarly, + * we'll use it for the sake of a valid attribute for our 'device' + * index, although callers should never request that in practice. 
+ */ + cfg->arm_mali_lpae_cfg.memattr = + (ARM_MALI_LPAE_MEMATTR_IMP_DEF + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | + (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | + (ARM_MALI_LPAE_MEMATTR_IMP_DEF + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); - return iop; + data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); + if (!data->pgd) + goto out_free_data; + + /* Ensure the empty pgd is visible before TRANSTAB can be written */ + wmb(); + + cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | + ARM_MALI_LPAE_TTBR_READ_INNER | + ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; + return &data->iop; + +out_free_data: + kfree(data); + return NULL; } struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9da8309f7170..2639fc718117 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev) mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); - irq = platform_get_irq(pdev, 0); - /* * Determine if this IPMMU instance is a root device by checking for * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property. @@ -1106,10 +1104,9 @@ static int ipmmu_probe(struct platform_device *pdev) /* Root devices have mandatory IRQs */ if (ipmmu_is_root(mmu)) { - if (irq < 0) { - dev_err(&pdev->dev, "no IRQ found\n"); + irq = platform_get_irq(pdev, 0); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, dev_name(&pdev->dev), mmu); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 26290f310f90..4dcbf68dfda4 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -100,6 +100,7 @@ struct rk_iommu { struct device *dev; void __iomem **bases; int num_mmu; + int num_irq; struct clk_bulk_data *clocks; int num_clocks; bool reset_disabled; @@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev) struct rk_iommu *iommu; struct resource *res; int num_res = pdev->num_resources; - int err, i, irq; + int err, i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); if (!iommu) @@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev) if (iommu->num_mmu == 0) return PTR_ERR(iommu->bases[0]); + iommu->num_irq = platform_irq_count(pdev); + if (iommu->num_irq < 0) + return iommu->num_irq; + iommu->reset_disabled = device_property_read_bool(dev, "rockchip,disable-mmu-reset"); @@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev) pm_runtime_enable(dev); - i = 0; - while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) { + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); + if (irq < 0) return irq; @@ -1245,10 +1251,13 @@ err_unprepare_clocks: static void rk_iommu_shutdown(struct platform_device *pdev) { struct rk_iommu *iommu = platform_get_drvdata(pdev); - int i = 0, irq; + int i; + + for (i = 0; i < iommu->num_irq; i++) { + int irq = platform_get_irq(pdev, i); - while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) devm_free_irq(iommu->dev, irq, iommu); + } pm_runtime_force_suspend(&pdev->dev); } diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c index 1a57cee3efab..0b0a73739756 100644 --- a/drivers/irqchip/irq-al-fic.c +++ b/drivers/irqchip/irq-al-fic.c @@ -15,6 +15,7 @@ /* FIC Registers */ #define AL_FIC_CAUSE 0x00 +#define AL_FIC_SET_CAUSE 
0x08 #define AL_FIC_MASK 0x10 #define AL_FIC_CONTROL 0x28 @@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc) chained_irq_exit(irqchip, desc); } +static int al_fic_irq_retrigger(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct al_fic *fic = gc->private; + + writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE); + + return 1; +} + static int al_fic_register(struct device_node *node, struct al_fic *fic) { @@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node, gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit; gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit; gc->chip_types->chip.irq_set_type = al_fic_irq_set_type; + gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger; gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE; gc->private = fic; diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 6acad2ea0fb3..29333497ba10 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void) static const struct of_device_id aic5_irq_fixups[] __initconst = { { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup }, { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup }, + { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup }, { /* sentinel */ }, }; @@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node, return aic5_of_init(node, parent, NR_SAMA5D4_IRQS); } IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init); + +#define NR_SAM9X60_IRQS 50 + +static int __init sam9x60_aic5_of_init(struct device_node *node, + struct device_node *parent) +{ + return aic5_of_init(node, parent, NR_SAM9X60_IRQS); +} +IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 62e54f1a248b..787e8eec9a7f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -175,6 +175,22 @@ static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (vm->vlpi_count[its->list_nr]) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + static struct its_collection *dev_event_to_col(struct its_device *its_dev, u32 event) { @@ -976,17 +992,15 @@ static void its_send_vmapp(struct its_node *its, static void its_send_vmovp(struct its_vpe *vpe) { - struct its_cmd_desc desc; + struct its_cmd_desc desc = {}; struct its_node *its; unsigned long flags; int col_id = vpe->col_idx; desc.its_vmovp_cmd.vpe = vpe; - desc.its_vmovp_cmd.its_list = (u16)its_list_map; if (!its_list_map) { its = list_first_entry(&its_nodes, struct its_node, entry); - desc.its_vmovp_cmd.seq_num = 0; desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); return; @@ -1003,6 +1017,7 @@ static void its_send_vmovp(struct its_vpe *vpe) raw_spin_lock_irqsave(&vmovp_lock, flags); desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); /* Emit VMOVPs */ list_for_each_entry(its, &its_nodes, entry) { diff --git 
a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 422664ac5f53..1edc99335a94 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) -#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) +#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) /* diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index c72c036aea76..7d0a12fe2714 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask, } } -static void plic_irq_enable(struct irq_data *d) +static void plic_irq_unmask(struct irq_data *d) { unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d), cpu_online_mask); @@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d) plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); } -static void plic_irq_disable(struct irq_data *d) +static void plic_irq_mask(struct irq_data *d) { plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); } @@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - if (!irqd_irq_disabled(d)) { - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); - } + plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); + plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d, } #endif +static void plic_irq_eoi(struct irq_data *d) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); +} + static struct irq_chip plic_chip = { .name = "SiFive PLIC", - /* - * There is no need to mask/unmask PLIC interrupts. They are "masked" - * by reading claim and "unmasked" when writing it back. 
- */ - .irq_enable = plic_irq_enable, - .irq_disable = plic_irq_disable, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, + .irq_eoi = plic_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = plic_set_affinity, #endif @@ -152,7 +154,7 @@ static struct irq_chip plic_chip = { static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq); + irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq); irq_set_chip_data(irq, NULL); irq_set_noprobe(irq); return 0; @@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs) hwirq); else generic_handle_irq(irq); - writel(hwirq, claim); } csr_set(sie, SIE_SEIE); } @@ -251,8 +252,8 @@ static int __init plic_init(struct device_node *node, continue; } - /* skip context holes */ - if (parent.args[0] == -1) + /* skip contexts other than supervisor external interrupt */ + if (parent.args[0] != IRQ_S_EXT) continue; hartid = plic_find_hart_id(parent.np); diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index c92b405b7646..ba8619524231 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait) poll_wait(file, &(cdev->recvwait), wait); mask = EPOLLOUT | EPOLLWRNORM; - if (!skb_queue_empty(&cdev->recvqueue)) + if (!skb_queue_empty_lockless(&cdev->recvqueue)) mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c index 705c6200814b..7b726f00f183 100644 --- a/drivers/macintosh/windfarm_cpufreq_clamp.c +++ b/drivers/macintosh/windfarm_cpufreq_clamp.c @@ -18,7 +18,7 @@ static int clamped; static struct wf_control *clamp_control; -static struct dev_pm_qos_request qos_req; +static struct freq_qos_request qos_req; static unsigned int min_freq, max_freq; static int clamp_set(struct wf_control *ct, s32 value) @@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value) } clamped = value; - return dev_pm_qos_update_request(&qos_req, freq); + return freq_qos_update_request(&qos_req, freq); } static int clamp_get(struct wf_control *ct, s32 *value) @@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void) min_freq = policy->cpuinfo.min_freq; max_freq = policy->cpuinfo.max_freq; + + ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX, + max_freq); + cpufreq_cpu_put(policy); + if (ret < 0) { + pr_err("%s: Failed to add freq constraint (%d)\n", __func__, + ret); + return ret; + } + dev = get_cpu_device(0); if (unlikely(!dev)) { pr_warn("%s: No cpu device for cpu0\n", __func__); - return -ENODEV; + ret = -ENODEV; + goto fail; } clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); - if (clamp == NULL) - return -ENOMEM; - - ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY, - max_freq); - if (ret < 0) { - pr_err("%s: Failed to add freq constraint (%d)\n", __func__, - ret); - goto free; + if (clamp == NULL) { + ret = -ENOMEM; + goto fail; } clamp->ops = &clamp_ops; clamp->name = "cpufreq-clamp"; ret = wf_register_control(clamp); if (ret) - goto fail; + goto free; + clamp_control = clamp; return 0; - fail: - dev_pm_qos_remove_request(&qos_req); free: kfree(clamp); + fail: + freq_qos_remove_request(&qos_req); return ret; } @@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void) { if (clamp_control) { wf_unregister_control(clamp_control); - dev_pm_qos_remove_request(&qos_req); + freq_qos_remove_request(&qos_req); } 
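/*
 * Editor's aside, not part of the patch above: the windfarm conversion is
 * a straight dev_pm_qos -> freq_qos migration. The lifecycle is: add a
 * FREQ_QOS_MAX request against the policy's constraints, update it to
 * clamp or unclamp, and remove it on teardown. A condensed sketch of that
 * lifecycle; the "demo_" names are hypothetical.
 */
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct freq_qos_request demo_qos_req;

static int demo_clamp_init(void)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
	int ret;

	if (!policy)
		return -ENODEV;

	/* Start unclamped: cap at the hardware maximum frequency. */
	ret = freq_qos_add_request(&policy->constraints, &demo_qos_req,
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	cpufreq_cpu_put(policy);

	return ret < 0 ? ret : 0;
}

static int demo_clamp_set(unsigned int freq_khz)
{
	/* Tighten or relax the cap; the cpufreq core re-evaluates the policy. */
	return freq_qos_update_request(&demo_qos_req, freq_khz);
}

static void demo_clamp_exit(void)
{
	freq_qos_remove_request(&demo_qos_req);
}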
} diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index d249cf8ac277..8346e6d1816c 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache) static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache) { - return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT); + return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO); } static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell) @@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache) { struct dm_cache_migration *mg; - mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT); - if (!mg) - return NULL; + mg = mempool_alloc(&cache->migration_pool, GFP_NOIO); memset(mg, 0, sizeof(*mg)); @@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi struct dm_bio_prison_cell_v2 *cell_prealloc, *cell; cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */ - if (!cell_prealloc) { - defer_bio(cache, bio); - return false; - } build_key(oblock, end, &key); r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); @@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg) struct dm_bio_prison_cell_v2 *prealloc; prealloc = alloc_prison_cell(cache); - if (!prealloc) { - DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache)); - mg_complete(mg, false); - return -ENOMEM; - } /* * Prevent writes to the block, but allow reads to continue. @@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio } mg = alloc_migration(cache); - if (!mg) { - policy_complete_background_work(cache->policy, op, false); - background_work_end(cache); - return -ENOMEM; - } mg->op = op; mg->overwrite_bio = bio; @@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg) struct dm_bio_prison_cell_v2 *prealloc; prealloc = alloc_prison_cell(cache); - if (!prealloc) { - invalidate_complete(mg, false); - return -ENOMEM; - } build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key); r = dm_cell_lock_v2(cache->prison, &key, @@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock, return -EPERM; mg = alloc_migration(cache); - if (!mg) { - background_work_end(cache); - return -ENOMEM; - } mg->overwrite_bio = bio; mg->invalidate_cblock = cblock; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index cd6f9e9fc98e..4ca8f1977222 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -591,8 +591,8 @@ static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone, * * NOTE: Must be called with the bucket lock held */ -struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket, - unsigned long region_nr) +static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket, + unsigned long region_nr) { struct dm_clone_region_hydration *hd; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f150f5c5492b..4fb1a40e68a0 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -18,7 +18,6 @@ #include <linux/vmalloc.h> #include <linux/log2.h> #include <linux/dm-kcopyd.h> -#include <linux/semaphore.h> #include "dm.h" @@ -107,8 +106,8 @@ struct dm_snapshot { /* The on disk metadata handler */ struct dm_exception_store *store; - /* Maximum 
number of in-flight COW jobs. */ - struct semaphore cow_count; + unsigned in_progress; + struct wait_queue_head in_progress_wait; struct dm_kcopyd_client *kcopyd_client; @@ -162,8 +161,8 @@ struct dm_snapshot { */ #define DEFAULT_COW_THRESHOLD 2048 -static int cow_threshold = DEFAULT_COW_THRESHOLD; -module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644); +static unsigned cow_threshold = DEFAULT_COW_THRESHOLD; +module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644); MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, @@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_hash_tables; } - sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX); + init_waitqueue_head(&s->in_progress_wait); s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(s->kcopyd_client)) { @@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti) dm_put_device(ti, s->origin); + WARN_ON(s->in_progress); + kfree(s); } +static void account_start_copy(struct dm_snapshot *s) +{ + spin_lock(&s->in_progress_wait.lock); + s->in_progress++; + spin_unlock(&s->in_progress_wait.lock); +} + +static void account_end_copy(struct dm_snapshot *s) +{ + spin_lock(&s->in_progress_wait.lock); + BUG_ON(!s->in_progress); + s->in_progress--; + if (likely(s->in_progress <= cow_threshold) && + unlikely(waitqueue_active(&s->in_progress_wait))) + wake_up_locked(&s->in_progress_wait); + spin_unlock(&s->in_progress_wait.lock); +} + +static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) +{ + if (unlikely(s->in_progress > cow_threshold)) { + spin_lock(&s->in_progress_wait.lock); + if (likely(s->in_progress > cow_threshold)) { + /* + * NOTE: this throttle doesn't account for whether + * the caller is servicing an IO that will trigger a COW + * so excess throttling may result for chunks not required + * to be COW'd. But if cow_threshold was reached, extra + * throttling is unlikely to negatively impact performance. + */ + DECLARE_WAITQUEUE(wait, current); + __add_wait_queue(&s->in_progress_wait, &wait); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&s->in_progress_wait.lock); + if (unlock_origins) + up_read(&_origins_lock); + io_schedule(); + remove_wait_queue(&s->in_progress_wait, &wait); + return false; + } + spin_unlock(&s->in_progress_wait.lock); + } + return true; +} + /* * Flush a list of buffers. */ @@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio) } } -static int do_origin(struct dm_dev *origin, struct bio *bio); +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit); /* * Flush a list of buffers. 
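/*
 * Editor's aside, not part of the patch above: the snapshot throttle
 * replaces a semaphore with a counter plus waitqueue, piggybacking on the
 * waitqueue's internal spinlock. Unlike down()/up(), this lets a waiter
 * drop other locks (here _origins_lock) before sleeping. A condensed,
 * hypothetical sketch of the same accounting with "demo_" names:
 */
#include <linux/sched.h>
#include <linux/wait.h>

static unsigned int demo_in_progress;
static unsigned int demo_threshold = 2048;
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_account_start(void)
{
	spin_lock(&demo_wq.lock);
	demo_in_progress++;
	spin_unlock(&demo_wq.lock);
}

static void demo_account_end(void)
{
	spin_lock(&demo_wq.lock);
	demo_in_progress--;
	/* Only bother waking if we dropped back under the threshold. */
	if (demo_in_progress <= demo_threshold && waitqueue_active(&demo_wq))
		wake_up_locked(&demo_wq);
	spin_unlock(&demo_wq.lock);
}

static void demo_throttle(void)
{
	spin_lock(&demo_wq.lock);
	if (demo_in_progress > demo_threshold) {
		DECLARE_WAITQUEUE(wait, current);

		/* Open-coded sleep so outer locks could be dropped here. */
		__add_wait_queue(&demo_wq, &wait);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&demo_wq.lock);
		io_schedule();
		remove_wait_queue(&demo_wq, &wait);
		return;
	}
	spin_unlock(&demo_wq.lock);
}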
@@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - r = do_origin(s->origin, bio); + r = do_origin(s->origin, bio, false); if (r == DM_MAPIO_REMAPPED) generic_make_request(bio); bio = n; @@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) rb_link_node(&pe->out_of_order_node, parent, p); rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); } - up(&s->cow_count); + account_end_copy(s); } /* @@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) dest.count = src.count; /* Hand over to kcopyd */ - down(&s->cow_count); + account_start_copy(s); dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); } @@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, pe->full_bio = bio; pe->full_bio_end_io = bio->bi_end_io; - down(&s->cow_count); + account_start_copy(s); callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, copy_callback, pe); @@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context) struct bio *bio = context; struct dm_snapshot *s = bio->bi_private; - up(&s->cow_count); + account_end_copy(s); bio->bi_status = write_err ? BLK_STS_IOERR : 0; bio_endio(bio); } @@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e, dest.sector = bio->bi_iter.bi_sector; dest.count = s->store->chunk_size; - down(&s->cow_count); + account_start_copy(s); WARN_ON_ONCE(bio->bi_private); bio->bi_private = s; dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio); @@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) if (!s->valid) return DM_MAPIO_KILL; + if (bio_data_dir(bio) == WRITE) { + while (unlikely(!wait_for_in_progress(s, false))) + ; /* wait_for_in_progress() has slept */ + } + down_read(&s->lock); dm_exception_table_lock(&lock); @@ -2112,7 +2163,7 @@ redirect_to_origin: if (bio_data_dir(bio) == WRITE) { up_write(&s->lock); - return do_origin(s->origin, bio); + return do_origin(s->origin, bio, false); } out_unlock: @@ -2487,15 +2538,24 @@ next_snapshot: /* * Called on a write from the origin driver. 
*/ -static int do_origin(struct dm_dev *origin, struct bio *bio) +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit) { struct origin *o; int r = DM_MAPIO_REMAPPED; +again: down_read(&_origins_lock); o = __lookup_origin(origin->bdev); - if (o) + if (o) { + if (limit) { + struct dm_snapshot *s; + list_for_each_entry(s, &o->snapshots, list) + if (unlikely(!wait_for_in_progress(s, true))) + goto again; + } + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); + } up_read(&_origins_lock); return r; @@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, available_sectors); /* Only tell snapshots if this is a write */ - return do_origin(o->dev, bio); + return do_origin(o->dev, bio, true); } /* diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f61693e59684..1e772287b1c8 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -154,7 +154,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) } else { pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", mdname(mddev)); - pr_err("md/raid0: please set raid.default_layout to 1 or 2\n"); + pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n"); err = -ENOTSUPP; goto abort; } diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index cfca3c70599b..21f90a887485 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -643,8 +643,7 @@ static int v4l_stk_release(struct file *fp) dev->owner = NULL; } - if (is_present(dev)) - usb_autopm_put_interface(dev->interface); + usb_autopm_put_interface(dev->interface); mutex_unlock(&dev->lock); return v4l2_fh_release(fp); } diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 32747425297d..64fff6abe60e 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev, if (!cnt) { rc = -ENODEV; pci_dev_busy = 1; - goto err_out; + goto err_out_int; } jm = kzalloc(sizeof(struct jmb38x_ms) diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c index 310dae26ddff..b2c325ead1c8 100644 --- a/drivers/mfd/mt6397-core.c +++ b/drivers/mfd/mt6397-core.c @@ -129,11 +129,27 @@ static int mt6397_irq_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend, mt6397_irq_resume); +struct chip_data { + u32 cid_addr; + u32 cid_shift; +}; + +static const struct chip_data mt6323_core = { + .cid_addr = MT6323_CID, + .cid_shift = 0, +}; + +static const struct chip_data mt6397_core = { + .cid_addr = MT6397_CID, + .cid_shift = 0, +}; + static int mt6397_probe(struct platform_device *pdev) { int ret; unsigned int id; struct mt6397_chip *pmic; + const struct chip_data *pmic_core; pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL); if (!pmic) @@ -149,28 +165,30 @@ static int mt6397_probe(struct platform_device *pdev) if (!pmic->regmap) return -ENODEV; - platform_set_drvdata(pdev, pmic); + pmic_core = of_device_get_match_data(&pdev->dev); + if (!pmic_core) + return -ENODEV; - ret = regmap_read(pmic->regmap, MT6397_CID, &id); + ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id); if (ret) { - dev_err(pmic->dev, "Failed to read chip id: %d\n", ret); + dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret); return ret; } + pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff; + + platform_set_drvdata(pdev, pmic); 
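/*
 * Editor's aside, not part of the patch above: the mt6397 rework moves
 * the chip-specific register layout into the OF match table, so probe()
 * asks of_device_get_match_data() instead of hard-coding which CID
 * register to read. A minimal sketch of the pattern; the compatible
 * string, register offset and all "demo_" names are hypothetical.
 */
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_chip_data {
	u32 cid_addr;
	u32 cid_shift;
};

static const struct demo_chip_data demo_chip_a = {
	.cid_addr = 0x0100,	/* hypothetical chip-ID register offset */
	.cid_shift = 0,
};

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-pmic-a", .data = &demo_chip_a },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_chip_data *cd = of_device_get_match_data(&pdev->dev);

	if (!cd)
		return -ENODEV;	/* bound without match data: unsupported */

	/* ...read the chip ID from cd->cid_addr, shift by cd->cid_shift... */
	return 0;
}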
+ pmic->irq = platform_get_irq(pdev, 0); if (pmic->irq <= 0) return pmic->irq; - switch (id & 0xff) { - case MT6323_CHIP_ID: - pmic->int_con[0] = MT6323_INT_CON0; - pmic->int_con[1] = MT6323_INT_CON1; - pmic->int_status[0] = MT6323_INT_STATUS0; - pmic->int_status[1] = MT6323_INT_STATUS1; - ret = mt6397_irq_init(pmic); - if (ret) - return ret; + ret = mt6397_irq_init(pmic); + if (ret) + return ret; + switch (pmic->chip_id) { + case MT6323_CHIP_ID: ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs, ARRAY_SIZE(mt6323_devs), NULL, 0, pmic->irq_domain); @@ -178,21 +196,13 @@ static int mt6397_probe(struct platform_device *pdev) case MT6391_CHIP_ID: case MT6397_CHIP_ID: - pmic->int_con[0] = MT6397_INT_CON0; - pmic->int_con[1] = MT6397_INT_CON1; - pmic->int_status[0] = MT6397_INT_STATUS0; - pmic->int_status[1] = MT6397_INT_STATUS1; - ret = mt6397_irq_init(pmic); - if (ret) - return ret; - ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs, ARRAY_SIZE(mt6397_devs), NULL, 0, pmic->irq_domain); break; default: - dev_err(&pdev->dev, "unsupported chip: %d\n", id); + dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id); return -ENODEV; } @@ -205,9 +215,15 @@ static int mt6397_probe(struct platform_device *pdev) } static const struct of_device_id mt6397_of_match[] = { - { .compatible = "mediatek,mt6397" }, - { .compatible = "mediatek,mt6323" }, - { } + { + .compatible = "mediatek,mt6323", + .data = &mt6323_core, + }, { + .compatible = "mediatek,mt6397", + .data = &mt6397_core, + }, { + /* sentinel */ + } }; MODULE_DEVICE_TABLE(of, mt6397_of_match); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 47ae84afac2e..1b1a794d639d 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -527,6 +527,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, FASTRPC_PHYS(buffer->phys), buffer->size); if (ret < 0) { dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); + kfree(a); return -EINVAL; } diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 32e9b1aed2ca..0a2b99e1af45 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -218,13 +218,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; + /* No need to enable the client if nothing is needed from it */ + if (!cldev->bus->fw_f_fw_ver_supported && + !cldev->bus->hbm_f_os_supported) + return; + ret = mei_cldev_enable(cldev); if (ret) return; - ret = mei_fwver(cldev); - if (ret < 0) - dev_err(&cldev->dev, "FW version command failed %d\n", ret); + if (cldev->bus->fw_f_fw_ver_supported) { + ret = mei_fwver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "FW version command failed %d\n", + ret); + } if (cldev->bus->hbm_f_os_supported) { ret = mei_osver(cldev); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 77f7dff7098d..c09f8bb49495 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -79,6 +79,9 @@ #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ +#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */ +#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */ + #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ #define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index abe1b1f4362f..c4f6991d3028 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1355,6 +1355,8 @@ static bool mei_me_fw_type_sps(struct 
pci_dev *pdev) #define MEI_CFG_FW_SPS \ .quirk_probe = mei_me_fw_type_sps +#define MEI_CFG_FW_VER_SUPP \ + .fw_ver_supported = 1 #define MEI_CFG_ICH_HFS \ .fw_status.count = 0 @@ -1392,31 +1394,41 @@ static const struct mei_cfg mei_me_ich10_cfg = { MEI_CFG_ICH10_HFS, }; -/* PCH devices */ -static const struct mei_cfg mei_me_pch_cfg = { +/* PCH6 devices */ +static const struct mei_cfg mei_me_pch6_cfg = { MEI_CFG_PCH_HFS, }; +/* PCH7 devices */ +static const struct mei_cfg mei_me_pch7_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, +}; + /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { MEI_CFG_PCH_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_NM, }; /* PCH8 Lynx Point and newer devices */ static const struct mei_cfg mei_me_pch8_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, }; /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ static const struct mei_cfg mei_me_pch8_sps_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_SPS, }; /* Cannon Lake and newer devices */ static const struct mei_cfg mei_me_pch12_cfg = { MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, MEI_CFG_DMA_128, }; @@ -1428,7 +1440,8 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_UNDEF_CFG] = NULL, [MEI_ME_ICH_CFG] = &mei_me_ich_cfg, [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg, - [MEI_ME_PCH_CFG] = &mei_me_pch_cfg, + [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg, + [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg, [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, @@ -1473,6 +1486,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; + dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; + return dev; } diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 08c84a0de4a8..1d8794828cbc 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -20,11 +20,13 @@ * @fw_status: FW status * @quirk_probe: device exclusion quirk * @dma_size: device DMA buffers size + * @fw_ver_supported: is fw version retrievable from FW */ struct mei_cfg { const struct mei_fw_status fw_status; bool (*quirk_probe)(struct pci_dev *pdev); size_t dma_size[DMA_DSCR_NUM]; + u32 fw_ver_supported:1; }; @@ -62,7 +64,8 @@ struct mei_me_hw { * @MEI_ME_UNDEF_CFG: Lower sentinel. * @MEI_ME_ICH_CFG: I/O Controller Hub legacy devices. * @MEI_ME_ICH10_CFG: I/O Controller Hub platforms Gen10 - * @MEI_ME_PCH_CFG: Platform Controller Hub platforms (Up to Gen8). + * @MEI_ME_PCH6_CFG: Platform Controller Hub platforms (Gen6). + * @MEI_ME_PCH7_CFG: Platform Controller Hub platforms (Gen7). * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations * with quirk for Node Manager exclusion. 
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer @@ -77,7 +80,8 @@ enum mei_cfg_idx { MEI_ME_UNDEF_CFG, MEI_ME_ICH_CFG, MEI_ME_ICH10_CFG, - MEI_ME_PCH_CFG, + MEI_ME_PCH6_CFG, + MEI_ME_PCH7_CFG, MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, MEI_ME_PCH8_SPS_CFG, diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index f71a023aed3c..0f2141178299 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -426,6 +426,8 @@ struct mei_fw_version { * * @fw_ver : FW versions * + * @fw_f_fw_ver_supported : fw feature: fw version supported + * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients * @me_clients_map : FW clients bit map @@ -506,6 +508,8 @@ struct mei_device { struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; + unsigned int fw_f_fw_ver_supported:1; + struct rw_semaphore me_clients_rwsem; struct list_head me_clients; DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index d5a92c6eadb3..3dca63eddaa0 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -61,13 +61,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)}, @@ -96,6 +96,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)}, diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index f7bdae5354c3..5047f7343ffc 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq) cq_host->slot[tag].flags = 0; cq_host->qcnt += 1; - + /* Make sure descriptors are ready before ringing the doorbell */ + wmb(); cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR); if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag))) pr_debug("%s: cqhci: doorbell not set for tag %d\n", diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 78e7e350655c..4031217d21c3 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -17,6 +17,7 @@ #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> +#include <linux/dma/mxs-dma.h> #include <linux/highmem.h> #include <linux/clk.h> #include <linux/err.h> @@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host 
*host) ssp->ssp_pio_words[2] = cmd1; ssp->dma_dir = DMA_NONE; ssp->slave_dirn = DMA_TRANS_NONE; - desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); + desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END); if (!desc) goto out; @@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) ssp->ssp_pio_words[2] = cmd1; ssp->dma_dir = DMA_NONE; ssp->slave_dirn = DMA_TRANS_NONE; - desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); + desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END); if (!desc) goto out; @@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) host->data = data; ssp->dma_dir = dma_data_dir; ssp->slave_dirn = slave_dirn; - desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) goto out; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index d4ada5cca2d1..234551a68739 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -646,8 +646,8 @@ int renesas_sdhi_probe(struct platform_device *pdev, struct tmio_mmc_dma *dma_priv; struct tmio_mmc_host *host; struct renesas_sdhi *priv; + int num_irqs, irq, ret, i; struct resource *res; - int irq, ret, i; u16 ver; of_data = of_device_get_match_data(&pdev->dev); @@ -825,24 +825,31 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->hs400_complete = renesas_sdhi_hs400_complete; } - i = 0; - while (1) { + num_irqs = platform_irq_count(pdev); + if (num_irqs < 0) { + ret = num_irqs; + goto eirq; + } + + /* There must be at least one IRQ source */ + if (!num_irqs) { + ret = -ENXIO; + goto eirq; + } + + for (i = 0; i < num_irqs; i++) { irq = platform_get_irq(pdev, i); - if (irq < 0) - break; - i++; + if (irq < 0) { + ret = irq; + goto eirq; + } + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host); if (ret) goto eirq; } - /* There must be at least one IRQ source */ - if (!i) { - ret = irq; - goto eirq; - } - dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n", mmc_hostname(host->mmc), (unsigned long) (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 2b9cdcd1dd9d..f4f5f0a70cda 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -262,6 +262,7 @@ static const struct sdhci_iproc_data bcm2835_data = { }; static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = { + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .ops = &sdhci_iproc_32only_ops, }; diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 41c2677c587f..083e7e053c95 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode) * on temperature */ if (temperature < -20000) - phase_delay = min(max_window + 4 * max_len - 24, + phase_delay = min(max_window + 4 * (max_len - 1) - 24, max_window + DIV_ROUND_UP(13 * max_len, 16) * 4); else if (temperature < 20000) diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 81bd9afb0980..98c575de43c7 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1393,11 +1393,9 @@ static int sh_mmcif_probe(struct platform_device *pdev) const char *name; irq[0] = platform_get_irq(pdev, 0); - irq[1] = platform_get_irq(pdev, 1); - if (irq[0] < 0) { - dev_err(dev, "Get irq error\n"); + irq[1] = 
platform_get_irq_optional(pdev, 1); + if (irq[0] < 0) return -ENXIO; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(dev, res); diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 97a97a9ccc36..e10b76089048 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -134,16 +134,15 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) /** * au_read_buf16 - read chip data into buffer - * @mtd: MTD device structure + * @this: NAND chip object * @buf: buffer to store data * @len: number of bytes to read * * read function for 16-bit buswidth */ -static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) +static void au_read_buf16(struct nand_chip *this, u_char *buf, int len) { int i; - struct nand_chip *this = mtd_to_nand(mtd); u16 *p = (u16 *) buf; len >>= 1; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 1d8621d43160..7acf4a93b592 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -487,7 +487,7 @@ static int write_sr(struct spi_nor *nor, u8 val) SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1)); + SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); return spi_mem_exec_op(nor->spimem, &op); } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8c79bad2a9a5..4f2e6910c623 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data) struct bond_vlan_tag *tags; if (is_vlan_dev(upper) && - bond->nest_level == vlan_get_encap_level(upper) - 1) { + bond->dev->lower_level == upper->lower_level - 1) { if (upper->addr_assign_type == NET_ADDR_STOLEN) { alb_send_lp_vid(slave, mac_addr, vlan_dev_vlan_proto(upper), diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 931d9d935686..62f65573eb04 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1733,8 +1733,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, goto err_upper_unlink; } - bond->nest_level = dev_get_nest_level(bond_dev) + 1; - /* If the mode uses primary, then the following is handled by * bond_change_active_slave().
*/ @@ -1816,7 +1814,8 @@ err_detach: slave_disable_netpoll(new_slave); err_close: - slave_dev->priv_flags &= ~IFF_BONDING; + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_restore_mac: @@ -1956,9 +1955,6 @@ static int __bond_release_one(struct net_device *bond_dev, if (!bond_has_slaves(bond)) { bond_set_carrier(bond); eth_hw_addr_random(bond_dev); - bond->nest_level = SINGLE_DEPTH_NESTING; - } else { - bond->nest_level = dev_get_nest_level(bond_dev) + 1; } unblock_netpoll_tx(); @@ -2017,7 +2013,8 @@ static int __bond_release_one(struct net_device *bond_dev, else dev_set_mtu(slave_dev, slave->original_mtu); - slave_dev->priv_flags &= ~IFF_BONDING; + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; bond_free_slave(slave); @@ -2086,8 +2083,7 @@ static int bond_miimon_inspect(struct bonding *bond) ignore_updelay = !rcu_dereference(bond->curr_active_slave); bond_for_each_slave_rcu(bond, slave, iter) { - slave->new_link = BOND_LINK_NOCHANGE; - slave->link_new_state = slave->link; + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); link_state = bond_check_dev_link(bond, slave->dev, 0); @@ -2121,7 +2117,7 @@ static int bond_miimon_inspect(struct bonding *bond) } if (slave->delay <= 0) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; continue; } @@ -2158,7 +2154,7 @@ static int bond_miimon_inspect(struct bonding *bond) slave->delay = 0; if (slave->delay <= 0) { - slave->new_link = BOND_LINK_UP; + bond_propose_link_state(slave, BOND_LINK_UP); commit++; ignore_updelay = false; continue; @@ -2196,7 +2192,7 @@ static void bond_miimon_commit(struct bonding *bond) struct slave *slave, *primary; bond_for_each_slave(bond, slave, iter) { - switch (slave->new_link) { + switch (slave->link_new_state) { case BOND_LINK_NOCHANGE: /* For 802.3ad mode, check current slave speed and * duplex again in case its port was disabled after @@ -2268,8 +2264,8 @@ static void bond_miimon_commit(struct bonding *bond) default: slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", - slave->new_link); - slave->new_link = BOND_LINK_NOCHANGE; + slave->link_new_state); + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); continue; } @@ -2677,13 +2673,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) bond_for_each_slave_rcu(bond, slave, iter) { unsigned long trans_start = dev_trans_start(slave->dev); - slave->new_link = BOND_LINK_NOCHANGE; + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, trans_start, 1) && bond_time_in_interval(bond, slave->last_rx, 1)) { - slave->new_link = BOND_LINK_UP; + bond_propose_link_state(slave, BOND_LINK_UP); slave_state_changed = 1; /* primary_slave has no meaning in round-robin @@ -2708,7 +2704,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) if (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, slave->last_rx, 2)) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); slave_state_changed = 1; if (slave->link_failure_count < UINT_MAX) @@ -2739,8 +2735,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) goto re_arm; bond_for_each_slave(bond, slave, iter) { - if (slave->new_link != BOND_LINK_NOCHANGE) - slave->link = slave->new_link; + if (slave->link_new_state != BOND_LINK_NOCHANGE) + slave->link = slave->link_new_state; } if (slave_state_changed) { @@ -2763,9 +2759,9 @@ re_arm: } /* 
Called to inspect slaves for active-backup mode ARP monitor link state - * changes. Sets new_link in slaves to specify what action should take - * place for the slave. Returns 0 if no changes are found, >0 if changes - * to link states must be committed. + * changes. Sets proposed link state in slaves to specify what action + * should take place for the slave. Returns 0 if no changes are found, >0 + * if changes to link states must be committed. * * Called with rcu_read_lock held. */ @@ -2777,12 +2773,12 @@ static int bond_ab_arp_inspect(struct bonding *bond) int commit = 0; bond_for_each_slave_rcu(bond, slave, iter) { - slave->new_link = BOND_LINK_NOCHANGE; + bond_propose_link_state(slave, BOND_LINK_NOCHANGE); last_rx = slave_last_rx(bond, slave); if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, last_rx, 1)) { - slave->new_link = BOND_LINK_UP; + bond_propose_link_state(slave, BOND_LINK_UP); commit++; } continue; @@ -2810,7 +2806,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (!bond_is_active_slave(slave) && !rcu_access_pointer(bond->current_arp_slave) && !bond_time_in_interval(bond, last_rx, 3)) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; } @@ -2823,7 +2819,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_is_active_slave(slave) && (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, last_rx, 2))) { - slave->new_link = BOND_LINK_DOWN; + bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; } } @@ -2843,7 +2839,7 @@ static void bond_ab_arp_commit(struct bonding *bond) struct slave *slave; bond_for_each_slave(bond, slave, iter) { - switch (slave->new_link) { + switch (slave->link_new_state) { case BOND_LINK_NOCHANGE: continue; @@ -2893,8 +2889,9 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; default: - slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n", - slave->new_link); + slave_err(bond->dev, slave->dev, + "impossible: link_new_state %d on slave\n", + slave->link_new_state); continue; } @@ -3442,13 +3439,6 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } -static int bond_get_nest_level(struct net_device *bond_dev) -{ - struct bonding *bond = netdev_priv(bond_dev); - - return bond->nest_level; -} - static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3457,7 +3447,7 @@ static void bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; - spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev)); + spin_lock(&bond->stats_lock); memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); @@ -4039,7 +4029,7 @@ out: * this to-be-skipped slave to send a packet out. 
*/ old_arr = rtnl_dereference(bond->slave_arr); - for (idx = 0; idx < old_arr->count; idx++) { + for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) { if (skipslave == old_arr->arr[idx]) { old_arr->arr[idx] = old_arr->arr[old_arr->count-1]; @@ -4268,7 +4258,6 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_neigh_setup = bond_neigh_setup, .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, - .ndo_get_lock_subclass = bond_get_nest_level, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, @@ -4296,7 +4285,6 @@ void bond_setup(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); spin_lock_init(&bond->mode_lock); - spin_lock_init(&bond->stats_lock); bond->params = bonding_defaults; /* Initialize pointers */ @@ -4365,6 +4353,7 @@ static void bond_uninit(struct net_device *bond_dev) list_del(&bond->bond_list); + lockdep_unregister_key(&bond->stats_lock_key); bond_debug_unregister(bond); } @@ -4768,8 +4757,9 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; - bond->nest_level = SINGLE_DEPTH_NESTING; - netdev_lockdep_set_classes(bond_dev); + spin_lock_init(&bond->stats_lock); + lockdep_register_key(&bond->stats_lock_key); + lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key); list_add_tail(&bond->bond_list, &bn->dev_list); diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 606b7d8ffe13..8e9f5620c9a2 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -52,6 +52,7 @@ #define CONTROL_EX_PDR BIT(8) /* control register */ +#define CONTROL_SWR BIT(15) #define CONTROL_TEST BIT(7) #define CONTROL_CCE BIT(6) #define CONTROL_DISABLE_AR BIT(5) @@ -97,6 +98,9 @@ #define BTR_TSEG2_SHIFT 12 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) +/* interrupt register */ +#define INT_STS_PENDING 0x8000 + /* brp extension register */ #define BRP_EXT_BRPE_MASK 0x0f #define BRP_EXT_BRPE_SHIFT 0 @@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev) IF_MCONT_RCV_EOB); } +static int c_can_software_reset(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + int retry = 0; + + if (priv->type != BOSCH_D_CAN) + return 0; + + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT); + while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) { + msleep(20); + if (retry++ > 100) { + netdev_err(dev, "CCTRL: software reset failed\n"); + return -EIO; + } + } + + return 0; +} + /* * Configure C_CAN chip: * - enable/disable auto-retransmission @@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev) static int c_can_chip_config(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); + int err; + + err = c_can_software_reset(dev); + if (err) + return err; /* enable automatic retransmission */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); @@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev, struct can_berr_counter bec; switch (error_type) { + case C_CAN_NO_ERROR: + priv->can.state = CAN_STATE_ERROR_ACTIVE; + break; case C_CAN_ERROR_WARNING: /* error warning state */ priv->can.can_stats.error_warning++; @@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev, ERR_CNT_RP_SHIFT; switch (error_type) { + case C_CAN_NO_ERROR: + /* error warning state */ + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + 
cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; case C_CAN_ERROR_WARNING: /* error warning state */ cf->can_id |= CAN_ERR_CRTL; @@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota) u16 curr, last = priv->last_status; int work_done = 0; - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); - /* Ack status on C_CAN. D_CAN is self clearing */ - if (priv->type != BOSCH_D_CAN) - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + /* Only read the status register if a status interrupt was pending */ + if (atomic_xchg(&priv->sie_pending, 0)) { + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); + /* Ack status on C_CAN. D_CAN is self clearing */ + if (priv->type != BOSCH_D_CAN) + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + } else { + /* no change detected ... */ + curr = last; + } /* handle state changes */ if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { @@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota) /* handle bus recovery events */ if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { netdev_dbg(dev, "left bus off state\n"); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); } + if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { netdev_dbg(dev, "left error passive state\n"); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); + } + + if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) { + netdev_dbg(dev, "left error warning state\n"); + work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR); } /* handle lec errors on the bus */ @@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); + int reg_int; - if (!priv->read_reg(priv, C_CAN_INT_REG)) + reg_int = priv->read_reg(priv, C_CAN_INT_REG); + if (!reg_int) return IRQ_NONE; + /* save for later use */ + if (reg_int & INT_STS_PENDING) + atomic_set(&priv->sie_pending, 1); + /* disable all interrupts and schedule the NAPI */ c_can_irq_control(priv, false); napi_schedule(&priv->napi); diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 8acdc7fa4792..d5567a7c1c6d 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -198,6 +198,7 @@ struct c_can_priv { struct net_device *dev; struct device *device; atomic_t tx_active; + atomic_t sie_pending; unsigned long tx_dir; int last_status; u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index ac86be52b461..1c88c361938c 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev) return; ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); + of_node_put(dn); if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) netdev_warn(dev, "Invalid value for transceiver max bitrate. 
Ignoring bitrate limit.\n"); } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index dc5695dffc2e..57f9a2f51085 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) struct can_frame *cf; bool rx_errors = false, tx_errors = false; u32 timestamp; + int err; timestamp = priv->read(&regs->timer) << 16; @@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + if (err) + dev->stats.rx_fifo_errors++; } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) @@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) int flt; struct can_berr_counter bec; u32 timestamp; + int err; timestamp = priv->read(&regs->timer) << 16; @@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + if (err) + dev->stats.rx_fifo_errors++; } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) @@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev) reg_mecr = priv->read(&regs->mecr); reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; priv->write(reg_mecr, &regs->mecr); + reg_mecr |= FLEXCAN_MECR_ECCDIS; reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | FLEXCAN_MECR_FANCEI_MSK); priv->write(reg_mecr, &regs->mecr); diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index e6a668ee7730..84cae167e42f 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) return cb_b->timestamp - cb_a->timestamp; } -static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) +/** + * can_rx_offload_offload_one() - Read one CAN frame from HW + * @offload: pointer to rx_offload context + * @n: number of mailbox to read + * + * The task of this function is to read a CAN frame from mailbox @n + * from the device and return the mailbox's content as a struct + * sk_buff. + * + * If the struct can_rx_offload::skb_queue exceeds the maximal queue + * length (struct can_rx_offload::skb_queue_len_max) or no skb can be + * allocated, the mailbox contents is discarded by reading it into an + * overflow buffer. This way the mailbox is marked as free by the + * driver. + * + * Return: A pointer to skb containing the CAN frame on success. + * + * NULL if the mailbox @n is empty. 
+ * + * ERR_PTR() in case of an error + */ +static struct sk_buff * +can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) { - struct sk_buff *skb = NULL; + struct sk_buff *skb = NULL, *skb_error = NULL; struct can_rx_offload_cb *cb; struct can_frame *cf; int ret; - /* If queue is full or skb not available, read to discard mailbox */ - if (likely(skb_queue_len(&offload->skb_queue) <= - offload->skb_queue_len_max)) + if (likely(skb_queue_len(&offload->skb_queue) < + offload->skb_queue_len_max)) { skb = alloc_can_skb(offload->dev, &cf); + if (unlikely(!skb)) + skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */ + } else { + skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */ + } - if (!skb) { + /* If queue is full or skb not available, drop by reading into + * overflow buffer. + */ + if (unlikely(skb_error)) { struct can_frame cf_overflow; u32 timestamp; ret = offload->mailbox_read(offload, &cf_overflow, &timestamp, n); - if (ret) - offload->dev->stats.rx_dropped++; - return NULL; + /* Mailbox was empty. */ + if (unlikely(!ret)) + return NULL; + + /* Mailbox has been read and we're dropping it or + * there was a problem reading the mailbox. + * + * Increment error counters in any case. + */ + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + /* There was a problem reading the mailbox, propagate + * error value. + */ + if (unlikely(ret < 0)) + return ERR_PTR(ret); + + return skb_error; } cb = can_rx_offload_get_cb(skb); ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); - if (!ret) { + + /* Mailbox was empty. */ + if (unlikely(!ret)) { kfree_skb(skb); return NULL; } + /* There was a problem reading the mailbox, propagate error value. */ + if (unlikely(ret < 0)) { + kfree_skb(skb); + + offload->dev->stats.rx_dropped++; + offload->dev->stats.rx_fifo_errors++; + + return ERR_PTR(ret); + } + + /* Mailbox was read. 
*/ return skb; } @@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen continue; skb = can_rx_offload_offload_one(offload, i); - if (!skb) - break; + if (IS_ERR_OR_NULL(skb)) + continue; __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); } @@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) struct sk_buff *skb; int received = 0; - while ((skb = can_rx_offload_offload_one(offload, 0))) { + while (1) { + skb = can_rx_offload_offload_one(offload, 0); + if (IS_ERR(skb)) + continue; + if (!skb) + break; + skb_queue_tail(&offload->skb_queue, skb); received++; } @@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, unsigned long flags; if (skb_queue_len(&offload->skb_queue) > - offload->skb_queue_len_max) - return -ENOMEM; + offload->skb_queue_len_max) { + kfree_skb(skb); + return -ENOBUFS; + } cb = can_rx_offload_get_cb(skb); cb->timestamp = timestamp; @@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb) { if (skb_queue_len(&offload->skb_queue) > - offload->skb_queue_len_max) - return -ENOMEM; + offload->skb_queue_len_max) { + kfree_skb(skb); + return -ENOBUFS; + } skb_queue_tail(&offload->skb_queue, skb); can_rx_offload_schedule(offload); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index bee9f7b8dad6..bb20a9b75cc6 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -717,6 +717,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws) if (priv->after_suspend) { mcp251x_hw_reset(spi); mcp251x_setup(net, spi); + priv->force_quit = 0; if (priv->after_suspend & AFTER_SUSPEND_RESTART) { mcp251x_set_normal_mode(spi); } else if (priv->after_suspend & AFTER_SUSPEND_UP) { @@ -728,7 +729,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws) mcp251x_hw_sleep(spi); } priv->after_suspend = 0; - priv->force_quit = 0; } if (priv->restart_tx) { diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f8b19eef5d26..31ad364a89bb 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION); */ #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) +#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX) /* TI HECC module registers */ #define HECC_CANME 0x0 /* Mailbox enable */ @@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_CANTA 0x10 /* Transmission acknowledge */ #define HECC_CANAA 0x14 /* Abort acknowledge */ #define HECC_CANRMP 0x18 /* Receive message pending */ -#define HECC_CANRML 0x1C /* Remote message lost */ +#define HECC_CANRML 0x1C /* Receive message lost */ #define HECC_CANRFP 0x20 /* Remote frame pending */ #define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ #define HECC_CANMC 0x28 /* Master control */ @@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION); #define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ HECC_CANES_CRCE | HECC_CANES_SE |\ HECC_CANES_ACKE) +#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\ + HECC_CANES_EP | HECC_CANES_EW) #define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ @@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev) hecc_set_bit(priv, HECC_CANMIM, mbx_mask); } - /* Prevent message over-write & Enable interrupts */ - hecc_write(priv, HECC_CANOPC, HECC_SET_REG); + /* Enable tx interrupts */ + 
hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1); + + /* Prevent message over-write to create a rx fifo, but not for + * the lowest priority mailbox, since that allows detecting + * overflows instead of the hardware silently dropping the + * messages. + */ + mbx_mask = ~BIT(HECC_RX_LAST_MBOX); + hecc_write(priv, HECC_CANOPC, mbx_mask); + + /* Enable interrupts */ if (priv->use_hecc1int) { hecc_write(priv, HECC_CANMIL, HECC_SET_REG); hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | @@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev) { struct ti_hecc_priv *priv = netdev_priv(ndev); + /* Disable the CPK; stop sending, erroring and acking */ + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); + /* Disable interrupts and disable mailboxes */ hecc_write(priv, HECC_CANGIM, 0); hecc_write(priv, HECC_CANMIM, 0); @@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) hecc_set_bit(priv, HECC_CANME, mbx_mask); spin_unlock_irqrestore(&priv->mbx_lock, flags); - hecc_clear_bit(priv, HECC_CANMD, mbx_mask); - hecc_set_bit(priv, HECC_CANMIM, mbx_mask); hecc_write(priv, HECC_CANTRS, mbx_mask); return NETDEV_TX_OK; @@ -526,8 +540,10 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload, u32 *timestamp, unsigned int mbxno) { struct ti_hecc_priv *priv = rx_offload_to_priv(offload); - u32 data; + u32 data, mbx_mask; + int ret = 1; + mbx_mask = BIT(mbxno); data = hecc_read_mbx(priv, mbxno, HECC_CANMID); if (data & HECC_CANMID_IDE) cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; @@ -548,7 +564,25 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload, *timestamp = hecc_read_stamp(priv, mbxno); - return 1; + /* Check for FIFO overrun. + * + * All but the last RX mailbox have activated overwrite + * protection. So skip check for overrun, if we're not + * handling the last RX mailbox. + * + * As the overwrite protection for the last RX mailbox is + * disabled, the CAN core might update while we're reading + * it. This means the skb might be inconsistent. + * + * Return an error to let rx-offload discard this CAN frame. 
+ */ + if (unlikely(mbxno == HECC_RX_LAST_MBOX && + hecc_read(priv, HECC_CANRML) & mbx_mask)) + ret = -ENOBUFS; + + hecc_write(priv, HECC_CANRMP, mbx_mask); + + return ret; } static int ti_hecc_error(struct net_device *ndev, int int_status, @@ -558,92 +592,73 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, struct can_frame *cf; struct sk_buff *skb; u32 timestamp; + int err; - /* propagate the error condition to the can stack */ - skb = alloc_can_err_skb(ndev, &cf); - if (!skb) { - if (printk_ratelimit()) - netdev_err(priv->ndev, - "%s: alloc_can_err_skb() failed\n", - __func__); - return -ENOMEM; - } - - if (int_status & HECC_CANGIF_WLIF) { /* warning level int */ - if ((int_status & HECC_CANGIF_BOIF) == 0) { - priv->can.state = CAN_STATE_ERROR_WARNING; - ++priv->can.can_stats.error_warning; - cf->can_id |= CAN_ERR_CRTL; - if (hecc_read(priv, HECC_CANTEC) > 96) - cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; - if (hecc_read(priv, HECC_CANREC) > 96) - cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; - } - hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW); - netdev_dbg(priv->ndev, "Error Warning interrupt\n"); - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - } - - if (int_status & HECC_CANGIF_EPIF) { /* error passive int */ - if ((int_status & HECC_CANGIF_BOIF) == 0) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - ++priv->can.can_stats.error_passive; - cf->can_id |= CAN_ERR_CRTL; - if (hecc_read(priv, HECC_CANTEC) > 127) - cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; - if (hecc_read(priv, HECC_CANREC) > 127) - cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (err_status & HECC_BUS_ERROR) { + /* propagate the error condition to the can stack */ + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + if (net_ratelimit()) + netdev_err(priv->ndev, + "%s: alloc_can_err_skb() failed\n", + __func__); + return -ENOMEM; } - hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP); - netdev_dbg(priv->ndev, "Error passive interrupt\n"); - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - } - - /* Need to check busoff condition in error status register too to - * ensure warning interrupts don't hog the system - */ - if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) { - priv->can.state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; - hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO); - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); - /* Disable all interrupts in bus-off to avoid int hog */ - hecc_write(priv, HECC_CANGIM, 0); - ++priv->can.can_stats.bus_off; - can_bus_off(ndev); - } - if (err_status & HECC_BUS_ERROR) { ++priv->can.can_stats.bus_error; cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - if (err_status & HECC_CANES_FE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); + if (err_status & HECC_CANES_FE) cf->data[2] |= CAN_ERR_PROT_FORM; - } - if (err_status & HECC_CANES_BE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE); + if (err_status & HECC_CANES_BE) cf->data[2] |= CAN_ERR_PROT_BIT; - } - if (err_status & HECC_CANES_SE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE); + if (err_status & HECC_CANES_SE) cf->data[2] |= CAN_ERR_PROT_STUFF; - } - if (err_status & HECC_CANES_CRCE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); + if (err_status & HECC_CANES_CRCE) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - } - if (err_status & HECC_CANES_ACKE) { - hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); + if (err_status & HECC_CANES_ACKE) cf->data[3] = CAN_ERR_PROT_LOC_ACK; - } + + timestamp = hecc_read(priv, HECC_CANLNT); + err = can_rx_offload_queue_sorted(&priv->offload, skb, + 
timestamp); + if (err) + ndev->stats.rx_fifo_errors++; } - timestamp = hecc_read(priv, HECC_CANLNT); - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS); return 0; } +static void ti_hecc_change_state(struct net_device *ndev, + enum can_state rx_state, + enum can_state tx_state) +{ + struct ti_hecc_priv *priv = netdev_priv(ndev); + struct can_frame *cf; + struct sk_buff *skb; + u32 timestamp; + int err; + + skb = alloc_can_err_skb(priv->ndev, &cf); + if (unlikely(!skb)) { + priv->can.state = max(tx_state, rx_state); + return; + } + + can_change_state(priv->ndev, cf, tx_state, rx_state); + + if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) { + cf->data[6] = hecc_read(priv, HECC_CANTEC); + cf->data[7] = hecc_read(priv, HECC_CANREC); + } + + timestamp = hecc_read(priv, HECC_CANLNT); + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + if (err) + ndev->stats.rx_fifo_errors++; +} + static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; @@ -651,6 +666,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) struct net_device_stats *stats = &ndev->stats; u32 mbxno, mbx_mask, int_status, err_status, stamp; unsigned long flags, rx_pending; + u32 handled = 0; int_status = hecc_read(priv, priv->use_hecc1int ? @@ -660,17 +676,66 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) return IRQ_NONE; err_status = hecc_read(priv, HECC_CANES); - if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | - HECC_CANES_EP | HECC_CANES_EW)) + if (unlikely(err_status & HECC_CANES_FLAGS)) ti_hecc_error(ndev, int_status, err_status); + if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) { + enum can_state rx_state, tx_state; + u32 rec = hecc_read(priv, HECC_CANREC); + u32 tec = hecc_read(priv, HECC_CANTEC); + + if (int_status & HECC_CANGIF_WLIF) { + handled |= HECC_CANGIF_WLIF; + rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0; + tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0; + netdev_dbg(priv->ndev, "Error Warning interrupt\n"); + ti_hecc_change_state(ndev, rx_state, tx_state); + } + + if (int_status & HECC_CANGIF_EPIF) { + handled |= HECC_CANGIF_EPIF; + rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0; + tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0; + netdev_dbg(priv->ndev, "Error passive interrupt\n"); + ti_hecc_change_state(ndev, rx_state, tx_state); + } + + if (int_status & HECC_CANGIF_BOIF) { + handled |= HECC_CANGIF_BOIF; + rx_state = CAN_STATE_BUS_OFF; + tx_state = CAN_STATE_BUS_OFF; + netdev_dbg(priv->ndev, "Bus off interrupt\n"); + + /* Disable all interrupts */ + hecc_write(priv, HECC_CANGIM, 0); + can_bus_off(ndev); + ti_hecc_change_state(ndev, rx_state, tx_state); + } + } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) { + enum can_state new_state, tx_state, rx_state; + u32 rec = hecc_read(priv, HECC_CANREC); + u32 tec = hecc_read(priv, HECC_CANTEC); + + if (rec >= 128 || tec >= 128) + new_state = CAN_STATE_ERROR_PASSIVE; + else if (rec >= 96 || tec >= 96) + new_state = CAN_STATE_ERROR_WARNING; + else + new_state = CAN_STATE_ERROR_ACTIVE; + + if (new_state < priv->can.state) { + rx_state = rec >= tec ? new_state : 0; + tx_state = rec <= tec ? 
new_state : 0; + ti_hecc_change_state(ndev, rx_state, tx_state); + } + } + if (int_status & HECC_CANGIF_GMIF) { while (priv->tx_tail - priv->tx_head > 0) { mbxno = get_tx_tail_mb(priv); mbx_mask = BIT(mbxno); if (!(mbx_mask & hecc_read(priv, HECC_CANTA))) break; - hecc_clear_bit(priv, HECC_CANMIM, mbx_mask); hecc_write(priv, HECC_CANTA, mbx_mask); spin_lock_irqsave(&priv->mbx_lock, flags); hecc_clear_bit(priv, HECC_CANME, mbx_mask); @@ -695,16 +760,15 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { can_rx_offload_irq_offload_timestamp(&priv->offload, rx_pending); - hecc_write(priv, HECC_CANRMP, rx_pending); } } /* clear all interrupt conditions - read back to avoid spurious ints */ if (priv->use_hecc1int) { - hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); + hecc_write(priv, HECC_CANGIF1, handled); int_status = hecc_read(priv, HECC_CANGIF1); } else { - hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); + hecc_write(priv, HECC_CANGIF0, handled); int_status = hecc_read(priv, HECC_CANGIF0); } @@ -877,7 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev) priv->offload.mailbox_read = ti_hecc_mailbox_read; priv->offload.mb_first = HECC_RX_FIRST_MBOX; - priv->offload.mb_last = HECC_MAX_TX_MBOX; + priv->offload.mb_last = HECC_RX_LAST_MBOX; err = can_rx_offload_add_timestamp(ndev, &priv->offload); if (err) { dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index bd6eb9967630..2f74f6704c12 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -623,6 +623,7 @@ static int gs_can_open(struct net_device *netdev) rc); usb_unanchor_urb(urb); + usb_free_urb(urb); break; } diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 19a702ac49e4..21faa2ec4632 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -876,9 +876,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_candev(priv->netdev); - free_candev(priv->netdev); - mcba_urb_unlink(priv); + free_candev(priv->netdev); } static struct usb_driver mcba_usb_driver = { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 617da295b6c1..d2539c95adb6 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -100,7 +100,7 @@ struct pcan_usb_msg_context { u8 *end; u8 rec_cnt; u8 rec_idx; - u8 rec_data_idx; + u8 rec_ts_idx; struct net_device *netdev; struct pcan_usb *pdev; }; @@ -436,8 +436,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, } if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { /* no error (back to active state) */ - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; - return 0; + new_state = CAN_STATE_ERROR_ACTIVE; + break; } break; @@ -460,9 +460,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, } if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { - /* no error (back to active state) */ - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; - return 0; + /* no error (back to warning state) */ + new_state = CAN_STATE_ERROR_WARNING; + break; } break; @@ -501,6 +501,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, mc->pdev->dev.can.can_stats.error_warning++; break; + case CAN_STATE_ERROR_ACTIVE: + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + break; + default: 
/* CAN_STATE_MAX (trick to handle other errors) */ cf->can_id |= CAN_ERR_CRTL; @@ -547,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, mc->ptr += PCAN_USB_CMD_ARGS; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { - int err = pcan_usb_decode_ts(mc, !mc->rec_idx); + int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); if (err) return err; + + /* Next packet in the buffer will have a timestamp on a single + * byte + */ + mc->rec_ts_idx++; } switch (f) { @@ -632,10 +642,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) cf->can_dlc = get_can_dlc(rec_len); - /* first data packet timestamp is a word */ - if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) + /* Only first packet timestamp is a word */ + if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) goto decode_failed; + /* Next packet in the buffer will have a timestamp on a single byte */ + mc->rec_ts_idx++; + /* read data */ memset(cf->data, 0x0, sizeof(cf->data)); if (status_len & PCAN_USB_STATUSLEN_RTR) { @@ -688,7 +701,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf) /* handle normal can frames here */ } else { err = pcan_usb_decode_data(&mc, sl); - mc.rec_data_idx++; } } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 65dce642b86b..0b7766b715fd 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -750,7 +750,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ - dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); + dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; goto lbl_free_candev; diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d596a2ad7f78..8fa224b28218 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -996,9 +996,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf) netdev_info(priv->netdev, "device disconnected\n"); unregister_netdev(priv->netdev); - free_candev(priv->netdev); - unlink_all_urbs(priv); + free_candev(priv->netdev); } } diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 911b34316c9d..7c482b2d78d2 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1599,7 +1599,6 @@ static const struct xcan_devtype_data xcan_zynq_data = { static const struct xcan_devtype_data xcan_axi_data = { .cantype = XAXI_CAN, - .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 526ba2ab66f1..cc3536315eff 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1845,7 +1845,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port, loc = B53_EG_MIR_CTL; b53_read16(dev, B53_MGMT_PAGE, loc, ®); - reg &= ~MIRROR_MASK; reg |= BIT(port); b53_write16(dev, B53_MGMT_PAGE, loc, reg); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 26509fa37a50..69fc13046ac7 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -37,22 +37,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) unsigned int i; u32 reg, offset; - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_IMP; - else 
- offset = CORE_STS_OVERRIDE_IMP2; - /* Enable the port memories */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); reg &= ~P_TXQ_PSM_VDD(port); core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); - /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ - reg = core_readl(priv, CORE_IMP_CTL); - reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); - reg &= ~(RX_DIS | TX_DIS); - core_writel(priv, reg, CORE_IMP_CTL); - /* Enable forwarding */ core_writel(priv, SW_FWDG_EN, CORE_SWMODE); @@ -71,10 +60,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) b53_brcm_hdr_setup(ds, port); - /* Force link status for IMP port */ - reg = core_readl(priv, offset); - reg |= (MII_SW_OR | LINK_STS); - core_writel(priv, reg, offset); + if (port == 8) { + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_IMP; + else + offset = CORE_STS_OVERRIDE_IMP2; + + /* Force link status for IMP port */ + reg = core_readl(priv, offset); + reg |= (MII_SW_OR | LINK_STS); + core_writel(priv, reg, offset); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + } else { + reg = core_readl(priv, CORE_G_PCTL_PORT(port)); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_G_PCTL_PORT(port)); + } } static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) @@ -1209,10 +1215,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); priv->wol_ports_mask = 0; + /* Disable interrupts */ + bcm_sf2_intr_disable(priv); dsa_unregister_switch(priv->dev->ds); bcm_sf2_cfp_exit(priv->dev->ds); - /* Disable all ports and interrupts */ - bcm_sf2_sw_suspend(priv->dev->ds); bcm_sf2_mdio_unregister(priv); return 0; diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index a23d3ffdf0c4..24a5e99f7fd5 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1224,10 +1224,6 @@ static int ksz8795_switch_init(struct ksz_device *dev) { int i; - mutex_init(&dev->stats_mutex); - mutex_init(&dev->alu_mutex); - mutex_init(&dev->vlan_mutex); - dev->ds->ops = &ksz8795_switch_ops; for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) { diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index d0f8153e86b7..8b00f8e6c02f 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -25,6 +25,7 @@ KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT, static int ksz8795_spi_probe(struct spi_device *spi) { + struct regmap_config rc; struct ksz_device *dev; int i, ret; @@ -33,9 +34,9 @@ static int ksz8795_spi_probe(struct spi_device *spi) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) { - dev->regmap[i] = devm_regmap_init_spi(spi, - &ksz8795_regmap_config - [i]); + rc = ksz8795_regmap_config[i]; + rc.lock_arg = &dev->regmap_mutex; + dev->regmap[i] = devm_regmap_init_spi(spi, &rc); if (IS_ERR(dev->regmap[i])) { ret = PTR_ERR(dev->regmap[i]); dev_err(&spi->dev, diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 0b1e01f0873d..fdffd9e0c518 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -17,6 +17,7 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0); static int ksz9477_i2c_probe(struct i2c_client *i2c, const struct 
i2c_device_id *i2c_id) { + struct regmap_config rc; struct ksz_device *dev; int i, ret; @@ -25,8 +26,9 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c, return -ENOMEM; for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) { - dev->regmap[i] = devm_regmap_init_i2c(i2c, - &ksz9477_regmap_config[i]); + rc = ksz9477_regmap_config[i]; + rc.lock_arg = &dev->regmap_mutex; + dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc); if (IS_ERR(dev->regmap[i])) { ret = PTR_ERR(dev->regmap[i]); dev_err(&i2c->dev, diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index 2938e892b631..16939f29faa5 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* SPDX-License-Identifier: GPL-2.0 */ +/* * Microchip KSZ9477 register definitions * * Copyright (C) 2017-2018 Microchip Technology Inc. diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index f4198d6f72be..c5f64959a184 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -24,6 +24,7 @@ KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT, static int ksz9477_spi_probe(struct spi_device *spi) { + struct regmap_config rc; struct ksz_device *dev; int i, ret; @@ -32,8 +33,9 @@ static int ksz9477_spi_probe(struct spi_device *spi) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) { - dev->regmap[i] = devm_regmap_init_spi(spi, - &ksz9477_regmap_config[i]); + rc = ksz9477_regmap_config[i]; + rc.lock_arg = &dev->regmap_mutex; + dev->regmap[i] = devm_regmap_init_spi(spi, &rc); if (IS_ERR(dev->regmap[i])) { ret = PTR_ERR(dev->regmap[i]); dev_err(&spi->dev, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b0b870f0c252..fe47180c908b 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -436,7 +436,7 @@ int ksz_switch_register(struct ksz_device *dev, } mutex_init(&dev->dev_mutex); - mutex_init(&dev->stats_mutex); + mutex_init(&dev->regmap_mutex); mutex_init(&dev->alu_mutex); mutex_init(&dev->vlan_mutex); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index dd60d0837fc6..a20ebb749377 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Microchip switch driver common header +/* SPDX-License-Identifier: GPL-2.0 */ +/* Microchip switch driver common header * * Copyright (C) 2017-2019 Microchip Technology Inc. 
*/ @@ -47,7 +47,7 @@ struct ksz_device { const char *name; struct mutex dev_mutex; /* device access */ - struct mutex stats_mutex; /* status access */ + struct mutex regmap_mutex; /* regmap access */ struct mutex alu_mutex; /* ALU access */ struct mutex vlan_mutex; /* vlan access */ const struct ksz_dev_ops *dev_ops; @@ -290,6 +290,18 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data); } +static inline void ksz_regmap_lock(void *__mtx) +{ + struct mutex *mtx = __mtx; + mutex_lock(mtx); +} + +static inline void ksz_regmap_unlock(void *__mtx) +{ + struct mutex *mtx = __mtx; + mutex_unlock(mtx); +} + /* Regmap tables generation */ #define KSZ_SPI_OP_RD 3 #define KSZ_SPI_OP_WR 2 @@ -314,6 +326,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, .write_flag_mask = \ KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp, \ regbits, regpad), \ + .lock = ksz_regmap_lock, \ + .unlock = ksz_regmap_unlock, \ .reg_format_endian = REGMAP_ENDIAN_BIG, \ .val_format_endian = REGMAP_ENDIAN_BIG \ } diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig index f40b248f0b23..ffac0ea4e8d5 100644 --- a/drivers/net/dsa/sja1105/Kconfig +++ b/drivers/net/dsa/sja1105/Kconfig @@ -26,8 +26,8 @@ config NET_DSA_SJA1105_PTP config NET_DSA_SJA1105_TAS bool "Support for the Time-Aware Scheduler on NXP SJA1105" - depends on NET_DSA_SJA1105 - depends on NET_SCH_TAPRIO + depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO + depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m help This enables support for the TTEthernet-based egress scheduling engine in the SJA1105 DSA driver, which is controlled using a diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index e53e494c22e0..fbb564c3beb8 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> */ #ifndef _SJA1105_H diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h index 740dadf43f01..1fc0d13dc623 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> */ #ifndef _SJA1105_DYNAMIC_CONFIG_H #define _SJA1105_DYNAMIC_CONFIG_H diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index af456b0a4d27..394e12a6ad59 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> */ #ifndef _SJA1105_PTP_H #define _SJA1105_PTP_H diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h index 7f87022a2d61..f4a5c5c04311 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.h +++ b/drivers/net/dsa/sja1105/sja1105_static_config.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: BSD-3-Clause 
- * Copyright (c) 2016-2018, NXP Semiconductors +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2016-2018, NXP Semiconductors * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> */ #ifndef _SJA1105_STATIC_CONFIG_H diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h index 0b803c30e640..0aad212d88b2 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.h +++ b/drivers/net/dsa/sja1105/sja1105_tas.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> */ #ifndef _SJA1105_TAS_H #define _SJA1105_TAS_H diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index b4a0fb281e69..bb65dd39f847 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - aq_nic_set_packet_filter(aq_nic, ndev->flags); - - aq_nic_set_multicast_list(aq_nic, ndev); + (void)aq_nic_set_multicast_list(aq_nic, ndev); } static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 8f66e7817811..137c1de4c6ec 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -631,9 +631,12 @@ err_exit: int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) { - unsigned int packet_filter = self->packet_filter; + const struct aq_hw_ops *hw_ops = self->aq_hw_ops; + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + unsigned int packet_filter = ndev->flags; struct netdev_hw_addr *ha = NULL; unsigned int i = 0U; + int err = 0; self->mc_list.count = 0; if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { @@ -641,29 +644,28 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) } else { netdev_for_each_uc_addr(ha, ndev) { ether_addr_copy(self->mc_list.ar[i++], ha->addr); - - if (i >= AQ_HW_MULTICAST_ADDRESS_MAX) - break; } } - if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { - packet_filter |= IFF_ALLMULTI; - } else { - netdev_for_each_mc_addr(ha, ndev) { - ether_addr_copy(self->mc_list.ar[i++], ha->addr); - - if (i >= AQ_HW_MULTICAST_ADDRESS_MAX) - break; + cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST); + if (cfg->is_mc_list_enabled) { + if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) { + packet_filter |= IFF_ALLMULTI; + } else { + netdev_for_each_mc_addr(ha, ndev) { + ether_addr_copy(self->mc_list.ar[i++], + ha->addr); + } } } if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) { - packet_filter |= IFF_MULTICAST; self->mc_list.count = i; - self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, - self->mc_list.ar, - self->mc_list.count); + err = hw_ops->hw_multicast_list_set(self->aq_hw, + self->mc_list.ar, + self->mc_list.count); + if (err < 0) + return err; } return aq_nic_set_packet_filter(self, packet_filter); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 3901d7994ca1..76bdbe1596d6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -313,6 +313,7 @@ int 
aq_ring_rx_clean(struct aq_ring_s *self, break; buff->is_error |= buff_->is_error; + buff->is_cso_err |= buff_->is_cso_err; } while (!buff_->is_eop); @@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, err = 0; goto err_exit; } - if (buff->is_error) { + if (buff->is_error || buff->is_cso_err) { buff_ = buff; do { next_ = buff_->next, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 30f7fc4c97ff..2ad3fa6316ce 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, cfg->is_vlan_force_promisc); hw_atl_rpfl2multicast_flr_en_set(self, - IS_FILTER_ENABLED(IFF_ALLMULTI), 0); + IS_FILTER_ENABLED(IFF_ALLMULTI) && + IS_FILTER_ENABLED(IFF_MULTICAST), 0); hw_atl_rpfl2_accept_all_mc_packets_set(self, - IS_FILTER_ENABLED(IFF_ALLMULTI)); + IS_FILTER_ENABLED(IFF_ALLMULTI) && + IS_FILTER_ENABLED(IFF_MULTICAST)); hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); - cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) hw_atl_rpfl2_uc_flr_en_set(self, @@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) static int hw_atl_b0_hw_stop(struct aq_hw_s *self) { + int err; + u32 val; + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK); /* Invalidate Descriptor Cache to prevent writing to the cached * descriptors and to the data pointer of those descriptors */ - hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1); + hw_atl_rdm_rx_dma_desc_cache_init_tgl(self); - return aq_hw_err_from_flags(self); + err = aq_hw_err_from_flags(self); + + if (err) + goto err_exit; + + readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get, + self, val, val == 1, 1000U, 10000U); + +err_exit: + return err; } static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 1149812ae463..6f340695e6bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode); } -void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init) +void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw) { + u32 val; + + val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK, + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT); + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT, - init); + val ^ 1); +} + +u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR, + RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK, + RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT); } void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 0c37abbabca5..c3ee278c3747 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_pkt_buff_size_per_tc, u32 buffer); -/* set rdm rx dma descriptor cache init */ -void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init); +/* toggle rdm rx dma descriptor cache init */ +void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw); + +/* get rdm rx dma descriptor cache init done */ +u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw); /* set rx xoff enable (per tc) */ void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index c3febcdfa92e..35887ad89025 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -318,6 +318,25 @@ /* default value of bitfield rdm_desc_init_i */ #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0 +/* rdm_desc_init_done_i bitfield definitions + * preprocessor definitions for the bitfield rdm_desc_init_done_i. + * port="pif_rdm_desc_init_done_i" + */ + +/* register address for bitfield rdm_desc_init_done_i */ +#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10 +/* bitmask for bitfield rdm_desc_init_done_i */ +#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U +/* inverted bitmask for bitfield rdm_desc_init_done_i */ +#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe +/* lower bit position of bitfield rdm_desc_init_done_i */ +#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U +/* width of bitfield rdm_desc_init_done_i */ +#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1 +/* default value of bitfield rdm_desc_init_done_i */ +#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0 + + /* rx int_desc_wrb_en bitfield definitions * preprocessor definitions for the bitfield "int_desc_wrb_en". * port="pif_rdm_int_desc_wrb_en_i" diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index da726489e3c8..7bc51f8d6f2f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) /* Convert PHY temperature from 1/256 degree Celsius * to 1/1000 degree Celsius. 
*/ - *temp = temp_res * 1000 / 256; + *temp = (temp_res & 0xFFFF) * 1000 / 256; return 0; } diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 42d2e1b02c44..664d664e0925 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -256,6 +256,9 @@ static int emac_rockchip_remove(struct platform_device *pdev) if (priv->regulator) regulator_disable(priv->regulator); + if (priv->soc_data->need_div_macclk) + clk_disable_unprepare(priv->macclk); + free_netdev(ndev); return err; } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index e24f5d2b6afe..53055ce5dfd6 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM default y depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \ SIBYTE_SB1xxx_SOC - select DIMLIB ---help--- If you have a network (Ethernet) chipset belonging to this class, say Y. @@ -69,6 +68,7 @@ config BCMGENET select FIXED_PHY select BCM7XXX_PHY select MDIO_BCM_UNIMAC + select DIMLIB help This driver supports the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. @@ -188,6 +188,7 @@ config SYSTEMPORT select MII select PHYLIB select FIXED_PHY + select DIMLIB help This driver supports the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset using an internal @@ -200,6 +201,7 @@ config BNXT select LIBCRC32C select NET_DEVLINK select PAGE_POOL + select DIMLIB ---help--- This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit Ethernet cards. To compile this driver as a module, choose M here: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b4a8cf620a0c..04ec909e06df 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10382,7 +10382,8 @@ static void bnxt_cleanup_pci(struct bnxt *bp) { bnxt_unmap_bars(bp, bp->pdev); pci_release_regions(bp->pdev); - pci_disable_device(bp->pdev); + if (pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); } static void bnxt_init_dflt_coal(struct bnxt *bp) @@ -10669,14 +10670,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; } /* fall through */ - case BNXT_FW_RESET_STATE_RESET_FW: { - u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs; - + case BNXT_FW_RESET_STATE_RESET_FW: bnxt_reset_all(bp); bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; - bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); + bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; - } case BNXT_FW_RESET_STATE_ENABLE_DEV: if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && bp->fw_health) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index e664392dccc0..7151244f8c7d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -29,25 +29,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); health_status = val & 0xffff; - if (health_status == BNXT_FW_STATUS_HEALTHY) { - rc = devlink_fmsg_string_pair_put(fmsg, "FW status", - "Healthy;"); - if (rc) - return rc; - } else if (health_status < BNXT_FW_STATUS_HEALTHY) { - rc = devlink_fmsg_string_pair_put(fmsg, "FW status", - "Not yet completed 
initialization;"); + if (health_status < BNXT_FW_STATUS_HEALTHY) { + rc = devlink_fmsg_string_pair_put(fmsg, "Description", + "Not yet completed initialization"); if (rc) return rc; } else if (health_status > BNXT_FW_STATUS_HEALTHY) { - rc = devlink_fmsg_string_pair_put(fmsg, "FW status", - "Encountered fatal error and cannot recover;"); + rc = devlink_fmsg_string_pair_put(fmsg, "Description", + "Encountered fatal error and cannot recover"); if (rc) return rc; } if (val >> 16) { - rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16); + rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16); if (rc) return rc; } @@ -215,25 +210,68 @@ enum bnxt_dl_param_id { static const struct bnxt_dl_nvm_param nvm_params[] = { {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, - BNXT_NVM_SHARED_CFG, 1}, + BNXT_NVM_SHARED_CFG, 1, 1}, {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI, - BNXT_NVM_SHARED_CFG, 1}, + BNXT_NVM_SHARED_CFG, 1, 1}, {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, - NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10}, + NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4}, {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, - NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7}, + NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4}, {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, - BNXT_NVM_SHARED_CFG, 1}, + BNXT_NVM_SHARED_CFG, 1, 1}, }; +union bnxt_nvm_data { + u8 val8; + __le32 val32; +}; + +static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst, + union devlink_param_value *src, + int nvm_num_bits, int dl_num_bytes) +{ + u32 val32 = 0; + + if (nvm_num_bits == 1) { + dst->val8 = src->vbool; + return; + } + if (dl_num_bytes == 4) + val32 = src->vu32; + else if (dl_num_bytes == 2) + val32 = (u32)src->vu16; + else if (dl_num_bytes == 1) + val32 = (u32)src->vu8; + dst->val32 = cpu_to_le32(val32); +} + +static void bnxt_copy_from_nvm_data(union devlink_param_value *dst, + union bnxt_nvm_data *src, + int nvm_num_bits, int dl_num_bytes) +{ + u32 val32; + + if (nvm_num_bits == 1) { + dst->vbool = src->val8; + return; + } + val32 = le32_to_cpu(src->val32); + if (dl_num_bytes == 4) + dst->vu32 = val32; + else if (dl_num_bytes == 2) + dst->vu16 = (u16)val32; + else if (dl_num_bytes == 1) + dst->vu8 = (u8)val32; +} + static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, int msg_len, union devlink_param_value *val) { struct hwrm_nvm_get_variable_input *req = msg; - void *data_addr = NULL, *buf = NULL; struct bnxt_dl_nvm_param nvm_param; - int bytesize, idx = 0, rc, i; + union bnxt_nvm_data *data; dma_addr_t data_dma_addr; + int idx = 0, rc, i; /* Get/Set NVM CFG parameter is supported only on PFs */ if (BNXT_VF(bp)) @@ -254,47 +292,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; - bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; - switch (bytesize) { - case 1: - if (nvm_param.num_bits == 1) - buf = &val->vbool; - else - buf = &val->vu8; - break; - case 2: - buf = &val->vu16; - break; - case 4: - buf = &val->vu32; - break; - default: - return -EFAULT; - } - - data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize, - &data_dma_addr, GFP_KERNEL); - if (!data_addr) + data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), + &data_dma_addr, GFP_KERNEL); + if (!data) return -ENOMEM; req->dest_data_addr = cpu_to_le64(data_dma_addr); - req->data_len = cpu_to_le16(nvm_param.num_bits); + 
req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); req->option_num = cpu_to_le16(nvm_param.offset); req->index_0 = cpu_to_le16(idx); if (idx) req->dimensions = cpu_to_le16(1); if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { - memcpy(data_addr, buf, bytesize); + bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, + nvm_param.dl_num_bytes); rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); } else { rc = hwrm_send_message_silent(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + if (!rc) + bnxt_copy_from_nvm_data(val, data, + nvm_param.nvm_num_bits, + nvm_param.dl_num_bytes); } - if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) - memcpy(buf, data_addr, bytesize); - - dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); + dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); if (rc == -EACCES) netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index b97e0baeb42d..2f4fd0a7d04b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param { u16 id; u16 offset; u16 dir_type; - u16 num_bits; + u16 nvm_num_bits; + u8 dl_num_bytes; }; void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 12cb77ef1081..1de51811fcb4 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv) /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); - udelay(2); - bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -2018,6 +2016,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) */ if (priv->internal_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; } else if (priv->ext_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { @@ -2611,11 +2611,16 @@ static void bcmgenet_irq_task(struct work_struct *work) priv->irq0_stat = 0; spin_unlock_irq(&priv->lock); + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + /* Link UP/DOWN event */ - if (status & UMAC_IRQ_LINK_EVENT) { - priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP); + if (status & UMAC_IRQ_LINK_EVENT) phy_mac_interrupt(priv->dev->phydev); - } + } /* bcmgenet_isr1: handle Rx and Tx priority queues */ @@ -2710,7 +2715,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } /* all other interested interrupts handled in bottom half */ - status &= UMAC_IRQ_LINK_EVENT; + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); if (status) { /* Save irq status for bottom-half processing. 
*/ spin_lock_irqsave(&priv->lock, flags); @@ -3631,6 +3636,7 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(dev->phydev); /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); bcmgenet_mii_config(priv->dev, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 4a8fc03d82fd..dbc69d8fa05f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -366,6 +366,7 @@ struct bcmgenet_mib_counters { #define EXT_PWR_DOWN_PHY_EN (1 << 20) #define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) #define RGMII_LINK (1 << 4) #define OOB_DISABLE (1 << 5) #define RGMII_MODE_EN (1 << 6) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 970e478a9017..dbe18cdf6c1b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -181,8 +181,38 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) const char *phy_name = NULL; u32 id_mode_dis = 0; u32 port_ctrl; + int bmcr = -1; + int ret; u32 reg; + /* MAC clocking workaround during reset of umac state machines */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) { + /* An MII PHY must be isolated to prevent TXC contention */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ret = phy_read(phydev, MII_BMCR); + if (ret >= 0) { + bmcr = ret; + ret = phy_write(phydev, MII_BMCR, + bmcr | BMCR_ISOLATE); + } + if (ret) { + netdev_err(dev, "failed to isolate PHY\n"); + return ret; + } + } + /* Switch MAC clocking to RGMII generated clock */ + bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); + /* Ensure 5 clks with Rx disabled + * followed by 5 clks with Reset asserted + */ + udelay(4); + reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN); + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + /* Ensure 5 more clocks before Rx is enabled */ + udelay(2); + } + priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); @@ -214,6 +244,9 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) phy_set_max_speed(phydev, SPEED_100); bcmgenet_sys_writel(priv, PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); + /* Restore the MII PHY after isolation */ + if (bmcr >= 0) + phy_write(phydev, MII_BMCR, bmcr); break; case PHY_INTERFACE_MODE_REVMII: @@ -258,7 +291,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) */ if (priv->ext_phy) { reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg |= RGMII_MODE_EN | id_mode_dis; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } @@ -273,11 +310,12 @@ int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; struct phy_device *phydev; - u32 phy_flags; + u32 phy_flags = 0; int ret; /* Communicate the integrated PHY revision */ - phy_flags = priv->gphy_rev; + if (priv->internal_phy) + phy_flags = priv->gphy_rev; /* Initialize link state variables that bcmgenet_mii_setup() uses */ priv->old_link = -1; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 8e8d557901a9..1e1b774e1953 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ 
b/drivers/net/ethernet/cadence/macb_main.c @@ -3405,17 +3405,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, return err; } - *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); if (IS_ERR(*tx_clk)) - *tx_clk = NULL; + return PTR_ERR(*tx_clk); - *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); + *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); if (IS_ERR(*rx_clk)) - *rx_clk = NULL; + return PTR_ERR(*rx_clk); - *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk"); + *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); if (IS_ERR(*tsu_clk)) - *tsu_clk = NULL; + return PTR_ERR(*tsu_clk); err = clk_prepare_enable(*pclk); if (err) { diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h index be2bafc7beeb..a04eccbc78e8 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.h +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* cavium_ptp.h - PTP 1588 clock on Cavium hardware * Copyright (c) 2003-2015, 2017 Cavium, Inc. */ diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 0e5de88fd6e8..cdd7e5da4a74 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; - netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM; + netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; mac = of_get_mac_address(pdev->dev.of_node); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index a4dead4ab0ed..86b528d8364c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -695,10 +695,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->write_cmpl_support = adap->params.write_cmpl_support; } -static void uld_attach(struct adapter *adap, unsigned int uld) +static int uld_attach(struct adapter *adap, unsigned int uld) { - void *handle; struct cxgb4_lld_info lli; + void *handle; uld_init(adap, &lli); uld_queue_init(adap, uld, &lli); @@ -708,7 +708,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) dev_warn(adap->pdev_dev, "could not attach to the %s driver, error %ld\n", adap->uld[uld].name, PTR_ERR(handle)); - return; + return PTR_ERR(handle); } adap->uld[uld].handle = handle; @@ -716,22 +716,22 @@ static void uld_attach(struct adapter *adap, unsigned int uld) if (adap->flags & CXGB4_FULL_INIT_DONE) adap->uld[uld].state_change(handle, CXGB4_STATE_UP); + + return 0; } -/** - * cxgb4_register_uld - register an upper-layer driver - * @type: the ULD type - * @p: the ULD methods +/* cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods * - * Registers an upper-layer driver with this driver and notifies the ULD - * about any presently available devices that support its type. Returns - * %-EBUSY if a ULD of the same type is already registered. + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. 
  */
 void cxgb4_register_uld(enum cxgb4_uld type,
 			const struct cxgb4_uld_info *p)
 {
-	int ret = 0;
 	struct adapter *adap;
+	int ret = 0;
 
 	if (type >= CXGB4_ULD_MAX)
 		return;
@@ -763,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
 		if (ret)
 			goto free_irq;
 		adap->uld[type] = *p;
-		uld_attach(adap, type);
+		ret = uld_attach(adap, type);
+		if (ret)
+			goto free_txq;
 		continue;
+free_txq:
+		release_sge_txq_uld(adap, type);
 free_irq:
 		if (adap->flags & CXGB4_FULL_INIT_DONE)
 			quiesce_rx_uld(adap, type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b3da81e90132..928bfea5457b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 	 * write the CIDX Updates into the Status Page at the end of the
 	 * TX Queue.
 	 */
-	c.autoequiqe_to_viid = htonl((dbqt
-				      ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
-				      : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+	c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
 				     FW_EQ_ETH_CMD_VIID_V(pi->viid));
 	c.fetchszm_to_iqid =
-		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
-						 ? HOSTFCMODE_INGRESS_QUEUE_X
-						 : HOSTFCMODE_STATUS_PAGE_X) |
+		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
 		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
 		      FW_EQ_ETH_CMD_FETCHRO_F |
 		      FW_EQ_ETH_CMD_IQID_V(iqid));
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
index 0b12f89bf89a..9fdf77d5eb37 100644
--- a/drivers/net/ethernet/cortina/gemini.h
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Register definitions for Gemini GMAC Ethernet device driver
  *
  * Copyright (C) 2006 Storlink, Corp.
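The gemini.h change just above, like the cavium_ptp.h hunk earlier and the dpaa2, dprtc, and hns3 header hunks further on, is the same one-line cleanup: the SPDX tag in a header file moves from a C99 // comment to a C block comment, the form the kernel's license-rules documentation prescribes for .h files. A minimal sketch of the convention (the file name and macro are illustrative, not from the commit):

	/* SPDX-License-Identifier: GPL-2.0 */
	/* example.h - headers carry the SPDX tag in a block comment, while C
	 * sources use the // form; block comments also survive when a header
	 * is pulled into assembly or linker-script preprocessing.
	 */
	#ifndef EXAMPLE_H
	#define EXAMPLE_H

	#define EXAMPLE_MAGIC	0x1234

	#endif /* EXAMPLE_H */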
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 9b7af94a40bb..96e9565f1e08 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -727,6 +727,18 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, */ nfrags = skb_shinfo(skb)->nr_frags; + /* Setup HW checksumming */ + csum_vlan = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL && + !ftgmac100_prep_tx_csum(skb, &csum_vlan)) + goto drop; + + /* Add VLAN tag */ + if (skb_vlan_tag_present(skb)) { + csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG; + csum_vlan |= skb_vlan_tag_get(skb) & 0xffff; + } + /* Get header len */ len = skb_headlen(skb); @@ -753,19 +765,6 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb, if (nfrags == 0) f_ctl_stat |= FTGMAC100_TXDES0_LTS; txdes->txdes3 = cpu_to_le32(map); - - /* Setup HW checksumming */ - csum_vlan = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL && - !ftgmac100_prep_tx_csum(skb, &csum_vlan)) - goto drop; - - /* Add VLAN tag */ - if (skb_vlan_tag_present(skb)) { - csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG; - csum_vlan |= skb_vlan_tag_get(skb) & 0xffff; - } - txdes->txdes1 = cpu_to_le32(csum_vlan); /* Next descriptor */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 162d7d8fb295..19379bae0144 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1235,6 +1235,8 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable) priv->rx_td_enabled = enable; } +static void update_tx_fqids(struct dpaa2_eth_priv *priv); + static int link_state_update(struct dpaa2_eth_priv *priv) { struct dpni_link_state state = {0}; @@ -1261,6 +1263,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv) goto out; if (state.up) { + update_tx_fqids(priv); netif_carrier_on(priv->net_dev); netif_tx_start_all_queues(priv->net_dev); } else { @@ -2533,6 +2536,47 @@ static int set_pause(struct dpaa2_eth_priv *priv) return 0; } +static void update_tx_fqids(struct dpaa2_eth_priv *priv) +{ + struct dpni_queue_id qid = {0}; + struct dpaa2_eth_fq *fq; + struct dpni_queue queue; + int i, j, err; + + /* We only use Tx FQIDs for FQID-based enqueue, so check + * if DPNI version supports it before updating FQIDs + */ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, + DPNI_ENQUEUE_FQID_VER_MINOR) < 0) + return; + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + if (fq->type != DPAA2_TX_CONF_FQ) + continue; + for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, j, fq->flowid, + &queue, &qid); + if (err) + goto out_err; + + fq->tx_fqid[j] = qid.fqid; + if (fq->tx_fqid[j] == 0) + goto out_err; + } + } + + priv->enqueue = dpaa2_eth_enqueue_fq; + + return; + +out_err: + netdev_info(priv->net_dev, + "Error reading Tx FQID, fallback to QDID-based enqueue\n"); + priv->enqueue = dpaa2_eth_enqueue_qd; +} + /* Configure the DPNI object this interface is associated with */ static int setup_dpni(struct fsl_mc_device *ls_dev) { @@ -3306,6 +3350,9 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) if (status & DPNI_IRQ_EVENT_LINK_CHANGED) link_state_update(netdev_priv(net_dev)); + if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) + set_mac_addr(netdev_priv(net_dev)); + return IRQ_HANDLED; } @@ -3331,7 +3378,8 @@ static int setup_irqs(struct fsl_mc_device *ls_dev) } 
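/* Editorial sketch, not part of the commit: the dpaa2-eth hunks above and
 * below wire up a second DPNI interrupt event. dpni_irq0_handler_thread()
 * now calls set_mac_addr() when the endpoint behind the DPNI changes, and
 * the dpni_set_irq_mask() call that follows must also OR the new event bit
 * into the mask, since an event that is handled but still masked is never
 * delivered:
 *
 *	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
 *				DPNI_IRQ_INDEX,
 *				DPNI_IRQ_EVENT_LINK_CHANGED |
 *				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
 *
 * Enabling a new event source needs both the handler branch and the mask
 * bit; adding only one of them silently drops the event.
 */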
err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, - DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED | + DPNI_IRQ_EVENT_ENDPOINT_CHANGED); if (err < 0) { dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); goto free_irq; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h index ff2e177395d4..df2458a5e9ef 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2018 NXP */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index fd583911b6c0..ee0711d06b3a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -133,9 +133,12 @@ int dpni_reset(struct fsl_mc_io *mc_io, */ #define DPNI_IRQ_INDEX 0 /** - * IRQ event - indicates a change in link state + * IRQ events: + * indicates a change in link state + * indicates a change in endpoint */ #define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 +#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002 int dpni_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h index 720cd50f5895..4ac05bfef338 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2013-2016 Freescale Semiconductor Inc. * Copyright 2016-2018 NXP diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h index be7914c1634d..311c184e1aef 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2013-2016 Freescale Semiconductor Inc. 
* Copyright 2016-2018 NXP diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d4d4c72adf49..a9c386b63581 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3558,7 +3558,7 @@ fec_probe(struct platform_device *pdev) for (i = 0; i < irq_cnt; i++) { snprintf(irq_name, sizeof(irq_name), "int%d", i); - irq = platform_get_irq_byname(pdev, irq_name); + irq = platform_get_irq_byname_optional(pdev, irq_name); if (irq < 0) irq = platform_get_irq(pdev, i); if (irq < 0) { @@ -3645,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev) regulator_disable(fep->reg_phy); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 19e2365be7d8..945643c02615 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -600,9 +600,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); - irq = platform_get_irq_byname(pdev, "pps"); + irq = platform_get_irq_byname_optional(pdev, "pps"); if (irq < 0) - irq = platform_get_irq(pdev, irq_idx); + irq = platform_get_irq_optional(pdev, irq_idx); /* Failure to get an irq is not fatal, * only the PTP_CLOCK_PPS clock events should stop */ diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 59564ac99d2a..edec61dfc868 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; page_info = &rx->data.page_info[idx]; + dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx], + PAGE_SIZE, DMA_FROM_DEVICE); /* gvnic can only receive into registered segments. 
If the buffer * can't be recycled, our only choice is to copy the data out of diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 778b87b5a06c..0a9a7ee2a866 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc, seg_desc->seg.seg_addr = cpu_to_be64(addr); } -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb) +static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses, + u64 iov_offset, u64 iov_len) +{ + dma_addr_t dma; + u64 addr; + + for (addr = iov_offset; addr < iov_offset + iov_len; + addr += PAGE_SIZE) { + dma = page_buses[addr / PAGE_SIZE]; + dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE); + } +} + +static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, + struct device *dev) { int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset; union gve_tx_desc *pkt_desc, *seg_desc; @@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb) skb_copy_bits(skb, 0, tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, hlen); + gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, + info->iov[hdr_nfrags - 1].iov_offset, + info->iov[hdr_nfrags - 1].iov_len); copy_offset = hlen; for (i = payload_iov; i < payload_nfrags + payload_iov; i++) { @@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb) skb_copy_bits(skb, copy_offset, tx->tx_fifo.base + info->iov[i].iov_offset, info->iov[i].iov_len); + gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, + info->iov[i].iov_offset, + info->iov[i].iov_len); copy_offset += info->iov[i].iov_len; } @@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } - nsegs = gve_tx_add_skb(tx, skb); + nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev); netdev_tx_sent_queue(tx->netdev_txq, skb->len); skb_tx_timestamp(skb); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index c84167447abe..4606a7e4a6d1 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -237,6 +237,7 @@ struct hip04_priv { dma_addr_t rx_phys[RX_DESC_NUM]; unsigned int rx_head; unsigned int rx_buf_size; + unsigned int rx_cnt_remaining; struct device_node *phy_node; struct phy_device *phy; @@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi); struct net_device *ndev = priv->ndev; struct net_device_stats *stats = &ndev->stats; - unsigned int cnt = hip04_recv_cnt(priv); struct rx_desc *desc; struct sk_buff *skb; unsigned char *buf; @@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) /* clean up tx descriptors */ tx_remaining = hip04_tx_reclaim(ndev, false); - - while (cnt && !last) { + priv->rx_cnt_remaining += hip04_recv_cnt(priv); + while (priv->rx_cnt_remaining && !last) { buf = priv->rx_buf[priv->rx_head]; skb = build_skb(buf, priv->rx_buf_size); if (unlikely(!skb)) { @@ -635,11 +635,13 @@ refill: hip04_set_recv_desc(priv, phys); priv->rx_head = RX_NEXT(priv->rx_head); - if (rx >= budget) + if (rx >= budget) { + --priv->rx_cnt_remaining; goto done; + } - if (--cnt == 0) - cnt = hip04_recv_cnt(priv); + if (--priv->rx_cnt_remaining == 0) + 
priv->rx_cnt_remaining += hip04_recv_cnt(priv); } if (!(priv->reg_inten & RCV_INT)) { @@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_device *ndev) int i; priv->rx_head = 0; + priv->rx_cnt_remaining = 0; priv->tx_head = 0; priv->tx_tail = 0; hip04_reset_ppe(priv); @@ -1038,7 +1041,6 @@ static int hip04_remove(struct platform_device *pdev) hip04_free_ring(ndev, d); unregister_netdev(ndev); - free_irq(ndev->irq, ndev); of_node_put(priv->phy_node); cancel_work_sync(&priv->tx_timeout_task); free_netdev(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 6d0457eb4faa..08339278c722 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; - spin_lock_init(&ring->lock); ring->coal_param = q->handle->coal_param; assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index e9c67c06bfd2..6ab9458302e1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -274,9 +274,6 @@ struct hnae_ring { /* statistic */ struct ring_stats stats; - /* ring lock for poll one */ - spinlock_t lock; - dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index a48396dd4ebb..14ab20491fd0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h) return u > c ? 
(h > c && h <= u) : (h > c || h <= u); } -/* netif_tx_lock will turn down the performance, set only when necessary */ -#ifdef CONFIG_NET_POLL_CONTROLLER -#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock) -#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock) -#else -#define NETIF_TX_LOCK(ring) -#define NETIF_TX_UNLOCK(ring) -#endif - /* reclaim all desc in one budget * return error or number of desc left */ @@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, int head; int bytes, pkts; - NETIF_TX_LOCK(ring); - head = readl_relaxed(ring->io_base + RCB_REG_HEAD); rmb(); /* make sure head is ready before touch any data */ - if (is_ring_empty(ring) || head == ring->next_to_clean) { - NETIF_TX_UNLOCK(ring); + if (is_ring_empty(ring) || head == ring->next_to_clean) return 0; /* no data to poll */ - } if (!is_valid_clean_head(ring, head)) { netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); ring->stats.io_err_cnt++; - NETIF_TX_UNLOCK(ring); return -EIO; } @@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, ring->stats.tx_pkts += pkts; ring->stats.tx_bytes += bytes; - NETIF_TX_UNLOCK(ring); - dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); @@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) int head; int bytes, pkts; - NETIF_TX_LOCK(ring); - head = ring->next_to_use; /* ntu :soft setted ring position*/ bytes = 0; pkts = 0; while (head != ring->next_to_clean) hns_nic_reclaim_one_desc(ring, &bytes, &pkts); - NETIF_TX_UNLOCK(ring); - dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_reset_queue(dev_queue); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index c4b7bf851a28..a0998937727d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HNAE3_H @@ -32,6 +32,8 @@ #define HNAE3_MOD_VERSION "1.0" +#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */ + /* Device IDs */ #define HNAE3_DEV_ID_GE 0xA220 #define HNAE3_DEV_ID_25GE 0xA221 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 2110fa3b4479..5d468ed404a6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HNS3_ENET_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 4821fe08b5e4..1426eb5ddf3d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. 
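/* Editorial sketch, not part of the commit: HNAE3_MIN_VECTOR_NUM, added to
 * hnae3.h above, encodes the hns3 driver's floor of two MSI-X vectors, one
 * for misc events and one for I/O. The hclge/hclgevf hunks that follow use
 * it both as the lower bound passed to pci_alloc_irq_vectors() and as a
 * sanity check on the firmware-reported vector count:
 *
 *	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
 *					hdev->num_msi,
 *					PCI_IRQ_MSI | PCI_IRQ_MSIX);
 *	if (vectors < 0)
 *		return vectors;
 *
 * pci_alloc_irq_vectors() fails outright when it cannot provide at least
 * the requested minimum, so the driver never limps along on one vector.
 */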
#ifndef __HCLGE_CMD_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h index 278f21e02736..b04702e65689 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_DCB_H__ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index fd7f94372ff0..16f7d0e15b4f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -906,6 +906,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + /* nic's msix numbers is always equals to the roce's. */ + hdev->num_nic_msi = hdev->num_roce_msi; + /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. */ @@ -915,6 +918,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_msi = hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + + hdev->num_nic_msi = hdev->num_msi; + } + + if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "Just %u msi resources, not enough for pf(min:2).\n", + hdev->num_nic_msi); + return -EINVAL; } return 0; @@ -1507,6 +1519,10 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) kinfo->rss_size = min_t(u16, hdev->rss_size_max, vport->alloc_tqps / hdev->tm_info.num_tc); + /* ensure one to one mapping between irq and queue at default */ + kinfo->rss_size = min_t(u16, kinfo->rss_size, + (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); + return 0; } @@ -2285,7 +2301,8 @@ static int hclge_init_msi(struct hclge_dev *hdev) int vectors; int i; - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (vectors < 0) { dev_err(&pdev->dev, @@ -2300,6 +2317,7 @@ static int hclge_init_msi(struct hclge_dev *hdev) hdev->num_msi = vectors; hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = hdev->base_msi_vector + hdev->roce_base_msix_offset; @@ -3569,12 +3587,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev) { struct hclge_pf_rst_done_cmd *req; struct hclge_desc desc; + int ret; req = (struct hclge_pf_rst_done_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; - return hclge_cmd_send(&hdev->hw, &desc, 1); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* To be compatible with the old firmware, which does not support + * command HCLGE_OPC_PF_RST_DONE, just print a warning and + * return success + */ + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "current firmware does not support command(0x%x)!\n", + HCLGE_OPC_PF_RST_DONE); + return 0; + } else if (ret) { + dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", + ret); + } + + return ret; } static int hclge_reset_prepare_up(struct hclge_dev *hdev) @@ -3903,6 +3937,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, int alloc = 0; int i, j; + vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); vector_num = 
min(hdev->num_msi_left, vector_num); for (j = 0; j < vector_num; j++) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 3e9574a9e22d..59b824347ba4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_MAIN_H @@ -763,6 +763,7 @@ struct hclge_dev { u32 base_msi_vector; u16 *vector_status; int *vector_irq; + u16 num_nic_msi; /* Num of nic vectors for this PF */ u16 num_roce_msi; /* Num of roce vectors for this PF */ int roce_base_vector; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index ef095d9c566f..dd9a1218a7b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_MDIO_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 9f0e35f27789..62399cc1c5a6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -537,9 +537,16 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->rss_size = kinfo->req_rss_size; } else if (kinfo->rss_size > max_rss_size || (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + /* if user not set rss, the rss_size should compare with the + * valid msi numbers to ensure one to one map between tqp and + * irq as default. + */ + if (!kinfo->req_rss_size) + max_rss_size = min_t(u16, max_rss_size, + (hdev->num_nic_msi - 1) / + kinfo->num_tc); + /* Set to the maximum specification value (max_rss_size). */ - dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", - kinfo->rss_size, max_rss_size); kinfo->rss_size = max_rss_size; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 818610988d34..260f22d19d81 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. 
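/* Editorial sketch, not part of the commit: the hclge_tm.c hunk above caps
 * the default RSS size so that each TQP keeps a dedicated interrupt. With
 * num_nic_msi vectors, one is reserved for misc and the remainder is split
 * across the enabled TCs:
 *
 *	if (!kinfo->req_rss_size)
 *		max_rss_size = min_t(u16, max_rss_size,
 *				     (hdev->num_nic_msi - 1) /
 *				     kinfo->num_tc);
 *
 * For example, 17 NIC vectors and 4 TCs give at most (17 - 1) / 4 = 4
 * queues per TC unless the user requested an explicit rss_size.
 */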
#ifndef __HCLGE_TM_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e3090b3dab1d..7d7e712691b9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) kinfo->tqp[i] = &hdev->htqp[i].q; } + /* after init the max rss_size and tqps, adjust the default tqp numbers + * and rss size with the actual vector numbers + */ + kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); + kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, + kinfo->rss_size); + return 0; } @@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, int alloc = 0; int i, j; + vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); vector_num = min(hdev->num_msi_left, vector_num); for (j = 0; j < vector_num; j++) { @@ -2246,13 +2254,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) + if (hnae3_dev_roce_supported(hdev)) vectors = pci_alloc_irq_vectors(pdev, hdev->roce_base_msix_offset + 1, hdev->num_msi, PCI_IRQ_MSIX); else - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, + hdev->num_msi, PCI_IRQ_MSI | PCI_IRQ_MSIX); if (vectors < 0) { @@ -2268,6 +2277,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) hdev->num_msi = vectors; hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset; @@ -2533,7 +2543,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) req = (struct hclgevf_query_res_cmd *)desc.data; - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) { + if (hnae3_dev_roce_supported(hdev)) { hdev->roce_base_msix_offset = hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), HCLGEVF_MSIX_OFT_ROCEE_M, @@ -2542,6 +2552,9 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + /* nic's msix numbers is always equals to the roce's. */ + hdev->num_nic_msix = hdev->num_roce_msix; + /* VF should have NIC vectors and Roce vectors, NIC vectors * are queued before Roce vectors. The offset is fixed to 64. 
*/ @@ -2551,6 +2564,15 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) hdev->num_msi = hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number), HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); + + hdev->num_nic_msix = hdev->num_msi; + } + + if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "Just %u msi resources, not enough for vf(min:2).\n", + hdev->num_nic_msix); + return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index bdde3afc286b..2b8d6bc6d224 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -270,6 +270,7 @@ struct hclgevf_dev { u16 num_msi; u16 num_msi_left; u16 num_msi_used; + u16 num_nic_msix; /* Num of nic vectors for this VF */ u16 num_roce_msix; /* Num of roce vectors for this VF */ u16 roce_base_msix_offset; int roce_base_vector; diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c index 211c5f74b4c8..aec7e98bcc85 100644 --- a/drivers/net/ethernet/i825xx/lasi_82596.c +++ b/drivers/net/ethernet/i825xx/lasi_82596.c @@ -96,6 +96,8 @@ #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ +#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT + #define DMA_WBACK(ndev, addr, len) \ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) @@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev) unregister_netdev (dev); dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma, - lp->dma_addr, DMA_ATTR_NON_CONSISTENT); + lp->dma_addr, LIB82596_DMA_ATTR); free_netdev (dev); return 0; } diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 1274ad24d6af..f9742af7f142 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev) dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL, - DMA_ATTR_NON_CONSISTENT); + LIB82596_DMA_ATTR); if (!dma) { printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); return -ENOMEM; @@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev) i = register_netdev(dev); if (i) { dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma), - dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT); + dma, lp->dma_addr, LIB82596_DMA_ATTR); return i; } diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 6eb6c2ff7f09..6436a98c5953 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -24,6 +24,8 @@ static const char sni_82596_string[] = "snirm_82596"; +#define LIB82596_DMA_ATTR 0 + #define DMA_WBACK(priv, addr, len) do { } while (0) #define DMA_INV(priv, addr, len) do { } while (0) #define DMA_WBACK_INV(priv, addr, len) do { } while (0) @@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev) unregister_netdev(dev); dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma, - lp->dma_addr, DMA_ATTR_NON_CONSISTENT); + lp->dma_addr, LIB82596_DMA_ATTR); iounmap(lp->ca); iounmap(lp->mpu_port); free_netdev (dev); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 2b073a3c0b84..f59d9a8e35e2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ 
b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2878,12 +2878,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, if (test_bit(0, &adapter->resetting) && adapter->reset_reason == VNIC_RESET_MOBILITY) { - u64 val = (0xff000000) | scrq->hw_irq; + struct irq_desc *desc = irq_to_desc(scrq->irq); + struct irq_chip *chip = irq_desc_get_chip(desc); - rc = plpar_hcall_norets(H_EOI, val); - if (rc) - dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", - val, rc); + chip->irq_eoi(&desc->irq_data); } rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 71d3d8854d8f..be56e631d693 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) rxdr[i].count = rxdr->count; + err = 0; if (netif_running(adapter->netdev)) { /* Try to get new resources before deleting old */ err = e1000_setup_all_rx_resources(adapter); @@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->rx_ring = rxdr; adapter->tx_ring = txdr; err = e1000_up(adapter); - if (err) - goto err_setup; } kfree(tx_old); kfree(rx_old); clear_bit(__E1000_RESETTING, &adapter->flags); - return 0; + return err; + err_setup_tx: e1000_free_all_rx_resources(adapter); err_setup_rx: @@ -646,7 +646,6 @@ err_alloc_rx: err_alloc_tx: if (netif_running(adapter->netdev)) e1000_up(adapter); -err_setup: clear_bit(__E1000_RESETTING, &adapter->flags); return err; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 530613f31527..69a2daaca5c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -20,6 +20,8 @@ /* API version 1.7 implements additional link and PHY-specific APIs */ #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 +/* API version 1.9 for X722 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009 /* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ #define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index d37c6e0e5f08..7560f06768e0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1876,7 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && + hw->mac.type != I40E_MAC_X722) { __le32 tmp; memcpy(&tmp, resp->link_type, sizeof(tmp)); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index b1c3227ae4ab..d07e1a890428 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) err = i40e_queue_pair_enable(vsi, qid); if (err) return err; - - /* Kick start the NAPI context so that receiving will start */ - err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); - if (err) - return err; } return 0; @@ -694,8 +689,6 @@ static bool 
i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -774,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); out_xmit: - if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { - if (tx_ring->next_to_clean == tx_ring->next_to_use) - xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - else - xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); - } + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); xmit_done = i40e_xmit_zc(tx_ring, budget); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 8f310e520b06..821987da5698 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), - q_vector->rx.current_itr); + q_vector->rx.current_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; } @@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), - q_vector->tx.target_itr); + q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; } diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index fc624b73d05d..2fde9653a608 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1036,7 +1036,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) struct ice_aqc_query_txsched_res_resp *buf; enum ice_status status = 0; __le16 max_sibl; - u8 i; + u16 i; if (hw->layer_info) return status; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 3ec2ce0725d5..8a6ef3514129 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) ? 
igb_setup_copper_link_82575 : igb_setup_serdes_link_82575; - if (mac->type == e1000_82580) { + if (mac->type == e1000_82580 || mac->type == e1000_i350) { switch (hw->device_id) { /* feature not supported on these id's */ case E1000_DEV_ID_DH89XXCC_SGMII: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 105b0624081a..ed7e667d7eb2 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg) struct net_device *netdev = igb->netdev; hw->hw_addr = NULL; netdev_err(netdev, "PCIe link lost\n"); - WARN(1, "igb: Failed to read reg 0x%x!\n", reg); + WARN(pci_device_is_present(igb->pdev), + "igb: Failed to read reg 0x%x!\n", reg); } return value; @@ -2064,7 +2065,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter) if ((hw->phy.media_type == e1000_media_type_copper) && (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { swap_now = true; - } else if (!(connsw & E1000_CONNSW_SERDESD)) { + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { /* copper signal takes time to appear */ if (adapter->copper_tries < 4) { adapter->copper_tries++; @@ -2370,7 +2372,7 @@ void igb_reset(struct igb_adapter *adapter) adapter->ei.get_invariants(hw); adapter->flags &= ~IGB_FLAG_MEDIA_RESET; } - if ((mac->type == e1000_82575) && + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && (adapter->flags & IGB_FLAG_MAS_ENABLE)) { igb_enable_mas(adapter); } @@ -5673,8 +5675,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, * should have been handled by the upper layers. */ if (tx_ring->launchtime_enable) { - ts = ns_to_timespec64(first->skb->tstamp); - first->skb->tstamp = 0; + ts = ktime_to_timespec64(first->skb->tstamp); + first->skb->tstamp = ktime_set(0, 0); context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 63b62d74f961..24888676f69b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -824,8 +824,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, * should have been handled by the upper layers. 
*/ if (tx_ring->launchtime_enable) { - ts = ns_to_timespec64(first->skb->tstamp); - first->skb->tstamp = 0; + ts = ktime_to_timespec64(first->skb->tstamp); + first->skb->tstamp = ktime_set(0, 0); context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->launch_time = 0; @@ -4047,7 +4047,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) hw->hw_addr = NULL; netif_device_detach(netdev); netdev_err(netdev, "PCIe link lost, device now detached\n"); - WARN(1, "igc: Failed to read reg 0x%x!\n", reg); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); } return value; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1ce2397306b9..91b3780ddb04 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); - clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) continue; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 100ac89b345d..d6feaacfbf89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) if (tx_desc) { ixgbe_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); - if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { - if (tx_ring->next_to_clean == tx_ring->next_to_use) - xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - else - xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); - } + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index c8425d35c049..e47783ce77e0 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -160,16 +160,23 @@ static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); } #else -void mvneta_bm_pool_destroy(struct mvneta_bm *priv, - struct mvneta_bm_pool *bm_pool, u8 port_map) {} -void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, - u8 port_map) {} -int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; } -int mvneta_bm_pool_refill(struct mvneta_bm *priv, - struct mvneta_bm_pool *bm_pool) {return 0; } -struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, - enum mvneta_bm_type type, u8 port_id, - int pkt_size) { return NULL; } +static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) +{ return 0; } +static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct 
mvneta_bm_pool *bm_pool) +{ return 0; } +static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, + u8 pool_id, + enum mvneta_bm_type type, + u8 port_id, + int pkt_size) +{ return NULL; } static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, @@ -178,7 +185,8 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool) { return 0; } -struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; } -void mvneta_bm_put(struct mvneta_bm *priv) {} +static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node) +{ return NULL; } +static inline void mvneta_bm_put(struct mvneta_bm *priv) {} #endif /* CONFIG_MVNETA_BM */ #endif diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c61069340f4f..703adb96429e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -261,6 +261,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, ge_mode = 0; switch (state->interface) { case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: ge_mode = 1; break; case PHY_INTERFACE_MODE_REVMII: diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index fce9b3a24347..69bb6bb06e76 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; /* * Subtract 1 from the limit because we need to allocate a - * spare CQE so the HCA HW can tell the difference between an - * empty CQ and a full CQ. + * spare CQE to enable resizing the CQ. */ dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 4356f3a58002..1187ef1375e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev) priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; } -static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev) +static int +mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev, + struct resource_allocator *res_alloc, + int vf) { - /* reduce the sink counter */ - return (dev->caps.max_counters - 1 - - (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS)) - / MLX4_MAX_PORTS; + struct mlx4_active_ports actv_ports; + int ports, counters_guaranteed; + + /* For master, only allocate according to the number of phys ports */ + if (vf == mlx4_master_func_num(dev)) + return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports; + + /* calculate real number of ports for the VF */ + actv_ports = mlx4_get_active_ports(dev, vf); + ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT; + + /* If we do not have enough counters for this VF, do not + * allocate any for it. '-1' to reduce the sink counter. 
+ */ + if ((res_alloc->res_reserved + counters_guaranteed) > + (dev->caps.max_counters - 1)) + return 0; + + return counters_guaranteed; } int mlx4_init_resource_tracker(struct mlx4_dev *dev) @@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i, j; int t; - int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev); priv->mfunc.master.res_tracker.slave_list = kcalloc(dev->num_slaves, sizeof(struct slave_list), @@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) break; case RES_COUNTER: res_alloc->quota[t] = dev->caps.max_counters; - if (t == mlx4_master_func_num(dev)) - res_alloc->guaranteed[t] = - MLX4_PF_COUNTERS_PER_PORT * - MLX4_MAX_PORTS; - else if (t <= max_vfs_guarantee_counter) - res_alloc->guaranteed[t] = - MLX4_VF_COUNTERS_PER_PORT * - MLX4_MAX_PORTS; - else - res_alloc->guaranteed[t] = 0; + res_alloc->guaranteed[t] = + mlx4_calc_res_counter_guaranteed(dev, res_alloc, t); break; default: break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8d76452cacdc..f1a7bc46f1c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info { u8 num_wqebbs; u8 num_dma; #ifdef CONFIG_MLX5_EN_TLS - skb_frag_t *resync_dump_frag; + struct page *resync_dump_frag_page; #endif }; @@ -410,6 +410,7 @@ struct mlx5e_txqsq { struct device *pdev; __be32 mkey_be; unsigned long state; + unsigned int hw_mtu; struct hwtstamp_config *tstamp; struct mlx5_clock *clock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c index b3a249b2a482..ac44bbe95c5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c @@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) "Failed to create hv vhca stats agent, err = %ld\n", PTR_ERR(agent)); - kfree(priv->stats_agent.buf); + kvfree(priv->stats_agent.buf); return IS_ERR_OR_NULL(agent); } @@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) return; mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent); - kfree(priv->stats_agent.buf); + kvfree(priv->stats_agent.buf); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index f8ee18b4da6f..13af72556987 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, if (ret) return ret; - if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) + if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) { + ip_rt_put(rt); return -ENETUNREACH; + } #else return -EOPNOTSUPP; #endif ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev); - if (ret < 0) + if (ret < 0) { + ip_rt_put(rt); return ret; + } if (!(*out_ttl)) *out_ttl = ip4_dst_hoplimit(&rt->dst); @@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, *out_ttl = ip6_dst_hoplimit(dst); ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); - if (ret < 0) + if (ret < 0) { + dst_release(dst); return ret; + } #else return -EOPNOTSUPP; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 
87be96747902..7c8796d9743f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -15,15 +15,14 @@ #else /* TLS offload requires additional stop_room for: * - a resync SKB. - * kTLS offload requires additional stop_room for: - * - static params WQE, - * - progress params WQE, and - * - resync DUMP per frag. + * kTLS offload requires fixed additional stop_room for: + * - a static params WQE, and a progress params WQE. + * The additional MTU-depending room for the resync DUMP WQEs + * will be calculated and added in runtime. */ #define MLX5E_SQ_TLS_ROOM \ (MLX5_SEND_WQE_MAX_WQEBBS + \ - MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \ - MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS) + MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS) #endif #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) @@ -92,7 +91,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq, /* fill sq frag edge with nops to avoid wqe wrapping two pages */ for (; wi < edge_wi; wi++) { - wi->skb = NULL; + memset(wi, 0, sizeof(*wi)); wi->num_wqebbs = 1; mlx5e_post_nop(wq, sq->sqn, &sq->pc); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index d2ff74d52720..46725cd743a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, return -ENOMEM; tx_priv->expected_seq = start_offload_tcp_sn; - tx_priv->crypto_info = crypto_info; + tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv); /* tc and underlay_qpn values are not in use for tls tis */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index b7298f9ee3d3..a3efa29a4629 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -21,7 +21,14 @@ MLX5_ST_SZ_BYTES(tls_progress_params)) #define MLX5E_KTLS_PROGRESS_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) -#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2 + +struct mlx5e_dump_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_data_seg data; +}; + +#define MLX5E_KTLS_DUMP_WQEBBS \ + (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB)) enum { MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0, @@ -37,7 +44,7 @@ enum { struct mlx5e_ktls_offload_context_tx { struct tls_offload_context_tx *tx_ctx; - struct tls_crypto_info *crypto_info; + struct tls12_crypto_info_aes_gcm_128 crypto_info; u32 expected_seq; u32 tisn; u32 key_id; @@ -86,14 +93,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe **wqe, u16 *pi); void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, - struct mlx5e_sq_dma *dma); - + u32 *dma_fifo_cc); +static inline u8 +mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags, + unsigned int sync_len) +{ + /* Given the MTU and sync_len, calculates an upper bound for the + * number of WQEBBs needed for the TX resync DUMP WQEs of a record. 
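
The new MLX5E_KTLS_DUMP_WQEBBS bound is easy to sanity-check, assuming the current segment layouts: a DUMP WQE is one 16-byte ctrl segment plus one 16-byte data segment, so DIV_ROUND_UP(32, MLX5_SEND_WQE_BB = 64) evaluates to a single WQEBB per DUMP. The variable part that moved out of MLX5E_SQ_TLS_ROOM is then per record: with example values of hw_mtu = 1500 and a full 16 KB TLS record spread over 5 frags, the helper whose definition completes just below bounds the resync cost at 1 * (5 + DIV_ROUND_UP(16384, 1500)) = 1 * (5 + 11) = 16 WQEBBs.
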
+ */ + return MLX5E_KTLS_DUMP_WQEBBS * + (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu)); +} #else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { } +static inline void +mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe_info *wi, + u32 *dma_fifo_cc) {} + #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index d195366461c9..778dab1af8fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -24,17 +24,12 @@ enum { static void fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - struct tls_crypto_info *crypto_info = priv_tx->crypto_info; - struct tls12_crypto_info_aes_gcm_128 *info; + struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info; char *initial_rn, *gcm_iv; u16 salt_sz, rec_seq_sz; char *salt, *rec_seq; u8 tls_version; - if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128)) - return; - - info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; EXTRACT_INFO_FIELDS; gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv); @@ -108,16 +103,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, } static void tx_fill_wi(struct mlx5e_txqsq *sq, - u16 pi, u8 num_wqebbs, - skb_frag_t *resync_dump_frag, - u32 num_bytes) + u16 pi, u8 num_wqebbs, u32 num_bytes, + struct page *page) { struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; - wi->skb = NULL; - wi->num_wqebbs = num_wqebbs; - wi->resync_dump_frag = resync_dump_frag; - wi->num_bytes = num_bytes; + memset(wi, 0, sizeof(*wi)); + wi->num_wqebbs = num_wqebbs; + wi->num_bytes = num_bytes; + wi->resync_dump_frag_page = page; } void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) @@ -145,7 +139,7 @@ post_static_params(struct mlx5e_txqsq *sq, umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); - tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0); + tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL); sq->pc += MLX5E_KTLS_STATIC_WQEBBS; } @@ -159,7 +153,7 @@ post_progress_params(struct mlx5e_txqsq *sq, wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); - tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0); + tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL); sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; } @@ -169,6 +163,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, bool skip_static_post, bool fence_first_post) { bool progress_fence = skip_static_post || !fence_first_post; + struct mlx5_wq_cyc *wq = &sq->wq; + u16 contig_wqebbs_room, pi; + + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + if (unlikely(contig_wqebbs_room < + MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)) + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); if (!skip_static_post) post_static_params(sq, priv_tx, fence_first_post); @@ -180,29 +182,36 @@ struct tx_sync_info { u64 rcd_sn; s32 sync_len; int nr_frags; - skb_frag_t *frags[MAX_SKB_FRAGS]; + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +enum mlx5e_ktls_sync_retval { + MLX5E_KTLS_SYNC_DONE, + MLX5E_KTLS_SYNC_FAIL, + MLX5E_KTLS_SYNC_SKIP_NO_DATA, }; -static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, - u32 
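
The contig_wqebbs_room check added to mlx5e_ktls_tx_post_param_wqes() is the generic cyclic-WQ idiom: a multi-WQEBB WQE must not wrap the ring edge, so when the distance to the edge is too short it is burned with single-WQEBB NOPs first. A self-contained sketch under that assumption; the demo_* types are illustrative:

#include <linux/types.h>

struct demo_sq {
	u16 pc;		/* producer counter, free-running */
	u16 sz;		/* ring size in WQEBBs, power of two */
};

static void demo_post_nop(struct demo_sq *sq)
{
	/* a real NOP WQE would be written at slot pc & (sz - 1) */
	sq->pc++;
}

static void demo_reserve_contig(struct demo_sq *sq, u16 wqebbs)
{
	u16 pi = sq->pc & (sq->sz - 1);
	u16 room = sq->sz - pi;		/* contiguous slots up to the edge */

	if (room < wqebbs)
		while (room--)
			demo_post_nop(sq);	/* pad out to the edge */
	/* the next wqebbs-sized WQE now lands without wrapping */
}
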
tcp_seq, struct tx_sync_info *info) +static enum mlx5e_ktls_sync_retval +tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, + u32 tcp_seq, struct tx_sync_info *info) { struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; + enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; struct tls_record_info *record; int remaining, i = 0; unsigned long flags; - bool ret = true; spin_lock_irqsave(&tx_ctx->lock, flags); record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn); if (unlikely(!record)) { - ret = false; + ret = MLX5E_KTLS_SYNC_FAIL; goto out; } if (unlikely(tcp_seq < tls_record_start_seq(record))) { - if (!tls_record_is_start_marker(record)) - ret = false; + ret = tls_record_is_start_marker(record) ? + MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; goto out; } @@ -211,13 +220,13 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, while (remaining > 0) { skb_frag_t *frag = &record->frags[i]; - __skb_frag_ref(frag); + get_page(skb_frag_page(frag)); remaining -= skb_frag_size(frag); - info->frags[i++] = frag; + info->frags[i++] = *frag; } /* reduce the part which will be sent with the original SKB */ if (remaining < 0) - skb_frag_size_add(info->frags[i - 1], remaining); + skb_frag_size_add(&info->frags[i - 1], remaining); info->nr_frags = i; out: spin_unlock_irqrestore(&tx_ctx->lock, flags); @@ -229,17 +238,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, struct mlx5e_ktls_offload_context_tx *priv_tx, u64 rcd_sn) { - struct tls_crypto_info *crypto_info = priv_tx->crypto_info; - struct tls12_crypto_info_aes_gcm_128 *info; + struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info; __be64 rn_be = cpu_to_be64(rcd_sn); bool skip_static_post; u16 rec_seq_sz; char *rec_seq; - if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128)) - return; - - info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; rec_seq = info->rec_seq; rec_seq_sz = sizeof(info->rec_seq); @@ -250,11 +254,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); } -struct mlx5e_dump_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_data_seg data; -}; - static int tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first) { @@ -262,7 +261,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir struct mlx5_wqe_data_seg *dseg; struct mlx5e_dump_wqe *wqe; dma_addr_t dma_addr = 0; - u8 num_wqebbs; u16 ds_cnt; int fsz; u16 pi; @@ -270,7 +268,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); cseg = &wqe->ctrl; dseg = &wqe->data; @@ -291,24 +288,27 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); - tx_fill_wi(sq, pi, num_wqebbs, frag, fsz); - sq->pc += num_wqebbs; - - WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, - "unexpected DUMP num_wqebbs, %d > %d", - num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS); + tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag)); + sq->pc += MLX5E_KTLS_DUMP_WQEBBS; return 0; } void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, - struct mlx5e_sq_dma *dma) + u32 *dma_fifo_cc) { - struct mlx5e_sq_stats *stats = sq->stats; + struct mlx5e_sq_stats 
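
tx_sync_info_get() above now snapshots the record frags by value and pins their pages, because the TLS record can be freed (on ACK) before the DUMP completions fire; only the page reference keeps the data valid in the meantime. The ownership transfer, condensed into a hypothetical helper that the driver effectively open-codes:

#include <linux/mm.h>
#include <linux/skbuff.h>

static void demo_capture_frag(skb_frag_t *dst, const skb_frag_t *src)
{
	*dst = *src;			/* value copy survives record teardown */
	get_page(skb_frag_page(dst));	/* dropped by put_page() at completion */
}
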
*stats; + struct mlx5e_sq_dma *dma; + + if (!wi->resync_dump_frag_page) + return; + + dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++); + stats = sq->stats; mlx5e_tx_dma_unmap(sq->pdev, dma); - __skb_frag_unref(wi->resync_dump_frag); + put_page(wi->resync_dump_frag_page); stats->tls_dump_packets++; stats->tls_dump_bytes += wi->num_bytes; } @@ -318,25 +318,31 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq) struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - tx_fill_wi(sq, pi, 1, NULL, 0); + tx_fill_wi(sq, pi, 1, 0, NULL); mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); } -static struct sk_buff * +static enum mlx5e_ktls_sync_retval mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, struct mlx5e_txqsq *sq, - struct sk_buff *skb, + int datalen, u32 seq) { struct mlx5e_sq_stats *stats = sq->stats; struct mlx5_wq_cyc *wq = &sq->wq; + enum mlx5e_ktls_sync_retval ret; struct tx_sync_info info = {}; u16 contig_wqebbs_room, pi; u8 num_wqebbs; - int i; - - if (!tx_sync_info_get(priv_tx, seq, &info)) { + int i = 0; + + ret = tx_sync_info_get(priv_tx, seq, &info); + if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { + if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { + stats->tls_skip_no_sync_data++; + return MLX5E_KTLS_SYNC_SKIP_NO_DATA; + } /* We might get here if a retransmission reaches the driver * after the relevant record is acked. * It should be safe to drop the packet in this case @@ -346,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, } if (unlikely(info.sync_len < 0)) { - u32 payload; - int headln; - - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); - payload = skb->len - headln; - if (likely(payload <= -info.sync_len)) - return skb; + if (likely(datalen <= -info.sync_len)) + return MLX5E_KTLS_SYNC_DONE; stats->tls_drop_bypass_req++; goto err_out; @@ -360,30 +361,62 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, stats->tls_ooo++; - num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + - (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1); + tx_post_resync_params(sq, priv_tx, info.rcd_sn); + + /* If no dump WQE was sent, we need to have a fence NOP WQE before the + * actual data xmit. + */ + if (!info.nr_frags) { + tx_post_fence_nop(sq); + return MLX5E_KTLS_SYNC_DONE; + } + + num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len); pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + if (unlikely(contig_wqebbs_room < num_wqebbs)) mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); tx_post_resync_params(sq, priv_tx, info.rcd_sn); - for (i = 0; i < info.nr_frags; i++) - if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i)) - goto err_out; + for (; i < info.nr_frags; i++) { + unsigned int orig_fsz, frag_offset = 0, n = 0; + skb_frag_t *f = &info.frags[i]; - /* If no dump WQE was sent, we need to have a fence NOP WQE before the - * actual data xmit. 
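
In the reworked completion handler above, wi->resync_dump_frag_page doubles as the "this is a DUMP WQE" marker on skb-less completions: each DUMP completion pops exactly one DMA-fifo entry, unmaps it, and drops the page reference taken at capture time. The pairing, reduced to a sketch with illustrative demo_* names:

#include <linux/mm.h>

struct demo_dma { int mapped; };	/* stand-in for a dma fifo entry */

static void demo_dma_unmap(struct demo_dma *dma)
{
	dma->mapped = 0;		/* real code calls dma_unmap_page() */
}

/* one DUMP completion undoes one capture-time get_page() and one fifo push */
static void demo_dump_complete(struct page *pinned, struct demo_dma *dma)
{
	demo_dma_unmap(dma);
	put_page(pinned);
}
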
- */ - if (!info.nr_frags) - tx_post_fence_nop(sq); + orig_fsz = skb_frag_size(f); - return skb; + do { + bool fence = !(i || frag_offset); + unsigned int fsz; + + n++; + fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset); + skb_frag_size_set(f, fsz); + if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) { + page_ref_add(skb_frag_page(f), n - 1); + goto err_out; + } + + skb_frag_off_add(f, fsz); + frag_offset += fsz; + } while (frag_offset < orig_fsz); + + page_ref_add(skb_frag_page(f), n - 1); + } + + return MLX5E_KTLS_SYNC_DONE; err_out: - dev_kfree_skb_any(skb); - return NULL; + for (; i < info.nr_frags; i++) + /* The put_page() here undoes the page ref obtained in tx_sync_info_get(). + * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be + * released only upon their completions (or in mlx5e_free_txqsq_descs, + * if channel closes). + */ + put_page(skb_frag_page(&info.frags[i])); + + return MLX5E_KTLS_SYNC_FAIL; } struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, @@ -419,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, seq = ntohl(tcp_hdr(skb)->seq); if (unlikely(priv_tx->expected_seq != seq)) { - skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq); - if (unlikely(!skb)) + enum mlx5e_ktls_sync_retval ret = + mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); + + if (likely(ret == MLX5E_KTLS_SYNC_DONE)) + *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); + else if (ret == MLX5E_KTLS_SYNC_FAIL) + goto err_out; + else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ goto out; - *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); } priv_tx->expected_seq = seq + datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c5a9c20d7f00..327c93a7bd55 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(const unsigned long *adver) { #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT; - __ETHTOOL_DECLARE_LINK_MODE_MASK(modes); + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,}; bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size); return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7569287f8f3c..772bfdbdeb9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; sq->stop_room = MLX5E_SQ_STOP_ROOM; INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); @@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); +#ifdef CONFIG_MLX5_EN_TLS if (mlx5_accel_is_tls_device(c->priv->mdev)) { set_bit(MLX5E_SQ_STATE_TLS, &sq->state); - sq->stop_room += MLX5E_SQ_TLS_ROOM; + sq->stop_room += MLX5E_SQ_TLS_ROOM + + mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS, + TLS_MAX_PAYLOAD_SIZE); } 
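
The new per-frag loop above cuts each captured frag into hw_mtu-sized DUMP WQEs. Every chunk's completion will put_page() once, but only one reference was taken at capture time, so after posting n chunks the code tops the refcount up by n - 1 (and on a mid-frag failure it accounts only for the chunks actually posted). The accounting in isolation, as a sketch that assumes fsz > 0 and elides the actual posting:

#include <linux/kernel.h>
#include <linux/page_ref.h>

/* returns the number of chunks posted */
static unsigned int demo_split_frag(struct page *page, unsigned int fsz,
				    unsigned int mtu)
{
	unsigned int off = 0, n = 0;

	while (off < fsz) {
		unsigned int chunk = min(mtu, fsz - off);

		/* ... post one DUMP WQE covering [off, off + chunk) ... */
		off += chunk;
		n++;
	}

	/* one ref exists already; completions will drop n refs in total */
	page_ref_add(page, n - 1);
	return n;
}
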
+#endif param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1349,9 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) /* last doorbell out, godspeed .. */ if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + struct mlx5e_tx_wqe_info *wi; struct mlx5e_tx_wqe *nop; - sq->db.wqe_info[pi].skb = NULL; + wi = &sq->db.wqe_info[pi]; + + memset(wi, 0, sizeof(*wi)); + wi->num_wqebbs = 1; nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc); mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 95892a3b63a1..cd9bb7c7b341 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, mutex_lock(&esw->offloads.encap_tbl_lock); encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); - if (e->compl_result || (encap_connected == neigh_connected && - ether_addr_equal(e->h_dest, ha))) + if (e->compl_result < 0 || (encap_connected == neigh_connected && + ether_addr_equal(e->h_dest, ha))) goto unlock; mlx5e_take_all_encap_flows(e, &flow_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d6a547238de0..82cffb3a9964 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; - if (rq->cqd.left) + if (rq->cqd.left) { work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget); + if (rq->cqd.left || work_done >= budget) + goto out; + } cqe = mlx5_cqwq_get_cqe(cqwq); if (!cqe) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 840ec945ccba..bbff8d8ded76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -35,6 +35,7 @@ #include <linux/udp.h> #include <net/udp.h> #include "en.h" +#include "en/port.h" enum { MLX5E_ST_LINK_STATE, @@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv) static int mlx5e_test_link_speed(struct mlx5e_priv *priv) { - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - u32 eth_proto_oper; - int i; + u32 speed; if (!netif_carrier_ok(priv->netdev)) return 1; - if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) - return 1; - - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) { - if (eth_proto_oper & MLX5E_PROT_MASK(i)) - return 0; - } - return 1; + return mlx5e_port_linkspeed(priv->mdev, &speed); } struct mlx5ehdr { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index ac6fdcda7019..7e6ebd0505cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) }, + { 
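
The en_rx.c hunk above closes a budget hole: a resumed CQE decompression session can itself consume the whole NAPI budget, so the poll loop must bail out when either leftover compressed CQEs remain or the budget is exhausted, instead of falling through to fetch fresh CQEs. The shape of the fixed loop, with demo_* stand-ins:

struct demo_rq { int left; };	/* compressed CQEs carried over */

static int demo_decompress_cont(struct demo_rq *rq, int budget)
{
	/* consumes up to budget entries and updates rq->left */
	return 0;
}

static int demo_poll(struct demo_rq *rq, int budget)
{
	int work_done = 0;

	if (rq->left)
		work_done += demo_decompress_cont(rq, budget);

	if (rq->left || work_done >= budget)
		return work_done;	/* never exceed the NAPI budget */

	/* ... fetch and handle new CQEs up to (budget - work_done) ... */
	return work_done;
}
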
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, #endif { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, @@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; s->tx_tls_ctx += sq_stats->tls_ctx; s->tx_tls_ooo += sq_stats->tls_ooo; + s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; + s->tx_tls_dump_packets += sq_stats->tls_dump_packets; s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; + s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; - s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; - s->tx_tls_dump_packets += sq_stats->tls_dump_packets; #endif s->tx_cqes += sq_stats->cqes; } @@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, #endif { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 79f261bf86ac..869f3502f631 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -129,11 +129,12 @@ struct mlx5e_sw_stats { u64 tx_tls_encrypted_bytes; u64 tx_tls_ctx; u64 tx_tls_ooo; + u64 tx_tls_dump_packets; + u64 tx_tls_dump_bytes; u64 tx_tls_resync_bytes; + u64 tx_tls_skip_no_sync_data; u64 tx_tls_drop_no_sync_data; u64 tx_tls_drop_bypass_req; - u64 tx_tls_dump_packets; - u64 tx_tls_dump_bytes; #endif u64 rx_xsk_packets; @@ -273,11 +274,12 @@ struct mlx5e_sq_stats { u64 tls_encrypted_bytes; u64 tls_ctx; u64 tls_ooo; + u64 tls_dump_packets; + u64 tls_dump_bytes; u64 tls_resync_bytes; + u64 tls_skip_no_sync_data; u64 tls_drop_no_sync_data; u64 tls_drop_bypass_req; - u64 tls_dump_packets; - u64 tls_dump_bytes; #endif /* less likely accessed in data path */ u64 csum_none; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3e78a727f3e6..fda0b37075e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv 
*priv, mlx5_eswitch_del_vlan_action(esw, attr); for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) - if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) + if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) { mlx5e_detach_encap(priv, flow, out_index); + kfree(attr->parse_attr->tun_info[out_index]); + } kvfree(attr->parse_attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) @@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); } + kfree(e->tun_info); kfree(e->encap_header); kfree_rcu(e, rcu); } @@ -2972,6 +2975,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, return NULL; } +static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info) +{ + size_t tun_size = sizeof(*tun_info) + tun_info->options_len; + + return kmemdup(tun_info, tun_size, GFP_KERNEL); +} + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct net_device *mirred_dev, @@ -3028,13 +3038,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, refcount_set(&e->refcnt, 1); init_completion(&e->res_ready); + tun_info = dup_tun_info(tun_info); + if (!tun_info) { + err = -ENOMEM; + goto out_err_init; + } e->tun_info = tun_info; err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); - if (err) { - kfree(e); - e = NULL; - goto out_err; - } + if (err) + goto out_err_init; INIT_LIST_HEAD(&e->flows); hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); @@ -3075,6 +3087,12 @@ out_err: if (e) mlx5e_encap_put(priv, e); return err; + +out_err_init: + mutex_unlock(&esw->offloads.encap_tbl_lock); + kfree(tun_info); + kfree(e); + return err; } static int parse_tc_vlan_action(struct mlx5e_priv *priv, @@ -3160,7 +3178,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr, u32 *action) { - int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev); + int nest_level = attr->parse_attr->filter_dev->lower_level; struct flow_action_entry vlan_act = { .id = FLOW_ACTION_VLAN_POP, }; @@ -3295,7 +3313,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, } else if (encap) { parse_attr->mirred_ifindex[attr->out_count] = out_dev->ifindex; - parse_attr->tun_info[attr->out_count] = info; + parse_attr->tun_info[attr->out_count] = dup_tun_info(info); + if (!parse_attr->tun_info[attr->out_count]) + return -ENOMEM; encap = false; attr->dests[attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index d3a67a9b4eba..67dc4f0921b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, struct mlx5_err_cqe *err_cqe) { - u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq); + struct mlx5_cqwq *wq = &sq->cq.wq; + u32 ci; + + ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); netdev_err(sq->channel->netdev, "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", @@ -479,14 +482,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) skb = wi->skb; if (unlikely(!skb)) { -#ifdef CONFIG_MLX5_EN_TLS - if (wi->resync_dump_frag) { - struct mlx5e_sq_dma *dma = - mlx5e_dma_get(sq, dma_fifo_cc++); - - mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma); - } 
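
dup_tun_info() above works because struct ip_tunnel_info keeps its options inline after the fixed header, so a single kmemdup() of sizeof(*info) + options_len deep-copies everything. The same idiom applies to any header-plus-trailer object; demo_blob here is hypothetical:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_blob {
	u32 options_len;	/* bytes of trailing data */
	u8 data[];		/* flexible array member */
};

static struct demo_blob *demo_blob_dup(const struct demo_blob *src)
{
	/* header and trailer copied in one allocation */
	return kmemdup(src, sizeof(*src) + src->options_len, GFP_KERNEL);
}
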
-#endif + mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc); sqcc += wi->num_wqebbs; continue; } @@ -542,29 +538,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) { struct mlx5e_tx_wqe_info *wi; struct sk_buff *skb; + u32 dma_fifo_cc; + u16 sqcc; u16 ci; int i; - while (sq->cc != sq->pc) { - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); + sqcc = sq->cc; + dma_fifo_cc = sq->dma_fifo_cc; + + while (sqcc != sq->pc) { + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; - if (!skb) { /* nop */ - sq->cc++; + if (!skb) { + mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc); + sqcc += wi->num_wqebbs; continue; } for (i = 0; i < wi->num_dma; i++) { struct mlx5e_sq_dma *dma = - mlx5e_dma_get(sq, sq->dma_fifo_cc++); + mlx5e_dma_get(sq, dma_fifo_cc++); mlx5e_tx_dma_unmap(sq->pdev, dma); } dev_kfree_skb_any(skb); - sq->cc += wi->num_wqebbs; + sqcc += wi->num_wqebbs; } + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; } #ifdef CONFIG_MLX5_CORE_IPOIB diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 00d71db15f22..9004a07e457a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, mlx5_eswitch_set_rule_source_port(esw, spec, attr); - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; @@ -1080,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) MLX5_CAP_GEN(dev, max_flow_counter_15_0); fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); - esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index 1d55a324a17e..366bda1bb1c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -177,22 +177,33 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src, memset(&src->vlan[1], 0, sizeof(src->vlan[1])); } +static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw, + const struct mlx5_flow_spec *spec) +{ + u32 port_mask, port_value; + + if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) + return spec->flow_context.flow_source == + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + + port_mask = MLX5_GET(fte_match_param, spec->match_criteria, + misc_parameters.source_port); + port_value = MLX5_GET(fte_match_param, spec->match_value, + misc_parameters.source_port); + return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK; +} + bool mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, struct mlx5_flow_act *flow_act, struct mlx5_flow_spec *spec) { - u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria, - misc_parameters.source_port); - u32 port_value = MLX5_GET(fte_match_param, spec->match_value, - misc_parameters.source_port); - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, 
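
mlx5e_free_txqsq_descs() above now mirrors the completion path: it walks the ring with local copies of the consumer counters, routes skb-less slots through the same DUMP completion hook, and publishes sq->cc and sq->dma_fifo_cc once at the end, so no half-drained state is ever observable through the sq. Stripped to the counter discipline, with illustrative demo_* names:

#include <linux/types.h>

struct demo_ring {
	u32 cc;		/* consumer counter */
	u32 pc;		/* producer counter */
};

/* frees one descriptor, returns how many slots it spanned */
static u32 demo_free_one(struct demo_ring *r, u32 ci)
{
	return 1;
}

static void demo_drain(struct demo_ring *r)
{
	u32 cc = r->cc;			/* snapshot */

	while (cc != r->pc)
		cc += demo_free_one(r, cc);

	r->cc = cc;			/* single publish after the drain */
}
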
termination_table)) return false; /* push vlan on RX */ return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) && - ((port_mask & port_value) == MLX5_VPORT_UPLINK); + mlx5_eswitch_offload_is_uplink_port(esw, spec); } struct mlx5_flow_handle * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 4c50efe4e7f1..61021133029e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) } err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); - if (err) + if (err) { + kvfree(in); goto err_cqwq; + } cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 579c306caa7b..3c816e81f8d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id, dst->dest_attr.vport.vhca_id); - if (extended_dest) { + if (extended_dest && + dst->dest_attr.vport.pkt_reformat) { MLX5_SET(dest_format_struct, in_dests, packet_reformat, !!(dst->dest_attr.vport.flags & diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d685122d9ff7..c07f3154437c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -572,7 +572,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, return -ENOMEM; err = mlx5_crdump_collect(dev, cr_data); if (err) - return err; + goto free_data; if (priv_ctx) { struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 9231b39d18b2..c501bf2a0252 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -112,17 +112,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0}; u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0}; struct xarray *mkeys = &dev->priv.mkey_table; - struct mlx5_core_mkey *deleted_mkey; unsigned long flags; xa_lock_irqsave(mkeys, flags); - deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key)); + __xa_erase(mkeys, mlx5_base_mkey(mkey->key)); xa_unlock_irqrestore(mkeys, flags); - if (!deleted_mkey) { - mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n", - mlx5_base_mkey(mkey->key)); - return -ENOENT; - } MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index b74b7d0f6590..004c56c2fc0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1577,6 +1577,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) break; case DR_ACTION_TYP_MODIFY_HDR: mlx5dr_icm_free_chunk(action->rewrite.chunk); + kfree(action->rewrite.data); refcount_dec(&action->rewrite.dmn->refcount); break; default: diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 4187f2b112b8..5dcb8baf491a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -788,12 +788,10 @@ again: * it means that all the previous stes are the same, * if so, this rule is duplicated. */ - if (mlx5dr_ste_is_last_in_rule(nic_matcher, - matched_ste->ste_chain_location)) { - mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n"); - return NULL; - } - return matched_ste; + if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location)) + return matched_ste; + + mlx5dr_dbg(dmn, "Duplicate rule inserted\n"); } if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) { @@ -1098,6 +1096,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, if (htbl) mlx5dr_htbl_put(htbl); + kfree(hw_ste_arr); + return 0; free_ste: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 14dcc786926d..4421ab22182f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1186,7 +1186,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_thermal_init; - if (mlxsw_driver->params_register && !reload) + if (mlxsw_driver->params_register) devlink_params_publish(devlink); return 0; @@ -1259,7 +1259,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; } - if (mlxsw_core->driver->params_unregister && !reload) + if (mlxsw_core->driver->params_unregister) devlink_params_unpublish(devlink); mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 899450b28621..7c03b661ae7e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, devlink = priv_to_devlink(mlxsw_sp->core); in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core, local_port); + skb_push(skb, ETH_HLEN); devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port); consume_skb(skb); } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4d1bce4389c7..672ea1342add 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -261,8 +261,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, port->pvid = vid; /* Untagged egress vlan clasification */ - if (untagged) + if (untagged && port->vid != vid) { + if (port->vid) { + dev_err(ocelot->dev, + "Port already has a native VLAN: %d\n", + port->vid); + return -EBUSY; + } port->vid = vid; + } ocelot_vlan_port_apply(ocelot, port); @@ -934,7 +941,7 @@ end: static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { - return ocelot_vlan_vid_add(dev, vid, false, true); + return ocelot_vlan_vid_add(dev, vid, false, false); } static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, @@ -1673,9 +1680,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev, struct ocelot_port *ocelot_port = netdev_priv(dev); int err = 0; - if (!ocelot_netdevice_dev_check(dev)) - return 0; - switch (event) { case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(info->upper_dev)) { @@ -1712,12 
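
The spectrum_trap fix above restores the MAC header before handing the skb to devlink: the driver RX path had already pulled ETH_HLEN, while devlink_trap_report() expects the frame to start at the Ethernet header. The general pattern when re-exposing a previously pulled header, as a minimal sketch:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static void demo_report(struct sk_buff *skb)
{
	/* skb->data currently points past the Ethernet header */
	skb_push(skb, ETH_HLEN);	/* walk back over the pulled header */

	/* ... report/consume the full frame here ... */
}
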
+1716,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (!ocelot_netdevice_dev_check(dev)) + return 0; + if (event == NETDEV_PRECHANGEUPPER && netif_is_lag_master(info->upper_dev)) { struct netdev_lag_upper_info *lag_upper_info = info->upper_info; struct netlink_ext_ack *extack; - if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + if (lag_upper_info && + lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { extack = netdev_notifier_info_to_extack(&info->info); NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index e40773c01a44..06ac806052bc 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -523,7 +523,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset); #define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) #define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) -void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, u32 offset); #define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) #define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 1eef446036d6..79d72c88bbef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr) nfp_port_free(repr->port); } -static struct lock_class_key nfp_repr_netdev_xmit_lock_key; -static struct lock_class_key nfp_repr_netdev_addr_lock_key; - -static void nfp_repr_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key); -} - -static void nfp_repr_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL); -} - int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev) @@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 repr_cap = nn->tlv_caps.repr_cap; int err; - nfp_repr_set_lockdep_class(netdev); - repr->port = port; repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); if (!repr->dst) diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 141571e2ec11..544012a67221 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1356,9 +1356,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); - /* Reset the ethernet controller */ - __lpc_eth_reset(pldat); - /* then shut everything down to save power */ __lpc_eth_shutdown(pldat); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 72107a0627a9..20faa8d24c9f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ 
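
The ocelot notifier fix above does two things: the device-identity check moves to the top of the notifier, so events like PRECHANGEUPPER can no longer reach driver logic for foreign netdevs, and lag_upper_info is treated as optional since not every CHANGEUPPER-family event supplies it. A skeleton of the hardened notifier; demo_dev_check stands in for the driver's netdev_ops comparison:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static bool demo_dev_check(const struct net_device *dev)
{
	return true;	/* real drivers compare dev->netdev_ops here */
}

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (!demo_dev_check(dev))	/* filter before any event handling */
		return NOTIFY_DONE;

	if (event == NETDEV_PRECHANGEUPPER &&
	    netif_is_lag_master(info->upper_dev)) {
		struct netdev_lag_upper_info *lag = info->upper_info;

		if (lag && lag->tx_type != NETDEV_LAG_TX_TYPE_HASH)
			return notifier_from_errno(-EINVAL);	/* NULL-safe */
	}

	return NOTIFY_DONE;
}
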
b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ +#include <linux/printk.h> +#include <linux/dynamic_debug.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 812190e729c2..6a95b42a8d8c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -182,6 +182,8 @@ struct ionic_lif { #define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq) #define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq) +#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx) +#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx) #define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q) #define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 15e432386b35..aab311413412 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ +#include <linux/printk.h> +#include <linux/dynamic_debug.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/utsname.h> diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index e2907884f843..03916b6d47f2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -117,7 +117,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) /* rx stats */ total += MAX_Q(lif) * IONIC_NUM_RX_STATS; - if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + if (test_bit(IONIC_LIF_UP, lif->state) && + test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { /* tx debug stats */ total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + IONIC_NUM_TX_Q_STATS + @@ -149,7 +150,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) *buf += ETH_GSTRING_LEN; } - if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + if (test_bit(IONIC_LIF_UP, lif->state) && + test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { snprintf(*buf, ETH_GSTRING_LEN, "txq_%d_%s", @@ -187,7 +189,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) *buf += ETH_GSTRING_LEN; } - if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + if (test_bit(IONIC_LIF_UP, lif->state) && + test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { snprintf(*buf, ETH_GSTRING_LEN, "rxq_%d_cq_%s", @@ -223,6 +226,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) { struct ionic_lif_sw_stats lif_stats; struct ionic_qcq *txqcq, *rxqcq; + struct ionic_tx_stats *txstats; + struct ionic_rx_stats *rxstats; int i, q_num; ionic_get_lif_stats(lif, &lif_stats); @@ -233,15 +238,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) } for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - txqcq = lif_to_txqcq(lif, q_num); + txstats = &lif_to_txstats(lif, q_num); for (i = 0; i < IONIC_NUM_TX_STATS; i++) { - **buf = IONIC_READ_STAT64(&txqcq->stats->tx, + **buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]); (*buf)++; } - if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + 
if (test_bit(IONIC_LIF_UP, lif->state) && + test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + txqcq = lif_to_txqcq(lif, q_num); for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { **buf = IONIC_READ_STAT64(&txqcq->q, &ionic_txq_stats_desc[i]); @@ -258,22 +265,24 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) (*buf)++; } for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { - **buf = txqcq->stats->tx.sg_cntr[i]; + **buf = txstats->sg_cntr[i]; (*buf)++; } } } for (q_num = 0; q_num < MAX_Q(lif); q_num++) { - rxqcq = lif_to_rxqcq(lif, q_num); + rxstats = &lif_to_rxstats(lif, q_num); for (i = 0; i < IONIC_NUM_RX_STATS; i++) { - **buf = IONIC_READ_STAT64(&rxqcq->stats->rx, + **buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]); (*buf)++; } - if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + if (test_bit(IONIC_LIF_UP, lif->state) && + test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + rxqcq = lif_to_rxqcq(lif, q_num); for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { **buf = IONIC_READ_STAT64(&rxqcq->cq, &ionic_dbg_cq_stats_desc[i]); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2ce70097d018..38f7f40b3a4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -67,10 +67,9 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) #define QED_RDMA_SRQS QED_ROCE_QPS -#define QED_NVM_CFG_SET_FLAGS 0xE -#define QED_NVM_CFG_SET_PF_FLAGS 0x1E #define QED_NVM_CFG_GET_FLAGS 0xA #define QED_NVM_CFG_GET_PF_FLAGS 0x1A +#define QED_NVM_CFG_MAX_ATTRS 50 static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); u8 entity_id, len, buf[32]; + bool need_nvm_init = true; struct qed_ptt *ptt; u16 cfg_id, count; int rc = 0, i; @@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) DP_VERBOSE(cdev, NETIF_MSG_DRV, "Read config ids: num_attrs = %0d\n", count); - /* NVM CFG ID attributes */ - for (i = 0; i < count; i++) { + /* NVM CFG ID attributes. Start loop index from 1 to avoid additional + * arithmetic operations in the implementation. + */ + for (i = 1; i <= count; i++) { cfg_id = *((u16 *)*data); *data += 2; entity_id = **data; @@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) memcpy(buf, *data, len); *data += len; - flags = entity_id ? 
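
The ionic_stats changes above enforce ethtool's contract: the count callback, the strings writer, and the values writer must gate optional stat groups on one and the same predicate, or the three buffers disagree and userspace reads garbage. Reduced to the invariant, with hypothetical demo_* names and counts:

#include <linux/types.h>

struct demo_lif { bool up; bool debug; };

/* single predicate shared by count, strings and values */
static bool demo_dbg_stats(const struct demo_lif *l)
{
	return l->up && l->debug;
}

static int demo_stats_count(const struct demo_lif *l)
{
	return 4 + (demo_dbg_stats(l) ? 2 : 0);
}

static void demo_stats_values(const struct demo_lif *l, u64 *buf)
{
	/* must write exactly demo_stats_count(l) entries */
	buf[0] = buf[1] = buf[2] = buf[3] = 0;
	if (demo_dbg_stats(l)) {
		buf[4] = 0;
		buf[5] = 0;
	}
}
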
QED_NVM_CFG_SET_PF_FLAGS : - QED_NVM_CFG_SET_FLAGS; + flags = 0; + if (need_nvm_init) { + flags |= QED_NVM_CFG_OPTION_INIT; + need_nvm_init = false; + } + + /* Commit to flash and free the resources */ + if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { + flags |= QED_NVM_CFG_OPTION_COMMIT | + QED_NVM_CFG_OPTION_FREE; + need_nvm_init = true; + } + + if (entity_id) + flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; DP_VERBOSE(cdev, NETIF_MSG_DRV, "cfg_id = %d entity = %d len = %d\n", cfg_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 78f77b712b10..dcb5c917f373 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, (qed_iov_validate_active_txq(p_hwfn, vf))) { vf->b_malicious = true; DP_NOTICE(p_hwfn, - "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n", + "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", vf->abs_vf_id); status = PFVF_STATUS_MALICIOUS; goto out; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 8d1c208f778f..a220cc7c947a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1208,8 +1208,16 @@ enum qede_remove_mode { static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); - struct qede_dev *edev = netdev_priv(ndev); - struct qed_dev *cdev = edev->cdev; + struct qede_dev *edev; + struct qed_dev *cdev; + + if (!ndev) { + dev_info(&pdev->dev, "Device has already been removed\n"); + return; + } + + edev = netdev_priv(ndev); + cdev = edev->cdev; DP_INFO(edev, "Starting qede_remove\n"); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 9c54b715228e..06de59521fc4 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, if (port->nr_rmnet_devs) return -EINVAL; - kfree(port); - netdev_rx_handler_unregister(real_dev); + kfree(port); + /* release reference on real_dev */ dev_put(real_dev); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 74f81fe03810..c4e961ea44d5 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -916,6 +916,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) { + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 
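
The rmnet hunk above is a use-after-free fix by reordering alone: the RX handler holds the port as its rx_handler_data, so the handler must be unregistered (which waits out RCU readers) before kfree(port); the old order left a window in which an in-flight packet could dereference freed memory. The safe teardown order, sketched with an illustrative demo_port:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct demo_port;

static void demo_teardown(struct net_device *real_dev, struct demo_port *port)
{
	netdev_rx_handler_unregister(real_dev);	/* no readers after this */
	kfree(port);				/* only now is freeing safe */
	dev_put(real_dev);			/* release the device reference */
}
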
0 : tp->ocp_base >> 4; + if (tp->ocp_base != OCP_STD_PHY_BASE) reg -= 0x10; @@ -1029,6 +1032,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) { int value; + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + r8168dp_2_mdio_start(tp); value = r8169_mdio_read(tp, reg); @@ -4146,6 +4153,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) rtl_lock_config_regs(tp); } +static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu) +{ + if (mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); +} + DECLARE_RTL_COND(rtl_chipcmd_cond) { return RTL_R8(tp, ChipCmd) & CmdReset; @@ -4442,11 +4457,6 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | - PCI_EXP_DEVCTL_NOSNOOP_EN); - } } static void rtl_hw_start_8168bef(struct rtl8169_private *tp) @@ -4462,9 +4472,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_disable_clock_request(tp); } @@ -4490,9 +4497,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) @@ -4503,9 +4507,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) /* Magic. */ RTL_W8(tp, DBG_REG, 0x20); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) @@ -4611,9 +4612,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_1); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_disable_clock_request(tp); /* Reset tx FIFO pointer */ @@ -4636,9 +4634,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_2); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); @@ -5485,6 +5480,8 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); + rtl_jumbo_config(tp, tp->dev->mtu); + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ RTL_R16(tp, CPlusCmd); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); @@ -5498,10 +5495,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) { struct rtl8169_private *tp = netdev_priv(dev); - if (new_mtu > ETH_DATA_LEN) - rtl_hw_jumbo_enable(tp); - else - rtl_hw_jumbo_disable(tp); + rtl_jumbo_config(tp, new_mtu); dev->mtu = new_mtu; netdev_update_features(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index f97a4096f8fc..ddcc191febdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -651,7 +651,8 @@ static void sun8i_dwmac_set_filter(struct mac_device_info *hw, } } } else { - netdev_info(dev, "Too many address, switching to promiscuous\n"); + if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL)) + netdev_info(dev, "Too many address, switching to promiscuous\n"); v = EMAC_FRM_FLT_RXALL; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 2cb9c53f93b8..66e60c7e9850 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -432,7 +432,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, * bits used depends on the hardware configuration * selected at core configuration time. */ - int bit_nr = bitrev32(~crc32_le(~0, ha->addr, + u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> (32 - mcbitslog2); /* The most significant bit determines the register to * use (H/L) while the other 5 bits determine the bit @@ -448,7 +448,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, value |= GMAC_PACKET_FILTER_HPF; /* Handle multiple unicast addresses */ - if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) { + if (netdev_uc_count(dev) > hw->unicast_filter_entries) { /* Switch to promiscuous mode if more than 128 addrs * are required */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 3f4f3132e16b..e436fa160c7d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, if (!enable) { val |= PPSCMDx(index, 0x5); + val |= PPSEN0; writel(val, ioaddr + MAC_PPS_CONTROL); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 5031398e612c..070bd7d1ae4c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -224,6 +224,7 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw, writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value &= ~XGMAC_TSA; value |= XGMAC_CC | XGMAC_CBS; writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); } @@ -463,7 +464,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw, value |= XGMAC_FILTER_HMC; netdev_for_each_mc_addr(ha, dev) { - int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> + u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> (32 - mcbitslog2)); mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index ae48154f933c..bd5838ce1e8a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -288,7 +288,8 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash, static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) { - *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; + if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T) + *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 965cbe3e6f51..f70ca5300b82 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -369,7 +369,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; - dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; + dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10); dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; @@ -470,6 +470,7 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) { u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL); value &= ~XGMAC_TXQEN; if (qmode != MTL_QUEUE_AVB) { @@ -477,6 +478,7 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); } else { value |= 0x1 << XGMAC_TXQEN_SHIFT; + writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL); } writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index a223584f5f9a..252cf48c5816 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -176,6 +176,7 @@ #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 #define MMC_XGMAC_RX_FPE_FRAG 0x234 +#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) { @@ -333,8 +334,9 @@ static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) { - writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); + writel(0x0, mmcaddr + MMC_RX_INTR_MASK); + writel(0x0, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); } static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c76a1336a451..f826365c979d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2610,7 +2610,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } if (priv->hw->pcs) - stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); /* set TX and RX rings length */ stmmac_set_rings_length(priv); @@ -2995,6 +2995,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } else { stmmac_set_desc_addr(priv, first, des); tmp_pay_len = pay_len; + des 
+= proto_hdr_len; + pay_len = 0; } stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); @@ -3022,6 +3024,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[tx_q->cur_tx] = skb; + /* Manage tx mitigation */ + tx_q->tx_count_frames += nfrags + 1; + if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && + !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + stmmac_tx_timer_arm(priv, queue); + } else { + desc = &tx_q->dma_tx[tx_q->cur_tx]; + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } + /* We've used all descriptors we need for this skb, however, * advance cur_tx so that it references a fresh descriptor. * ndo_start_xmit will fill this descriptor the next time it's @@ -3039,19 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) priv->xstats.tx_tso_frames++; priv->xstats.tx_tso_nfrags += nfrags; - /* Manage tx mitigation */ - tx_q->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && - !(priv->synopsys_id >= DWMAC_CORE_4_00 && - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - stmmac_tx_timer_arm(priv, queue); - } else { - tx_q->tx_count_frames = 0; - stmmac_set_tx_ic(priv, desc); - priv->xstats.tx_set_ic_bit++; - } - if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -3223,6 +3225,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[entry] = skb; + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. + */ + tx_q->tx_count_frames += nfrags + 1; + if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && + !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + stmmac_tx_timer_arm(priv, queue); + } else { + if (likely(priv->extend_desc)) + desc = &tx_q->dma_etx[entry].basic; + else + desc = &tx_q->dma_tx[entry]; + + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } + /* We've used all descriptors we need for this skb, however, * advance cur_tx so that it references a fresh descriptor. * ndo_start_xmit will fill this descriptor the next time it's @@ -3258,23 +3281,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; - /* According to the coalesce parameter the IC bit for the latest - * segment is reset and the timer re-started to clean the tx status. - * This approach takes care about the fragments: desc is the first - * element in case of no SG. 
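
The coalescing relocation in stmmac_tso_xmit() and stmmac_xmit() is easier to read in isolation. The rule has to run while the queue index still names the last descriptor of this skb; once cur_tx advances, setting the IC bit would tag a descriptor that does not belong to the packet. Condensed from the hunks here (driver names, timestamp test simplified):

        tx_q->tx_count_frames += nfrags + 1;
        if (priv->tx_coal_frames > tx_q->tx_count_frames &&
            !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) {
                /* under the frame budget: defer completion to the timer */
                stmmac_tx_timer_arm(priv, queue);
        } else {
                /* over budget (or HW tstamp): interrupt on this skb's
                 * last descriptor, which cur_tx still points at here */
                desc = &tx_q->dma_tx[tx_q->cur_tx];
                tx_q->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
        }
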
- */ - tx_q->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && - !(priv->synopsys_id >= DWMAC_CORE_4_00 && - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - stmmac_tx_timer_arm(priv, queue); - } else { - tx_q->tx_count_frames = 0; - stmmac_set_tx_ic(priv, desc); - priv->xstats.tx_set_ic_bit++; - } - if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -3505,8 +3511,6 @@ read_again: if (unlikely(status & dma_own)) break; - count++; - rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); next_entry = rx_q->cur_rx; @@ -3533,6 +3537,7 @@ read_again: goto read_again; if (unlikely(error)) { dev_kfree_skb(skb); + count++; continue; } @@ -3572,6 +3577,7 @@ read_again: skb = napi_alloc_skb(&ch->rx_napi, len); if (!skb) { priv->dev->stats.rx_dropped++; + count++; continue; } @@ -3637,6 +3643,7 @@ read_again: priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += len; + count++; } if (status & rx_not_ls) { @@ -4742,8 +4749,10 @@ int stmmac_suspend(struct device *dev) stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ - clk_disable(priv->plat->pclk); - clk_disable(priv->plat->stmmac_clk); + if (priv->plat->clk_ptp_ref) + clk_disable_unprepare(priv->plat->clk_ptp_ref); + clk_disable_unprepare(priv->plat->pclk); + clk_disable_unprepare(priv->plat->stmmac_clk); } mutex_unlock(&priv->lock); @@ -4806,8 +4815,10 @@ int stmmac_resume(struct device *dev) } else { pinctrl_pm_select_default_state(priv->device); /* enable the clk previously disabled */ - clk_enable(priv->plat->stmmac_clk); - clk_enable(priv->plat->pclk); + clk_prepare_enable(priv->plat->stmmac_clk); + clk_prepare_enable(priv->plat->pclk); + if (priv->plat->clk_ptp_ref) + clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 173493db038c..df638b18b72c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -164,7 +164,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp, /* structure describing a PTP hardware clock */ static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, - .name = "stmmac_ptp_clock", + .name = "stmmac ptp", .max_adj = 62500000, .n_alarm = 0, .n_ext_ts = 0, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index cc76a42c7466..ac3f658105c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -6,7 +6,9 @@ * Author: Jose Abreu <joabreu@synopsys.com> */ +#include <linux/bitrev.h> #include <linux/completion.h> +#include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/ip.h> #include <linux/phy.h> @@ -485,17 +487,66 @@ static int stmmac_filter_check(struct stmmac_priv *priv) return -EOPNOTSUPP; } +static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr) +{ + int mc_offset = 32 - priv->hw->mcast_bits_log2; + struct netdev_hw_addr *ha; + u32 hash, hash_nr; + + /* First compute the hash for desired addr */ + hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset; + hash_nr = hash >> 5; + hash = 1 << (hash & 0x1f); + + /* Now, check if it collides with any existing one */ + netdev_for_each_mc_addr(ha, 
priv->dev) { + u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset; + if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash)) + return false; + } + + /* No collisions, address is good to go */ + return true; +} + +static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr) +{ + struct netdev_hw_addr *ha; + + /* Check if it collides with any existing one */ + netdev_for_each_uc_addr(ha, priv->dev) { + if (!memcmp(ha->addr, addr, ETH_ALEN)) + return false; + } + + /* No collisions, address is good to go */ + return true; +} + static int stmmac_test_hfilt(struct stmmac_priv *priv) { - unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd}; - unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb}; + unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa}; + unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; ret = stmmac_filter_check(priv); if (ret) return ret; + if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) + return -EOPNOTSUPP; + + while (--tries) { + /* We only need to check the bd_addr for collisions */ + bd_addr[ETH_ALEN - 1] = tries; + if (stmmac_hash_check(priv, bd_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; + ret = dev_mc_add(priv->dev, gd_addr); if (ret) return ret; @@ -520,13 +571,25 @@ cleanup: static int stmmac_test_pfilt(struct stmmac_priv *priv) { - unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; - unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55}; + unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77}; + unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) + return -EOPNOTSUPP; + + while (--tries) { + /* We only need to check the bd_addr for collisions */ + bd_addr[ETH_ALEN - 1] = tries; + if (stmmac_perfect_check(priv, bd_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; ret = dev_uc_add(priv->dev, gd_addr); if (ret) @@ -550,37 +613,31 @@ cleanup: return ret; } -static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr) -{ - return 0; -} - -static void stmmac_test_set_rx_mode(struct net_device *netdev) -{ - /* As we are in test mode of ethtool we already own the rtnl lock - * so no address will change from user. 
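
stmmac_hash_check() above reimplements in software exactly what the dwmac cores compute in hardware, so the selftest can predict whether a "bad" address collides into the same filter bit as a programmed one. The bit-position math on its own (a standalone sketch; mac_hash_bit is an illustrative name):

        #include <linux/bitrev.h>
        #include <linux/crc32.h>
        #include <linux/if_ether.h>

        static void mac_hash_bit(const u8 *addr, int mcast_bits_log2,
                                 u32 *reg_idx, u32 *bit_mask)
        {
                /* CRC-32 the MAC, bit-reverse, keep the top log2(bins) bits */
                u32 h = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >>
                        (32 - mcast_bits_log2);

                *reg_idx  = h >> 5;          /* which 32-bit hash register */
                *bit_mask = 1 << (h & 0x1f); /* which bit inside it */
        }
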
We can just call the - * ndo_set_rx_mode() callback directly */ - if (netdev->netdev_ops->ndo_set_rx_mode) - netdev->netdev_ops->ndo_set_rx_mode(netdev); -} - static int stmmac_test_mcfilt(struct stmmac_priv *priv) { - unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; - unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; + unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) + return -EOPNOTSUPP; + + while (--tries) { + /* We only need to check the mc_addr for collisions */ + mc_addr[ETH_ALEN - 1] = tries; + if (stmmac_hash_check(priv, mc_addr)) + break; + } - /* Remove all MC addresses */ - __dev_mc_unsync(priv->dev, NULL); - stmmac_test_set_rx_mode(priv->dev); + if (!tries) + return -EOPNOTSUPP; ret = dev_uc_add(priv->dev, uc_addr); if (ret) - goto cleanup; + return ret; attr.dst = uc_addr; @@ -597,28 +654,34 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv) cleanup: dev_uc_del(priv->dev, uc_addr); - __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL); - stmmac_test_set_rx_mode(priv->dev); return ret; } static int stmmac_test_ucfilt(struct stmmac_priv *priv) { - unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; - unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; + unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) + return -EOPNOTSUPP; - /* Remove all UC addresses */ - __dev_uc_unsync(priv->dev, NULL); - stmmac_test_set_rx_mode(priv->dev); + while (--tries) { + /* We only need to check the uc_addr for collisions */ + uc_addr[ETH_ALEN - 1] = tries; + if (stmmac_perfect_check(priv, uc_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; ret = dev_mc_add(priv->dev, mc_addr); if (ret) - goto cleanup; + return ret; attr.dst = mc_addr; @@ -635,8 +698,6 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv) cleanup: dev_mc_del(priv->dev, mc_addr); - __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL); - stmmac_test_set_rx_mode(priv->dev); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index e231098061b6..f9a9a9d82233 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -510,7 +510,7 @@ static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv, return NULL; } -struct { +static struct { int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls, struct stmmac_flow_entry *entry); } tc_flow_parsers[] = { diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index a65edd2770e6..37ba708ac781 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -722,7 +722,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, * cpdma_chan_split_pool - Splits ctrl pool between all channels. 
* Has to be called under ctlr lock */ -int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) +static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) { int tx_per_ch_desc = 0, rx_per_ch_desc = 0; int free_rx_num = 0, free_tx_num = 0; diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index bbbc1dcb6ab5..b517c1af9de0 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1237,8 +1237,17 @@ static int fjes_probe(struct platform_device *plat_dev) adapter->open_guard = false; adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0); + if (unlikely(!adapter->txrx_wq)) { + err = -ENOMEM; + goto err_free_netdev; + } + adapter->control_wq = alloc_workqueue(DRV_NAME "/control", WQ_MEM_RECLAIM, 0); + if (unlikely(!adapter->control_wq)) { + err = -ENOMEM; + goto err_free_txrx_wq; + } INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); INIT_WORK(&adapter->raise_intr_rxdata_task, @@ -1255,7 +1264,7 @@ static int fjes_probe(struct platform_device *plat_dev) hw->hw_res.irq = platform_get_irq(plat_dev, 0); err = fjes_hw_init(&adapter->hw); if (err) - goto err_free_netdev; + goto err_free_control_wq; /* setup MAC address (02:00:00:00:00:[epid])*/ netdev->dev_addr[0] = 2; @@ -1277,6 +1286,10 @@ static int fjes_probe(struct platform_device *plat_dev) err_hw_exit: fjes_hw_exit(&adapter->hw); +err_free_control_wq: + destroy_workqueue(adapter->control_wq); +err_free_txrx_wq: + destroy_workqueue(adapter->txrx_wq); err_free_netdev: free_netdev(netdev); err_out: diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index fbec711ff514..fbea6f232819 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -107,27 +107,6 @@ struct bpqdev { static LIST_HEAD(bpq_devices); -/* - * bpqether network devices are paired with ethernet devices below them, so - * form a special "super class" of normal ethernet devices; split their locks - * off into a separate class since they always nest. 
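
These bpqether deletions, and the matching ones in hyperv, ipvlan, macsec, macvlan, ppp, vrf and team later in this diff, all follow from one core change in this merge: every netdevice now gets its own dynamically registered lockdep key, so stacked drivers no longer need hand-rolled lock classes or nest-level callbacks. The core-side pattern, roughly (a sketch, not the exact net/core code):

        /* at device registration: a fresh key per device */
        lockdep_register_key(&dev->addr_list_lock_key);
        lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);

        /* at free_netdev(): the key dies with the device */
        lockdep_unregister_key(&dev->addr_list_lock_key);
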
- */ -static struct lock_class_key bpq_netdev_xmit_lock_key; -static struct lock_class_key bpq_netdev_addr_lock_key; - -static void bpq_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); -} - -static void bpq_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); -} - /* ------------------------------------------------------------------------ */ @@ -498,7 +477,6 @@ static int bpq_new_device(struct net_device *edev) err = register_netdevice(ndev); if (err) goto error; - bpq_set_lockdep_class(ndev); /* List protected by RTNL */ list_add_rcu(&bpq->bpq_list, &bpq_devices); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 39dddcd8b3cb..963509add611 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -982,7 +982,7 @@ static int netvsc_attach(struct net_device *ndev, if (netif_running(ndev)) { ret = rndis_filter_open(nvdev); if (ret) - return ret; + goto err; rdev = nvdev->extension; if (!rdev->link_state) @@ -990,6 +990,13 @@ static int netvsc_attach(struct net_device *ndev, } return 0; + +err: + netif_device_detach(ndev); + + rndis_filter_device_remove(hdev, nvdev); + + return ret; } static int netvsc_set_channels(struct net_device *net, @@ -1807,8 +1814,10 @@ static int netvsc_set_features(struct net_device *ndev, ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads); - if (ret) + if (ret) { features ^= NETIF_F_LRO; + ndev->features = features; + } syncvf: if (!vf_netdev) @@ -2335,8 +2344,6 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; - netdev_lockdep_set_classes(net); - /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 887bbba4631e..ba3dfac1d904 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -131,8 +131,6 @@ static int ipvlan_init(struct net_device *dev) dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; - netdev_lockdep_set_classes(dev); - ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); if (!ipvlan->pcpu_stats) return -ENOMEM; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index cb7637364b40..afd8b2a08245 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -267,7 +267,6 @@ struct macsec_dev { struct pcpu_secy_stats __percpu *stats; struct list_head secys; struct gro_cells gro_cells; - unsigned int nest_level; }; /** @@ -2750,7 +2749,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, #define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) -static struct lock_class_key macsec_netdev_addr_lock_key; static int macsec_dev_init(struct net_device *dev) { @@ -2958,11 +2956,6 @@ static int macsec_get_iflink(const struct net_device *dev) return macsec_priv(dev)->real_dev->ifindex; } -static int macsec_get_nest_level(struct net_device *dev) -{ - return macsec_priv(dev)->nest_level; -} - static const struct net_device_ops macsec_netdev_ops = { .ndo_init = macsec_dev_init, .ndo_uninit = macsec_dev_uninit, @@ -2976,7 +2969,6 @@ static const struct net_device_ops macsec_netdev_ops = { .ndo_start_xmit = macsec_start_xmit, 
.ndo_get_stats64 = macsec_get_stats64, .ndo_get_iflink = macsec_get_iflink, - .ndo_get_lock_subclass = macsec_get_nest_level, }; static const struct device_type macsec_type = { @@ -3001,12 +2993,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { static void macsec_free_netdev(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); - struct net_device *real_dev = macsec->real_dev; free_percpu(macsec->stats); free_percpu(macsec->secy.tx_sc.stats); - dev_put(real_dev); } static void macsec_setup(struct net_device *dev) @@ -3261,14 +3251,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) return err; - dev_hold(real_dev); - - macsec->nest_level = dev_get_nest_level(real_dev) + 1; - netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macsec_netdev_addr_lock_key, - macsec_get_nest_level(dev)); - err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) goto unregister; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 940192c057b6..34fc59bd1e20 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) * "super class" of normal network devices; split their locks off into a * separate class since they always nest. */ -static struct lock_class_key macvlan_netdev_addr_lock_key; - #define ALWAYS_ON_OFFLOADS \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) @@ -869,19 +867,6 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) -static int macvlan_get_nest_level(struct net_device *dev) -{ - return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; -} - -static void macvlan_set_lockdep_class(struct net_device *dev) -{ - netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key, - macvlan_get_nest_level(dev)); -} - static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -900,8 +885,6 @@ static int macvlan_init(struct net_device *dev) dev->gso_max_segs = lowerdev->gso_max_segs; dev->hard_header_len = lowerdev->hard_header_len; - macvlan_set_lockdep_class(dev); - vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->pcpu_stats) return -ENOMEM; @@ -1161,7 +1144,6 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, - .ndo_get_lock_subclass = macvlan_get_nest_level, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = macvlan_dev_poll_controller, .ndo_netpoll_setup = macvlan_dev_netpoll_setup, @@ -1445,7 +1427,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; - vlan->nest_level = dev_get_nest_level(lowerdev) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 56576d4f34a5..54ca6681ba31 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -806,9 +806,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev) { struct nsim_dev_port *nsim_dev_port, *tmp; + mutex_lock(&nsim_dev->port_list_lock); list_for_each_entry_safe(nsim_dev_port, tmp, 
&nsim_dev->port_list, list) __nsim_dev_port_del(nsim_dev_port); + mutex_unlock(&nsim_dev->port_list_lock); } int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) @@ -822,14 +824,17 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) return PTR_ERR(nsim_dev); dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev); + mutex_lock(&nsim_dev->port_list_lock); for (i = 0; i < nsim_bus_dev->port_count; i++) { err = __nsim_dev_port_add(nsim_dev, i); if (err) goto err_port_del_all; } + mutex_unlock(&nsim_dev->port_list_lock); return 0; err_port_del_all: + mutex_unlock(&nsim_dev->port_list_lock); nsim_dev_port_del_all(nsim_dev); nsim_dev_destroy(nsim_dev); return err; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index f61d094746c0..1a251f76d09b 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = { void nsim_fib_exit(void) { - unregister_pernet_subsys(&nsim_fib_net_ops); unregister_fib_notifier(&nsim_fib_nb); + unregister_pernet_subsys(&nsim_fib_net_ops); } int nsim_fib_init(void) @@ -258,6 +258,7 @@ int nsim_fib_init(void) err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent); if (err < 0) { pr_err("Failed to register fib notifier\n"); + unregister_pernet_subsys(&nsim_fib_net_ops); goto err_out; } diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 8fc33867e524..af8eabe7a6d4 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -572,6 +572,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .name = _name, \ /* PHY_BASIC_FEATURES */ \ .flags = PHY_IS_INTERNAL, \ + .soft_reset = genphy_soft_reset, \ .config_init = bcm7xxx_config_init, \ .suspend = bcm7xxx_suspend, \ .resume = bcm7xxx_config_init, \ diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 2fea5541c35a..63dedec0433d 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, + const u32 ksz_phy_id) +{ + int ret; + + if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id) + return 0; + + ret = phy_read(phydev, MII_BMSR); + if (ret < 0) + return ret; + + /* KSZ8051 PHY and KSZ8794/KSZ8795/KSZ8765 switch share the same + * exact PHY ID. However, they can be told apart by the extended + * capability registers presence. The KSZ8051 PHY has them while + * the switch does not. 
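
With the PHY and the switch pseudo-PHY reporting identical IDs, the micrel driver keys the two table entries off .match_phy_device instead of the static ID/mask pair. Condensed to the KSZ8051 side (the KSZ8795 match is the same test negated; error handling simplified — the shared helper above propagates a negative phy_read() result):

        static int ksz8051_match(struct phy_device *phydev)
        {
                int bmsr;

                if ((phydev->phy_id & MICREL_PHY_ID_MASK) != PHY_ID_KSZ8051)
                        return 0;

                bmsr = phy_read(phydev, MII_BMSR);

                /* BMSR_ERCAP set: real KSZ8051 PHY with extended registers;
                 * clear: KSZ8794/95/65 switch, which lacks them */
                return bmsr >= 0 && (bmsr & BMSR_ERCAP);
        }
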
+ */ + ret &= BMSR_ERCAP; + if (ksz_phy_id == PHY_ID_KSZ8051) + return ret; + else + return !ret; +} + +static int ksz8051_match_phy_device(struct phy_device *phydev) +{ + return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051); +} + static int ksz8081_config_init(struct phy_device *phydev) { /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line @@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev) return kszphy_config_init(phydev); } +static int ksz8795_match_phy_device(struct phy_device *phydev) +{ + return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX); +} + static int ksz9021_load_values_from_of(struct phy_device *phydev, const struct device_node *of_node, u16 reg, @@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = PHY_ID_KSZ8051, - .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8051", /* PHY_BASIC_FEATURES */ .driver_data = &ksz8051_type, @@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, + .match_phy_device = ksz8051_match_phy_device, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = PHY_ID_KSZ8795, - .phy_id_mask = MICREL_PHY_ID_MASK, - .name = "Micrel KSZ8795", + .name = "Micrel KSZ87XX Switch", /* PHY_BASIC_FEATURES */ .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, + .match_phy_device = ksz8795_match_phy_device, .suspend = genphy_suspend, .resume = genphy_resume, }, { diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 7935593debb1..a1caeee12236 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev) { int val; + linkmode_zero(phydev->lp_advertising); + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1); if (val < 0) return val; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 119e6f466056..105d389b58e7 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -572,9 +572,6 @@ int phy_start_aneg(struct phy_device *phydev) if (AUTONEG_DISABLE == phydev->autoneg) phy_sanitize_settings(phydev); - /* Invalidate LP advertising flags */ - linkmode_zero(phydev->lp_advertising); - err = phy_config_aneg(phydev); if (err < 0) goto out_unlock; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9d2bbb13293e..adb66a2fae18 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1787,7 +1787,14 @@ int genphy_read_lpa(struct phy_device *phydev) { int lpa, lpagb; - if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { + if (phydev->autoneg == AUTONEG_ENABLE) { + if (!phydev->autoneg_complete) { + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, + 0); + mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0); + return 0; + } + if (phydev->is_gigabit_capable) { lpagb = phy_read(phydev, MII_STAT1000); if (lpagb < 0) @@ -1815,6 +1822,8 @@ int genphy_read_lpa(struct phy_device *phydev) return lpa; mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); + } else { + linkmode_zero(phydev->lp_advertising); } return 0; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a5a57ca94c1a..a578f7ebf715 100644 --- 
a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -87,8 +87,24 @@ struct phylink { phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__) #define phylink_info(pl, fmt, ...) \ phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__) +#if defined(CONFIG_DYNAMIC_DEBUG) #define phylink_dbg(pl, fmt, ...) \ +do { \ + if ((pl)->config->type == PHYLINK_NETDEV) \ + netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__); \ + else if ((pl)->config->type == PHYLINK_DEV) \ + dev_dbg((pl)->dev, fmt, ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) +#define phylink_dbg(pl, fmt, ...) \ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__) +#else +#define phylink_dbg(pl, fmt, ...) \ +({ \ + if (0) \ + phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__); \ +}) +#endif /** * phylink_set_port_modes() - set the port type modes in the ethtool mask @@ -576,7 +592,7 @@ static int phylink_register_sfp(struct phylink *pl, /** * phylink_create() - create a phylink instance - * @ndev: a pointer to the &struct net_device + * @config: a pointer to the target &struct phylink_config * @fwnode: a pointer to a &struct fwnode_handle describing the network * interface * @iface: the desired link mode defined by &typedef phy_interface_t diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index dc3d92d340c4..b73298250793 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -327,6 +327,7 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8740", /* PHY_BASIC_FEATURES */ + .flags = PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 9a1b006904a7..61824bbb5588 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_device *dev) { struct ppp *ppp; - netdev_lockdep_set_classes(dev); - ppp = netdev_priv(dev); /* Let the netdevice take a reference on the ppp file. 
This ensures * that ppp_destroy_interface() won't run before the device gets diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index e8089def5a46..8156b33ee3e7 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1615,7 +1615,6 @@ static int team_init(struct net_device *dev) int err; team->dev = dev; - mutex_init(&team->lock); team_set_no_mode(team); team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats); @@ -1642,7 +1641,8 @@ static int team_init(struct net_device *dev) goto err_options_register; netif_carrier_off(dev); - netdev_lockdep_set_classes(dev); + lockdep_register_key(&team->team_lock_key); + __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key); return 0; @@ -1673,6 +1673,7 @@ static void team_uninit(struct net_device *dev) team_queue_override_fini(team); mutex_unlock(&team->lock); netdev_change_features(dev); + lockdep_unregister_key(&team->team_lock_key); } static void team_destructor(struct net_device *dev) @@ -1976,8 +1977,15 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev) err = team_port_del(team, port_dev); mutex_unlock(&team->lock); - if (!err) - netdev_change_features(dev); + if (err) + return err; + + if (netif_is_team_master(port_dev)) { + lockdep_unregister_key(&team->team_lock_key); + lockdep_register_key(&team->team_lock_key); + lockdep_set_class(&team->lock, &team->team_lock_key); + } + netdev_change_features(dev); return err; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 812dc3a65efb..a8d3141582a5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? 
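
The tun hunk just below is a data-race annotation rather than a logic change: e->queue_index is written here while the flow-steering path reads it locklessly, so both sides need marked accesses. The idiom in isolation:

        /* update side (tun_flow_update) */
        if (READ_ONCE(e->queue_index) != queue_index)
                WRITE_ONCE(e->queue_index, queue_index);

        /* lockless reader: one untorn load the compiler cannot refetch */
        u16 queue = READ_ONCE(e->queue_index);
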
*/ - if (e->queue_index != queue_index) - e->queue_index = queue_index; + if (READ_ONCE(e->queue_index) != queue_index) + WRITE_ONCE(e->queue_index, queue_index); if (e->updated != jiffies) e->updated = jiffies; sock_rps_record_flow_hash(e->rps_rxhash); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 32f53de5b1fe..fe630438f67b 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -787,6 +787,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ { USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 00cab3f43a4c..a245597a3902 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) /* read current mtu value from device */ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); - if (err < 0) { + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size)); + if (err < sizeof(max_datagram_size)) { dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); goto out; } @@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) max_datagram_size = cpu_to_le16(ctx->max_datagram_size); err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); + 0, iface_no, &max_datagram_size, sizeof(max_datagram_size)); if (err < 0) dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index a505b2ab88b8..74849da031fa 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -186,7 +186,7 @@ struct hso_tiocmget { int intr_completed; struct usb_endpoint_descriptor *endp; struct urb *urb; - struct hso_serial_state_notification serial_state_notification; + struct hso_serial_state_notification *serial_state_notification; u16 prev_UART_state_bitmap; struct uart_icount icount; }; @@ -1432,7 +1432,7 @@ static int tiocmget_submit_urb(struct hso_serial *serial, usb_rcvintpipe(usb, tiocmget->endp-> bEndpointAddress & 0x7F), - &tiocmget->serial_state_notification, + tiocmget->serial_state_notification, sizeof(struct hso_serial_state_notification), tiocmget_intr_callback, serial, tiocmget->endp->bInterval); @@ -1479,7 +1479,7 @@ static void tiocmget_intr_callback(struct urb *urb) /* wIndex should be the USB interface number of the port to which the * notification applies, which should always be the Modem port. 
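
The hso change in this hunk enforces the USB DMA-buffer rule: a buffer handed to usb_fill_int_urb() must be a standalone heap allocation, not a field embedded by value in a larger structure, where it is not guaranteed DMA-safe and can share cachelines with CPU-written state. The shape of the fix (illustrative helper name):

        static int hso_alloc_state_notification(struct hso_tiocmget *tiocmget)
        {
                tiocmget->serial_state_notification =
                        kzalloc(sizeof(*tiocmget->serial_state_notification),
                                GFP_KERNEL);
                if (!tiocmget->serial_state_notification)
                        return -ENOMEM;
                return 0;
        }
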
*/ - serial_state_notification = &tiocmget->serial_state_notification; + serial_state_notification = tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || serial_state_notification->bNotification != B_NOTIFICATION || le16_to_cpu(serial_state_notification->wValue) != W_VALUE || @@ -2565,6 +2565,8 @@ static void hso_free_tiomget(struct hso_serial *serial) usb_free_urb(tiocmget->urb); tiocmget->urb = NULL; serial->tiocmget = NULL; + kfree(tiocmget->serial_state_notification); + tiocmget->serial_state_notification = NULL; kfree(tiocmget); } } @@ -2615,10 +2617,13 @@ static struct hso_device *hso_create_bulk_serial_device( num_urbs = 2; serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget), GFP_KERNEL); + serial->tiocmget->serial_state_notification + = kzalloc(sizeof(struct hso_serial_state_notification), + GFP_KERNEL); /* it isn't going to break our heart if serial->tiocmget * allocation fails don't bother checking this. */ - if (serial->tiocmget) { + if (serial->tiocmget && serial->tiocmget->serial_state_notification) { tiocmget = serial->tiocmget; tiocmget->endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 58f5a219fb65..f24a1b0b801f 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1264,8 +1264,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); lan78xx_defer_kevent(dev, EVENT_LINK_RESET); - if (dev->domain_data.phyirq > 0) + if (dev->domain_data.phyirq > 0) { + local_irq_disable(); generic_handle_irq(dev->domain_data.phyirq); + local_irq_enable(); + } } else netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata); @@ -3782,10 +3785,14 @@ static int lan78xx_probe(struct usb_interface *intf, /* driver requires remote-wakeup capability during autosuspend. 
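
The lan78xx reorder below is standard probe discipline: make the device fully operational (PHY attached) before register_netdev() publishes it, and unwind in strict reverse order on failure. Condensed (labels as in the driver):

        ret = lan78xx_phy_init(dev);
        if (ret < 0)
                goto out4;              /* nothing published yet */

        ret = register_netdev(netdev);
        if (ret)
                goto out5;              /* undo the PHY attach */
        return 0;

        out5:
                phy_disconnect(netdev->phydev);
        out4:
                usb_free_urb(dev->urb_intr);
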
*/ intf->needs_remote_wakeup = 1; + ret = lan78xx_phy_init(dev); + if (ret < 0) + goto out4; + ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out4; + goto out5; } usb_set_intfdata(intf, dev); @@ -3798,14 +3805,10 @@ static int lan78xx_probe(struct usb_interface *intf, pm_runtime_set_autosuspend_delay(&udev->dev, DEFAULT_AUTOSUSPEND_DELAY); - ret = lan78xx_phy_init(dev); - if (ret < 0) - goto out5; - return 0; out5: - unregister_netdev(netdev); + phy_disconnect(netdev->phydev); out4: usb_free_urb(dev->urb_intr); out3: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3d77cd402ba9..56d334b9ad45 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ @@ -1361,6 +1362,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index cee9fef925cd..d4a95b50bda6 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5755,6 +5755,7 @@ static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)}, {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index c5d4a0060124..681e0def6356 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -335,7 +335,7 @@ static void sr_set_multicast(struct net_device *net) static int sr_mdio_read(struct net_device *net, int phy_id, int loc) { struct usbnet *dev = netdev_priv(net); - __le16 res; + __le16 res = 0; mutex_lock(&dev->phy_mutex); sr_set_sw_mii(dev); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ee52bde058df..b8228f50bc94 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_device *dev) /* similarly, oper state is irrelevant; set to up to avoid confusion */ dev->operstate = IF_OPER_UP; - netdev_lockdep_set_classes(dev); return 0; out_rth: diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3d9bcc957f7d..8869154fad88 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2487,9 +2487,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->options_len && - 
info->key.tun_flags & TUNNEL_VXLAN_OPT) + if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { + if (info->options_len < sizeof(*md)) + goto drop; md = ip_tunnel_info_opts(info); + } ttl = info->key.ttl; tos = info->key.tos; label = info->key.label; @@ -3566,10 +3568,13 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); + struct net_device *remote_dev = NULL; struct vxlan_fdb *f = NULL; bool unregister = false; + struct vxlan_rdst *dst; int err; + dst = &vxlan->default_dst; err = vxlan_dev_configure(net, dev, conf, false, extack); if (err) return err; @@ -3577,14 +3582,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, dev->ethtool_ops = &vxlan_ethtool_ops; /* create an fdb entry for a valid default destination */ - if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { + if (!vxlan_addr_any(&dst->remote_ip)) { err = vxlan_fdb_create(vxlan, all_zeros_mac, - &vxlan->default_dst.remote_ip, + &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, vxlan->cfg.dst_port, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_ifindex, + dst->remote_vni, + dst->remote_vni, + dst->remote_ifindex, NTF_SELF, &f); if (err) return err; @@ -3595,26 +3600,41 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, goto errout; unregister = true; + if (dst->remote_ifindex) { + remote_dev = __dev_get_by_index(net, dst->remote_ifindex); + if (!remote_dev) + goto errout; + + err = netdev_upper_dev_link(remote_dev, dev, extack); + if (err) + goto errout; + } + err = rtnl_configure_link(dev, NULL); if (err) - goto errout; + goto unlink; if (f) { - vxlan_fdb_insert(vxlan, all_zeros_mac, - vxlan->default_dst.remote_vni, f); + vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f); /* notify default fdb entry */ err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, true, extack); if (err) { vxlan_fdb_destroy(vxlan, f, false, false); + if (remote_dev) + netdev_upper_dev_unlink(remote_dev, dev); goto unregister; } } list_add(&vxlan->next, &vn->vxlan_list); + if (remote_dev) + dst->remote_dev = remote_dev; return 0; - +unlink: + if (remote_dev) + netdev_upper_dev_unlink(remote_dev, dev); errout: /* unregister_netdevice() destroys the default FDB entry with deletion * notification. 
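
The vxlan rework below threads a netdev_upper_dev_link() through __vxlan_dev_create(), and the new unlink: label exists so every later failure undoes it. Reduced to the invariant (variable names as in the diff):

        err = netdev_upper_dev_link(remote_dev, dev, extack);
        if (err)
                goto errout;

        err = rtnl_configure_link(dev, NULL);
        if (err)
                goto unlink;            /* link made, must be unmade */

        return 0;

        unlink:
                netdev_upper_dev_unlink(remote_dev, dev);
        errout:
                /* existing unregister/teardown path continues */
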
But the addition notification was not sent yet, so @@ -3932,11 +3952,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_rdst *dst = &vxlan->default_dst; struct net_device *lowerdev; struct vxlan_config conf; + struct vxlan_rdst *dst; int err; + dst = &vxlan->default_dst; err = vxlan_nl2conf(tb, data, dev, &conf, true, extack); if (err) return err; @@ -3946,6 +3967,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err; + if (dst->remote_dev == lowerdev) + lowerdev = NULL; + + err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev, + extack); + if (err) + return err; + /* handle default dst entry */ if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) { u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); @@ -3962,6 +3991,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], NTF_SELF, true, extack); if (err) { spin_unlock_bh(&vxlan->hash_lock[hash_index]); + netdev_adjacent_change_abort(dst->remote_dev, + lowerdev, dev); return err; } } @@ -3979,6 +4010,11 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (conf.age_interval != vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies); + netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev); + if (lowerdev && lowerdev != dst->remote_dev) { + dst->remote_dev = lowerdev; + netdev_update_lockdep_key(lowerdev); + } vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); return 0; } @@ -3991,6 +4027,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) list_del(&vxlan->next); unregister_netdevice_queue(dev, head); + if (vxlan->default_dst.remote_dev) + netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev); } static size_t vxlan_get_size(const struct net_device *dev) diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index 8efb493ceec2..5c79f052cad2 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -127,12 +127,12 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, "%d\n", result); result = 0; error_cmd: - kfree(cmd); kfree_skb(ack_skb); error_msg_to_dev: error_alloc: d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n", wimax_dev, state, result); + kfree(cmd); return result; } diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index dc45d16e8d21..383d4fa555a8 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2118,12 +2118,15 @@ static int ath10k_init_uart(struct ath10k *ar) return ret; } - if (!uart_print && ar->hw_params.uart_pin_workaround) { - ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, - ar->hw_params.uart_pin); - if (ret) { - ath10k_warn(ar, "failed to set UART TX pin: %d", ret); - return ret; + if (!uart_print) { + if (ar->hw_params.uart_pin_workaround) { + ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, + ar->hw_params.uart_pin); + if (ret) { + ath10k_warn(ar, "failed to set UART TX pin: %d", + ret); + return ret; + } } return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 7573af2d88ce..c2db758b9d54 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -162,12 +162,13 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc) wifi_pkg = 
iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg) || tbl_rev != 0) { + if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } - if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || + tbl_rev != 0) { ret = -EINVAL; goto out_free; } @@ -224,12 +225,13 @@ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg) || tbl_rev != 0) { + if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } - if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || + tbl_rev != 0) { ret = -EINVAL; goto out_free; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 39c64850cb6f..c0750ced5ac2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -520,7 +520,7 @@ struct iwl_scan_dwell { } __packed; /** - * struct iwl_scan_config + * struct iwl_scan_config_v1 * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions @@ -552,7 +552,7 @@ struct iwl_scan_config_v1 { #define SCAN_LB_LMAC_IDX 0 #define SCAN_HB_LMAC_IDX 1 -struct iwl_scan_config { +struct iwl_scan_config_v2 { __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -564,6 +564,24 @@ struct iwl_scan_config { u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; +} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */ + +/** + * struct iwl_scan_config + * @enable_cam_mode: whether to enable CAM mode. + * @enable_promiscouos_mode: whether to enable promiscouos mode + * @bcast_sta_id: the index of the station in the fw + * @reserved: reserved + * @tx_chains: valid_tx antenna - ANT_* definitions + * @rx_chains: valid_rx antenna - ANT_* definitions + */ +struct iwl_scan_config { + u8 enable_cam_mode; + u8 enable_promiscouos_mode; + u8 bcast_sta_id; + u8 reserved; + __le32 tx_chains; + __le32 rx_chains; } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 5c8602de9168..87421807e040 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -646,6 +646,7 @@ static struct scatterlist *alloc_sgtable(int size) if (new_page) __free_page(new_page); } + kfree(table); return NULL; } alloc_size = min_t(int, size, PAGE_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 423cc0cf8e78..0d5bc4ce5c07 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -288,6 +288,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * STA_CONTEXT_DOT11AX_API_S * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar * version tables. + * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of + * SCAN_CONFIG_DB_CMD_API_S. 
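
The new TLV bit is consumed in mvm/scan.c further down in this diff: a one-line capability test decides between the compact v3 scan config and the legacy channel-array layouts. Roughly, mirroring those hunks:

        if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
                return iwl_mvm_legacy_config_scan(mvm);

        /* reduced config: fixed-size struct, no channel array —
         * the firmware owns the channel list itself */
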
* * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -321,6 +323,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54, IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55, + IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index cb4c5514a556..695bbaa86273 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -279,6 +279,7 @@ * Indicates MAC is entering a power-saving sleep power-down. * Not a good time to access device-internal resources. */ +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index f8e4f0f5de0c..f09e368c7040 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -112,38 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf); */ static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs) { - return ofs + trans->cfg->trans.umac_prph_offset; + return ofs + trans->trans_cfg->umac_prph_offset; } static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs) { return iwl_read_prph_no_grab(trans, ofs + - trans->cfg->trans.umac_prph_offset); + trans->trans_cfg->umac_prph_offset); } static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs) { - return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset); + return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset); } static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) { - iwl_write_prph_no_grab(trans, ofs + trans->cfg->trans.umac_prph_offset, + iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset, val); } static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs, u32 val) { - iwl_write_prph(trans, ofs + trans->cfg->trans.umac_prph_offset, val); + iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val); } static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { return iwl_poll_prph_bit(trans, addr + - trans->cfg->trans.umac_prph_offset, + trans->trans_cfg->umac_prph_offset, bits, mask, timeout); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index f47e0f97acf8..23c25a7665f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -449,6 +449,11 @@ enum { #define PERSISTENCE_BIT BIT(12) #define PREG_WFPM_ACCESS BIT(12) +#define HPM_HIPM_GEN_CFG 0xA03458 +#define HPM_HIPM_GEN_CFG_CR_PG_EN BIT(0) +#define HPM_HIPM_GEN_CFG_CR_SLP_EN BIT(1) +#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE BIT(10) + #define UREG_DOORBELL_TO_ISR6 0xA05C04 #define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0) #define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 32a5e4e5461f..d9eb2b286438 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -420,6 +420,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) }; int ret; + if (mvm->trans->cfg->tx_with_siso_diversity) + init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY)); + lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; @@ -694,12 +697,13 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm) wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg) || tbl_rev != 0) { + if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } - if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || + tbl_rev != 0) { ret = -EINVAL; goto out_free; } @@ -731,13 +735,14 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg) || tbl_rev != 0) { + if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) || - (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) { + (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) || + tbl_rev != 0) { ret = -EINVAL; goto out_free; } @@ -791,11 +796,16 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg) || tbl_rev > 1) { + if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } + if (tbl_rev != 0) { + ret = -EINVAL; + goto out_free; + } + mvm->geo_rev = tbl_rev; for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) { @@ -889,15 +899,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm) * firmware versions. Unfortunately, we don't have a TLV API * flag to rely on, so rely on the major version which is in * the first byte of ucode_ver. This was implemented - * initially on version 38 and then backported to29 and 17. - * The intention was to have it in 36 as well, but not all - * 8000 family got this feature enabled. The 8000 family is - * the only one using version 36, so skip this version - * entirely. + * initially on version 38 and then backported to 17. It was + * also backported to 29, but only for 7265D devices. The + * intention was to have it in 36 as well, but not all 8000 + * family got this feature enabled. The 8000 family is the + * only one using version 36, so skip this version entirely. 
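
Every iwl_acpi_get_wifi_pkg() caller in this diff gets the same two-step fix: the old combined test returned PTR_ERR() of a *valid* pointer whenever only tbl_rev was wrong, which is not a meaningful error code. The checks are now split (the pattern repeated across acpi.c and fw.c):

        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, size, &tbl_rev);
        if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);        /* genuine lookup failure */
                goto out_free;
        }

        if (tbl_rev != 0) {                     /* only revision 0 supported */
                ret = -EINVAL;
                goto out_free;
        }
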
*/ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 || - IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 || - IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17; + IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 || + (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 && + ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + CSR_HW_REV_TYPE_7265D)); } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) @@ -1020,11 +1032,16 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg) || tbl_rev != 0) { + if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } + if (tbl_rev != 0) { + ret = -EINVAL; + goto out_free; + } + enabled = &wifi_pkg->package.elements[1]; if (enabled->type != ACPI_TYPE_INTEGER || (enabled->integer.value != 0 && enabled->integer.value != 1)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index cd1b10042fbf..d31f96c3f925 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4881,11 +4881,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, if (!iwl_mvm_has_new_rx_api(mvm)) return; - notif->cookie = mvm->queue_sync_cookie; - - if (notif->sync) + if (notif->sync) { + notif->cookie = mvm->queue_sync_cookie; atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues); + } ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size, !notif->sync); @@ -4905,7 +4905,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, out: atomic_set(&mvm->queue_sync_counter, 0); - mvm->queue_sync_cookie++; + if (notif->sync) + mvm->queue_sync_cookie++; } static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 843d00bf2bd5..5ca50f39a023 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1405,6 +1405,12 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER); } +static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG); +} + static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index f6b3045badbd..fcafa22ec6ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1137,11 +1137,11 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } -static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags, - u32 max_channels) +static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config, + u32 flags, u8 channel_flags, + u32 max_channels) { - struct iwl_scan_config *cfg = config; + struct iwl_scan_config_v2 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); @@ -1185,7 +1185,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } -int iwl_mvm_config_scan(struct iwl_mvm *mvm) +static int iwl_mvm_legacy_config_scan(struct 
iwl_mvm *mvm) { void *cfg; int ret, cmd_size; @@ -1217,7 +1217,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) } if (iwl_mvm_cdb_scan_api(mvm)) - cmd_size = sizeof(struct iwl_scan_config); + cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); cmd_size += num_channels; @@ -1254,8 +1254,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; - iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags, - num_channels); + iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags, + num_channels); } else { iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags, num_channels); @@ -1277,6 +1277,30 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return ret; } +int iwl_mvm_config_scan(struct iwl_mvm *mvm) +{ + struct iwl_scan_config cfg; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len[0] = sizeof(cfg), + .data[0] = &cfg, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!iwl_mvm_is_reduced_config_scan_supported(mvm)) + return iwl_mvm_legacy_config_scan(mvm); + + memset(&cfg, 0, sizeof(cfg)); + + cfg.bcast_sta_id = mvm->aux_sta.sta_id; + cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); + + IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); + + return iwl_mvm_send_cmd(mvm, &cmd); +} + static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) { int i; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 0bedba4c61f2..b3768d5d852a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1482,6 +1482,13 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, mvm_sta->sta_id, i); txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, i, wdg); + /* + * on failures, just set it to IWL_MVM_INVALID_QUEUE + * to try again later, we have no other good way of + * failing here + */ + if (txq_id < 0) + txq_id = IWL_MVM_INVALID_QUEUE; tid_data->txq_id = txq_id; /* @@ -1950,30 +1957,73 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) sta->sta_id = IWL_MVM_INVALID_STA; } -static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, +static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? mvm->trans->trans_cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + + WARN_ON(iwl_mvm_has_new_tx_api(mvm)); + + iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); +} + +static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) +{ + unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
+ mvm->trans->trans_cfg->base_params->wd_timeout : + IWL_WATCHDOG_DISABLED; + + WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); + + return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, + wdg_timeout); +} +static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, + int maccolor, + struct iwl_mvm_int_sta *sta, + u16 *queue, int fifo) +{ + int ret; + + /* Map queue to fifo - needs to happen before adding station */ + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); + + ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor); + if (ret) { + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_disable_txq(mvm, NULL, *queue, + IWL_MAX_TID_COUNT, 0); + return ret; + } + + /* + * For 22000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ if (iwl_mvm_has_new_tx_api(mvm)) { - int tvqm_queue = - iwl_mvm_tvqm_enable_txq(mvm, sta_id, - IWL_MAX_TID_COUNT, - wdg_timeout); - *queue = tvqm_queue; - } else { - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .sta_id = sta_id, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; + int txq; - iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout); + txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); + if (txq < 0) { + iwl_mvm_rm_sta_common(mvm, sta->sta_id); + return txq; + } + + *queue = txq; } + + return 0; } int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) @@ -1989,59 +2039,26 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) if (ret) return ret; - /* Map Aux queue to fifo - needs to happen before adding Aux station */ - if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, - mvm->aux_sta.sta_id, - IWL_MVM_TX_FIFO_MCAST); - - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, - MAC_INDEX_AUX, 0); + ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0, + &mvm->aux_sta, &mvm->aux_queue, + IWL_MVM_TX_FIFO_MCAST); if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } - /* - * For 22000 firmware and on we cannot add queue to a station unknown - * to firmware so enable queue here - after the station was added - */ - if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, - mvm->aux_sta.sta_id, - IWL_MVM_TX_FIFO_MCAST); - return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; lockdep_assert_held(&mvm->mutex); - /* Map snif queue to fifo - must happen before adding snif station */ - if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, - mvm->snif_sta.sta_id, + return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, + &mvm->snif_sta, &mvm->snif_queue, IWL_MVM_TX_FIFO_BE); - - ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, - mvmvif->id, 0); - if (ret) - return ret; - - /* - * For 22000 firmware and on we cannot add queue to a station unknown - * to firmware so enable queue here - after the station was added - */ - if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, - mvm->snif_sta.sta_id, - IWL_MVM_TX_FIFO_BE); - - return 0; } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -2133,6 +2150,10 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, 
wdg_timeout); + if (queue < 0) { + iwl_mvm_rm_sta_common(mvm, bsta->sta_id); + return queue; + } if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) @@ -2307,10 +2328,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); - if (ret) { - iwl_mvm_dealloc_int_sta(mvm, msta); - return ret; - } + if (ret) + goto err; /* * Enable cab queue after the ADD_STA command is sent. @@ -2323,6 +2342,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout); + if (queue < 0) { + ret = queue; + goto err; + } mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) @@ -2330,6 +2353,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) timeout); return 0; +err: + iwl_mvm_dealloc_int_sta(mvm, msta); + return ret; } static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 75fa8a6aafee..74980382e64c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -107,13 +107,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); - if (ret) { - dma_free_coherent(trans->dev, - sizeof(*prph_scratch), - prph_scratch, - trans_pcie->prph_scratch_dma_addr); - return ret; - } + if (ret) + goto err_free_prph_scratch; + /* Allocate prph information * currently we don't assign to the prph info anything, but it would get @@ -121,16 +117,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), &trans_pcie->prph_info_dma_addr, GFP_KERNEL); - if (!prph_info) - return -ENOMEM; + if (!prph_info) { + ret = -ENOMEM; + goto err_free_prph_scratch; + } /* Allocate context info */ ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); - if (!ctxt_info_gen3) - return -ENOMEM; + if (!ctxt_info_gen3) { + ret = -ENOMEM; + goto err_free_prph_info; + } ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); @@ -186,6 +186,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); return 0; + +err_free_prph_info: + dma_free_coherent(trans->dev, + sizeof(*prph_info), + prph_info, + trans_pcie->prph_info_dma_addr); + +err_free_prph_scratch: + dma_free_coherent(trans->dev, + sizeof(*prph_scratch), + prph_scratch, + trans_pcie->prph_scratch_dma_addr); + return ret; + } void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e29c47744ef5..040cec17d3ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0038, 
iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0038, 
iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)}, @@ -571,20 +573,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, @@ -601,7 +603,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)}, @@ -616,33 +618,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x2034, 
iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)}, + + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)}, @@ -671,6 +673,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)}, @@ -726,62 +729,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)}, - 
{IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x02A4, 
iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + + {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, @@ -821,34 +822,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)}, - 
{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + + {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)}, @@ -1067,11 +1068,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - ((cfg != &iwl_ax200_cfg_cc && - cfg != &killer1650x_2ax_cfg && - cfg != &killer1650w_2ax_cfg && - cfg != &iwl_ax201_cfg_quz_hr) || - iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { + iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { u32 hw_status; hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index df8455f14e4d..ca3bb4d65b00 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -57,6 +57,24 @@ #include "internal.h" #include "fw/dbg.h" +static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) +{ + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + udelay(20); + iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_PG_EN | + HPM_HIPM_GEN_CFG_CR_SLP_EN); + udelay(20); + iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, + HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); + + iwl_trans_sw_reset(trans); + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + return 0; +} + /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) @@ -92,6 +110,13 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + trans->cfg->integrated) { + ret = iwl_pcie_gen2_force_power_gating(trans); + if (ret) + return ret; + } + ret = iwl_finish_nic_init(trans, trans->trans_cfg); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f8a1f985a1d8..6961f00ff812 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3272,11 +3272,17 @@ static struct iwl_trans_dump_data ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); + u8 tfdidx; u32 caplen, cmdlen; + if (trans->trans_cfg->use_tfh) + tfdidx = idx; + else + tfdidx = ptr; + cmdlen = iwl_trans_pcie_get_cmdlen(trans, - cmdq->tfds + - tfd_size * ptr); + (u8 *)cmdq->tfds + + tfd_size * tfdidx); caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); if (cmdlen) { @@ -3450,6 +3456,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->reg_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); + + trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + if (!trans_pcie->rba.alloc_wq) { + ret = -ENOMEM; + goto out_free_trans; + } + INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); + trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans_pcie->tso_hdr_page) { ret = -ENOMEM; @@ -3584,10 +3599,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->inta_mask = CSR_INI_SET_MASK; } - trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); - #ifdef CONFIG_IWLWIFI_DEBUGFS trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; mutex_init(&trans_pcie->fw_mon_data.mutex); @@ -3599,6 +3610,8 @@ out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: free_percpu(trans_pcie->tso_hdr_page); + destroy_workqueue(trans_pcie->rba.alloc_wq); +out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); } diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 158a3d762e55..e323e9a5999f 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(local_info_t *local) } } - -/* - * HostAP uses two layers of net devices, where the inner - * layer gets called all the time from the outer layer. 
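The `ctxt-info-gen3.c` hunk earlier and the `trans.c` hunk above converge on the same cleanup discipline: early `return`s that leaked prior allocations become `goto`s into a shared tail of labels, and `alloc_workqueue()` moves ahead of the first failure point (its result previously wasn't checked at all) so every error path can unwind in strict reverse order. A sketch of that shape, under hypothetical `my_*` names rather than the drivers' own:

```c
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>

#define MY_BUF_SZ 4096		/* illustrative size */

struct my_dev {
	struct device *dev;
	struct workqueue_struct *wq;
	void *buf;
	dma_addr_t buf_dma;
};

static int my_enable_hw(struct my_dev *md);	/* hypothetical */

static int my_setup(struct my_dev *md)
{
	int ret;

	md->wq = alloc_workqueue("my_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!md->wq)
		return -ENOMEM;		/* nothing acquired yet */

	md->buf = dma_alloc_coherent(md->dev, MY_BUF_SZ, &md->buf_dma,
				     GFP_KERNEL);
	if (!md->buf) {
		ret = -ENOMEM;
		goto err_destroy_wq;
	}

	ret = my_enable_hw(md);
	if (ret)
		goto err_free_buf;

	return 0;

err_free_buf:			/* release in reverse order of acquisition */
	dma_free_coherent(md->dev, MY_BUF_SZ, md->buf, md->buf_dma);
err_destroy_wq:
	destroy_workqueue(md->wq);
	return ret;
}
```

Each label frees exactly the resources acquired before the jump to it, which is what keeps the tail correct as new steps are inserted.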
- * This is a natural nesting, which needs a split lock type. - */ -static struct lock_class_key hostap_netdev_xmit_lock_key; -static struct lock_class_key hostap_netdev_addr_lock_key; - -static void prism2_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, - &hostap_netdev_xmit_lock_key); -} - -static void prism2_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, - &hostap_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); -} - static struct net_device * prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, struct device *sdev) @@ -3223,7 +3199,6 @@ while (0) if (ret >= 0) ret = register_netdevice(dev); - prism2_set_lockdep_class(dev); rtnl_unlock(); if (ret < 0) { printk(KERN_WARNING "%s: register netdevice failed!\n", diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 45c73a6f09a1..14f562cd715c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -4026,7 +4026,7 @@ static int __init init_mac80211_hwsim(void) err = dev_alloc_name(hwsim_mon, hwsim_mon->name); if (err < 0) { rtnl_unlock(); - goto out_free_radios; + goto out_free_mon; } err = register_netdevice(hwsim_mon); diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 4d03596e891f..d7a1ddc9e407 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -8,6 +8,8 @@ mt76-y := \ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \ tx.o agg-rx.o mcu.o +mt76-$(CONFIG_PCI) += pci.o + mt76-usb-y := usb.o usb_trace.o CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index c747eb24581c..8f69d00bd940 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -53,8 +53,10 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, u32 ctrl; int i, idx = -1; - if (txwi) + if (txwi) { q->entry[q->head].txwi = DMA_DUMMY_DATA; + q->entry[q->head].skip_buf0 = true; + } for (i = 0; i < nbufs; i += 2, buf += 2) { u32 buf0 = buf[0].addr, buf1 = 0; @@ -97,7 +99,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl); u32 ctrl = le32_to_cpu(__ctrl); - if (!e->txwi || !e->skb) { + if (!e->skip_buf0) { __le32 addr = READ_ONCE(q->desc[idx].buf0); u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 570c159515a0..8aec7ccf2d79 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -93,8 +93,9 @@ struct mt76_queue_entry { struct urb *urb; }; enum mt76_txq_id qid; - bool schedule; - bool done; + bool skip_buf0:1; + bool schedule:1; + bool done:1; }; struct mt76_queue_regs { @@ -578,6 +579,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, #define mt76_poll_msec(dev, ...) 
__mt76_poll_msec(&((dev)->mt76), __VA_ARGS__) void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs); +void mt76_pci_disable_aspm(struct pci_dev *pdev); static inline u16 mt76_chip(struct mt76_dev *dev) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 73c3104f8858..cf611d1b817c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -81,6 +81,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* RG_SSUSB_CDR_BR_PE1D = 0x3 */ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3); + mt76_pci_disable_aspm(pdev); + return 0; error: diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c new file mode 100644 index 000000000000..04c5a692bc85 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/pci.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/pci.h> + +void mt76_pci_disable_aspm(struct pci_dev *pdev) +{ + struct pci_dev *parent = pdev->bus->self; + u16 aspm_conf, parent_aspm_conf = 0; + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf); + aspm_conf &= PCI_EXP_LNKCTL_ASPMC; + if (parent) { + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, + &parent_aspm_conf); + parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC; + } + + if (!aspm_conf && (!parent || !parent_aspm_conf)) { + /* aspm already disabled */ + return; + } + + dev_info(&pdev->dev, "disabling ASPM %s %s\n", + (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "", + (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : ""); + + if (IS_ENABLED(CONFIG_PCIEASPM)) { + int err; + + err = pci_disable_link_state(pdev, aspm_conf); + if (!err) + return; + } + + /* both device and parent should have the same ASPM setting. + * disable ASPM in downstream component first and then upstream. 
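Two details from the mt76 hunks above are easy to miss. First, `mt76_queue_entry` switches its flags to one-bit bitfields so the new `skip_buf0` flag can be carried without growing the per-descriptor bookkeeping. The toy program below is an illustration, not driver code, and exact sizes are compiler- and ABI-dependent:

```c
#include <stdbool.h>
#include <stdio.h>

struct entry_old {			/* two full bools */
	bool schedule;
	bool done;
};

struct entry_new {			/* three flags share one storage unit */
	bool skip_buf0:1;
	bool schedule:1;
	bool done:1;
};

int main(void)
{
	/* Typically prints "2 1", though the sizes are ABI-dependent. */
	printf("%zu %zu\n", sizeof(struct entry_old),
	       sizeof(struct entry_new));
	return 0;
}
```

Second, `mt76_pci_disable_aspm()` prefers `pci_disable_link_state()` when `CONFIG_PCIEASPM` is built in, and only falls back to clearing the `PCI_EXP_LNKCTL` ASPM bits by hand, on both the endpoint and its upstream bridge, when that call is unavailable or fails.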
+ */ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf); + if (parent) + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + aspm_conf); +} +EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 2b216edd0c7d..a90a518b40d3 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -23,7 +23,6 @@ #include <linux/leds.h> #include <linux/mutex.h> #include <linux/etherdevice.h> -#include <linux/input-polldev.h> #include <linux/kfifo.h> #include <linux/hrtimer.h> #include <linux/average.h> diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 4d4e3888ef20..f2395309ec00 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -555,7 +555,7 @@ static ssize_t rt2x00debug_write_restart_hw(struct file *file, { struct rt2x00debug_intf *intf = file->private_data; struct rt2x00_dev *rt2x00dev = intf->rt2x00dev; - static unsigned long last_reset; + static unsigned long last_reset = INITIAL_JIFFIES; if (!rt2x00_has_cap_restart_hw(rt2x00dev)) return -EOPNOTSUPP; diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 6087ec7a90a6..f88d26535978 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) hdr = rtl_get_hdr(skb); fc = rtl_get_fc(skb); - if (!stats.crc && !stats.hwerror) { + if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); @@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) _rtl_pci_rx_to_mac80211(hw, skb, rx_status); } } else { + /* drop packets with errors or those too short */ dev_kfree_skb_any(skb); } new_trx_end: diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 70f04c2f5b17..fff8dda14023 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -754,6 +754,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, return; } else { noa_num = (noa_len - 2) / 13; + if (noa_num > P2P_MAX_NOA_NUM) + noa_num = P2P_MAX_NOA_NUM; + } noa_index = ie[3]; if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == @@ -848,6 +851,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, return; } else { noa_num = (noa_len - 2) / 13; + if (noa_num > P2P_MAX_NOA_NUM) + noa_num = P2P_MAX_NOA_NUM; + } noa_index = ie[3]; if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index be92e1220284..7997cc6de334 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -548,6 +548,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, priv->is_connected = false; priv->is_up = false; INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete); + __module_get(THIS_MODULE); return 0; unregister_netdev: @@ -578,6 +579,7 @@ static void virt_wifi_dellink(struct net_device *dev, netdev_upper_dev_unlink(priv->lowerdev, dev); unregister_netdevice_queue(dev, head); + module_put(THIS_MODULE); /* Deleting the wiphy is handled in the module destructor. 
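The two `ps.c` hunks above add the same bound in both the NoA-IE and the action-frame parsers: `noa_num` is computed from a peer-controlled IE length and subsequently indexes fixed-capacity arrays in the driver's P2P power-save state, so it has to be clamped to `P2P_MAX_NOA_NUM` before use. A reduced sketch of the pattern (capacity, struct layout, and names here are illustrative, assuming `<linux/types.h>`):

```c
#define MAX_DESC 2		/* illustrative capacity, cf. P2P_MAX_NOA_NUM */

struct noa_desc { u32 duration, interval, start_time; };

static void parse_noa(struct noa_desc *out, const u8 *ie, int noa_len)
{
	/* The count comes straight from the peer's frame... */
	int i, num = (noa_len - 2) / 13;

	/* ...so it must never exceed the backing array's capacity. */
	if (num > MAX_DESC)
		num = MAX_DESC;

	for (i = 0; i < num; i++) {
		/* decode the i-th 13-byte descriptor into out[i] */
	}
}
```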
*/ } @@ -590,6 +592,42 @@ static struct rtnl_link_ops virt_wifi_link_ops = { .priv_size = sizeof(struct virt_wifi_netdev_priv), }; +static bool netif_is_virt_wifi_dev(const struct net_device *dev) +{ + return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler; +} + +static int virt_wifi_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr); + struct virt_wifi_netdev_priv *priv; + struct net_device *upper_dev; + LIST_HEAD(list_kill); + + if (!netif_is_virt_wifi_dev(lower_dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UNREGISTER: + priv = rtnl_dereference(lower_dev->rx_handler_data); + if (!priv) + return NOTIFY_DONE; + + upper_dev = priv->upperdev; + + upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill); + unregister_netdevice_many(&list_kill); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block virt_wifi_notifier = { + .notifier_call = virt_wifi_event, +}; + /* Acquires and releases the rtnl lock. */ static int __init virt_wifi_init_module(void) { @@ -598,14 +636,25 @@ static int __init virt_wifi_init_module(void) /* Guaranteed to be locallly-administered and not multicast. */ eth_random_addr(fake_router_bssid); + err = register_netdevice_notifier(&virt_wifi_notifier); + if (err) + return err; + + err = -ENOMEM; common_wiphy = virt_wifi_make_wiphy(); if (!common_wiphy) - return -ENOMEM; + goto notifier; err = rtnl_link_register(&virt_wifi_link_ops); if (err) - virt_wifi_destroy_wiphy(common_wiphy); + goto destroy_wiphy; + return 0; + +destroy_wiphy: + virt_wifi_destroy_wiphy(common_wiphy); +notifier: + unregister_netdevice_notifier(&virt_wifi_notifier); return err; } @@ -615,6 +664,7 @@ static void __exit virt_wifi_cleanup_module(void) /* Will delete any devices that depend on the wiphy. 
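The notifier added above recognizes its own lower devices by comparing `dev->rx_handler` against `virt_wifi_rx_handler`, then tears down the stacked upper device through the upper device's own `rtnl_link_ops`. Reduced to its skeleton, with hypothetical `my_is_lower()`/`my_upper_of()` lookups in place of the rx_handler plumbing:

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static bool my_is_lower(const struct net_device *dev);		/* hypothetical */
static struct net_device *my_upper_of(struct net_device *dev);	/* hypothetical */

static int my_netdev_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper;
	LIST_HEAD(list_kill);

	if (event != NETDEV_UNREGISTER || !my_is_lower(lower))
		return NOTIFY_DONE;

	upper = my_upper_of(lower);
	/* dellink() queues the upper device; the batch is flushed below. */
	upper->rtnl_link_ops->dellink(upper, &list_kill);
	unregister_netdevice_many(&list_kill);
	return NOTIFY_DONE;
}

static struct notifier_block my_notifier = {
	.notifier_call = my_netdev_event,
};
```

`dellink()` queues the device on `list_kill` rather than unregistering inline, and `unregister_netdevice_many()` flushes the batch; both expect the RTNL lock, which netdevice notifiers already run under.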
*/ rtnl_link_unregister(&virt_wifi_link_ops); virt_wifi_destroy_wiphy(common_wiphy); + unregister_netdevice_notifier(&virt_wifi_notifier); } module_init(virt_wifi_init_module); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 240f762b3749..103ed00775eb 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -719,7 +719,6 @@ err_unmap: xenvif_unmap_frontend_data_rings(queue); netif_napi_del(&queue->napi); err: - module_put(THIS_MODULE); return err; } diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 1cd113c8d7cb..ad0abb1f0bae 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -259,7 +259,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, *fw_vsc_cfg, len); if (r) { - devm_kfree(dev, fw_vsc_cfg); + devm_kfree(dev, *fw_vsc_cfg); goto vsc_read_err; } } else { diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index c5289eaf17ee..e897e4d768ef 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -547,18 +547,25 @@ static int pn533_usb_probe(struct usb_interface *interface, rc = pn533_finalize_setup(priv); if (rc) - goto error; + goto err_deregister; usb_set_intfdata(interface, phy); return 0; +err_deregister: + pn533_unregister_device(phy->priv); error: + usb_kill_urb(phy->in_urb); + usb_kill_urb(phy->out_urb); + usb_kill_urb(phy->ack_urb); + usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); usb_free_urb(phy->ack_urb); usb_put_dev(phy->udev); kfree(in_buf); + kfree(phy->ack_buffer); return rc; } diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index f9ac176cf257..2ce17932a073 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -708,6 +708,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev, NFC_PROTO_FELICA_MASK; } else { kfree_skb(nfcid_skb); + nfcid_skb = NULL; /* P2P in type A */ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_NFCID1, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index fd7dea36c3b6..fa7ba09dca77 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -116,10 +116,26 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl) /* * Only new queue scan work when admin and IO queues are both alive */ - if (ctrl->state == NVME_CTRL_LIVE) + if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset) queue_work(nvme_wq, &ctrl->scan_work); } +/* + * Use this function to proceed with scheduling reset_work for a controller + * that had previously been set to the resetting state. This is intended for + * code paths that can't be interrupted by other reset attempts. A hot removal + * may prevent this from succeeding. 
+ */ +int nvme_try_sched_reset(struct nvme_ctrl *ctrl) +{ + if (ctrl->state != NVME_CTRL_RESETTING) + return -EBUSY; + if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) + return -EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(nvme_try_sched_reset); + int nvme_reset_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) @@ -137,8 +153,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) ret = nvme_reset_ctrl(ctrl); if (!ret) { flush_work(&ctrl->reset_work); - if (ctrl->state != NVME_CTRL_LIVE && - ctrl->state != NVME_CTRL_ADMIN_ONLY) + if (ctrl->state != NVME_CTRL_LIVE) ret = -ENETRESET; } @@ -315,15 +330,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, old_state = ctrl->state; switch (new_state) { - case NVME_CTRL_ADMIN_ONLY: - switch (old_state) { - case NVME_CTRL_CONNECTING: - changed = true; - /* FALLTHRU */ - default: - break; - } - break; case NVME_CTRL_LIVE: switch (old_state) { case NVME_CTRL_NEW: @@ -339,7 +345,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (old_state) { case NVME_CTRL_NEW: case NVME_CTRL_LIVE: - case NVME_CTRL_ADMIN_ONLY: changed = true; /* FALLTHRU */ default: @@ -359,7 +364,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_DELETING: switch (old_state) { case NVME_CTRL_LIVE: - case NVME_CTRL_ADMIN_ONLY: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; @@ -381,8 +385,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, break; } - if (changed) + if (changed) { ctrl->state = new_state; + wake_up_all(&ctrl->state_wq); + } spin_unlock_irqrestore(&ctrl->lock, flags); if (changed && ctrl->state == NVME_CTRL_LIVE) @@ -391,6 +397,39 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, } EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); +/* + * Returns true for sink states that can't ever transition back to live. + */ +static bool nvme_state_terminal(struct nvme_ctrl *ctrl) +{ + switch (ctrl->state) { + case NVME_CTRL_NEW: + case NVME_CTRL_LIVE: + case NVME_CTRL_RESETTING: + case NVME_CTRL_CONNECTING: + return false; + case NVME_CTRL_DELETING: + case NVME_CTRL_DEAD: + return true; + default: + WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state); + return true; + } +} + +/* + * Waits for the controller state to be resetting, or returns false if it is + * not possible to ever transition to that state. 
+ */ +bool nvme_wait_reset(struct nvme_ctrl *ctrl) +{ + wait_event(ctrl->state_wq, + nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) || + nvme_state_terminal(ctrl)); + return ctrl->state == NVME_CTRL_RESETTING; +} +EXPORT_SYMBOL_GPL(nvme_wait_reset); + static void nvme_free_ns_head(struct kref *ref) { struct nvme_ns_head *head = @@ -1306,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl) if (ns->disk && nvme_revalidate_disk(ns->disk)) nvme_set_queue_dying(ns); up_read(&ctrl->namespaces_rwsem); - - nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); } static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) @@ -1323,6 +1360,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) nvme_unfreeze(ctrl); nvme_mpath_unfreeze(ctrl->subsys); mutex_unlock(&ctrl->subsys->lock); + nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); mutex_unlock(&ctrl->scan_lock); } if (effects & NVME_CMD_EFFECTS_CCC) @@ -2874,7 +2912,6 @@ static int nvme_dev_open(struct inode *inode, struct file *file) switch (ctrl->state) { case NVME_CTRL_LIVE: - case NVME_CTRL_ADMIN_ONLY: break; default: return -EWOULDBLOCK; @@ -3168,7 +3205,6 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, static const char *const state_name[] = { [NVME_CTRL_NEW] = "new", [NVME_CTRL_LIVE] = "live", - [NVME_CTRL_ADMIN_ONLY] = "only-admin", [NVME_CTRL_RESETTING] = "resetting", [NVME_CTRL_CONNECTING] = "connecting", [NVME_CTRL_DELETING] = "deleting", @@ -3679,11 +3715,10 @@ static void nvme_scan_work(struct work_struct *work) struct nvme_id_ctrl *id; unsigned nn; - if (ctrl->state != NVME_CTRL_LIVE) + /* No tagset on a live ctrl means IO queues could not created */ + if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset) return; - WARN_ON_ONCE(!ctrl->tagset); - if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { dev_info(ctrl->device, "rescanning namespaces.\n"); nvme_clear_changed_ns_log(ctrl); @@ -3844,13 +3879,13 @@ static void nvme_fw_act_work(struct work_struct *work) if (time_after(jiffies, fw_act_timeout)) { dev_warn(ctrl->device, "Fw activation timeout, reset controller\n"); - nvme_reset_ctrl(ctrl); - break; + nvme_try_sched_reset(ctrl); + return; } msleep(100); } - if (ctrl->state != NVME_CTRL_LIVE) + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) return; nvme_start_queues(ctrl); @@ -3870,7 +3905,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) nvme_queue_scan(ctrl); break; case NVME_AER_NOTICE_FW_ACT_STARTING: - queue_work(nvme_wq, &ctrl->fw_act_work); + /* + * We are (ab)using the RESETTING state to prevent subsequent + * recovery actions from interfering with the controller's + * firmware activation. 
+ */ + if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) + queue_work(nvme_wq, &ctrl->fw_act_work); break; #ifdef CONFIG_NVME_MULTIPATH case NVME_AER_NOTICE_ANA: @@ -3993,6 +4034,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); + init_waitqueue_head(&ctrl->state_wq); INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 93f08d77c896..a0ec40ab62ee 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -182,8 +182,7 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, bool queue_live) { - if (likely(ctrl->state == NVME_CTRL_LIVE || - ctrl->state == NVME_CTRL_ADMIN_ONLY)) + if (likely(ctrl->state == NVME_CTRL_LIVE)) return true; return __nvmf_check_ready(ctrl, rq, queue_live); } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 30de7efef003..e0f064dcbd02 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -158,9 +158,11 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) struct nvme_ns *ns; mutex_lock(&ctrl->scan_lock); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) if (nvme_mpath_clear_current_path(ns)) kblockd_schedule_work(&ns->head->requeue_work); + up_read(&ctrl->namespaces_rwsem); mutex_unlock(&ctrl->scan_lock); } @@ -522,14 +524,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, return 0; } -static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only) +static int nvme_read_ana_log(struct nvme_ctrl *ctrl) { u32 nr_change_groups = 0; int error; mutex_lock(&ctrl->ana_lock); - error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, - groups_only ? 
NVME_ANA_LOG_RGO : 0, + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, ctrl->ana_log_buf, ctrl->ana_log_size, 0); if (error) { dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error); @@ -565,7 +566,7 @@ static void nvme_ana_work(struct work_struct *work) { struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); - nvme_read_ana_log(ctrl, false); + nvme_read_ana_log(ctrl); } static void nvme_anatt_timeout(struct timer_list *t) @@ -715,7 +716,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) goto out; } - error = nvme_read_ana_log(ctrl, true); + error = nvme_read_ana_log(ctrl); if (error) goto out_free_ana_log_buf; return 0; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 38a83ef5bcd3..22e8401352c2 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -15,6 +15,7 @@ #include <linux/sed-opal.h> #include <linux/fault-inject.h> #include <linux/rcupdate.h> +#include <linux/wait.h> #include <trace/events/block.h> @@ -161,7 +162,6 @@ static inline u16 nvme_req_qid(struct request *req) enum nvme_ctrl_state { NVME_CTRL_NEW, NVME_CTRL_LIVE, - NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */ NVME_CTRL_RESETTING, NVME_CTRL_CONNECTING, NVME_CTRL_DELETING, @@ -199,6 +199,7 @@ struct nvme_ctrl { struct cdev cdev; struct work_struct reset_work; struct work_struct delete_work; + wait_queue_head_t state_wq; struct nvme_subsystem *subsys; struct list_head subsys_entry; @@ -449,6 +450,7 @@ void nvme_complete_rq(struct request *req); bool nvme_cancel_request(struct request *req, void *data, bool reserved); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state); +bool nvme_wait_reset(struct nvme_ctrl *ctrl); int nvme_disable_ctrl(struct nvme_ctrl *ctrl); int nvme_enable_ctrl(struct nvme_ctrl *ctrl); int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); @@ -499,6 +501,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); +int nvme_try_sched_reset(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bb88681f4dc3..869f462e6b6e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -773,7 +773,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, struct bio_vec *bv) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset; + unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1); + unsigned int first_prp_len = dev->ctrl.page_size - offset; iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->first_dma)) @@ -2263,10 +2264,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) return true; } -/* - * return error value only when tagset allocation failed - */ -static int nvme_dev_add(struct nvme_dev *dev) +static void nvme_dev_add(struct nvme_dev *dev) { int ret; @@ -2296,7 +2294,7 @@ static int nvme_dev_add(struct nvme_dev *dev) if (ret) { dev_warn(dev->ctrl.device, "IO queues tagset allocation failed %d\n", ret); - return ret; + return; } dev->ctrl.tagset = &dev->tagset; } else { @@ -2307,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev) } nvme_dbbuf_set(dev); - return 0; } static int nvme_pci_enable(struct 
nvme_dev *dev) @@ -2467,6 +2464,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) mutex_unlock(&dev->shutdown_lock); } +static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) +{ + if (!nvme_wait_reset(&dev->ctrl)) + return -EBUSY; + nvme_dev_disable(dev, shutdown); + return 0; +} + static int nvme_setup_prp_pools(struct nvme_dev *dev) { dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, @@ -2490,14 +2495,20 @@ static void nvme_release_prp_pools(struct nvme_dev *dev) dma_pool_destroy(dev->prp_small_pool); } +static void nvme_free_tagset(struct nvme_dev *dev) +{ + if (dev->tagset.tags) + blk_mq_free_tag_set(&dev->tagset); + dev->ctrl.tagset = NULL; +} + static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) { struct nvme_dev *dev = to_nvme_dev(ctrl); nvme_dbbuf_dma_free(dev); put_device(dev->dev); - if (dev->tagset.tags) - blk_mq_free_tag_set(&dev->tagset); + nvme_free_tagset(dev); if (dev->ctrl.admin_q) blk_put_queue(dev->ctrl.admin_q); kfree(dev->queues); @@ -2508,6 +2519,11 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) static void nvme_remove_dead_ctrl(struct nvme_dev *dev) { + /* + * Set state to deleting now to avoid blocking nvme_wait_reset(), which + * may be holding this pci_dev's device lock. + */ + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); nvme_get_ctrl(&dev->ctrl); nvme_dev_disable(dev, false); nvme_kill_queues(&dev->ctrl); @@ -2521,7 +2537,6 @@ static void nvme_reset_work(struct work_struct *work) container_of(work, struct nvme_dev, ctrl.reset_work); bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); int result; - enum nvme_ctrl_state new_state = NVME_CTRL_LIVE; if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { result = -ENODEV; @@ -2615,13 +2630,11 @@ static void nvme_reset_work(struct work_struct *work) dev_warn(dev->ctrl.device, "IO queues not created\n"); nvme_kill_queues(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl); - new_state = NVME_CTRL_ADMIN_ONLY; + nvme_free_tagset(dev); } else { nvme_start_queues(&dev->ctrl); nvme_wait_freeze(&dev->ctrl); - /* hit this only when allocate tagset fails */ - if (nvme_dev_add(dev)) - new_state = NVME_CTRL_ADMIN_ONLY; + nvme_dev_add(dev); nvme_unfreeze(&dev->ctrl); } @@ -2629,9 +2642,9 @@ static void nvme_reset_work(struct work_struct *work) * If only admin queue live, keep it to do further investigation or * recovery. */ - if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) { + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { dev_warn(dev->ctrl.device, - "failed to mark controller state %d\n", new_state); + "failed to mark controller live state\n"); result = -ENODEV; goto out; } @@ -2672,7 +2685,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { - *val = readq(to_nvme_dev(ctrl)->bar + off); + *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); return 0; } @@ -2836,19 +2849,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void nvme_reset_prepare(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); - nvme_dev_disable(dev, false); + + /* + * We don't need to check the return value from waiting for the reset + * state as pci_dev device lock is held, making it impossible to race + * with ->remove(). 
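One easy-to-miss change above: the 64-bit controller register read switches from readq() to lo_hi_readq(). readq() does not exist on 32-bit builds, and not every platform can issue an atomic 64-bit MMIO read; the lo-hi helper (from <linux/io-64-nonatomic-lo-hi.h>) degrades to two 32-bit reads, low word first, which matches how NVMe allows its 64-bit registers to be accessed. A sketch of what the fallback boils down to; where a native readq() exists the header simply uses it.

#include <linux/io.h>
#include <linux/types.h>

/* Rough equivalent of the lo-hi fallback: two 32-bit reads, low first. */
static inline u64 sketch_lo_hi_readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);		/* lower 32 bits first... */
	high = readl(p + 1);	/* ...then the upper 32 bits */

	return low + ((u64)high << 32);
}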
+ */ + nvme_disable_prepare_reset(dev, false); + nvme_sync_queues(&dev->ctrl); } static void nvme_reset_done(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); - nvme_reset_ctrl_sync(&dev->ctrl); + + if (!nvme_try_sched_reset(&dev->ctrl)) + flush_work(&dev->ctrl.reset_work); } static void nvme_shutdown(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); - nvme_dev_disable(dev, true); + nvme_disable_prepare_reset(dev, true); } /* @@ -2901,7 +2923,7 @@ static int nvme_resume(struct device *dev) if (ndev->last_ps == U32_MAX || nvme_set_power_state(ctrl, ndev->last_ps) != 0) - nvme_reset_ctrl(ctrl); + return nvme_try_sched_reset(&ndev->ctrl); return 0; } @@ -2929,17 +2951,14 @@ static int nvme_suspend(struct device *dev) */ if (pm_suspend_via_firmware() || !ctrl->npss || !pcie_aspm_enabled(pdev) || - (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) { - nvme_dev_disable(ndev, true); - return 0; - } + (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) + return nvme_disable_prepare_reset(ndev, true); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); nvme_sync_queues(ctrl); - if (ctrl->state != NVME_CTRL_LIVE && - ctrl->state != NVME_CTRL_ADMIN_ONLY) + if (ctrl->state != NVME_CTRL_LIVE) goto unfreeze; ret = nvme_get_power_state(ctrl, &ndev->last_ps); @@ -2965,9 +2984,8 @@ static int nvme_suspend(struct device *dev) * Clearing npss forces a controller reset on resume. The * correct value will be resdicovered then. */ - nvme_dev_disable(ndev, true); + ret = nvme_disable_prepare_reset(ndev, true); ctrl->npss = 0; - ret = 0; } unfreeze: nvme_unfreeze(ctrl); @@ -2977,9 +2995,7 @@ unfreeze: static int nvme_simple_suspend(struct device *dev) { struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); - - nvme_dev_disable(ndev, true); - return 0; + return nvme_disable_prepare_reset(ndev, true); } static int nvme_simple_resume(struct device *dev) @@ -2987,8 +3003,7 @@ static int nvme_simple_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct nvme_dev *ndev = pci_get_drvdata(pdev); - nvme_reset_ctrl(&ndev->ctrl); - return 0; + return nvme_try_sched_reset(&ndev->ctrl); } static const struct dev_pm_ops nvme_dev_pm_ops = { diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 4d280160dd3f..cb4c3000a57e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1701,6 +1701,14 @@ nvme_rdma_timeout(struct request *rq, bool reserved) dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", rq->tag, nvme_rdma_queue_idx(queue)); + /* + * Restart the timer if a controller reset is already scheduled. Any + * timed out commands would be handled before entering the connecting + * state. 
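The comment this hunk adds (mirrored in the TCP transport further down) encodes a simple rule for block timeout handlers: if a controller reset is already in flight, rearm the timer instead of kicking off a second, competing recovery. Reduced to its shape, with toy types standing in for the driver's own:

#include <linux/blk-mq.h>

enum toy_ctrl_state { TOY_CTRL_LIVE, TOY_CTRL_RESETTING };

struct toy_timeout_ctrl {
	enum toy_ctrl_state state;
};

static enum blk_eh_timer_return toy_timeout(struct request *rq,
					    struct toy_timeout_ctrl *ctrl)
{
	/* The scheduled reset will deal with every outstanding request
	 * before the queues reconnect, so just push the deadline out. */
	if (ctrl->state == TOY_CTRL_RESETTING)
		return BLK_EH_RESET_TIMER;

	/* No reset in flight: escalate and let recovery take over. */
	return BLK_EH_DONE;
}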
+ */ + if (ctrl->ctrl.state == NVME_CTRL_RESETTING) + return BLK_EH_RESET_TIMER; + if (ctrl->ctrl.state != NVME_CTRL_LIVE) { /* * Teardown immediately if controller times out while starting @@ -2125,8 +2133,16 @@ err_unreg_client: static void __exit nvme_rdma_cleanup_module(void) { + struct nvme_rdma_ctrl *ctrl; + nvmf_unregister_transport(&nvme_rdma_transport); ib_unregister_client(&nvme_rdma_ib_client); + + mutex_lock(&nvme_rdma_ctrl_mutex); + list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) + nvme_delete_ctrl(&ctrl->ctrl); + mutex_unlock(&nvme_rdma_ctrl_mutex); + flush_workqueue(nvme_delete_wq); } module_init(nvme_rdma_init_module); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 385a5212c10f..7544be84ab35 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1386,7 +1386,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; queue->sock->sk->sk_state_change = nvme_tcp_state_change; queue->sock->sk->sk_write_space = nvme_tcp_write_space; +#ifdef CONFIG_NET_RX_BUSY_POLL queue->sock->sk->sk_ll_usec = 1; +#endif write_unlock_bh(&queue->sock->sk->sk_callback_lock); return 0; @@ -2044,6 +2046,14 @@ nvme_tcp_timeout(struct request *rq, bool reserved) struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; struct nvme_tcp_cmd_pdu *pdu = req->pdu; + /* + * Restart the timer if a controller reset is already scheduled. Any + * timed out commands would be handled before entering the connecting + * state. + */ + if (ctrl->ctrl.state == NVME_CTRL_RESETTING) + return BLK_EH_RESET_TIMER; + dev_warn(ctrl->ctrl.device, "queue %d: timeout request %#x type %d\n", nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); @@ -2126,6 +2136,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, ret = nvme_tcp_map_data(queue, rq); if (unlikely(ret)) { + nvme_cleanup_cmd(rq); dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", ret); return ret; @@ -2208,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; - if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue)) + if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); nvme_tcp_try_recv(queue); return queue->nr_cqe; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 748a39fca771..11f5aea97d1b 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -157,8 +157,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, iod->sg_table.sgl = iod->first_sgl; if (sg_alloc_table_chained(&iod->sg_table, blk_rq_nr_phys_segments(req), - iod->sg_table.sgl, SG_CHUNK_SIZE)) + iod->sg_table.sgl, SG_CHUNK_SIZE)) { + nvme_cleanup_cmd(req); return BLK_STS_RESOURCE; + } iod->req.sg = iod->sg_table.sgl; iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 7989703b883c..6bd610ee2cd7 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev, if (!target) return -ENODEV; - if (!of_device_is_available(target)) + if (!of_device_is_available(target)) { + of_node_put(target); return 0; + } rmem = __find_rmem(target); of_node_put(target); diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 480a21e2ed39..92e895d86458 100644 --- a/drivers/of/unittest.c +++ 
b/drivers/of/unittest.c @@ -1207,6 +1207,7 @@ static int __init unittest_data_add(void) of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node); if (!unittest_data_node) { pr_warn("%s: No tree to attach; not running tests\n", __func__); + kfree(unittest_data); return -ENODATA; } diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 3b7ffd0234e9..9ff0538ee83a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, goto free_regulators; } - ret = regulator_enable(reg); - if (ret < 0) { - regulator_put(reg); - goto free_regulators; - } - opp_table->regulators[i] = reg; } @@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, return opp_table; free_regulators: - while (i--) { - regulator_disable(opp_table->regulators[i]); - regulator_put(opp_table->regulators[i]); - } + while (i != 0) + regulator_put(opp_table->regulators[--i]); kfree(opp_table->regulators); opp_table->regulators = NULL; @@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) /* Make sure there are no concurrent readers while updating opp_table */ WARN_ON(!list_empty(&opp_table->opp_list)); - for (i = opp_table->regulator_count - 1; i >= 0; i--) { - regulator_disable(opp_table->regulators[i]); + for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_put(opp_table->regulators[i]); - } _free_set_opp_data(opp_table); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1813f5ad5fa2..1cbb58240b80 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, { struct dev_pm_opp *opp; - lockdep_assert_held(&opp_table_lock); - mutex_lock(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { @@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) return 0; } + /* + * Re-initialize list_kref every time we add static OPPs to the OPP + * table as the reference count may be 0 after the last time static OPPs + * were removed. + */ + kref_init(&opp_table->list_kref); + /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { opp = _opp_add_static_v2(opp_table, dev, np); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index ed50502cc65a..de8e4e347249 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -678,14 +678,6 @@ static int sba_dma_supported( struct device *dev, u64 mask) return(0); } - /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit - * first, then fall back to 32-bit if that fails. - * We are just "encouraging" 32-bit DMA masks here since we can - * never allow IOMMU bypass unless we add special support for ZX1.
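Two of the fixes just above share one shape: an early return on a "nothing to do" or failure path leaked something the function had already acquired, a device_node reference in of_reserved_mem and the unflattened blob in the OF unittest. A hypothetical reduction of the reference-counted variant (the function and property names here are made up for illustration):

#include <linux/of.h>

static int toy_init_region(struct device_node *np, int idx)
{
	struct device_node *target;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		/* This early 'success' return used to skip the put. */
		of_node_put(target);
		return 0;
	}

	/* ... look up and attach the reserved region ... */

	of_node_put(target);
	return 0;
}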
- */ - if (mask > ~0U) - return 0; - ioc = GET_IOC(dev); if (!ioc) return 0; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e7982af9a5d8..a97e2571a527 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -959,19 +959,6 @@ void pci_refresh_power_state(struct pci_dev *dev) } /** - * pci_power_up - Put the given device into D0 forcibly - * @dev: PCI device to power up - */ -void pci_power_up(struct pci_dev *dev) -{ - if (platform_pci_power_manageable(dev)) - platform_pci_set_power_state(dev, PCI_D0); - - pci_raw_set_power_state(dev, PCI_D0); - pci_update_current_state(dev, PCI_D0); -} - -/** * pci_platform_power_transition - Use platform to change device power state * @dev: PCI device to handle. * @state: State to put the device into. @@ -1154,6 +1141,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) EXPORT_SYMBOL(pci_set_power_state); /** + * pci_power_up - Put the given device into D0 forcibly + * @dev: PCI device to power up + */ +void pci_power_up(struct pci_dev *dev) +{ + __pci_start_power_transition(dev, PCI_D0); + pci_raw_set_power_state(dev, PCI_D0); + pci_update_current_state(dev, PCI_D0); +} + +/** * pci_choose_state - Choose the power state of a PCI device * @dev: PCI device to be suspended * @state: target sleep state for the whole system. This is the value diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index 648ddb7f038a..c6800d220920 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -87,7 +87,7 @@ FUNC_GROUP_DECL(MACLINK3, L23); #define K25 7 SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7)); -SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7)); +SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7)); PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14); FUNC_GROUP_DECL(MACLINK4, K25); @@ -1262,13 +1262,13 @@ GROUP_DECL(SPI1, AB11, AC11, AA11); #define AD11 206 SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14)); SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13, - SIG_DESC_SET(SCU438, 14)); + SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14)); PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13); #define AF10 207 SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15)); SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13, - SIG_DESC_SET(SCU438, 15)); + SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15)); PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13); GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10); @@ -1440,91 +1440,85 @@ FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1); FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1); #define AB4 232 -SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24)); -PIN_DECL_1(AB4, GPIO18D0, SD3CLK); +SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24)); +PIN_DECL_1(AB4, GPIO18D0, EMMCCLK); #define AA4 233 -SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25)); -PIN_DECL_1(AA4, GPIO18D1, SD3CMD); +SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25)); +PIN_DECL_1(AA4, GPIO18D1, EMMCCMD); #define AC4 234 -SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26)); -PIN_DECL_1(AC4, GPIO18D2, SD3DAT0); +SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26)); +PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0); #define AA5 235 -SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27)); 
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1); +SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27)); +PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1); #define Y5 236 -SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28)); -PIN_DECL_1(Y5, GPIO18D4, SD3DAT2); +SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28)); +PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2); #define AB5 237 -SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29)); -PIN_DECL_1(AB5, GPIO18D5, SD3DAT3); +SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29)); +PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3); #define AB6 238 -SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30)); -PIN_DECL_1(AB6, GPIO18D6, SD3CD); +SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30)); +PIN_DECL_1(AB6, GPIO18D6, EMMCCD); #define AC5 239 -SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31)); -PIN_DECL_1(AC5, GPIO18D7, SD3WP); +SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31)); +PIN_DECL_1(AC5, GPIO18D7, EMMCWP); -FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5); +GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5); +GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5); #define Y1 240 SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3)); SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5)); -SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0)); -PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4); -FUNC_GROUP_DECL(SD3DAT4, Y1); +SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0)); +PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4); #define Y2 241 SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3)); SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5)); -SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1)); -PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5); -FUNC_GROUP_DECL(SD3DAT5, Y2); +SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1)); +PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5); #define Y3 242 SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3)); SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5)); -SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2)); -PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6); -FUNC_GROUP_DECL(SD3DAT6, Y3); +SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2)); +PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6); #define Y4 243 SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3)); SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5)); -SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3)); -PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7); -FUNC_GROUP_DECL(SD3DAT7, Y4); +SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3)); +PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7); GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4); GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12); +GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4); FUNC_DECL_2(FWSPID, FWSPID, FWQSPID); FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4); - +FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8); /* * FIXME: Confirm bits and priorities are the right way around for the * following 4 pins */ #define AF25 244 -SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 
20), - SIG_DESC_SET(SCU4D8, 20)); -SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20), - SIG_DESC_SET(SCU4D8, 20)); +SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20)); +SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20)); PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL), SIG_EXPR_LIST_PTR(AF25, FSI1CLK)); #define AE26 245 -SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21), - SIG_DESC_SET(SCU4D8, 21)); -SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21), - SIG_DESC_SET(SCU4D8, 21)); +SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21)); +SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21)); PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA), SIG_EXPR_LIST_PTR(AE26, FSI1DATA)); @@ -1533,18 +1527,14 @@ FUNC_DECL_2(I3C3, HVI3C3, I3C3); FUNC_GROUP_DECL(FSI1, AF25, AE26); #define AE25 246 -SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22), - SIG_DESC_SET(SCU4D8, 22)); -SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22), - SIG_DESC_SET(SCU4D8, 22)); +SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22)); +SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22)); PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL), SIG_EXPR_LIST_PTR(AE25, FSI2CLK)); #define AF24 247 -SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23), - SIG_DESC_SET(SCU4D8, 23)); -SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23), - SIG_DESC_SET(SCU4D8, 23)); +SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23)); +SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23)); PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA), SIG_EXPR_LIST_PTR(AF24, FSI2DATA)); @@ -1574,6 +1564,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(A3), ASPEED_PINCTRL_PIN(AA11), ASPEED_PINCTRL_PIN(AA12), + ASPEED_PINCTRL_PIN(AA16), + ASPEED_PINCTRL_PIN(AA17), ASPEED_PINCTRL_PIN(AA23), ASPEED_PINCTRL_PIN(AA24), ASPEED_PINCTRL_PIN(AA25), @@ -1585,6 +1577,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(AB11), ASPEED_PINCTRL_PIN(AB12), ASPEED_PINCTRL_PIN(AB15), + ASPEED_PINCTRL_PIN(AB16), + ASPEED_PINCTRL_PIN(AB17), ASPEED_PINCTRL_PIN(AB18), ASPEED_PINCTRL_PIN(AB19), ASPEED_PINCTRL_PIN(AB22), @@ -1602,6 +1596,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(AC11), ASPEED_PINCTRL_PIN(AC12), ASPEED_PINCTRL_PIN(AC15), + ASPEED_PINCTRL_PIN(AC16), ASPEED_PINCTRL_PIN(AC17), ASPEED_PINCTRL_PIN(AC18), ASPEED_PINCTRL_PIN(AC19), @@ -1619,6 +1614,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(AD12), ASPEED_PINCTRL_PIN(AD14), ASPEED_PINCTRL_PIN(AD15), + ASPEED_PINCTRL_PIN(AD16), ASPEED_PINCTRL_PIN(AD19), ASPEED_PINCTRL_PIN(AD20), ASPEED_PINCTRL_PIN(AD22), @@ -1634,8 +1630,11 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(AE12), ASPEED_PINCTRL_PIN(AE14), ASPEED_PINCTRL_PIN(AE15), + ASPEED_PINCTRL_PIN(AE16), ASPEED_PINCTRL_PIN(AE18), ASPEED_PINCTRL_PIN(AE19), + ASPEED_PINCTRL_PIN(AE25), + ASPEED_PINCTRL_PIN(AE26), ASPEED_PINCTRL_PIN(AE7), ASPEED_PINCTRL_PIN(AE8), ASPEED_PINCTRL_PIN(AF10), @@ -1643,6 +1642,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(AF12), ASPEED_PINCTRL_PIN(AF14), 
ASPEED_PINCTRL_PIN(AF15), + ASPEED_PINCTRL_PIN(AF24), + ASPEED_PINCTRL_PIN(AF25), ASPEED_PINCTRL_PIN(AF7), ASPEED_PINCTRL_PIN(AF8), ASPEED_PINCTRL_PIN(AF9), @@ -1792,17 +1793,6 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { ASPEED_PINCTRL_PIN(Y3), ASPEED_PINCTRL_PIN(Y4), ASPEED_PINCTRL_PIN(Y5), - ASPEED_PINCTRL_PIN(AB16), - ASPEED_PINCTRL_PIN(AA17), - ASPEED_PINCTRL_PIN(AB17), - ASPEED_PINCTRL_PIN(AE16), - ASPEED_PINCTRL_PIN(AC16), - ASPEED_PINCTRL_PIN(AA16), - ASPEED_PINCTRL_PIN(AD16), - ASPEED_PINCTRL_PIN(AF25), - ASPEED_PINCTRL_PIN(AE26), - ASPEED_PINCTRL_PIN(AE25), - ASPEED_PINCTRL_PIN(AF24), }; static const struct aspeed_pin_group aspeed_g6_groups[] = { @@ -1976,11 +1966,9 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = { ASPEED_PINCTRL_GROUP(SALT9G1), ASPEED_PINCTRL_GROUP(SD1), ASPEED_PINCTRL_GROUP(SD2), - ASPEED_PINCTRL_GROUP(SD3), - ASPEED_PINCTRL_GROUP(SD3DAT4), - ASPEED_PINCTRL_GROUP(SD3DAT5), - ASPEED_PINCTRL_GROUP(SD3DAT6), - ASPEED_PINCTRL_GROUP(SD3DAT7), + ASPEED_PINCTRL_GROUP(EMMCG1), + ASPEED_PINCTRL_GROUP(EMMCG4), + ASPEED_PINCTRL_GROUP(EMMCG8), ASPEED_PINCTRL_GROUP(SGPM1), ASPEED_PINCTRL_GROUP(SGPS1), ASPEED_PINCTRL_GROUP(SIOONCTRL), @@ -2059,6 +2047,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = { ASPEED_PINCTRL_FUNC(ADC8), ASPEED_PINCTRL_FUNC(ADC9), ASPEED_PINCTRL_FUNC(BMCINT), + ASPEED_PINCTRL_FUNC(EMMC), ASPEED_PINCTRL_FUNC(ESPI), ASPEED_PINCTRL_FUNC(ESPIALT), ASPEED_PINCTRL_FUNC(FSI1), @@ -2191,11 +2180,6 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = { ASPEED_PINCTRL_FUNC(SALT9), ASPEED_PINCTRL_FUNC(SD1), ASPEED_PINCTRL_FUNC(SD2), - ASPEED_PINCTRL_FUNC(SD3), - ASPEED_PINCTRL_FUNC(SD3DAT4), - ASPEED_PINCTRL_FUNC(SD3DAT5), - ASPEED_PINCTRL_FUNC(SD3DAT6), - ASPEED_PINCTRL_FUNC(SD3DAT7), ASPEED_PINCTRL_FUNC(SGPM1), ASPEED_PINCTRL_FUNC(SGPS1), ASPEED_PINCTRL_FUNC(SIOONCTRL), diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index a2c0d52e4f7b..140c5ce9fbc1 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -508,7 +508,7 @@ struct aspeed_pin_desc { * @idx: The bit index in the register */ #define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1) -#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0) +#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 } #define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group #define SIG_DESC_LIST_DECL(sig, group, ...) \ @@ -738,6 +738,7 @@ struct aspeed_pin_desc { static const char *FUNC_SYM(func)[] = { __VA_ARGS__ } #define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two) +#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three) #define FUNC_GROUP_DECL(func, ...) \ GROUP_DECL(func, __VA_ARGS__); \ diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index 6f7d3a2f2e97..42f7ab383ad9 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014-2017 Broadcom - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. 
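Back up in the pinmux header hunk, FUNC_DECL_3 is a mechanical extension of the existing FUNC_DECL_2, added so the new EMMC function can carry its three bus-width groups (1-, 4- and 8-bit). A reduced expansion; FUNC_SYM here is a hypothetical stand-in for the header's real name-mangling helper:

#define FUNC_SYM(func)	funcs_ ## func
#define FUNC_DECL_(func, ...) \
	static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)

FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/* ...which expands to:
 * static const char *funcs_EMMC[] = { "EMMCG1", "EMMCG4", "EMMCG8" };
 */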
- * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ /* @@ -853,7 +845,7 @@ static int iproc_gpio_probe(struct platform_device *pdev) /* optional GPIO interrupt support */ irq = platform_get_irq(pdev, 0); - if (irq) { + if (irq > 0) { struct irq_chip *irqc; struct gpio_irq_chip *girq; diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c index 2bf6af7df7d9..9fabc451550e 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c +++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c @@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev, const struct ns2_pin_function *func; const struct ns2_pin_group *grp; - if (grp_select > pinctrl->num_groups || - func_select > pinctrl->num_functions) + if (grp_select >= pinctrl->num_groups || + func_select >= pinctrl->num_functions) return -EINVAL; func = &pinctrl->functions[func_select]; diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c index 44f8ccdbeeff..9dfdc275ee33 100644 --- a/drivers/pinctrl/berlin/pinctrl-as370.c +++ b/drivers/pinctrl/berlin/pinctrl-as370.c @@ -43,7 +43,7 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */ BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */ - BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */ + BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */ BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12, diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index aae51c507f59..2c419fa5d1c1 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -147,6 +147,7 @@ struct chv_pin_context { * @pctldesc: Pin controller description * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller + * @irqchip: IRQ chip in this pin controller * @regs: MMIO registers * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO * offset (in GPIO number space) @@ -162,6 +163,7 @@ struct chv_pinctrl { struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct gpio_chip chip; + struct irq_chip irqchip; void __iomem *regs; unsigned intr_lines[16]; const struct chv_community *community; @@ -1466,16 +1468,6 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip chv_gpio_irqchip = { - .name = "chv-gpio", - .irq_startup = chv_gpio_irq_startup, - .irq_ack = chv_gpio_irq_ack, - .irq_mask = chv_gpio_irq_mask, - .irq_unmask = chv_gpio_irq_unmask, - .irq_set_type = chv_gpio_irq_type, - .flags = IRQCHIP_SKIP_SET_WAKE, -}; - static void chv_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -1513,7 +1505,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1521,7 +1512,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), - 
DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1529,7 +1519,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1537,7 +1526,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, {} @@ -1563,7 +1551,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip, intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; if (intsel >= community->nirqs) - clear_bit(i, valid_mask); + clear_bit(desc->number, valid_mask); } } @@ -1629,7 +1617,15 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) } } - ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, + pctrl->irqchip.name = "chv-gpio"; + pctrl->irqchip.irq_startup = chv_gpio_irq_startup; + pctrl->irqchip.irq_ack = chv_gpio_irq_ack; + pctrl->irqchip.irq_mask = chv_gpio_irq_mask; + pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = chv_gpio_irq_type; + pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE; + + ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "failed to add IRQ chip\n"); @@ -1646,7 +1642,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) } } - gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq, + gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq, chv_gpio_irq_handler); return 0; } diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 1f13bcd0e4e1..83981ad66a71 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -52,6 +52,7 @@ #define PADCFG0_GPIROUTNMI BIT(17) #define PADCFG0_PMODE_SHIFT 10 #define PADCFG0_PMODE_MASK GENMASK(13, 10) +#define PADCFG0_PMODE_GPIO 0 #define PADCFG0_GPIORXDIS BIT(9) #define PADCFG0_GPIOTXDIS BIT(8) #define PADCFG0_GPIORXSTATE BIT(1) @@ -96,6 +97,7 @@ struct intel_pinctrl_context { * @pctldesc: Pin controller description * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller + * @irqchip: IRQ chip in this pin controller * @soc: SoC/PCH specific pin configuration data * @communities: All communities in this pin controller * @ncommunities: Number of communities in this pin controller @@ -108,6 +110,7 @@ struct intel_pinctrl { struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct gpio_chip chip; + struct irq_chip irqchip; const struct intel_pinctrl_soc_data *soc; struct intel_community *communities; size_t ncommunities; @@ -330,7 +333,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1)); mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; - if (!mode) + if (mode == PADCFG0_PMODE_GPIO) seq_puts(s, "GPIO "); else seq_printf(s, "mode %d ", mode); @@ -456,6 +459,11 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) writel(value, padcfg0); } +static int intel_gpio_get_gpio_mode(void __iomem *padcfg0) +{ + return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; +} + static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) { u32 value; @@ -489,7 +497,20 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, } padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); + + /* + * If pin is already configured in GPIO mode, we assume that + 
* firmware provides correct settings. In such case we avoid + * potential glitches on the pin. Otherwise, for the pin in + * alternative mode, consumer has to supply respective flags. + */ + if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) { + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + return 0; + } + intel_gpio_set_gpio_mode(padcfg0); + /* Disable TX buffer and enable RX (this will be input) */ __intel_gpio_set_direction(padcfg0, true); @@ -1139,16 +1160,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) return ret; } -static struct irq_chip intel_gpio_irqchip = { - .name = "intel-gpio", - .irq_ack = intel_gpio_irq_ack, - .irq_mask = intel_gpio_irq_mask, - .irq_unmask = intel_gpio_irq_unmask, - .irq_set_type = intel_gpio_irq_type, - .irq_set_wake = intel_gpio_irq_wake, - .flags = IRQCHIP_MASK_ON_SUSPEND, -}; - static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl, const struct intel_community *community) { @@ -1198,12 +1209,22 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) pctrl->chip = intel_gpio_chip; + /* Setup GPIO chip */ pctrl->chip.ngpio = intel_gpio_ngpio(pctrl); pctrl->chip.label = dev_name(pctrl->dev); pctrl->chip.parent = pctrl->dev; pctrl->chip.base = -1; pctrl->irq = irq; + /* Setup IRQ chip */ + pctrl->irqchip.name = dev_name(pctrl->dev); + pctrl->irqchip.irq_ack = intel_gpio_irq_ack; + pctrl->irqchip.irq_mask = intel_gpio_irq_mask; + pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = intel_gpio_irq_type; + pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake; + pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; + ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); if (ret) { dev_err(pctrl->dev, "failed to register gpiochip\n"); @@ -1233,15 +1254,14 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) return ret; } - ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0, + ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "failed to add irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq, - NULL); + gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL); return 0; } diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 6462d3ca7ceb..f2f5fcd9a237 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = { PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19), BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19), 18, 2, "gpio", "uart"), - PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"), - PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"), - PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"), - PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"), + PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"), + PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"), + PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"), + PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"), }; @@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = { }; static inline void armada_37xx_update_reg(unsigned int *reg, - unsigned int offset) + unsigned int *offset) { /* We never have more than 2 registers */ - if (offset >= GPIO_PER_REG) { - offset -= GPIO_PER_REG; + if (*offset >= GPIO_PER_REG) { + *offset -= 
GPIO_PER_REG; *reg += sizeof(u32); } } @@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg, { int offset = irqd_to_hwirq(d); - armada_37xx_update_reg(reg, offset); + armada_37xx_update_reg(reg, &offset); } static int armada_37xx_gpio_direction_input(struct gpio_chip *chip, @@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip, unsigned int reg = OUTPUT_EN; unsigned int mask; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); return regmap_update_bits(info->regmap, reg, mask, 0); @@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip, unsigned int reg = OUTPUT_EN; unsigned int val, mask; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); regmap_read(info->regmap, reg, &val); @@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, unsigned int reg = OUTPUT_EN; unsigned int mask, val, ret; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); ret = regmap_update_bits(info->regmap, reg, mask, mask); @@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) unsigned int reg = INPUT_VAL; unsigned int val, mask; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); regmap_read(info->regmap, reg, &val); @@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset, unsigned int reg = OUTPUT_VAL; unsigned int mask, val; - armada_37xx_update_reg(®, offset); + armada_37xx_update_reg(®, &offset); mask = BIT(offset); val = value ? mask : 0; diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c index 974973777395..ccdf0bb21414 100644 --- a/drivers/pinctrl/pinctrl-stmfx.c +++ b/drivers/pinctrl/pinctrl-stmfx.c @@ -585,19 +585,6 @@ static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl) return stmfx_function_enable(pctl->stmfx, func); } -static int stmfx_pinctrl_gpio_init_valid_mask(struct gpio_chip *gc, - unsigned long *valid_mask, - unsigned int ngpios) -{ - struct stmfx_pinctrl *pctl = gpiochip_get_data(gc); - u32 n; - - for_each_clear_bit(n, &pctl->gpio_valid_mask, ngpios) - clear_bit(n, valid_mask); - - return 0; -} - static int stmfx_pinctrl_probe(struct platform_device *pdev) { struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent); @@ -660,7 +647,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) pctl->gpio_chip.ngpio = pctl->pctl_desc.npins; pctl->gpio_chip.can_sleep = true; pctl->gpio_chip.of_node = np; - pctl->gpio_chip.init_valid_mask = stmfx_pinctrl_gpio_init_valid_mask; ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl); if (ret) { @@ -705,7 +691,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) static int stmfx_pinctrl_remove(struct platform_device *pdev) { - struct stmfx *stmfx = dev_get_platdata(&pdev->dev); + struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent); return stmfx_function_disable(stmfx, STMFX_FUNC_GPIO | diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 86cc2cc68fb5..af063f690846 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -420,12 +420,6 @@ failed_sensitivity: static int cmpc_accel_remove_v4(struct acpi_device *acpi) { - struct input_dev *inputdev; - struct cmpc_accel *accel; - - inputdev = 
dev_get_drvdata(&acpi->dev); - accel = dev_get_drvdata(&inputdev->dev); - device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4); device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4); return cmpc_remove_acpi_notify_device(acpi); @@ -656,12 +650,6 @@ failed_file: static int cmpc_accel_remove(struct acpi_device *acpi) { - struct input_dev *inputdev; - struct cmpc_accel *accel; - - inputdev = dev_get_drvdata(&acpi->dev); - accel = dev_get_drvdata(&inputdev->dev); - device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr); return cmpc_remove_acpi_notify_device(acpi); } diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c index ea68f6ed66ae..ffb8d5d1eb5f 100644 --- a/drivers/platform/x86/i2c-multi-instantiate.c +++ b/drivers/platform/x86/i2c-multi-instantiate.c @@ -108,6 +108,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev) if (ret < 0) { dev_dbg(dev, "Error requesting irq at index %d: %d\n", inst_data[i].irq_idx, ret); + goto error; } board_info.irq = ret; break; diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index ab7ae1950867..fa97834fdb78 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -293,9 +293,8 @@ static int intel_punit_ipc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, punit_ipcdev); - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq < 0) { - punit_ipcdev->irq = 0; dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n"); } else { ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc, diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 960961fb0d7c..0517272a268e 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -97,8 +97,8 @@ config PTP_1588_CLOCK_PCH help This driver adds support for using the PCH EG20T as a PTP clock. The hardware supports time stamping of PTP packets - when using the end-to-end delay (E2E) mechansim. The peer - delay mechansim (P2P) is not supported. + when using the end-to-end delay (E2E) mechanism. The peer + delay mechanism (P2P) is not supported. This clock is only useful if your PTP programs are getting hardware time stamps on the PTP Ethernet packets using the diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 6ad51aa60c03..f877e77d9184 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -472,14 +472,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) if (err) return err; - /* - * .apply might have to round some values in *state, if possible - * read the actually implemented value back. - */ - if (chip->ops->get_state) - chip->ops->get_state(chip, pwm, &pwm->state); - else - pwm->state = *state; + pwm->state = *state; } else { /* * FIXME: restore the initial state in case of error. 
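The P-Unit IPC hunk above switches to platform_get_irq_optional(): since platform_get_irq() started logging an error on failure, a driver whose interrupt genuinely is optional (this one falls back to polling) should use the quiet variant and branch on the negative return rather than stashing a bogus zero. A sketch of the probe-time pattern under those assumptions; the names are illustrative:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t toy_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int toy_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		/* No interrupt wired up: fall back to polling quietly. */
		dev_warn(&pdev->dev, "no IRQ, using polling mode\n");
		return 0;
	}

	return devm_request_irq(&pdev->dev, irq, toy_isr, 0,
				dev_name(&pdev->dev), NULL);
}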
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index 56c38cfae92c..1f829edd8ee7 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm, static const struct pwm_ops iproc_pwm_ops = { .apply = iproc_pwmc_apply, .get_state = iproc_pwmc_get_state, + .owner = THIS_MODULE, }; static int iproc_pwmc_probe(struct platform_device *pdev) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index afe94470b67f..a46be221dbdc 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5053,6 +5053,19 @@ regulator_register(const struct regulator_desc *regulator_desc, init_data = regulator_of_get_init_data(dev, regulator_desc, config, &rdev->dev.of_node); + + /* + * Sometimes not all resources are probed already so we need to take + * that into account. This happens most the time if the ena_gpiod comes + * from a gpio extender or something else. + */ + if (PTR_ERR(init_data) == -EPROBE_DEFER) { + kfree(config); + kfree(rdev); + ret = -EPROBE_DEFER; + goto rinse; + } + /* * We need to keep track of any GPIO descriptor coming from the * device tree until we have handled it over to the core. If the diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c index 56f3f72d7707..710e67081d53 100644 --- a/drivers/regulator/da9062-regulator.c +++ b/drivers/regulator/da9062-regulator.c @@ -136,7 +136,6 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode) static unsigned da9062_buck_get_mode(struct regulator_dev *rdev) { struct da9062_regulator *regl = rdev_get_drvdata(rdev); - struct regmap_field *field; unsigned int val, mode = 0; int ret; @@ -158,18 +157,7 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_NORMAL; } - /* Detect current regulator state */ - ret = regmap_field_read(regl->suspend, &val); - if (ret < 0) - return 0; - - /* Read regulator mode from proper register, depending on state */ - if (val) - field = regl->suspend_sleep; - else - field = regl->sleep; - - ret = regmap_field_read(field, &val); + ret = regmap_field_read(regl->sleep, &val); if (ret < 0) return 0; @@ -208,21 +196,9 @@ static int da9062_ldo_set_mode(struct regulator_dev *rdev, unsigned mode) static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev) { struct da9062_regulator *regl = rdev_get_drvdata(rdev); - struct regmap_field *field; int ret, val; - /* Detect current regulator state */ - ret = regmap_field_read(regl->suspend, &val); - if (ret < 0) - return 0; - - /* Read regulator mode from proper register, depending on state */ - if (val) - field = regl->suspend_sleep; - else - field = regl->sleep; - - ret = regmap_field_read(field, &val); + ret = regmap_field_read(regl->sleep, &val); if (ret < 0) return 0; @@ -408,10 +384,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK1_CONT, + __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1), }, { .desc.id = DA9061_ID_BUCK2, @@ -444,10 +420,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { 
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK3_CONT, + __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1), }, { .desc.id = DA9061_ID_BUCK3, @@ -480,10 +456,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK4_CONT, + __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1), }, { .desc.id = DA9061_ID_LDO1, @@ -509,10 +485,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO1_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO1_CONT, + __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1, sizeof(unsigned int) * 8 - @@ -542,10 +518,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO2_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO2_CONT, + __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1, sizeof(unsigned int) * 8 - @@ -575,10 +551,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO3_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO3_CONT, + __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1, sizeof(unsigned int) * 8 - @@ -608,10 +584,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO4_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO4_CONT, + __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1, 
sizeof(unsigned int) * 8 - @@ -652,10 +628,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK1_CONT, + __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1), }, { .desc.id = DA9062_ID_BUCK2, @@ -688,10 +664,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { __builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK2_CONT, + __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1), }, { .desc.id = DA9062_ID_BUCK3, @@ -724,10 +700,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK3_CONT, + __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1), }, { .desc.id = DA9062_ID_BUCK4, @@ -760,10 +736,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1, sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1), - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_BUCK4_CONT, + __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1), }, { .desc.id = DA9062_ID_LDO1, @@ -789,10 +765,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO1_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO1_CONT, + __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1, sizeof(unsigned int) * 8 - @@ -822,10 +798,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO2_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO2_CONT, + __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1, sizeof(unsigned 
int) * 8 - @@ -855,10 +831,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO3_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO3_CONT, + __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1, sizeof(unsigned int) * 8 - @@ -888,10 +864,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = { sizeof(unsigned int) * 8 - __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1), .suspend_vsel_reg = DA9062AA_VLDO4_B, - .suspend = REG_FIELD(DA9062AA_DVC_1, - __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1, + .suspend = REG_FIELD(DA9062AA_LDO4_CONT, + __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1, sizeof(unsigned int) * 8 - - __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1), + __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1), .oc_event = REG_FIELD(DA9062AA_STATUS_D, __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1, sizeof(unsigned int) * 8 - diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index d90a6fd8cbc7..f81533070058 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -144,8 +144,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct fixed_voltage_config *config; struct fixed_voltage_data *drvdata; - const struct fixed_dev_type *drvtype = - of_match_device(dev->driver->of_match_table, dev)->data; + const struct fixed_dev_type *drvtype = of_device_get_match_data(dev); struct regulator_config cfg = { }; enum gpiod_flags gflags; int ret; @@ -177,7 +176,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) drvdata->desc.type = REGULATOR_VOLTAGE; drvdata->desc.owner = THIS_MODULE; - if (drvtype->has_enable_clock) { + if (drvtype && drvtype->has_enable_clock) { drvdata->desc.ops = &fixed_voltage_clkenabled_ops; drvdata->enable_clock = devm_clk_get(dev, NULL); diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c index ff97cc50f2eb..9b05e03ba830 100644 --- a/drivers/regulator/lochnagar-regulator.c +++ b/drivers/regulator/lochnagar-regulator.c @@ -210,6 +210,7 @@ static const struct regulator_desc lochnagar_regulators[] = { .enable_time = 3000, .ramp_delay = 1000, + .off_on_delay = 15000, .owner = THIS_MODULE, }, diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index afefb29ce1b0..87637eb6bcbc 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -231,12 +231,12 @@ static int of_get_regulation_constraints(struct device *dev, "regulator-off-in-suspend")) suspend_state->enabled = DISABLE_IN_SUSPEND; - if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", - &pval)) + if (!of_property_read_u32(suspend_np, + "regulator-suspend-min-microvolt", &pval)) suspend_state->min_uV = pval; - if (!of_property_read_u32(np, "regulator-suspend-max-microvolt", - &pval)) + if (!of_property_read_u32(suspend_np, + "regulator-suspend-max-microvolt", &pval)) suspend_state->max_uV = pval; if (!of_property_read_u32(suspend_np, @@ -445,11 +445,20 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev, goto error; } - if (desc->of_parse_cb && 
desc->of_parse_cb(child, desc, config)) { - dev_err(dev, - "driver callback failed to parse DT for regulator %pOFn\n", - child); - goto error; + if (desc->of_parse_cb) { + int ret; + + ret = desc->of_parse_cb(child, desc, config); + if (ret) { + if (ret == -EPROBE_DEFER) { + of_node_put(child); + return ERR_PTR(-EPROBE_DEFER); + } + dev_err(dev, + "driver callback failed to parse DT for regulator %pOFn\n", + child); + goto error; + } } *node = child; diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index df5df1c495ad..689537927f6f 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -788,7 +788,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client, /* SW2~SW4 high bit check and modify the voltage value table */ if (i >= sw_check_start && i <= sw_check_end) { - regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); + ret = regmap_read(pfuze_chip->regmap, + desc->vsel_reg, &val); + if (ret) { + dev_err(&client->dev, "Fails to read from the register.\n"); + return ret; + } + if (val & sw_hi) { if (pfuze_chip->chip_id == PFUZE3000 || pfuze_chip->chip_id == PFUZE3001) { diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index db6c085da65e..0246b6f99fb5 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -735,8 +735,8 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { static const struct rpmh_vreg_hw_data pmic5_bob = { .regulator_type = VRM, .ops = &rpmh_regulator_vrm_bypass_ops, - .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000), - .n_voltages = 136, + .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000), + .n_voltages = 32, .pmic_mode_map = pmic_mode_map_pmic5_bob, .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode, }; diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index cced1ffb896c..89b9314d64c9 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c @@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb) while (timeout++ <= abb->settling_time) { status = ti_abb_check_txdone(abb); if (status) - break; + return 0; udelay(1); } - if (timeout > abb->settling_time) { - dev_warn_ratelimited(dev, - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", - __func__, timeout, readl(abb->int_base)); - return -ETIMEDOUT; - } - - return 0; + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", + __func__, timeout, readl(abb->int_base)); + return -ETIMEDOUT; } /** @@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb) status = ti_abb_check_txdone(abb); if (!status) - break; + return 0; udelay(1); } - if (timeout > abb->settling_time) { - dev_warn_ratelimited(dev, - "%s:TRANXDONE timeout(%duS) int=0x%08x\n", - __func__, timeout, readl(abb->int_base)); - return -ETIMEDOUT; - } - - return 0; + dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n", + __func__, timeout, readl(abb->int_base)); + return -ETIMEDOUT; } /** diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 213ff40dda11..3c9a64c1b7a8 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -76,7 +76,6 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev) * of_reset_simple_xlate - translate reset_spec to the reset line number * @rcdev: a pointer to the reset controller device * @reset_spec: reset line 
specifier as found in the device tree - * @flags: a flags pointer to fill in (optional) * * This simple translation function should be used for reset controllers * with 1:1 mapping, where reset lines can be indexed by number without gaps. @@ -748,6 +747,7 @@ static void reset_control_array_put(struct reset_control_array *resets) for (i = 0; i < resets->num_rstcs; i++) __reset_control_put_internal(resets->rstc[i]); mutex_unlock(&reset_list_mutex); + kfree(resets); } /** @@ -825,9 +825,10 @@ int __device_reset(struct device *dev, bool optional) } EXPORT_SYMBOL_GPL(__device_reset); -/** +/* * APIs to manage an array of reset controls. */ + /** * of_reset_control_get_count - Count number of resets available with a device * diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index ba7d2480613b..dcdaba689b20 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -113,6 +113,7 @@ struct subchannel { enum sch_todo todo; struct work_struct todo_work; struct schib_config config; + u64 dma_mask; char *driver_override; /* Driver name to force a match */ } __attribute__ ((aligned(8))); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 1fbfb0a93f5f..831850435c23 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, * belong to a subchannel need to fit 31 bit width (e.g. ccw). */ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31); - sch->dev.dma_mask = &sch->dev.coherent_dma_mask; + /* + * But we don't have such restrictions imposed on the stuff that + * is handled by the streaming API. + */ + sch->dma_mask = DMA_BIT_MASK(64); + sch->dev.dma_mask = &sch->dma_mask; return sch; err: diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 131430bd48d9..0c6245fc7706 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) if (!cdev->private) goto err_priv; cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; - cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask; + cdev->dev.dma_mask = sch->dev.dma_mask; dma_pool = cio_gp_dma_create(&cdev->dev, 1); if (!dma_pool) goto err_dma_pool; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 45bdb47f84c1..9157e728a362 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -522,8 +522,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp) if (filp->f_inode->i_cdev == &zcrypt_cdev) { struct zcdn_device *zcdndev; - if (mutex_lock_interruptible(&ap_perms_mutex)) - return -ERESTARTSYS; + mutex_lock(&ap_perms_mutex); zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); mutex_unlock(&ap_perms_mutex); if (zcdndev) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b8799cd3e7aa..bd8143e51747 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -2021,10 +2021,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc, static void qeth_l2_vnicc_init(struct qeth_card *card) { u32 *timeout = &card->options.vnicc.learning_timeout; + bool enable, error = false; unsigned int chars_len, i; unsigned long chars_tmp; u32 sup_cmds, vnicc; - bool enable, error; QETH_CARD_TEXT(card, 2, "vniccini"); /* reset rx_bcast */ @@ -2045,17 +2045,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card) chars_len = sizeof(card->options.vnicc.sup_chars) * 
BITS_PER_BYTE; for_each_set_bit(i, &chars_tmp, chars_len) { vnicc = BIT(i); - qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds); - if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) || - !(sup_cmds & IPA_VNICC_GET_TIMEOUT)) + if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) { + sup_cmds = 0; + error = true; + } + if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) && + (sup_cmds & IPA_VNICC_GET_TIMEOUT)) + card->options.vnicc.getset_timeout_sup |= vnicc; + else card->options.vnicc.getset_timeout_sup &= ~vnicc; - if (!(sup_cmds & IPA_VNICC_ENABLE) || - !(sup_cmds & IPA_VNICC_DISABLE)) + if ((sup_cmds & IPA_VNICC_ENABLE) && + (sup_cmds & IPA_VNICC_DISABLE)) + card->options.vnicc.set_char_sup |= vnicc; + else card->options.vnicc.set_char_sup &= ~vnicc; } /* enforce assumed default values and recover settings, if changed */ - error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, - timeout); + error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, + timeout); chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 296bbc3c4606..cf63916814cc 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -27,6 +27,11 @@ struct kmem_cache *zfcp_fsf_qtcb_cache; +static bool ber_stop = true; +module_param(ber_stop, bool, 0600); +MODULE_PARM_DESC(ber_stop, + "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); + static void zfcp_fsf_request_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); @@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) case FSF_STATUS_READ_SENSE_DATA_AVAIL: break; case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: - dev_warn(&adapter->ccw_device->dev, - "The error threshold for checksum statistics " - "has been exceeded\n"); zfcp_dbf_hba_bit_err("fssrh_3", req); + if (ber_stop) { + dev_warn(&adapter->ccw_device->dev, + "All paths over this FCP device are disused because of excessive bit errors\n"); + zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); + } else { + dev_warn(&adapter->ccw_device->dev, + "The error threshold for checksum statistics has been exceeded\n"); + } break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 1b92f3c19ff3..90cf4691b8c3 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -898,7 +898,7 @@ config SCSI_SNI_53C710 config 53C700_LE_ON_BE bool - depends on SCSI_LASI700 + depends on SCSI_LASI700 || SCSI_SNI_53C710 default y config SCSI_STEX diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 5f8153c37f77..76751d6c7f0d 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file) scsi_changer *ch = file->private_data; scsi_device_put(ch->device); - ch->device = NULL; file->private_data = NULL; kref_put(&ch->ref, ch_destroy); return 0; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 4971104b1817..f32da0ca529e 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) unsigned int tpg_desc_tbl_off; unsigned char orig_transition_tmo; unsigned 
long flags; + bool transitioning_sense = false; if (!pg->expiry) { unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; @@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) goto retry; } /* - * Retry on ALUA state transition or if any - * UNIT ATTENTION occurred. + * If the array returns with 'ALUA state transition' + * sense code here it cannot return RTPG data during + * transition. So set the state to 'transitioning' directly. */ if (sense_hdr.sense_key == NOT_READY && - sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) - err = SCSI_DH_RETRY; - else if (sense_hdr.sense_key == UNIT_ATTENTION) + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { + transitioning_sense = true; + goto skip_rtpg; + } + /* + * Retry on any other UNIT ATTENTION occurred. + */ + if (sense_hdr.sense_key == UNIT_ATTENTION) err = SCSI_DH_RETRY; if (err == SCSI_DH_RETRY && pg->expiry != 0 && time_before(jiffies, pg->expiry)) { @@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) off = 8 + (desc[7] * 4); } + skip_rtpg: spin_lock_irqsave(&pg->lock, flags); + if (transitioning_sense) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ac39ed79ccaa..216e557f703e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5477,6 +5477,8 @@ static int hpsa_ciss_submit(struct ctlr_info *h, return SCSI_MLQUEUE_HOST_BUSY; } + c->device = dev; + enqueue_cmd_and_start_io(h, c); /* the cmd'll come back via intr handler in complete_scsi_command() */ return 0; @@ -5548,6 +5550,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h, hpsa_cmd_init(h, c->cmdindex, c); c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + c->device = dev; rc = hpsa_scsi_ioaccel_raid_map(h, c); if (rc < 0) /* scsi_dma_map failed. */ rc = SCSI_MLQUEUE_HOST_BUSY; @@ -5555,6 +5558,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h, hpsa_cmd_init(h, c->cmdindex, c); c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + c->device = dev; rc = hpsa_scsi_ioaccel_direct_map(h, c); if (rc < 0) /* scsi_dma_map failed. 
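
Annotation: the alua_rtpg() change above stops treating "NOT READY, ALUA state transition" sense data as a plain retry, since the target cannot return RTPG data mid-transition; the state is recorded directly instead. A minimal sketch of that triage, with a local stand-in for the kernel's struct scsi_sense_hdr (this is not the scsi_dh_alua API):

	enum rtpg_action {
		RTPG_PARSE,		/* no interesting sense: parse the data */
		RTPG_RETRY,		/* transient condition: retry the RTPG */
		RTPG_TRANSITIONING,	/* record SCSI_ACCESS_STATE_TRANSITIONING */
	};

	struct sense_sketch {
		unsigned char sense_key;
		unsigned char asc;
		unsigned char ascq;
	};

	static enum rtpg_action classify_rtpg_sense(const struct sense_sketch *s)
	{
		/* NOT READY (02h), ASC/ASCQ 04h/0Ah: ALUA state transition */
		if (s->sense_key == 0x02 && s->asc == 0x04 && s->ascq == 0x0a)
			return RTPG_TRANSITIONING;

		/* any other UNIT ATTENTION (06h) is still worth a retry */
		if (s->sense_key == 0x06)
			return RTPG_RETRY;

		return RTPG_PARSE;
	}
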
*/ rc = SCSI_MLQUEUE_HOST_BUSY; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e91377a4cafe..e8813d26e594 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -9055,7 +9055,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } } -#if defined(BUILD_NVME) /* Clear NVME stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { @@ -9063,7 +9062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); } } -#endif /* Clear SCSI stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index f4b879d25fe9..fc6e4546d738 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -851,9 +851,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!(vport->fc_flag & FC_PT2PT)) { /* Check config parameter use-adisc or FCP-2 */ - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || + if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && - (ndlp->nlp_type & NLP_FCP_TARGET))) { + (ndlp->nlp_type & NLP_FCP_TARGET)))) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index fe1097666de4..6822cd9ff8f1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -528,7 +528,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, list_del_init(&psb->list); psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; -#ifdef BUILD_NVME if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) { qp->abts_nvme_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); @@ -536,7 +535,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, lpfc_sli4_nvme_xri_aborted(phba, axri, psb); return; } -#endif qp->abts_scsi_io_bufs--; spin_unlock(&qp->abts_io_buf_list_lock); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a0c6945b8139..614f78dddafe 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -7866,7 +7866,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (sli4_hba->hdwq) { for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; - if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { + if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { fpeq = eq; break; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 30bafd9d21e9..7259bce85e0e 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -440,9 +440,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, valid = 0; if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) valid = 1; - else if (start == (ha->flt_region_boot * 4) || - start == (ha->flt_region_fw * 4)) - valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) valid = 1; if (!valid) { @@ -489,8 +486,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "Writing flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); - ha->isp_ops->write_optrom(vha, ha->optrom_buffer, + rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); + if (rval) + rval = -EIO; break; default: rval = -EINVAL; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c 
b/drivers/scsi/qla2xxx/qla_bsg.c index 28d587a89ba6..99f0a1a08143 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -253,7 +253,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) srb_t *sp; const char *type; int req_sg_cnt, rsp_sg_cnt; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); uint16_t nextlid = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { @@ -432,7 +432,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); int req_sg_cnt, rsp_sg_cnt; uint16_t loop_id; struct fc_port *fcport; @@ -1950,7 +1950,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); struct qla_mt_iocb_rqst_fx00 *piocb_rqst; srb_t *sp; int req_sg_cnt = 0, rsp_sg_cnt = 0; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 4c26630c1c3e..009fd5a33fcd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2837,8 +2837,6 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) if (sense_len == 0) { rsp->status_srb = NULL; sp->done(sp, cp->result); - } else { - WARN_ON_ONCE(true); } } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 1cc6913f76c4..4a1f21c11758 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -702,6 +702,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[2] = LSW(risc_addr); mcp->mb[3] = 0; mcp->mb[4] = 0; + mcp->mb[11] = 0; ha->flags.using_lr_setting = 0; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { @@ -746,7 +747,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) if (ha->flags.exchoffld_enabled) mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; - mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; + mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3568031c6504..337162ac3a77 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3224,6 +3224,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); + if (unlikely(!ha->wq)) { + ret = -ENOMEM; + goto probe_failed; + } if (ha->isp_ops->initialize_adapter(base_vha)) { ql_log(ql_log_fatal, base_vha, 0x00d6, @@ -3531,6 +3535,10 @@ qla2x00_shutdown(struct pci_dev *pdev) qla2x00_try_to_stop_firmware(vha); } + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + /* Turn adapter off line */ vha->flags.online = 0; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1c470e31ae81..ae2fa170f6ad 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, ses->data_direction = scmd->sc_data_direction; ses->sdb = scmd->sdb; ses->result = scmd->result; + ses->resid_len = scmd->req.resid_len; ses->underflow = scmd->underflow; ses->prot_op = scmd->prot_op; ses->eh_eflags = scmd->eh_eflags; @@ -977,6 
+978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, memset(scmd->cmnd, 0, BLK_MAX_CDB); memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->result = 0; + scmd->req.resid_len = 0; if (sense_bytes) { scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, @@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) scmd->sc_data_direction = ses->data_direction; scmd->sdb = ses->sdb; scmd->result = ses->result; + scmd->req.resid_len = ses->resid_len; scmd->underflow = ses->underflow; scmd->prot_op = ses->prot_op; scmd->eh_eflags = ses->eh_eflags; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dc210b9d4896..5447738906ac 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1834,6 +1834,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = { .init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, .initialize_rq_fn = scsi_initialize_rq, + .cleanup_rq = scsi_cleanup_rq, .busy = scsi_mq_lld_busy, .map_queues = scsi_map_queues, }; @@ -1921,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q) { struct scsi_device *sdev = NULL; - if (q->mq_ops == &scsi_mq_ops) + if (q->mq_ops == &scsi_mq_ops_no_commit || + q->mq_ops == &scsi_mq_ops) sdev = q->queuedata; if (!sdev || !get_device(&sdev->sdev_gendev)) sdev = NULL; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 64c96c7828ee..6d7362e7367e 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kernfs_node *kn; + struct scsi_device *sdev = to_scsi_device(dev); + + /* + * We need to try to get module, avoiding the module been removed + * during delete. + */ + if (scsi_device_get(sdev)) + return -ENODEV; kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); WARN_ON_ONCE(!kn); @@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, * state into SDEV_DEL. 
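
Annotation: the scsi_error.c hunks above extend the error handler's save/clear/restore pattern to the residual byte count: every field the probe command may clobber is stashed in the scsi_eh_save area, zeroed for the probe, and put back afterwards. A stand-alone sketch of the idiom (cmd_sketch and eh_save_sketch are illustrative stand-ins, not the kernel types):

	struct cmd_sketch {
		int result;
		unsigned int resid_len;
	};

	struct eh_save_sketch {
		int result;
		unsigned int resid_len;
	};

	static void eh_prep_sketch(struct cmd_sketch *cmd,
				   struct eh_save_sketch *ses)
	{
		ses->result = cmd->result;
		ses->resid_len = cmd->resid_len;	/* the field the fix adds */

		cmd->result = 0;
		cmd->resid_len = 0;	/* a stale residual must not leak through */
	}

	static void eh_restore_sketch(struct cmd_sketch *cmd,
				      const struct eh_save_sketch *ses)
	{
		cmd->result = ses->result;
		cmd->resid_len = ses->resid_len;
	}
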
*/ device_remove_file(dev, attr); - scsi_remove_device(to_scsi_device(dev)); + scsi_remove_device(sdev); if (kn) sysfs_unbreak_active_protection(kn); + scsi_device_put(sdev); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 50928bc266eb..ebb40160539f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1166,11 +1166,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); sector_t threshold; unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); - bool dif, dix; unsigned int mask = logical_to_sectors(sdp, 1) - 1; bool write = rq_data_dir(rq) == WRITE; unsigned char protect, fua; blk_status_t ret; + unsigned int dif; + bool dix; ret = scsi_init_io(cmd); if (ret != BLK_STS_OK) @@ -1654,7 +1655,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) /* we need to evaluate the error return */ if (scsi_sense_valid(sshdr) && (sshdr->asc == 0x3a || /* medium not present */ - sshdr->asc == 0x20)) /* invalid command */ + sshdr->asc == 0x20 || /* invalid command */ + (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */ /* this is no error here */ return 0; diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index aef4881d8e21..a85d52b5dc32 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -66,10 +66,8 @@ static int snirm710_probe(struct platform_device *dev) base = res->start; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); - if (!hostdata) { - dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); + if (!hostdata) return -ENOMEM; - } hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index a9344eb4e047..dc2f6d2b46ed 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -98,6 +98,8 @@ static int ufs_bsg_request(struct bsg_job *job) bsg_reply->reply_payload_rcv_len = 0; + pm_runtime_get_sync(hba->dev); + msgcode = bsg_request->msgcode; switch (msgcode) { case UPIU_TRANSACTION_QUERY_REQ: @@ -135,6 +137,8 @@ static int ufs_bsg_request(struct bsg_job *job) break; } + pm_runtime_put_sync(hba->dev); + if (!desc_buff) goto out; diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index d9231bd3c691..98b9d9a902ae 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -249,13 +249,13 @@ static struct genpd_power_state imx6_pm_domain_pu_state = { }; static struct imx_pm_domain imx_gpc_domains[] = { - [GPC_PGC_DOMAIN_ARM] { + [GPC_PGC_DOMAIN_ARM] = { .base = { .name = "ARM", .flags = GENPD_FLAG_ALWAYS_ON, }, }, - [GPC_PGC_DOMAIN_PU] { + [GPC_PGC_DOMAIN_PU] = { .base = { .name = "PU", .power_off = imx6_pm_domain_power_off, @@ -266,7 +266,7 @@ static struct imx_pm_domain imx_gpc_domains[] = { .reg_offs = 0x260, .cntr_pdn_bit = 0, }, - [GPC_PGC_DOMAIN_DISPLAY] { + [GPC_PGC_DOMAIN_DISPLAY] = { .base = { .name = "DISPLAY", .power_off = imx6_pm_domain_power_off, @@ -275,7 +275,7 @@ static struct imx_pm_domain imx_gpc_domains[] = { .reg_offs = 0x240, .cntr_pdn_bit = 4, }, - [GPC_PGC_DOMAIN_PCI] { + [GPC_PGC_DOMAIN_PCI] = { .base = { .name = "PCI", .power_off = imx6_pm_domain_power_off, diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c index 50831ebf126a..c68882eb80f7 100644 --- a/drivers/soc/imx/soc-imx-scu.c +++ b/drivers/soc/imx/soc-imx-scu.c @@ -46,7 +46,7 @@ static ssize_t 
soc_uid_show(struct device *dev, hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID; hdr->size = 1; - ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false); + ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true); if (ret) { pr_err("%s: get soc uid failed, ret %d\n", __func__, ret); return ret; diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index f518273cfbe3..c8c80df090d1 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -5,6 +5,7 @@ menuconfig SOUNDWIRE tristate "SoundWire support" + depends on ACPI || OF help SoundWire is a 2-Pin interface with data and clock line ratified by the MIPI Alliance. SoundWire is used for transporting data diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index f1e38a293967..13c54eac0cc3 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -900,7 +900,7 @@ static int intel_register_dai(struct sdw_intel *sdw) /* Create PCM DAIs */ stream = &cdns->pcm; - ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, stream->num_in, + ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in, off, stream->num_ch_in, true); if (ret) return ret; @@ -931,7 +931,7 @@ static int intel_register_dai(struct sdw_intel *sdw) if (ret) return ret; - off += cdns->pdm.num_bd; + off += cdns->pdm.num_out; ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd, off, stream->num_ch_bd, false); if (ret) diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index 48a63ca130d2..6473fa602f82 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -128,7 +128,8 @@ int sdw_of_find_slaves(struct sdw_bus *bus) struct device_node *node; for_each_child_of_node(bus->dev->of_node, node) { - int link_id, sdw_version, ret, len; + int link_id, ret, len; + unsigned int sdw_version; const char *compat = NULL; struct sdw_slave_id id; const __be32 *addr; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 6f1fa4c849a1..927d29eb92c6 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -125,4 +125,6 @@ source "drivers/staging/exfat/Kconfig" source "drivers/staging/qlge/Kconfig" +source "drivers/staging/vboxsf/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index a90f9b308c8d..f01f04199073 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_UWB) += uwb/ obj-$(CONFIG_USB_WUSB) += wusbcore/ obj-$(CONFIG_EXFAT_FS) += exfat/ obj-$(CONFIG_QLGE) += qlge/ +obj-$(CONFIG_VBOXSF_FS) += vboxsf/ diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig index 290dbfc7ace1..ce32dfe33bec 100644 --- a/drivers/staging/exfat/Kconfig +++ b/drivers/staging/exfat/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 config EXFAT_FS tristate "exFAT fs support" depends on BLOCK @@ -6,7 +7,7 @@ config EXFAT_FS This adds support for the exFAT file system. 
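
Annotation: the soundwire slave.c hunk above turns sdw_version into an unsigned int. A sketch of why that likely matters: the version digit is extracted with a %01x conversion, and sscanf's %x writes through an unsigned int pointer, so a plain int is a format mismatch. The node-name layout below is illustrative, not the exact binding:

	#include <stdio.h>

	static int parse_sdw_version(const char *node_name)
	{
		unsigned int sdw_version;	/* %x must target unsigned int */

		if (sscanf(node_name, "sdw%01x", &sdw_version) != 1)
			return -1;

		return (int)sdw_version;
	}
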
config EXFAT_DONT_MOUNT_VFAT - bool "Prohibit mounting of fat/vfat filesysems by exFAT" + bool "Prohibit mounting of fat/vfat filesystems by exFAT" depends on EXFAT_FS default y help diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile index 84944dfbae28..6c90aec83feb 100644 --- a/drivers/staging/exfat/Makefile +++ b/drivers/staging/exfat/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-or-later obj-$(CONFIG_EXFAT_FS) += exfat.o diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h index 6c12f2d79f4d..3abab33e932c 100644 --- a/drivers/staging/exfat/exfat.h +++ b/drivers/staging/exfat/exfat.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c index f086c75e7076..81d20e6241c6 100644 --- a/drivers/staging/exfat/exfat_blkdev.c +++ b/drivers/staging/exfat/exfat_blkdev.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c index 1565ce65d39f..e1b001718709 100644 --- a/drivers/staging/exfat/exfat_cache.c +++ b/drivers/staging/exfat/exfat_cache.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c index b3e9cf725cf5..79174e5c4145 100644 --- a/drivers/staging/exfat/exfat_core.c +++ b/drivers/staging/exfat/exfat_core.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c index 03cb8290b5d2..a5c4b68925fb 100644 --- a/drivers/staging/exfat/exfat_nls.c +++ b/drivers/staging/exfat/exfat_nls.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c index 5f6caee819a6..3b2b0ceb7297 100644 --- a/drivers/staging/exfat/exfat_super.c +++ b/drivers/staging/exfat/exfat_super.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/time.h> #include <linux/slab.h> +#include <linux/mm.h> #include <linux/seq_file.h> #include <linux/pagemap.h> #include <linux/mpage.h> @@ -3450,7 +3451,7 @@ static void exfat_free_super(struct exfat_sb_info *sbi) kfree(sbi->options.iocharset); /* mutex_init is in exfat_fill_super function. only for 3.7+ */ mutex_destroy(&sbi->s_lock); - kfree(sbi); + kvfree(sbi); } static void exfat_put_super(struct super_block *sb) @@ -3845,7 +3846,7 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent) * the filesystem, since we're only just about to mount * it and have no inodes etc active! 
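
Annotation: the exfat_super.c hunks switch the superblock-info allocation from kzalloc()/kfree() to kvzalloc()/kvfree(): kvzalloc() falls back to vmalloc() when the request is too large for the slab allocator, and kvfree() releases either kind of memory. A sketch of the pairing (sb_info_sketch and the helpers are illustrative, not exfat functions):

	#include <linux/mm.h>
	#include <linux/slab.h>

	struct sb_info_sketch {
		unsigned long s_flags;
		/* ... the rest of the potentially large private data ... */
	};

	static struct sb_info_sketch *sbi_alloc_sketch(void)
	{
		/* may be slab- or vmalloc-backed; callers must not assume which */
		return kvzalloc(sizeof(struct sb_info_sketch), GFP_KERNEL);
	}

	static void sbi_free_sketch(struct sb_info_sketch *sbi)
	{
		kvfree(sbi);	/* safe for both backing allocators */
	}
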
*/ - sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL); + sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM; mutex_init(&sbi->s_lock); diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c index 366082fb3dab..b91a1faa0e50 100644 --- a/drivers/staging/exfat/exfat_upcase.c +++ b/drivers/staging/exfat/exfat_upcase.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig index 8ec524a95ec8..cb61c2a772bd 100644 --- a/drivers/staging/fbtft/Kconfig +++ b/drivers/staging/fbtft/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig FB_TFT tristate "Support for small TFT LCD display modules" - depends on FB && SPI + depends on FB && SPI && OF depends on GPIOLIB || COMPILE_TEST select FB_SYS_FILLRECT select FB_SYS_COPYAREA @@ -199,13 +199,3 @@ config FB_TFT_WATTEROTT depends on FB_TFT help Generic Framebuffer support for WATTEROTT - -config FB_FLEX - tristate "Generic FB driver for TFT LCD displays" - depends on FB_TFT - help - Generic Framebuffer support for TFT LCD displays. - -config FB_TFT_FBTFT_DEVICE - tristate "Module to for adding FBTFT devices" - depends on FB_TFT diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile index 6bc03311c9c7..27af43f32f81 100644 --- a/drivers/staging/fbtft/Makefile +++ b/drivers/staging/fbtft/Makefile @@ -36,7 +36,3 @@ obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o -obj-$(CONFIG_FB_FLEX) += flexfb.o - -# Device modules -obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index cf5700a2ea66..a0a67aa517f0 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -714,7 +714,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, if (par->gamma.curves && gamma) { if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma, strlen(gamma))) - goto alloc_fail; + goto release_framebuf; } /* Transmit buffer */ @@ -731,7 +731,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, if (txbuflen > 0) { txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL); if (!txbuf) - goto alloc_fail; + goto release_framebuf; par->txbuf.buf = txbuf; par->txbuf.len = txbuflen; } @@ -753,6 +753,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, return info; +release_framebuf: + framebuffer_release(info); + alloc_fail: vfree(vmem); diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c deleted file mode 100644 index 44e1410eb3fe..000000000000 --- a/drivers/staging/fbtft/fbtft_device.c +++ /dev/null @@ -1,1261 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * - * Copyright (C) 2013, Noralf Tronnes - */ - -#define pr_fmt(fmt) "fbtft_device: " fmt -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/gpio/consumer.h> -#include <linux/spi/spi.h> -#include <video/mipi_display.h> - -#include "fbtft.h" - -#define MAX_GPIOS 32 - -static struct spi_device *spi_device; -static struct platform_device *p_device; - -static char *name; -module_param(name, charp, 0000); -MODULE_PARM_DESC(name, - "Devicename (required). 
name=list => list all supported devices."); - -static unsigned int rotate; -module_param(rotate, uint, 0000); -MODULE_PARM_DESC(rotate, - "Angle to rotate display counter clockwise: 0, 90, 180, 270"); - -static unsigned int busnum; -module_param(busnum, uint, 0000); -MODULE_PARM_DESC(busnum, "SPI bus number (default=0)"); - -static unsigned int cs; -module_param(cs, uint, 0000); -MODULE_PARM_DESC(cs, "SPI chip select (default=0)"); - -static unsigned int speed; -module_param(speed, uint, 0000); -MODULE_PARM_DESC(speed, "SPI speed (override device default)"); - -static int mode = -1; -module_param(mode, int, 0000); -MODULE_PARM_DESC(mode, "SPI mode (override device default)"); - -static unsigned int fps; -module_param(fps, uint, 0000); -MODULE_PARM_DESC(fps, "Frames per second (override driver default)"); - -static char *gamma; -module_param(gamma, charp, 0000); -MODULE_PARM_DESC(gamma, - "String representation of Gamma Curve(s). Driver specific."); - -static int txbuflen; -module_param(txbuflen, int, 0000); -MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)"); - -static int bgr = -1; -module_param(bgr, int, 0000); -MODULE_PARM_DESC(bgr, - "BGR bit (supported by some drivers)."); - -static unsigned int startbyte; -module_param(startbyte, uint, 0000); -MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays."); - -static bool custom; -module_param(custom, bool, 0000); -MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device"); - -static unsigned int width; -module_param(width, uint, 0000); -MODULE_PARM_DESC(width, "Display width, used with the custom argument"); - -static unsigned int height; -module_param(height, uint, 0000); -MODULE_PARM_DESC(height, "Display height, used with the custom argument"); - -static unsigned int buswidth = 8; -module_param(buswidth, uint, 0000); -MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument"); - -static s16 init[FBTFT_MAX_INIT_SEQUENCE]; -static int init_num; -module_param_array(init, short, &init_num, 0000); -MODULE_PARM_DESC(init, "Init sequence, used with the custom argument"); - -static unsigned long debug; -module_param(debug, ulong, 0000); -MODULE_PARM_DESC(debug, - "level: 0-7 (the remaining 29 bits is for advanced usage)"); - -static unsigned int verbose = 3; -module_param(verbose, uint, 0000); -MODULE_PARM_DESC(verbose, - "0 silent, >1 show devices, >2 show devices before (default=3)"); - -struct fbtft_device_display { - char *name; - struct spi_board_info *spi; - struct platform_device *pdev; -}; - -static void fbtft_device_pdev_release(struct device *dev); - -static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len); -static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par, - int xs, int ys, int xe, int ye); - -#define ADAFRUIT18_GAMMA \ - "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \ - "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10" - -#define CBERRY28_GAMMA \ - "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \ - "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20" - -static const s16 cberry28_init_sequence[] = { - /* turn off sleep mode */ - -1, MIPI_DCS_EXIT_SLEEP_MODE, - -2, 120, - - /* set pixel format to RGB-565 */ - -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT, - - -1, 0xB2, 0x0C, 0x0C, 0x00, 0x33, 0x33, - - /* - * VGH = 13.26V - * VGL = -10.43V - */ - -1, 0xB7, 0x35, - - /* - * VDV and VRH register values come from command write - * (instead of NVM) - */ - -1, 0xC2, 0x01, 
0xFF, - - /* - * VAP = 4.7V + (VCOM + VCOM offset + 0.5 * VDV) - * VAN = -4.7V + (VCOM + VCOM offset + 0.5 * VDV) - */ - -1, 0xC3, 0x17, - - /* VDV = 0V */ - -1, 0xC4, 0x20, - - /* VCOM = 0.675V */ - -1, 0xBB, 0x17, - - /* VCOM offset = 0V */ - -1, 0xC5, 0x20, - - /* - * AVDD = 6.8V - * AVCL = -4.8V - * VDS = 2.3V - */ - -1, 0xD0, 0xA4, 0xA1, - - -1, MIPI_DCS_SET_DISPLAY_ON, - - -3, -}; - -static const s16 hy28b_init_sequence[] = { - -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001, - -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, - -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, - -1, 0x0008, 0x0207, -1, 0x0009, 0x0000, - -1, 0x000a, 0x0000, -1, 0x000c, 0x0001, - -1, 0x000d, 0x0000, -1, 0x000f, 0x0000, - -1, 0x0010, 0x0000, -1, 0x0011, 0x0007, - -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, - -2, 50, -1, 0x0010, 0x1590, -1, 0x0011, - 0x0227, -2, 50, -1, 0x0012, 0x009c, -2, 50, - -1, 0x0013, 0x1900, -1, 0x0029, 0x0023, - -1, 0x002b, 0x000e, -2, 50, - -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, - -2, 50, -1, 0x0050, 0x0000, - -1, 0x0051, 0x00ef, -1, 0x0052, 0x0000, - -1, 0x0053, 0x013f, -1, 0x0060, 0xa700, - -1, 0x0061, 0x0001, -1, 0x006a, 0x0000, - -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, - -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, - -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, - -1, 0x0090, 0x0010, -1, 0x0092, 0x0000, - -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, - -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, - -1, 0x0007, 0x0133, -1, 0x0020, 0x0000, - -1, 0x0021, 0x0000, -2, 100, -3 }; - -#define HY28B_GAMMA \ - "04 1F 4 7 7 0 7 7 6 0\n" \ - "0F 00 1 7 4 0 0 0 6 7" - -static const s16 pitft_init_sequence[] = { - -1, MIPI_DCS_SOFT_RESET, - -2, 5, - -1, MIPI_DCS_SET_DISPLAY_OFF, - -1, 0xEF, 0x03, 0x80, 0x02, - -1, 0xCF, 0x00, 0xC1, 0x30, - -1, 0xED, 0x64, 0x03, 0x12, 0x81, - -1, 0xE8, 0x85, 0x00, 0x78, - -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02, - -1, 0xF7, 0x20, - -1, 0xEA, 0x00, 0x00, - -1, 0xC0, 0x23, - -1, 0xC1, 0x10, - -1, 0xC5, 0x3E, 0x28, - -1, 0xC7, 0x86, - -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55, - -1, 0xB1, 0x00, 0x18, - -1, 0xB6, 0x08, 0x82, 0x27, - -1, 0xF2, 0x00, - -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01, - -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, - 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00, - -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, - 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, - -1, MIPI_DCS_EXIT_SLEEP_MODE, - -2, 100, - -1, MIPI_DCS_SET_DISPLAY_ON, - -2, 20, - -3 -}; - -static const s16 waveshare32b_init_sequence[] = { - -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02, - -1, 0xCF, 0x00, 0xC1, 0x30, - -1, 0xE8, 0x85, 0x00, 0x78, - -1, 0xEA, 0x00, 0x00, - -1, 0xED, 0x64, 0x03, 0x12, 0x81, - -1, 0xF7, 0x20, - -1, 0xC0, 0x23, - -1, 0xC1, 0x10, - -1, 0xC5, 0x3E, 0x28, - -1, 0xC7, 0x86, - -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28, - -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55, - -1, 0xB1, 0x00, 0x18, - -1, 0xB6, 0x08, 0x82, 0x27, - -1, 0xF2, 0x00, - -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01, - -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, - 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00, - -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, - 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, - -1, MIPI_DCS_EXIT_SLEEP_MODE, - -2, 120, - -1, MIPI_DCS_SET_DISPLAY_ON, - -1, MIPI_DCS_WRITE_MEMORY_START, - -3 -}; - -#define PIOLED_GAMMA "0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 " \ - "2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 " \ - "3 3 3 4 4 4 4 4 4 4 4 4 4 4 4" - -/* Supported displays in alphabetical order */ -static struct fbtft_device_display displays[] = { - { - .name = "adafruit18", - .spi = &(struct spi_board_info) { - 
.modalias = "fb_st7735r", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .gamma = ADAFRUIT18_GAMMA, - } - } - }, { - .name = "adafruit18_green", - .spi = &(struct spi_board_info) { - .modalias = "fb_st7735r", - .max_speed_hz = 4000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .fbtftops.set_addr_win = - adafruit18_green_tab_set_addr_win, - }, - .bgr = true, - .gamma = ADAFRUIT18_GAMMA, - } - } - }, { - .name = "adafruit22", - .spi = &(struct spi_board_info) { - .modalias = "fb_hx8340bn", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 9, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "adafruit22a", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9340", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "adafruit28", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9341", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "adafruit13m", - .spi = &(struct spi_board_info) { - .modalias = "fb_ssd1306", - .max_speed_hz = 16000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "admatec_c-berry28", - .spi = &(struct spi_board_info) { - .modalias = "fb_st7789v", - .max_speed_hz = 48000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .init_sequence = cberry28_init_sequence, - }, - .gamma = CBERRY28_GAMMA, - } - } - }, { - .name = "agm1264k-fl", - .pdev = &(struct platform_device) { - .name = "fb_agm1264k-fl", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = FBTFT_ONBOARD_BACKLIGHT, - }, - }, - } - } - }, { - .name = "dogs102", - .spi = &(struct spi_board_info) { - .modalias = "fb_uc1701", - .max_speed_hz = 8000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - .bgr = true, - } - } - }, { - .name = "er_tftm050_2", - .spi = &(struct spi_board_info) { - .modalias = "fb_ra8875", - .max_speed_hz = 5000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .width = 480, - .height = 272, - }, - .bgr = true, - } - } - }, { - .name = "er_tftm070_5", - .spi = &(struct spi_board_info) { - .modalias = "fb_ra8875", - .max_speed_hz = 5000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .width = 800, - .height = 480, - }, - .bgr = true, - } - } - }, { - .name = "ew24ha0", - .spi = &(struct spi_board_info) { - .modalias = "fb_uc1611", - .max_speed_hz = 32000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "ew24ha0_9bit", - .spi = &(struct spi_board_info) { - .modalias = "fb_uc1611", - .max_speed_hz = 32000000, - .mode = SPI_MODE_3, - 
.platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 9, - }, - } - } - }, { - .name = "flexfb", - .spi = &(struct spi_board_info) { - .modalias = "flexfb", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - } - }, { - .name = "flexpfb", - .pdev = &(struct platform_device) { - .name = "flexpfb", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - } - } - }, { - .name = "freetronicsoled128", - .spi = &(struct spi_board_info) { - .modalias = "fb_ssd1351", - .max_speed_hz = 20000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = FBTFT_ONBOARD_BACKLIGHT, - }, - .bgr = true, - } - } - }, { - .name = "hx8353d", - .spi = &(struct spi_board_info) { - .modalias = "fb_hx8353d", - .max_speed_hz = 16000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - } - } - }, { - .name = "hy28a", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9320", - .max_speed_hz = 32000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .startbyte = 0x70, - .bgr = true, - } - } - }, { - .name = "hy28b", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9325", - .max_speed_hz = 48000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .init_sequence = hy28b_init_sequence, - }, - .startbyte = 0x70, - .bgr = true, - .fps = 50, - .gamma = HY28B_GAMMA, - } - } - }, { - .name = "ili9481", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9481", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .regwidth = 16, - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "itdb24", - .pdev = &(struct platform_device) { - .name = "fb_s6d1121", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = false, - }, - } - } - }, { - .name = "itdb28", - .pdev = &(struct platform_device) { - .name = "fb_ili9325", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - }, - } - } - }, { - .name = "itdb28_spi", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9325", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "mi0283qt-2", - .spi = &(struct spi_board_info) { - .modalias = "fb_hx8347d", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .startbyte = 0x70, - .bgr = true, - } - } - }, { - .name = "mi0283qt-9a", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9341", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 9, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "mi0283qt-v2", - .spi = &(struct spi_board_info) { - .modalias = "fb_watterott", - .max_speed_hz = 4000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - } - } - }, { - .name = "nokia3310", - 
.spi = &(struct spi_board_info) { - .modalias = "fb_pcd8544", - .max_speed_hz = 400000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "nokia3310a", - .spi = &(struct spi_board_info) { - .modalias = "fb_tls8204", - .max_speed_hz = 1000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "nokia5110", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9163", - .max_speed_hz = 12000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "piscreen", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9486", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .regwidth = 16, - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "pitft", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9340", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .chip_select = 0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .init_sequence = pitft_init_sequence, - }, - .bgr = true, - } - } - }, { - .name = "pioled", - .spi = &(struct spi_board_info) { - .modalias = "fb_ssd1351", - .max_speed_hz = 20000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - .bgr = true, - .gamma = PIOLED_GAMMA - } - } - }, { - .name = "rpi-display", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9341", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "s6d02a1", - .spi = &(struct spi_board_info) { - .modalias = "fb_s6d02a1", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "sainsmart18", - .spi = &(struct spi_board_info) { - .modalias = "fb_st7735r", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "sainsmart32", - .pdev = &(struct platform_device) { - .name = "fb_ssd1289", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 16, - .txbuflen = -2, /* disable buffer */ - .backlight = 1, - .fbtftops.write = write_gpio16_wr_slow, - }, - .bgr = true, - }, - }, - } - }, { - .name = "sainsmart32_fast", - .pdev = &(struct platform_device) { - .name = "fb_ssd1289", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 16, - .txbuflen = -2, /* disable buffer */ - .backlight = 1, - }, - .bgr = true, - }, - }, - } - }, { - .name = "sainsmart32_latched", - .pdev = &(struct platform_device) { - .name = "fb_ssd1289", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 16, - .txbuflen = -2, /* disable buffer */ - .backlight = 1, - .fbtftops.write = - fbtft_write_gpio16_wr_latched, - }, - .bgr = true, - }, - }, - } - }, { - .name = "sainsmart32_spi", - .spi = 
&(struct spi_board_info) { - .modalias = "fb_ssd1289", - .max_speed_hz = 16000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "spidev", - .spi = &(struct spi_board_info) { - .modalias = "spidev", - .max_speed_hz = 500000, - .bus_num = 0, - .chip_select = 0, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - } - } - }, { - .name = "ssd1331", - .spi = &(struct spi_board_info) { - .modalias = "fb_ssd1331", - .max_speed_hz = 20000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "tinylcd35", - .spi = &(struct spi_board_info) { - .modalias = "fb_tinylcd", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "tm022hdh26", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9341", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "tontec35_9481", /* boards before 02 July 2014 */ - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9481", - .max_speed_hz = 128000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "tontec35_9486", /* boards after 02 July 2014 */ - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9486", - .max_speed_hz = 128000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - }, - .bgr = true, - } - } - }, { - .name = "upd161704", - .spi = &(struct spi_board_info) { - .modalias = "fb_upd161704", - .max_speed_hz = 32000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - .name = "waveshare32b", - .spi = &(struct spi_board_info) { - .modalias = "fb_ili9340", - .max_speed_hz = 48000000, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - .backlight = 1, - .init_sequence = - waveshare32b_init_sequence, - }, - .bgr = true, - } - } - }, { - .name = "waveshare22", - .spi = &(struct spi_board_info) { - .modalias = "fb_bd663474", - .max_speed_hz = 32000000, - .mode = SPI_MODE_3, - .platform_data = &(struct fbtft_platform_data) { - .display = { - .buswidth = 8, - }, - } - } - }, { - /* This should be the last item. 
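
Annotation: the displays[] table being removed here is terminated by an entry whose name is the empty string; that slot doubled as the template filled in by the "custom" module parameter. A minimal sketch of the sentinel-terminated lookup idiom (display_sketch is an illustrative type):

	#include <linux/string.h>

	struct display_sketch {
		const char *name;
	};

	static const struct display_sketch displays_sketch[] = {
		{ .name = "adafruit18" },
		{ .name = "pitft" },
		{ .name = "" },			/* sentinel: must stay last */
	};

	static const struct display_sketch *find_display(const char *name)
	{
		const struct display_sketch *d;

		for (d = displays_sketch; d->name[0] != '\0'; d++)
			if (!strcmp(d->name, name))
				return d;

		return NULL;		/* hit the sentinel: unknown name */
	}
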
- * Used with the custom argument - */ - .name = "", - .spi = &(struct spi_board_info) { - .modalias = "", - .max_speed_hz = 0, - .mode = SPI_MODE_0, - .platform_data = &(struct fbtft_platform_data) { - } - }, - .pdev = &(struct platform_device) { - .name = "", - .id = 0, - .dev = { - .release = fbtft_device_pdev_release, - .platform_data = &(struct fbtft_platform_data) { - }, - }, - }, - } -}; - -static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len) -{ - u16 data; - int i; -#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO - static u16 prev_data; -#endif - - fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len, - "%s(len=%zu): ", __func__, len); - - while (len) { - data = *(u16 *)buf; - - /* Start writing by pulling down /WR */ - gpiod_set_value(par->gpio.wr, 0); - - /* Set data */ -#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO - if (data == prev_data) { - gpiod_set_value(par->gpio.wr, 0); /* used as delay */ - } else { - for (i = 0; i < 16; i++) { - if ((data & 1) != (prev_data & 1)) - gpiod_set_value(par->gpio.db[i], - data & 1); - data >>= 1; - prev_data >>= 1; - } - } -#else - for (i = 0; i < 16; i++) { - gpiod_set_value(par->gpio.db[i], data & 1); - data >>= 1; - } -#endif - - /* Pullup /WR */ - gpiod_set_value(par->gpio.wr, 1); - -#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO - prev_data = *(u16 *)buf; -#endif - buf += 2; - len -= 2; - } - - return 0; -} - -static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par, - int xs, int ys, int xe, int ye) -{ - write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2); - write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1); - write_reg(par, 0x2C); -} - -static void fbtft_device_pdev_release(struct device *dev) -{ -/* Needed to silence this message: - * Device 'xxx' does not have a release() function, - * it is broken and must be fixed - */ -} - -static int spi_device_found(struct device *dev, void *data) -{ - struct spi_device *spi = to_spi_device(dev); - - dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias, - dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word, - spi->mode); - - return 0; -} - -static void pr_spi_devices(void) -{ - pr_debug("SPI devices registered:\n"); - bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found); -} - -static int p_device_found(struct device *dev, void *data) -{ - struct platform_device - *pdev = to_platform_device(dev); - - if (strstr(pdev->name, "fb")) - dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id, - pdev->dev.platform_data ? 
"yes" : "no"); - - return 0; -} - -static void pr_p_devices(void) -{ - pr_debug("'fb' Platform devices registered:\n"); - bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found); -} - -#ifdef MODULE -static void fbtft_device_spi_delete(struct spi_master *master, unsigned int cs) -{ - struct device *dev; - char str[32]; - - snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs); - - dev = bus_find_device_by_name(&spi_bus_type, NULL, str); - if (dev) { - if (verbose) - dev_info(dev, "Deleting %s\n", str); - device_del(dev); - } -} - -static int fbtft_device_spi_device_register(struct spi_board_info *spi) -{ - struct spi_master *master; - - master = spi_busnum_to_master(spi->bus_num); - if (!master) { - pr_err("spi_busnum_to_master(%d) returned NULL\n", - spi->bus_num); - return -EINVAL; - } - /* make sure it's available */ - fbtft_device_spi_delete(master, spi->chip_select); - spi_device = spi_new_device(master, spi); - put_device(&master->dev); - if (!spi_device) { - dev_err(&master->dev, "spi_new_device() returned NULL\n"); - return -EPERM; - } - return 0; -} -#else -static int fbtft_device_spi_device_register(struct spi_board_info *spi) -{ - return spi_register_board_info(spi, 1); -} -#endif - -static int __init fbtft_device_init(void) -{ - struct spi_board_info *spi = NULL; - struct fbtft_platform_data *pdata; - bool found = false; - int i = 0; - int ret = 0; - - if (!name) { -#ifdef MODULE - pr_err("missing module parameter: 'name'\n"); - return -EINVAL; -#else - return 0; -#endif - } - - if (init_num > FBTFT_MAX_INIT_SEQUENCE) { - pr_err("init parameter: exceeded max array size: %d\n", - FBTFT_MAX_INIT_SEQUENCE); - return -EINVAL; - } - - if (verbose > 2) { - pr_spi_devices(); /* print list of registered SPI devices */ - pr_p_devices(); /* print list of 'fb' platform devices */ - } - - pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs); - - if (rotate > 0 && rotate < 4) { - rotate = (4 - rotate) * 90; - pr_warn("argument 'rotate' should be an angle. Values 1-3 is deprecated. Setting it to %d.\n", - rotate); - } - if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) { - pr_warn("argument 'rotate' illegal value: %d. 
Setting it to 0.\n", - rotate); - rotate = 0; - } - - /* name=list lists all supported displays */ - if (strcmp(name, "list") == 0) { - pr_info("Supported displays:\n"); - - for (i = 0; i < ARRAY_SIZE(displays); i++) - pr_info("%s\n", displays[i].name); - return -ECANCELED; - } - - if (custom) { - i = ARRAY_SIZE(displays) - 1; - displays[i].name = name; - if (speed == 0) { - displays[i].pdev->name = name; - displays[i].spi = NULL; - } else { - size_t len; - - len = strlcpy(displays[i].spi->modalias, name, - SPI_NAME_SIZE); - if (len >= SPI_NAME_SIZE) - pr_warn("modalias (name) truncated to: %s\n", - displays[i].spi->modalias); - displays[i].pdev = NULL; - } - } - - for (i = 0; i < ARRAY_SIZE(displays); i++) { - if (strncmp(name, displays[i].name, SPI_NAME_SIZE) == 0) { - if (displays[i].spi) { - spi = displays[i].spi; - spi->chip_select = cs; - spi->bus_num = busnum; - if (speed) - spi->max_speed_hz = speed; - if (mode != -1) - spi->mode = mode; - pdata = (void *)spi->platform_data; - } else if (displays[i].pdev) { - p_device = displays[i].pdev; - pdata = p_device->dev.platform_data; - } else { - pr_err("broken displays array\n"); - return -EINVAL; - } - - pdata->rotate = rotate; - if (bgr == 0) - pdata->bgr = false; - else if (bgr == 1) - pdata->bgr = true; - if (startbyte) - pdata->startbyte = startbyte; - if (gamma) - pdata->gamma = gamma; - pdata->display.debug = debug; - if (fps) - pdata->fps = fps; - if (txbuflen) - pdata->txbuflen = txbuflen; - if (init_num) - pdata->display.init_sequence = init; - if (custom) { - pdata->display.width = width; - pdata->display.height = height; - pdata->display.buswidth = buswidth; - pdata->display.backlight = 1; - } - - if (displays[i].spi) { - ret = fbtft_device_spi_device_register(spi); - if (ret) { - pr_err("failed to register SPI device\n"); - return ret; - } - } else { - ret = platform_device_register(p_device); - if (ret < 0) { - pr_err("platform_device_register() returned %d\n", - ret); - return ret; - } - } - found = true; - break; - } - } - - if (!found) { - pr_err("display not supported: '%s'\n", name); - return -EINVAL; - } - - if (spi_device && (verbose > 1)) - pr_spi_devices(); - if (p_device && (verbose > 1)) - pr_p_devices(); - - return 0; -} - -static void __exit fbtft_device_exit(void) -{ - if (spi_device) { - device_del(&spi_device->dev); - kfree(spi_device); - } - - if (p_device) - platform_device_unregister(p_device); -} - -arch_initcall(fbtft_device_init); -module_exit(fbtft_device_exit); - -MODULE_DESCRIPTION("Add a FBTFT device."); -MODULE_AUTHOR("Noralf Tronnes"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c deleted file mode 100644 index 3747321011fa..000000000000 --- a/drivers/staging/fbtft/flexfb.c +++ /dev/null @@ -1,851 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Generic FB driver for TFT LCD displays - * - * Copyright (C) 2013 Noralf Tronnes - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/vmalloc.h> -#include <linux/gpio/consumer.h> -#include <linux/spi/spi.h> -#include <linux/delay.h> - -#include "fbtft.h" - -#define DRVNAME "flexfb" - -static char *chip; -module_param(chip, charp, 0000); -MODULE_PARM_DESC(chip, "LCD controller"); - -static unsigned int width; -module_param(width, uint, 0000); -MODULE_PARM_DESC(width, "Display width"); - -static unsigned int height; -module_param(height, uint, 0000); -MODULE_PARM_DESC(height, "Display height"); - -static s16 init[512]; -static int init_num; 
-module_param_array(init, short, &init_num, 0000); -MODULE_PARM_DESC(init, "Init sequence"); - -static unsigned int setaddrwin; -module_param(setaddrwin, uint, 0000); -MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use"); - -static unsigned int buswidth = 8; -module_param(buswidth, uint, 0000); -MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)"); - -static unsigned int regwidth = 8; -module_param(regwidth, uint, 0000); -MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)"); - -static bool nobacklight; -module_param(nobacklight, bool, 0000); -MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality."); - -static bool latched; -module_param(latched, bool, 0000); -MODULE_PARM_DESC(latched, "Use with latched 16-bit databus"); - -static const s16 *initp; -static int initp_num; - -/* default init sequences */ -static const s16 st7735r_init[] = { - -1, 0x01, - -2, 150, - -1, 0x11, - -2, 500, - -1, 0xB1, 0x01, 0x2C, 0x2D, - -1, 0xB2, 0x01, 0x2C, 0x2D, - -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D, - -1, 0xB4, 0x07, - -1, 0xC0, 0xA2, 0x02, 0x84, - -1, 0xC1, 0xC5, - -1, 0xC2, 0x0A, 0x00, - -1, 0xC3, 0x8A, 0x2A, - -1, 0xC4, 0x8A, 0xEE, - -1, 0xC5, 0x0E, - -1, 0x20, - -1, 0x36, 0xC0, - -1, 0x3A, 0x05, - -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, - 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10, - -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, - 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, - -1, 0x29, - -2, 100, - -1, 0x13, - -2, 10, - -3 -}; - -static const s16 ssd1289_init[] = { - -1, 0x00, 0x0001, - -1, 0x03, 0xA8A4, - -1, 0x0C, 0x0000, - -1, 0x0D, 0x080C, - -1, 0x0E, 0x2B00, - -1, 0x1E, 0x00B7, - -1, 0x01, 0x2B3F, - -1, 0x02, 0x0600, - -1, 0x10, 0x0000, - -1, 0x11, 0x6070, - -1, 0x05, 0x0000, - -1, 0x06, 0x0000, - -1, 0x16, 0xEF1C, - -1, 0x17, 0x0003, - -1, 0x07, 0x0233, - -1, 0x0B, 0x0000, - -1, 0x0F, 0x0000, - -1, 0x41, 0x0000, - -1, 0x42, 0x0000, - -1, 0x48, 0x0000, - -1, 0x49, 0x013F, - -1, 0x4A, 0x0000, - -1, 0x4B, 0x0000, - -1, 0x44, 0xEF00, - -1, 0x45, 0x0000, - -1, 0x46, 0x013F, - -1, 0x30, 0x0707, - -1, 0x31, 0x0204, - -1, 0x32, 0x0204, - -1, 0x33, 0x0502, - -1, 0x34, 0x0507, - -1, 0x35, 0x0204, - -1, 0x36, 0x0204, - -1, 0x37, 0x0502, - -1, 0x3A, 0x0302, - -1, 0x3B, 0x0302, - -1, 0x23, 0x0000, - -1, 0x24, 0x0000, - -1, 0x25, 0x8000, - -1, 0x4f, 0x0000, - -1, 0x4e, 0x0000, - -1, 0x22, - -3 -}; - -static const s16 hx8340bn_init[] = { - -1, 0xC1, 0xFF, 0x83, 0x40, - -1, 0x11, - -2, 150, - -1, 0xCA, 0x70, 0x00, 0xD9, - -1, 0xB0, 0x01, 0x11, - -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, - -2, 20, - -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A, - -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, - -2, 10, - -1, 0xB5, 0x35, 0x20, 0x45, - -1, 0xB4, 0x33, 0x25, 0x4C, - -2, 10, - -1, 0x3A, 0x05, - -1, 0x29, - -2, 10, - -3 -}; - -static const s16 ili9225_init[] = { - -1, 0x0001, 0x011C, - -1, 0x0002, 0x0100, - -1, 0x0003, 0x1030, - -1, 0x0008, 0x0808, - -1, 0x000C, 0x0000, - -1, 0x000F, 0x0A01, - -1, 0x0020, 0x0000, - -1, 0x0021, 0x0000, - -2, 50, - -1, 0x0010, 0x0A00, - -1, 0x0011, 0x1038, - -2, 50, - -1, 0x0012, 0x1121, - -1, 0x0013, 0x004E, - -1, 0x0014, 0x676F, - -1, 0x0030, 0x0000, - -1, 0x0031, 0x00DB, - -1, 0x0032, 0x0000, - -1, 0x0033, 0x0000, - -1, 0x0034, 0x00DB, - -1, 0x0035, 0x0000, - -1, 0x0036, 0x00AF, - -1, 0x0037, 0x0000, - -1, 0x0038, 0x00DB, - -1, 0x0039, 0x0000, - -1, 0x0050, 0x0000, - -1, 0x0051, 0x060A, - -1, 0x0052, 0x0D0A, - -1, 0x0053, 0x0303, - -1, 
0x0054, 0x0A0D, - -1, 0x0055, 0x0A06, - -1, 0x0056, 0x0000, - -1, 0x0057, 0x0303, - -1, 0x0058, 0x0000, - -1, 0x0059, 0x0000, - -2, 50, - -1, 0x0007, 0x1017, - -2, 50, - -3 -}; - -static const s16 ili9320_init[] = { - -1, 0x00E5, 0x8000, - -1, 0x0000, 0x0001, - -1, 0x0001, 0x0100, - -1, 0x0002, 0x0700, - -1, 0x0003, 0x1030, - -1, 0x0004, 0x0000, - -1, 0x0008, 0x0202, - -1, 0x0009, 0x0000, - -1, 0x000A, 0x0000, - -1, 0x000C, 0x0000, - -1, 0x000D, 0x0000, - -1, 0x000F, 0x0000, - -1, 0x0010, 0x0000, - -1, 0x0011, 0x0007, - -1, 0x0012, 0x0000, - -1, 0x0013, 0x0000, - -2, 200, - -1, 0x0010, 0x17B0, - -1, 0x0011, 0x0031, - -2, 50, - -1, 0x0012, 0x0138, - -2, 50, - -1, 0x0013, 0x1800, - -1, 0x0029, 0x0008, - -2, 50, - -1, 0x0020, 0x0000, - -1, 0x0021, 0x0000, - -1, 0x0030, 0x0000, - -1, 0x0031, 0x0505, - -1, 0x0032, 0x0004, - -1, 0x0035, 0x0006, - -1, 0x0036, 0x0707, - -1, 0x0037, 0x0105, - -1, 0x0038, 0x0002, - -1, 0x0039, 0x0707, - -1, 0x003C, 0x0704, - -1, 0x003D, 0x0807, - -1, 0x0050, 0x0000, - -1, 0x0051, 0x00EF, - -1, 0x0052, 0x0000, - -1, 0x0053, 0x013F, - -1, 0x0060, 0x2700, - -1, 0x0061, 0x0001, - -1, 0x006A, 0x0000, - -1, 0x0080, 0x0000, - -1, 0x0081, 0x0000, - -1, 0x0082, 0x0000, - -1, 0x0083, 0x0000, - -1, 0x0084, 0x0000, - -1, 0x0085, 0x0000, - -1, 0x0090, 0x0010, - -1, 0x0092, 0x0000, - -1, 0x0093, 0x0003, - -1, 0x0095, 0x0110, - -1, 0x0097, 0x0000, - -1, 0x0098, 0x0000, - -1, 0x0007, 0x0173, - -3 -}; - -static const s16 ili9325_init[] = { - -1, 0x00E3, 0x3008, - -1, 0x00E7, 0x0012, - -1, 0x00EF, 0x1231, - -1, 0x0001, 0x0100, - -1, 0x0002, 0x0700, - -1, 0x0003, 0x1030, - -1, 0x0004, 0x0000, - -1, 0x0008, 0x0207, - -1, 0x0009, 0x0000, - -1, 0x000A, 0x0000, - -1, 0x000C, 0x0000, - -1, 0x000D, 0x0000, - -1, 0x000F, 0x0000, - -1, 0x0010, 0x0000, - -1, 0x0011, 0x0007, - -1, 0x0012, 0x0000, - -1, 0x0013, 0x0000, - -2, 200, - -1, 0x0010, 0x1690, - -1, 0x0011, 0x0223, - -2, 50, - -1, 0x0012, 0x000D, - -2, 50, - -1, 0x0013, 0x1200, - -1, 0x0029, 0x000A, - -1, 0x002B, 0x000C, - -2, 50, - -1, 0x0020, 0x0000, - -1, 0x0021, 0x0000, - -1, 0x0030, 0x0000, - -1, 0x0031, 0x0506, - -1, 0x0032, 0x0104, - -1, 0x0035, 0x0207, - -1, 0x0036, 0x000F, - -1, 0x0037, 0x0306, - -1, 0x0038, 0x0102, - -1, 0x0039, 0x0707, - -1, 0x003C, 0x0702, - -1, 0x003D, 0x1604, - -1, 0x0050, 0x0000, - -1, 0x0051, 0x00EF, - -1, 0x0052, 0x0000, - -1, 0x0053, 0x013F, - -1, 0x0060, 0xA700, - -1, 0x0061, 0x0001, - -1, 0x006A, 0x0000, - -1, 0x0080, 0x0000, - -1, 0x0081, 0x0000, - -1, 0x0082, 0x0000, - -1, 0x0083, 0x0000, - -1, 0x0084, 0x0000, - -1, 0x0085, 0x0000, - -1, 0x0090, 0x0010, - -1, 0x0092, 0x0600, - -1, 0x0007, 0x0133, - -3 -}; - -static const s16 ili9341_init[] = { - -1, 0x28, - -2, 20, - -1, 0xCF, 0x00, 0x83, 0x30, - -1, 0xED, 0x64, 0x03, 0x12, 0x81, - -1, 0xE8, 0x85, 0x01, 0x79, - -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, - -1, 0xF7, 0x20, - -1, 0xEA, 0x00, 0x00, - -1, 0xC0, 0x26, - -1, 0xC1, 0x11, - -1, 0xC5, 0x35, 0x3E, - -1, 0xC7, 0xBE, - -1, 0xB1, 0x00, 0x1B, - -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, - -1, 0xB7, 0x07, - -1, 0x3A, 0x55, - -1, 0x36, 0x48, - -1, 0x11, - -2, 120, - -1, 0x29, - -2, 20, - -3 -}; - -static const s16 ssd1351_init[] = { - -1, 0xfd, 0x12, - -1, 0xfd, 0xb1, - -1, 0xae, - -1, 0xb3, 0xf1, - -1, 0xca, 0x7f, - -1, 0xa0, 0x74, - -1, 0x15, 0x00, 0x7f, - -1, 0x75, 0x00, 0x7f, - -1, 0xa1, 0x00, - -1, 0xa2, 0x00, - -1, 0xb5, 0x00, - -1, 0xab, 0x01, - -1, 0xb1, 0x32, - -1, 0xb4, 0xa0, 0xb5, 0x55, - -1, 0xbb, 0x17, - -1, 0xbe, 0x05, - -1, 0xc1, 0xc8, 0x80, 0xc8, - -1, 0xc7, 0x0f, - -1, 0xb6, 0x01, - -1, 0xa6, - 
-1, 0xaf, - -3 -}; - -/** - * struct flexfb_lcd_controller - Describes the LCD controller properties - * @name: Model name of the chip - * @width: Width of display in pixels - * @height: Height of display in pixels - * @setaddrwin: Which set_addr_win() implementation to use - * @regwidth: LCD Controller Register width in bits - * @init_seq: LCD initialization sequence - * @init_seq_sz: Size of LCD initialization sequence - */ -struct flexfb_lcd_controller { - const char *name; - unsigned int width; - unsigned int height; - unsigned int setaddrwin; - unsigned int regwidth; - const s16 *init_seq; - int init_seq_sz; -}; - -static const struct flexfb_lcd_controller flexfb_chip_table[] = { - { - .name = "st7735r", - .width = 120, - .height = 160, - .init_seq = st7735r_init, - .init_seq_sz = ARRAY_SIZE(st7735r_init), - }, - { - .name = "hx8340bn", - .width = 176, - .height = 220, - .init_seq = hx8340bn_init, - .init_seq_sz = ARRAY_SIZE(hx8340bn_init), - }, - { - .name = "ili9225", - .width = 176, - .height = 220, - .regwidth = 16, - .init_seq = ili9225_init, - .init_seq_sz = ARRAY_SIZE(ili9225_init), - }, - { - .name = "ili9320", - .width = 240, - .height = 320, - .setaddrwin = 1, - .regwidth = 16, - .init_seq = ili9320_init, - .init_seq_sz = ARRAY_SIZE(ili9320_init), - }, - { - .name = "ili9325", - .width = 240, - .height = 320, - .setaddrwin = 1, - .regwidth = 16, - .init_seq = ili9325_init, - .init_seq_sz = ARRAY_SIZE(ili9325_init), - }, - { - .name = "ili9341", - .width = 240, - .height = 320, - .init_seq = ili9341_init, - .init_seq_sz = ARRAY_SIZE(ili9341_init), - }, - { - .name = "ssd1289", - .width = 240, - .height = 320, - .setaddrwin = 2, - .regwidth = 16, - .init_seq = ssd1289_init, - .init_seq_sz = ARRAY_SIZE(ssd1289_init), - }, - { - .name = "ssd1351", - .width = 128, - .height = 128, - .setaddrwin = 3, - .init_seq = ssd1351_init, - .init_seq_sz = ARRAY_SIZE(ssd1351_init), - }, -}; - -/* ili9320, ili9325 */ -static void flexfb_set_addr_win_1(struct fbtft_par *par, - int xs, int ys, int xe, int ye) -{ - switch (par->info->var.rotate) { - /* R20h = Horizontal GRAM Start Address */ - /* R21h = Vertical GRAM Start Address */ - case 0: - write_reg(par, 0x0020, xs); - write_reg(par, 0x0021, ys); - break; - case 180: - write_reg(par, 0x0020, width - 1 - xs); - write_reg(par, 0x0021, height - 1 - ys); - break; - case 270: - write_reg(par, 0x0020, width - 1 - ys); - write_reg(par, 0x0021, xs); - break; - case 90: - write_reg(par, 0x0020, ys); - write_reg(par, 0x0021, height - 1 - xs); - break; - } - write_reg(par, 0x0022); /* Write Data to GRAM */ -} - -/* ssd1289 */ -static void flexfb_set_addr_win_2(struct fbtft_par *par, - int xs, int ys, int xe, int ye) -{ - switch (par->info->var.rotate) { - /* R4Eh - Set GDDRAM X address counter */ - /* R4Fh - Set GDDRAM Y address counter */ - case 0: - write_reg(par, 0x4e, xs); - write_reg(par, 0x4f, ys); - break; - case 180: - write_reg(par, 0x4e, par->info->var.xres - 1 - xs); - write_reg(par, 0x4f, par->info->var.yres - 1 - ys); - break; - case 270: - write_reg(par, 0x4e, par->info->var.yres - 1 - ys); - write_reg(par, 0x4f, xs); - break; - case 90: - write_reg(par, 0x4e, ys); - write_reg(par, 0x4f, par->info->var.xres - 1 - xs); - break; - } - - /* R22h - RAM data write */ - write_reg(par, 0x22, 0); -} - -/* ssd1351 */ -static void set_addr_win_3(struct fbtft_par *par, - int xs, int ys, int xe, int ye) -{ - write_reg(par, 0x15, xs, xe); - write_reg(par, 0x75, ys, ye); - write_reg(par, 0x5C); -} - -static int flexfb_verify_gpios_dc(struct fbtft_par 
*par) -{ - fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__); - - if (!par->gpio.dc) { - dev_err(par->info->device, - "Missing info about 'dc' gpio. Aborting.\n"); - return -EINVAL; - } - - return 0; -} - -static int flexfb_verify_gpios_db(struct fbtft_par *par) -{ - int i; - int num_db = buswidth; - - fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__); - - if (!par->gpio.dc) { - dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n"); - return -EINVAL; - } - if (!par->gpio.wr) { - dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n"); - return -EINVAL; - } - if (latched && !par->gpio.latch) { - dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n"); - return -EINVAL; - } - if (latched) - num_db = buswidth / 2; - for (i = 0; i < num_db; i++) { - if (!par->gpio.db[i]) { - dev_err(par->info->device, - "Missing info about 'db%02d' gpio. Aborting.\n", - i); - return -EINVAL; - } - } - - return 0; -} - -static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip) -{ - if (!width) - width = chip->width; - if (!height) - height = chip->height; - setaddrwin = chip->setaddrwin; - if (chip->regwidth) - regwidth = chip->regwidth; - if (!init_num) { - initp = chip->init_seq; - initp_num = chip->init_seq_sz; - } -} - -static struct fbtft_display flex_display = { }; - -static int flexfb_chip_init(const struct device *dev) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++) - if (!strcmp(chip, flexfb_chip_table[i].name)) { - flexfb_chip_load_param(&flexfb_chip_table[i]); - return 0; - } - - dev_err(dev, "chip=%s is not supported\n", chip); - - return -EINVAL; -} - -static int flexfb_probe_common(struct spi_device *sdev, - struct platform_device *pdev) -{ - struct device *dev; - struct fb_info *info; - struct fbtft_par *par; - int ret; - - initp = init; - initp_num = init_num; - - if (sdev) - dev = &sdev->dev; - else - dev = &pdev->dev; - - fbtft_init_dbg(dev, "%s(%s)\n", __func__, - sdev ? "'SPI device'" : "'Platform device'"); - - if (chip) { - ret = flexfb_chip_init(dev); - if (ret) - return ret; - } - - if (width == 0 || height == 0) { - dev_err(dev, "argument(s) missing: width and height has to be set.\n"); - return -EINVAL; - } - flex_display.width = width; - flex_display.height = height; - fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height); - fbtft_init_dbg(dev, "chip = %s\n", chip ? 
chip : "not set"); - fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin); - fbtft_init_dbg(dev, "regwidth = %d\n", regwidth); - fbtft_init_dbg(dev, "buswidth = %d\n", buswidth); - - info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data); - if (!info) - return -ENOMEM; - - par = info->par; - if (sdev) - par->spi = sdev; - else - par->pdev = pdev; - if (!par->init_sequence) - par->init_sequence = initp; - par->fbtftops.init_display = fbtft_init_display; - - /* registerwrite functions */ - switch (regwidth) { - case 8: - par->fbtftops.write_register = fbtft_write_reg8_bus8; - break; - case 16: - par->fbtftops.write_register = fbtft_write_reg16_bus8; - break; - default: - dev_err(dev, - "argument 'regwidth': %d is not supported.\n", - regwidth); - return -EINVAL; - } - - /* bus functions */ - if (sdev) { - par->fbtftops.write = fbtft_write_spi; - switch (buswidth) { - case 8: - par->fbtftops.write_vmem = fbtft_write_vmem16_bus8; - if (!par->startbyte) - par->fbtftops.verify_gpios = flexfb_verify_gpios_dc; - break; - case 9: - if (regwidth == 16) { - dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n", - regwidth, buswidth); - return -EINVAL; - } - par->fbtftops.write_register = fbtft_write_reg8_bus9; - par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; - if (par->spi->master->bits_per_word_mask - & SPI_BPW_MASK(9)) { - par->spi->bits_per_word = 9; - break; - } - - dev_warn(dev, - "9-bit SPI not available, emulating using 8-bit.\n"); - /* allocate buffer with room for dc bits */ - par->extra = devm_kzalloc(par->info->device, - par->txbuf.len - + (par->txbuf.len / 8) + 8, - GFP_KERNEL); - if (!par->extra) { - ret = -ENOMEM; - goto out_release; - } - par->fbtftops.write = fbtft_write_spi_emulate_9; - - break; - default: - dev_err(dev, - "argument 'buswidth': %d is not supported with SPI.\n", - buswidth); - return -EINVAL; - } - } else { - par->fbtftops.verify_gpios = flexfb_verify_gpios_db; - switch (buswidth) { - case 8: - par->fbtftops.write = fbtft_write_gpio8_wr; - par->fbtftops.write_vmem = fbtft_write_vmem16_bus8; - break; - case 16: - par->fbtftops.write_register = fbtft_write_reg16_bus16; - if (latched) - par->fbtftops.write = fbtft_write_gpio16_wr_latched; - else - par->fbtftops.write = fbtft_write_gpio16_wr; - par->fbtftops.write_vmem = fbtft_write_vmem16_bus16; - break; - default: - dev_err(dev, - "argument 'buswidth': %d is not supported with parallel.\n", - buswidth); - return -EINVAL; - } - } - - /* set_addr_win function */ - switch (setaddrwin) { - case 0: - /* use default */ - break; - case 1: - par->fbtftops.set_addr_win = flexfb_set_addr_win_1; - break; - case 2: - par->fbtftops.set_addr_win = flexfb_set_addr_win_2; - break; - case 3: - par->fbtftops.set_addr_win = set_addr_win_3; - break; - default: - dev_err(dev, "argument 'setaddrwin': unknown value %d.\n", - setaddrwin); - return -EINVAL; - } - - if (!nobacklight) - par->fbtftops.register_backlight = fbtft_register_backlight; - - ret = fbtft_register_framebuffer(info); - if (ret < 0) - goto out_release; - - return 0; - -out_release: - fbtft_framebuffer_release(info); - - return ret; -} - -static int flexfb_remove_common(struct device *dev, struct fb_info *info) -{ - struct fbtft_par *par; - - if (!info) - return -EINVAL; - par = info->par; - if (par) - fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n", - __func__); - fbtft_unregister_framebuffer(info); - fbtft_framebuffer_release(info); - - return 0; -} - -static int flexfb_probe_spi(struct spi_device *spi) -{ - return 
flexfb_probe_common(spi, NULL); -} - -static int flexfb_remove_spi(struct spi_device *spi) -{ - struct fb_info *info = spi_get_drvdata(spi); - - return flexfb_remove_common(&spi->dev, info); -} - -static int flexfb_probe_pdev(struct platform_device *pdev) -{ - return flexfb_probe_common(NULL, pdev); -} - -static int flexfb_remove_pdev(struct platform_device *pdev) -{ - struct fb_info *info = platform_get_drvdata(pdev); - - return flexfb_remove_common(&pdev->dev, info); -} - -static struct spi_driver flexfb_spi_driver = { - .driver = { - .name = DRVNAME, - }, - .probe = flexfb_probe_spi, - .remove = flexfb_remove_spi, -}; - -static const struct platform_device_id flexfb_platform_ids[] = { - { "flexpfb", 0 }, - { }, -}; -MODULE_DEVICE_TABLE(platform, flexfb_platform_ids); - -static struct platform_driver flexfb_platform_driver = { - .driver = { - .name = DRVNAME, - }, - .id_table = flexfb_platform_ids, - .probe = flexfb_probe_pdev, - .remove = flexfb_remove_pdev, -}; - -static int __init flexfb_init(void) -{ - int ret, ret2; - - ret = spi_register_driver(&flexfb_spi_driver); - ret2 = platform_driver_register(&flexfb_platform_driver); - if (ret < 0) - return ret; - return ret2; -} - -static void __exit flexfb_exit(void) -{ - spi_unregister_driver(&flexfb_spi_driver); - platform_driver_unregister(&flexfb_platform_driver); -} - -/* ------------------------------------------------------------------------- */ - -module_init(flexfb_init); -module_exit(flexfb_exit); - -MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays"); -MODULE_AUTHOR("Noralf Tronnes"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index a62057555d1b..83469061a542 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -261,11 +261,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; if (skb_shinfo(skb)->nr_frags == 0) { - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); + hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb->len; } else { - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); + hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb_headlen(skb); CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; @@ -273,11 +273,12 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_t *fs = skb_shinfo(skb)->frags + i; hw_buffer.s.addr = - XKPHYS_TO_PHYS((u64)skb_frag_address(fs)); + XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs)); hw_buffer.s.size = skb_frag_size(fs); CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); + hw_buffer.s.addr = + XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb)); hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; pko_command.s.gather = 1; diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h index a4ac3bfb62a8..b78ce9eaab85 100644 --- a/drivers/staging/octeon/octeon-stubs.h +++ b/drivers/staging/octeon/octeon-stubs.h @@ -1202,7 +1202,7 @@ static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work) static inline void *cvmx_phys_to_ptr(uint64_t physical_address) { - return (void *)(physical_address); + return (void *)(uintptr_t)(physical_address); } static inline uint64_t cvmx_ptr_to_phys(void *ptr) diff --git a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c 
b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c index 9ddd51685063..5792f491b59a 100644 --- a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c +++ b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c @@ -409,7 +409,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf pRaInfo->PTModeSS = 3; else if (pRaInfo->HighestRate > 0x0b) pRaInfo->PTModeSS = 2; - else if (pRaInfo->HighestRate > 0x0b) + else if (pRaInfo->HighestRate > 0x03) pRaInfo->PTModeSS = 1; else pRaInfo->PTModeSS = 0; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 664d93a7f90d..4fac9dca798e 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj, } padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); - if (!padapter->HalData) - DBG_88E("cant not alloc memory for HAL DATA\n"); + if (!padapter->HalData) { + DBG_88E("Failed to allocate memory for HAL data\n"); + goto free_adapter; + } /* step read_chip_version */ rtw_hal_read_chip_version(padapter); diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup new file mode 100644 index 000000000000..be3f5d6962e9 --- /dev/null +++ b/drivers/staging/speakup/sysfs-driver-speakup @@ -0,0 +1,369 @@ +What: /sys/accessibility/speakup/attrib_bleep +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Beeps the PC speaker when there is an attribute change such as + foreground or background color when using speakup review + commands. One = on, zero = off. + +What: /sys/accessibility/speakup/bell_pos +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This works much like a typewriter bell. If for example 72 is + echoed to bell_pos, it will beep the PC speaker when typing on + a line past character 72. + +What: /sys/accessibility/speakup/bleeps +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This controls whether one hears beeps through the PC speaker + when using speakup's review commands. + TODO: what values does it accept? + +What: /sys/accessibility/speakup/bleep_time +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This controls the duration of the PC speaker beeps speakup + produces. + TODO: What are the units? Jiffies? + +What: /sys/accessibility/speakup/cursor_time +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This controls cursor delay when using arrow keys. When a + connection is very slow, with the default setting, when moving + with the arrows, or backspacing etc. speakup says the incorrect + characters. Set this to a higher value to adjust for the delay + and better synchronisation between cursor position and speech. + +What: /sys/accessibility/speakup/delimiters +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Delimit a word from speakup. + TODO: add more info + +What: /sys/accessibility/speakup/ex_num +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO: + +What: /sys/accessibility/speakup/key_echo +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Controls if speakup speaks keys when they are typed. One = on, + zero = off or don't echo keys. + +What: /sys/accessibility/speakup/keymap +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Speakup keymap remaps keys to Speakup functions. 
+ It uses a binary + format. A special program called genmap is needed to compile a + textual keymap into the binary format which is then loaded into + /sys/accessibility/speakup/keymap. + +What: /sys/accessibility/speakup/no_interrupt +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Controls if typing interrupts output from speakup. With + no_interrupt set to zero, typing on the keyboard will interrupt + speakup if for example + the say screen command is used before the + entire screen is read. + With no_interrupt set to one, if the say + screen command is used, and one then types on the keyboard, + speakup will continue to say the whole screen regardless until + it finishes. + +What: /sys/accessibility/speakup/punc_all +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This is a list of all the punctuation speakup should speak when + punc_level is set to four. + +What: /sys/accessibility/speakup/punc_level +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Controls the level of punctuation spoken as the screen is + displayed, not reviewed. Levels range from zero no punctuation, + to four, all punctuation. One corresponds to punc_some, two + corresponds to punc_most, and three as well as four both + correspond to punc_all. Some hardware synthesizers may have + different levels each corresponding to three and four for + punc_level. Also note that if punc_level is set to zero, and + key_echo is set to one, typed punctuation is still spoken as it + is typed. + +What: /sys/accessibility/speakup/punc_most +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This is a list of all the punctuation speakup should speak when + punc_level is set to two. + +What: /sys/accessibility/speakup/punc_some +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This is a list of all the punctuation speakup should speak when + punc_level is set to one. + +What: /sys/accessibility/speakup/reading_punc +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Almost the same as punc_level, the differences being that + reading_punc controls the level of punctuation when reviewing + the screen with speakup's screen review commands. The other + difference is that reading_punc set to three speaks punc_all, + and reading_punc set to four speaks all punctuation, including + spaces. + +What: /sys/accessibility/speakup/repeats +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: A list of characters speakup repeats. Normally, when there are + more than three characters in a row, speakup + just reads three of + those characters. For example, "......" would be read as dot, + dot, dot. If a . is added to the list of characters in repeats, + "......" would be read as dot, dot, dot, times six. + +What: /sys/accessibility/speakup/say_control +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: If set to one, speakup speaks shift, alt and control when those + keys are pressed. If say_control is set to zero, shift, ctrl, + and alt are not spoken when they are pressed. 
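All of the attributes above are ordinary sysfs files, so they can be driven from a script or a small program with nothing more than open()/write(). Below is a minimal user-space sketch, assuming the /sys/accessibility/speakup paths documented here; the helper name speakup_set is illustrative only and not part of the interface.

#include <stdio.h>

/* Illustrative helper: write a value to a speakup sysfs attribute. */
static int speakup_set(const char *attr, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/accessibility/speakup/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	/* Per the punc_level description above, two selects punc_most. */
	return speakup_set("punc_level", "2") ? 1 : 0;
}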
+ +What: /sys/accessibility/speakup/say_word_ctl +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO: + +What: /sys/accessibility/speakup/silent +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO: + +What: /sys/accessibility/speakup/spell_delay +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This controls how fast a word is spelled + when speakup's say word + review command is pressed twice quickly to speak the current + word being reviewed. Zero just speaks the letters one after + another, while values one through four + seem to introduce more of + a pause between the spelling of each letter by speakup. + +What: /sys/accessibility/speakup/synth +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the synthesizer driver currently in use. Reading + synth returns the synthesizer driver currently in use. Writing + synth switches to the given synthesizer driver, provided it is + either built into the kernel, or already loaded as a module. + +What: /sys/accessibility/speakup/synth_direct +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Sends whatever is written to synth_direct + directly to the speech synthesizer in use, bypassing speakup. + This could be used to make the synthesizer speak + a string, or to + send control sequences to the synthesizer to change how the + synthesizer behaves. + +What: /sys/accessibility/speakup/version +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Reading version returns the version of speakup, and the version + of the synthesizer driver currently in use. + +What: /sys/accessibility/speakup/i18n/announcements +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This file contains various general announcements, most of which + cannot be categorized. You will find messages such as "You + killed Speakup", "I'm alive", "leaving help", "parked", + "unparked", and others. You will also find the names of the + screen edges and cursor tracking modes here. + +What: /sys/accessibility/speakup/i18n/chartab +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO + +What: /sys/accessibility/speakup/i18n/ctl_keys +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Here, you will find names of control keys. These are used with + Speakup's say_control feature. + +What: /sys/accessibility/speakup/i18n/function_names +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Here, you will find a list of names for Speakup functions. + These are used by the help system. For example, suppose that + you have activated help mode, and you pressed + keypad 3. Speakup + says: "keypad 3 is character, say next." + The message "character, say next" names a Speakup function, and + it comes from this function_names file. + +What: /sys/accessibility/speakup/i18n/states +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This file contains names for key states. + Again, these are part of the help system. For instance, if you + had pressed speakup + keypad 3, you would hear: + "speakup keypad 3 is go to bottom edge." + The speakup key is depressed, so the name of the key state is + speakup. + This part of the message comes from the states collection. 
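The synth and synth_direct files described above combine naturally: the first selects the synthesizer driver, the second bypasses the screen reader and feeds the synthesizer directly. A sketch under the assumption that the soft driver is built in or already loaded; the string sent is arbitrary.

#include <stdio.h>

static int write_attr(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	/* Switch to the soft synthesizer driver... */
	if (write_attr("/sys/accessibility/speakup/synth", "soft"))
		return 1;
	/* ...then make it speak a string directly, bypassing speakup. */
	return write_attr("/sys/accessibility/speakup/synth_direct",
			  "hello world\n") ? 1 : 0;
}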
+ +What: /sys/accessibility/speakup/i18n/characters +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Through this sys entry, Speakup gives you the ability to change + how Speakup pronounces a given character. You could, for + example, change how some punctuation characters are spoken. You + can even change how Speakup will pronounce certain letters. For + further details see '12. Changing the Pronunciation of + Characters' in Speakup User's Guide (file spkguide.txt in + source). + +What: /sys/accessibility/speakup/i18n/colors +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: When you use the "say attributes" function, Speakup says the + name of the foreground and background colors. These names come + from the i18n/colors file. + +What: /sys/accessibility/speakup/i18n/formatted +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This group of messages contains embedded formatting codes, to + specify the type and width of displayed data. If you change + these, you must preserve all of the formatting codes, and they + must appear in the order used by the default messages. + +What: /sys/accessibility/speakup/i18n/key_names +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Again, key_names is used by Speakup's help system. In the + previous example, Speakup said that you pressed "keypad 3." + This name came from the key_names file. + +What: /sys/accessibility/speakup/<synth-name>/ +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: In `/sys/accessibility/speakup` is a directory corresponding to + the synthesizer driver currently in use (E.G) `soft` for the + soft driver. This directory contains files which control the + speech synthesizer itself, + as opposed to controlling the speakup + screen reader. The parameters in this directory have the same + names and functions across all + supported synthesizers. The range + of values for freq, pitch, rate, and vol is the same for all + supported synthesizers, with the given range being internally + mapped by the driver to more or less fit the range of values + supported for a given parameter by the individual synthesizer. + Below is a description of values and parameters for soft + synthesizer, which is currently the most commonly used. + +What: /sys/accessibility/speakup/soft/caps_start +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This is the string that is sent to the synthesizer to cause it + to start speaking uppercase letters. For the soft synthesizer + and most others, this causes the pitch of the voice to rise + above the currently set pitch. + +What: /sys/accessibility/speakup/soft/caps_stop +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This is the string sent to the synthesizer to cause it to stop + speaking uppercase letters. In the case of the soft synthesizer + and most others, this returns the pitch of the voice + down to the + currently set pitch. + +What: /sys/accessibility/speakup/soft/delay_time +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO: + +What: /sys/accessibility/speakup/soft/direct +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Controls if punctuation is spoken by speakup, or by the + synthesizer. + For example, speakup speaks ">" as "greater", while + the espeak synthesizer used by the soft driver speaks "greater + than". Zero lets speakup speak the punctuation. One lets the + synthesizer itself speak punctuation. 
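Because the parameters in the per-synthesizer directory share names and ranges across all supported drivers, reading them back is uniform. A small sketch that dumps the common soft parameters documented in the entries below (paths are as documented; everything else is illustrative):

#include <stdio.h>

int main(void)
{
	static const char *params[] = { "freq", "pitch", "rate", "vol" };
	char path[128], buf[16];
	FILE *f;
	unsigned int i;

	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/accessibility/speakup/soft/%s", params[i]);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", params[i], buf);
		fclose(f);
	}
	return 0;
}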
+ +What: /sys/accessibility/speakup/soft/freq +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the frequency of the speech synthesizer. Range is + 0-9. + +What: /sys/accessibility/speakup/soft/full_time +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO: + +What: /sys/accessibility/speakup/soft/jiffy_delta +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: This controls how many jiffys the kernel gives to the + synthesizer. Setting this too high can make a system unstable, + or even crash it. + +What: /sys/accessibility/speakup/soft/pitch +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the pitch of the synthesizer. The range is 0-9. + +What: /sys/accessibility/speakup/soft/punct +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the amount of punctuation spoken by the + synthesizer. The range for the soft driver seems to be 0-2. + TODO: How is this related to speakup's punc_level, or + reading_punc. + +What: /sys/accessibility/speakup/soft/rate +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the rate of the synthesizer. Range is from zero + slowest, to nine fastest. + +What: /sys/accessibility/speakup/soft/tone +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the tone of the speech synthesizer. The range for + the soft driver seems to be 0-2. This seems to make no + difference if using espeak and the espeakup connector. + TODO: does espeakup support different tonalities? + +What: /sys/accessibility/speakup/soft/trigger_time +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: TODO: + +What: /sys/accessibility/speakup/soft/voice +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the voice used by the synthesizer if the + synthesizer can speak in more than one voice. The range for the + soft driver is 0-7. Note that while espeak supports multiple + voices, this parameter will not set the voice when the espeakup + connector is used between speakup and espeak. + +What: /sys/accessibility/speakup/soft/vol +KernelVersion: 2.6 +Contact: speakup@linux-speakup.org +Description: Gets or sets the volume of the speech synthesizer. Range is 0-9, + with zero being the softest, and nine being the loudest. + diff --git a/drivers/staging/vboxsf/Kconfig b/drivers/staging/vboxsf/Kconfig new file mode 100644 index 000000000000..b84586ae08b3 --- /dev/null +++ b/drivers/staging/vboxsf/Kconfig @@ -0,0 +1,10 @@ +config VBOXSF_FS + tristate "VirtualBox guest shared folder (vboxsf) support" + depends on X86 && VBOXGUEST + select NLS + help + VirtualBox hosts can share folders with guests, this driver + implements the Linux-guest side of this allowing folders exported + by the host to be mounted under Linux. + + If you want to use shared folders in VirtualBox guests, answer Y or M. 
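The Kconfig help above describes what the driver does; using it from a guest then amounts to mounting a host-exported share by name. A hedged sketch with mount(2) follows: the share name "projects" and the mount point are made up for illustration, and any vboxsf-specific mount options are outside the scope of this Kconfig text.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "projects" must match a share name configured on the host. */
	if (mount("projects", "/mnt/projects", "vboxsf", 0, NULL)) {
		perror("mount vboxsf");
		return 1;
	}
	return 0;
}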
diff --git a/drivers/staging/vboxsf/Makefile b/drivers/staging/vboxsf/Makefile new file mode 100644 index 000000000000..9e4328e79623 --- /dev/null +++ b/drivers/staging/vboxsf/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: MIT + +obj-$(CONFIG_VBOXSF_FS) += vboxsf.o + +vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o diff --git a/drivers/staging/vboxsf/TODO b/drivers/staging/vboxsf/TODO new file mode 100644 index 000000000000..8b9193d0d4f0 --- /dev/null +++ b/drivers/staging/vboxsf/TODO @@ -0,0 +1,7 @@ +TODO: +- Find a file-system developer to review this and give their Reviewed-By +- Address any items coming up during review +- Move to fs/vboxfs + +Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org> +and Hans de Goede <hdegoede@redhat.com> diff --git a/drivers/staging/vboxsf/dir.c b/drivers/staging/vboxsf/dir.c new file mode 100644 index 000000000000..f260b5cc1646 --- /dev/null +++ b/drivers/staging/vboxsf/dir.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: MIT +/* + * VirtualBox Guest Shared Folders support: Directory inode and file operations + * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#include <linux/namei.h> +#include <linux/vbox_utils.h> +#include "vfsmod.h" + +static int vboxsf_dir_open(struct inode *inode, struct file *file) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb); + struct shfl_createparms params = {}; + struct vboxsf_dir_info *sf_d; + int err; + + sf_d = vboxsf_dir_info_alloc(); + if (!sf_d) + return -ENOMEM; + + params.handle = SHFL_HANDLE_NIL; + params.create_flags = SHFL_CF_DIRECTORY | SHFL_CF_ACT_OPEN_IF_EXISTS | + SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ; + + err = vboxsf_create_at_dentry(file_dentry(file), &params); + if (err) + goto err_free_dir_info; + + if (params.result != SHFL_FILE_EXISTS) { + err = -ENOENT; + goto err_close; + } + + err = vboxsf_dir_read_all(sbi, sf_d, params.handle); + if (err) + goto err_close; + + vboxsf_close(sbi->root, params.handle); + file->private_data = sf_d; + return 0; + +err_close: + vboxsf_close(sbi->root, params.handle); +err_free_dir_info: + vboxsf_dir_info_free(sf_d); + return err; +} + +static int vboxsf_dir_release(struct inode *inode, struct file *file) +{ + if (file->private_data) + vboxsf_dir_info_free(file->private_data); + + return 0; +} + +static unsigned int vboxsf_get_d_type(u32 mode) +{ + unsigned int d_type; + + switch (mode & SHFL_TYPE_MASK) { + case SHFL_TYPE_FIFO: + d_type = DT_FIFO; + break; + case SHFL_TYPE_DEV_CHAR: + d_type = DT_CHR; + break; + case SHFL_TYPE_DIRECTORY: + d_type = DT_DIR; + break; + case SHFL_TYPE_DEV_BLOCK: + d_type = DT_BLK; + break; + case SHFL_TYPE_FILE: + d_type = DT_REG; + break; + case SHFL_TYPE_SYMLINK: + d_type = DT_LNK; + break; + case SHFL_TYPE_SOCKET: + d_type = DT_SOCK; + break; + case SHFL_TYPE_WHITEOUT: + d_type = DT_WHT; + break; + default: + d_type = DT_UNKNOWN; + break; + } + return d_type; +} + +static bool vboxsf_dir_emit(struct file *dir, struct dir_context *ctx) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(file_inode(dir)->i_sb); + struct vboxsf_dir_info *sf_d = dir->private_data; + struct shfl_dirinfo *info; + struct vboxsf_dir_buf *b; + unsigned int d_type; + loff_t i, cur = 0; + ino_t fake_ino; + size_t size; + int err; + + list_for_each_entry(b, &sf_d->info_list, head) { +try_next_entry: + if (ctx->pos >= cur + b->entries) { + cur += b->entries; + continue; + } + + /* + * Note the vboxsf_dir_info objects we are iterating over here + * are variable sized, so the info pointer may end up being + * unaligned.
This is how we get the data from the host. + Since vboxsf is only supported on x86 machines this is not + a problem. + */ + for (i = 0, info = b->buf; i < ctx->pos - cur; i++) { + size = offsetof(struct shfl_dirinfo, name.string) + + info->name.size; + info = (struct shfl_dirinfo *)((uintptr_t)info + size); + } + + /* Info now points to the right entry, emit it. */ + d_type = vboxsf_get_d_type(info->info.attr.mode); + + /* + * On 32-bit systems pos is 64-bit signed, while ino is 32-bit + * unsigned, so fake_ino may overflow; check for this. + */ + if ((ino_t)(ctx->pos + 1) != (u64)(ctx->pos + 1)) { + vbg_err("vboxsf: fake ino overflow, truncating dir\n"); + return false; + } + fake_ino = ctx->pos + 1; + + if (sbi->nls) { + char d_name[NAME_MAX]; + + err = vboxsf_nlscpy(sbi, d_name, NAME_MAX, + info->name.string.utf8, + info->name.length); + if (err) { + /* skip erroneous entry and proceed */ + ctx->pos += 1; + goto try_next_entry; + } + + return dir_emit(ctx, d_name, strlen(d_name), + fake_ino, d_type); + } + + return dir_emit(ctx, info->name.string.utf8, info->name.length, + fake_ino, d_type); + } + + return false; +} + +static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx) +{ + bool keep_iterating; + + for (keep_iterating = true; keep_iterating; ctx->pos += 1) + keep_iterating = vboxsf_dir_emit(dir, ctx); + + return 0; +} + +const struct file_operations vboxsf_dir_fops = { + .open = vboxsf_dir_open, + .iterate = vboxsf_dir_iterate, + .release = vboxsf_dir_release, + .read = generic_read_dir, + .llseek = generic_file_llseek, +}; + +/* + * This is called during name resolution/lookup to check if the @dentry in + * the cache is still valid. The job is handled by vboxsf_inode_revalidate. + */ +static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags) +{ + if (flags & LOOKUP_RCU) + return -ECHILD; + + if (d_really_is_positive(dentry)) + return vboxsf_inode_revalidate(dentry) == 0; + else + return vboxsf_stat_dentry(dentry, NULL) == -ENOENT; +} + +const struct dentry_operations vboxsf_dentry_ops = { + .d_revalidate = vboxsf_dentry_revalidate +}; + +/* iops */ + +static struct dentry *vboxsf_dir_lookup(struct inode *parent, + struct dentry *dentry, + unsigned int flags) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + struct shfl_fsobjinfo fsinfo; + struct inode *inode; + int err; + + dentry->d_time = jiffies; + + err = vboxsf_stat_dentry(dentry, &fsinfo); + if (err) { + inode = (err == -ENOENT) ?
NULL : ERR_PTR(err); + } else { + inode = vboxsf_new_inode(parent->i_sb); + if (!IS_ERR(inode)) + vboxsf_init_inode(sbi, inode, &fsinfo); + } + + return d_splice_alias(inode, dentry); +} + +static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry, + struct shfl_fsobjinfo *info) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + struct vboxsf_inode *sf_i; + struct inode *inode; + + inode = vboxsf_new_inode(parent->i_sb); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + sf_i = VBOXSF_I(inode); + /* The host may have given us different attr than requested */ + sf_i->force_restat = 1; + vboxsf_init_inode(sbi, inode, info); + + d_instantiate(dentry, inode); + + return 0; +} + +static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry, + umode_t mode, int is_dir) +{ + struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent); + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + struct shfl_createparms params = {}; + int err; + + params.handle = SHFL_HANDLE_NIL; + params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | + SHFL_CF_ACT_FAIL_IF_EXISTS | + SHFL_CF_ACCESS_READWRITE | + (is_dir ? SHFL_CF_DIRECTORY : 0); + params.info.attr.mode = (mode & 0777) | + (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE); + params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING; + + err = vboxsf_create_at_dentry(dentry, &params); + if (err) + return err; + + if (params.result != SHFL_FILE_CREATED) + return -EPERM; + + vboxsf_close(sbi->root, params.handle); + + err = vboxsf_dir_instantiate(parent, dentry, &params.info); + if (err) + return err; + + /* parent directory access/change time changed */ + sf_parent_i->force_restat = 1; + + return 0; +} + +static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry, + umode_t mode, bool excl) +{ + return vboxsf_dir_create(parent, dentry, mode, 0); +} + +static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry, + umode_t mode) +{ + return vboxsf_dir_create(parent, dentry, mode, 1); +} + +static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent); + struct inode *inode = d_inode(dentry); + struct shfl_string *path; + u32 flags; + int err; + + if (S_ISDIR(inode->i_mode)) + flags = SHFL_REMOVE_DIR; + else + flags = SHFL_REMOVE_FILE; + + if (S_ISLNK(inode->i_mode)) + flags |= SHFL_REMOVE_SYMLINK; + + path = vboxsf_path_from_dentry(sbi, dentry); + if (IS_ERR(path)) + return PTR_ERR(path); + + err = vboxsf_remove(sbi->root, path, flags); + __putname(path); + if (err) + return err; + + /* parent directory access/change time changed */ + sf_parent_i->force_restat = 1; + + return 0; +} + +static int vboxsf_dir_rename(struct inode *old_parent, + struct dentry *old_dentry, + struct inode *new_parent, + struct dentry *new_dentry, + unsigned int flags) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(old_parent->i_sb); + struct vboxsf_inode *sf_old_parent_i = VBOXSF_I(old_parent); + struct vboxsf_inode *sf_new_parent_i = VBOXSF_I(new_parent); + u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS; + struct shfl_string *old_path, *new_path; + int err; + + if (flags) + return -EINVAL; + + old_path = vboxsf_path_from_dentry(sbi, old_dentry); + if (IS_ERR(old_path)) + return PTR_ERR(old_path); + + new_path = vboxsf_path_from_dentry(sbi, new_dentry); + if (IS_ERR(new_path)) { + err = PTR_ERR(new_path); + goto err_put_old_path; + } + + if (d_inode(old_dentry)->i_mode & S_IFDIR) + shfl_flags = 0; + + err =
vboxsf_rename(sbi->root, old_path, new_path, shfl_flags); + if (err == 0) { + /* parent directories access/change time changed */ + sf_new_parent_i->force_restat = 1; + sf_old_parent_i->force_restat = 1; + } + + __putname(new_path); +err_put_old_path: + __putname(old_path); + return err; +} + +static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry, + const char *symname) +{ + struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent); + struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb); + int symname_size = strlen(symname) + 1; + struct shfl_string *path, *ssymname; + struct shfl_fsobjinfo info; + int err; + + path = vboxsf_path_from_dentry(sbi, dentry); + if (IS_ERR(path)) + return PTR_ERR(path); + + ssymname = kmalloc(SHFLSTRING_HEADER_SIZE + symname_size, GFP_KERNEL); + if (!ssymname) { + __putname(path); + return -ENOMEM; + } + ssymname->length = symname_size - 1; + ssymname->size = symname_size; + memcpy(ssymname->string.utf8, symname, symname_size); + + err = vboxsf_symlink(sbi->root, path, ssymname, &info); + kfree(ssymname); + __putname(path); + if (err) { + /* -EROFS means symlinks are not supported -> -EPERM */ + return (err == -EROFS) ? -EPERM : err; + } + + err = vboxsf_dir_instantiate(parent, dentry, &info); + if (err) + return err; + + /* parent directory access/change time changed */ + sf_parent_i->force_restat = 1; + return 0; +} + +const struct inode_operations vboxsf_dir_iops = { + .lookup = vboxsf_dir_lookup, + .create = vboxsf_dir_mkfile, + .mkdir = vboxsf_dir_mkdir, + .rmdir = vboxsf_dir_unlink, + .unlink = vboxsf_dir_unlink, + .rename = vboxsf_dir_rename, + .symlink = vboxsf_dir_symlink, + .getattr = vboxsf_getattr, + .setattr = vboxsf_setattr, +}; diff --git a/drivers/staging/vboxsf/file.c b/drivers/staging/vboxsf/file.c new file mode 100644 index 000000000000..4b61ccf83fca --- /dev/null +++ b/drivers/staging/vboxsf/file.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: MIT +/* + * VirtualBox Guest Shared Folders support: Regular file inode and file ops. + * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#include <linux/mm.h> +#include <linux/page-flags.h> +#include <linux/pagemap.h> +#include <linux/highmem.h> +#include <linux/sizes.h> +#include "vfsmod.h" + +struct vboxsf_handle { + u64 handle; + u32 root; + u32 access_flags; + struct kref refcount; + struct list_head head; +}; + +static int vboxsf_file_open(struct inode *inode, struct file *file) +{ + struct vboxsf_inode *sf_i = VBOXSF_I(inode); + struct shfl_createparms params = {}; + struct vboxsf_handle *sf_handle; + u32 access_flags = 0; + int err; + + sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL); + if (!sf_handle) + return -ENOMEM; + + /* + * We check the value of params.handle afterwards to find out if + * the call succeeded or failed, as the API does not seem to cleanly + * distinguish error and informational messages. + * + * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to + * make the shared folders host service use our mode parameter. + */ + params.handle = SHFL_HANDLE_NIL; + if (file->f_flags & O_CREAT) { + params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW; + /* + * We ignore O_EXCL, as the Linux kernel seems to call create + * beforehand itself, so O_EXCL should always fail.
+ */ + if (file->f_flags & O_TRUNC) + params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; + else + params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS; + } else { + params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW; + if (file->f_flags & O_TRUNC) + params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; + } + + switch (file->f_flags & O_ACCMODE) { + case O_RDONLY: + access_flags |= SHFL_CF_ACCESS_READ; + break; + + case O_WRONLY: + access_flags |= SHFL_CF_ACCESS_WRITE; + break; + + case O_RDWR: + access_flags |= SHFL_CF_ACCESS_READWRITE; + break; + + default: + WARN_ON(1); + } + + if (file->f_flags & O_APPEND) + access_flags |= SHFL_CF_ACCESS_APPEND; + + params.create_flags |= access_flags; + params.info.attr.mode = inode->i_mode; + + err = vboxsf_create_at_dentry(file_dentry(file), &params); + if (err == 0 && params.handle == SHFL_HANDLE_NIL) + err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT; + if (err) { + kfree(sf_handle); + return err; + } + + /* the host may have given us different attr than requested */ + sf_i->force_restat = 1; + + /* init our handle struct and add it to the inode's handles list */ + sf_handle->handle = params.handle; + sf_handle->root = VBOXSF_SBI(inode->i_sb)->root; + sf_handle->access_flags = access_flags; + kref_init(&sf_handle->refcount); + + mutex_lock(&sf_i->handle_list_mutex); + list_add(&sf_handle->head, &sf_i->handle_list); + mutex_unlock(&sf_i->handle_list_mutex); + + file->private_data = sf_handle; + return 0; +} + +static void vboxsf_handle_release(struct kref *refcount) +{ + struct vboxsf_handle *sf_handle = + container_of(refcount, struct vboxsf_handle, refcount); + + vboxsf_close(sf_handle->root, sf_handle->handle); + kfree(sf_handle); +} + +static int vboxsf_file_release(struct inode *inode, struct file *file) +{ + struct vboxsf_inode *sf_i = VBOXSF_I(inode); + struct vboxsf_handle *sf_handle = file->private_data; + + /* + * When a file is closed on our (the guest) side, we want any subsequent + * accesses done on the host side to see all changes done from our side. + */ + filemap_write_and_wait(inode->i_mapping); + + mutex_lock(&sf_i->handle_list_mutex); + list_del(&sf_handle->head); + mutex_unlock(&sf_i->handle_list_mutex); + + kref_put(&sf_handle->refcount, vboxsf_handle_release); + return 0; +} + +/* + * Write back dirty pages now, because there may not be any suitable + * open files later + */ +static void vboxsf_vma_close(struct vm_area_struct *vma) +{ + filemap_write_and_wait(vma->vm_file->f_mapping); +} + +static const struct vm_operations_struct vboxsf_file_vm_ops = { + .close = vboxsf_vma_close, + .fault = filemap_fault, + .map_pages = filemap_map_pages, +}; + +static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + int err; + + err = generic_file_mmap(file, vma); + if (!err) + vma->vm_ops = &vboxsf_file_vm_ops; + + return err; +} + +/* + * Note that since we are accessing files on the host's filesystem, files + * may always be changed underneath us by the host! + * + * The vboxsf API between the guest and the host does not offer any functions + * to deal with this. There is no inode-generation to check for changes, no + * events / callback on changes and no way to lock files. + * + * To avoid returning stale data when a file gets *opened* on our (the guest) + * side, we do a "stat" on the host side, then compare the mtime with the + * last known mtime and invalidate the page-cache if they differ. + * This is done from vboxsf_inode_revalidate().
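+ * + * As a rough sketch (informative only; the real logic lives in + * vboxsf_inode_revalidate() in utils.c): + * + * vboxsf_stat_dentry(dentry, &info); + * if (mtime from info is newer than inode->i_mtime) + * invalidate_inode_pages2(inode->i_mapping);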
+ * + * When reads are done through the read_iter fop, it would be possible to do + * further cache revalidation at that point; there are 3 options to deal with + * this: + * + * 1) Rely solely on the revalidation done at open time + * 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf + * host API does not allow stat on handles, so we would need to use + * file->f_path.dentry and the stat will then fail if the file was unlinked + * or renamed (and there is nothing like NFS' silly-rename). So we get: + * 2a) "stat" and compare mtime, on stat failure invalidate the cache + * 2b) "stat" and compare mtime, on stat failure do nothing + * 3) Simply always call invalidate_inode_pages2_range on the range of the read + * + * Currently we are keeping things KISS and using option 1. This allows + * directly using generic_file_read_iter without wrapping it. + * + * This means that only data written on the host side before open() on + * the guest side is guaranteed to be seen by the guest. If necessary + * we may provide other read-cache strategies in the future and make this + * configurable through a mount option. + */ +const struct file_operations vboxsf_reg_fops = { + .llseek = generic_file_llseek, + .read_iter = generic_file_read_iter, + .write_iter = generic_file_write_iter, + .mmap = vboxsf_file_mmap, + .open = vboxsf_file_open, + .release = vboxsf_file_release, + .fsync = noop_fsync, + .splice_read = generic_file_splice_read, +}; + +const struct inode_operations vboxsf_reg_iops = { + .getattr = vboxsf_getattr, + .setattr = vboxsf_setattr +}; + +static int vboxsf_readpage(struct file *file, struct page *page) +{ + struct vboxsf_handle *sf_handle = file->private_data; + loff_t off = page_offset(page); + u32 nread = PAGE_SIZE; + u8 *buf; + int err; + + buf = kmap(page); + + err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf); + if (err == 0) { + memset(&buf[nread], 0, PAGE_SIZE - nread); + flush_dcache_page(page); + SetPageUptodate(page); + } else { + SetPageError(page); + } + + kunmap(page); + unlock_page(page); + return err; +} + +static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i) +{ + struct vboxsf_handle *h, *sf_handle = NULL; + + mutex_lock(&sf_i->handle_list_mutex); + list_for_each_entry(h, &sf_i->handle_list, head) { + if (h->access_flags == SHFL_CF_ACCESS_WRITE || + h->access_flags == SHFL_CF_ACCESS_READWRITE) { + kref_get(&h->refcount); + sf_handle = h; + break; + } + } + mutex_unlock(&sf_i->handle_list_mutex); + + return sf_handle; +} + +static int vboxsf_writepage(struct page *page, struct writeback_control *wbc) +{ + struct inode *inode = page->mapping->host; + struct vboxsf_inode *sf_i = VBOXSF_I(inode); + struct vboxsf_handle *sf_handle; + loff_t off = page_offset(page); + loff_t size = i_size_read(inode); + u32 nwrite = PAGE_SIZE; + u8 *buf; + int err; + + if (off + PAGE_SIZE > size) + nwrite = size & ~PAGE_MASK; + + sf_handle = vboxsf_get_write_handle(sf_i); + if (!sf_handle) + return -EBADF; + + buf = kmap(page); + err = vboxsf_write(sf_handle->root, sf_handle->handle, + off, &nwrite, buf); + kunmap(page); + + kref_put(&sf_handle->refcount, vboxsf_handle_release); + + if (err == 0) { + ClearPageError(page); + /* mtime changed */ + sf_i->force_restat = 1; + } else { + ClearPageUptodate(page); + } + + unlock_page(page); + return err; +} + +static int vboxsf_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode =
mapping->host; + struct vboxsf_handle *sf_handle = file->private_data; + unsigned int from = pos & ~PAGE_MASK; + u32 nwritten = len; + u8 *buf; + int err; + + buf = kmap(page); + err = vboxsf_write(sf_handle->root, sf_handle->handle, + pos, &nwritten, buf + from); + kunmap(page); + + if (err) { + nwritten = 0; + goto out; + } + + /* mtime changed */ + VBOXSF_I(inode)->force_restat = 1; + + if (!PageUptodate(page) && nwritten == PAGE_SIZE) + SetPageUptodate(page); + + pos += nwritten; + if (pos > inode->i_size) + i_size_write(inode, pos); + +out: + unlock_page(page); + put_page(page); + + return nwritten; +} + +const struct address_space_operations vboxsf_reg_aops = { + .readpage = vboxsf_readpage, + .writepage = vboxsf_writepage, + .set_page_dirty = __set_page_dirty_nobuffers, + .write_begin = simple_write_begin, + .write_end = vboxsf_write_end, +}; + +static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *done) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb); + struct shfl_string *path; + char *link; + int err; + + if (!dentry) + return ERR_PTR(-ECHILD); + + path = vboxsf_path_from_dentry(sbi, dentry); + if (IS_ERR(path)) + return (char *)path; + + link = kzalloc(PATH_MAX, GFP_KERNEL); + if (!link) { + __putname(path); + return ERR_PTR(-ENOMEM); + } + + err = vboxsf_readlink(sbi->root, path, PATH_MAX, link); + __putname(path); + if (err) { + kfree(link); + return ERR_PTR(err); + } + + set_delayed_call(done, kfree_link, link); + return link; +} + +const struct inode_operations vboxsf_lnk_iops = { + .get_link = vboxsf_get_link +}; diff --git a/drivers/staging/vboxsf/shfl_hostintf.h b/drivers/staging/vboxsf/shfl_hostintf.h new file mode 100644 index 000000000000..aca829062c12 --- /dev/null +++ b/drivers/staging/vboxsf/shfl_hostintf.h @@ -0,0 +1,901 @@ +/* SPDX-License-Identifier: MIT */ +/* + * VirtualBox Shared Folders: host interface definition. + * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#ifndef SHFL_HOSTINTF_H +#define SHFL_HOSTINTF_H + +#include <linux/vbox_vmmdev_types.h> + +/* The max in/out buffer size for a FN_READ or FN_WRITE call */ +#define SHFL_MAX_RW_COUNT (16 * SZ_1M) + +/* + * Structures shared between the guest and the service + * can be relocated and use offsets to point to variable + * length parts. + * + * Shared folders protocol works with handles. + * Before doing any action on a file system object, + * one has to obtain the object handle via a SHFL_FN_CREATE + * request. A handle must be closed with SHFL_FN_CLOSE. + */ + +enum { + SHFL_FN_QUERY_MAPPINGS = 1, /* Query mappings changes. */ + SHFL_FN_QUERY_MAP_NAME = 2, /* Query map name. */ + SHFL_FN_CREATE = 3, /* Open/create object. */ + SHFL_FN_CLOSE = 4, /* Close object handle. */ + SHFL_FN_READ = 5, /* Read object content. */ + SHFL_FN_WRITE = 6, /* Write new object content. */ + SHFL_FN_LOCK = 7, /* Lock/unlock a range in the object. */ + SHFL_FN_LIST = 8, /* List object content. */ + SHFL_FN_INFORMATION = 9, /* Query/set object information. */ + /* Note function number 10 is not used!
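+ * + * Editorial note: combined with the handle rules above, a typical + * exchange with the service is SHFL_FN_MAP_FOLDER -> SHFL_FN_CREATE -> + * SHFL_FN_READ/SHFL_FN_WRITE -> SHFL_FN_CLOSE -> SHFL_FN_UNMAP_FOLDER.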
*/ + SHFL_FN_REMOVE = 11, /* Remove object */ + SHFL_FN_MAP_FOLDER_OLD = 12, /* Map folder (legacy) */ + SHFL_FN_UNMAP_FOLDER = 13, /* Unmap folder */ + SHFL_FN_RENAME = 14, /* Rename object */ + SHFL_FN_FLUSH = 15, /* Flush file */ + SHFL_FN_SET_UTF8 = 16, /* Select UTF8 filename encoding */ + SHFL_FN_MAP_FOLDER = 17, /* Map folder */ + SHFL_FN_READLINK = 18, /* Read symlink dest (as of VBox 4.0) */ + SHFL_FN_SYMLINK = 19, /* Create symlink (as of VBox 4.0) */ + SHFL_FN_SET_SYMLINKS = 20, /* Ask host to show symlinks (4.0+) */ +}; + +/* Root handles for a mapping are of type u32; root handles are unique. */ +#define SHFL_ROOT_NIL UINT_MAX + +/* Shared folder handles for an opened object are of type u64. */ +#define SHFL_HANDLE_NIL ULLONG_MAX + +/* Hardcoded maximum length (in chars) of a shared folder name. */ +#define SHFL_MAX_LEN (256) +/* Hardcoded maximum number of shared folder mappings available to the guest. */ +#define SHFL_MAX_MAPPINGS (64) + +/** Shared folder string buffer structure. */ +struct shfl_string { + /** Allocated size of the string member in bytes. */ + u16 size; + + /** Length of string without trailing nul in bytes. */ + u16 length; + + /** UTF-8 or UTF-16 string. Nul terminated. */ + union { + u8 utf8[2]; + u16 utf16[1]; + u16 ucs2[1]; /* misnomer, use utf16. */ + } string; +}; +VMMDEV_ASSERT_SIZE(shfl_string, 6); + +/* The size of shfl_string w/o the string part. */ +#define SHFLSTRING_HEADER_SIZE 4 + +/* Calculate size of the string. */ +static inline u32 shfl_string_buf_size(const struct shfl_string *string) +{ + return string ? SHFLSTRING_HEADER_SIZE + string->size : 0; +} + +/* Set user id on execution (S_ISUID). */ +#define SHFL_UNIX_ISUID 0004000U +/* Set group id on execution (S_ISGID). */ +#define SHFL_UNIX_ISGID 0002000U +/* Sticky bit (S_ISVTX / S_ISTXT). */ +#define SHFL_UNIX_ISTXT 0001000U + +/* Owner readable (S_IRUSR). */ +#define SHFL_UNIX_IRUSR 0000400U +/* Owner writable (S_IWUSR). */ +#define SHFL_UNIX_IWUSR 0000200U +/* Owner executable (S_IXUSR). */ +#define SHFL_UNIX_IXUSR 0000100U + +/* Group readable (S_IRGRP). */ +#define SHFL_UNIX_IRGRP 0000040U +/* Group writable (S_IWGRP). */ +#define SHFL_UNIX_IWGRP 0000020U +/* Group executable (S_IXGRP). */ +#define SHFL_UNIX_IXGRP 0000010U + +/* Other readable (S_IROTH). */ +#define SHFL_UNIX_IROTH 0000004U +/* Other writable (S_IWOTH). */ +#define SHFL_UNIX_IWOTH 0000002U +/* Other executable (S_IXOTH). */ +#define SHFL_UNIX_IXOTH 0000001U + +/* Named pipe (fifo) (S_IFIFO). */ +#define SHFL_TYPE_FIFO 0010000U +/* Character device (S_IFCHR). */ +#define SHFL_TYPE_DEV_CHAR 0020000U +/* Directory (S_IFDIR). */ +#define SHFL_TYPE_DIRECTORY 0040000U +/* Block device (S_IFBLK). */ +#define SHFL_TYPE_DEV_BLOCK 0060000U +/* Regular file (S_IFREG). */ +#define SHFL_TYPE_FILE 0100000U +/* Symbolic link (S_IFLNK). */ +#define SHFL_TYPE_SYMLINK 0120000U +/* Socket (S_IFSOCK). */ +#define SHFL_TYPE_SOCKET 0140000U +/* Whiteout (S_IFWHT). */ +#define SHFL_TYPE_WHITEOUT 0160000U +/* Type mask (S_IFMT). */ +#define SHFL_TYPE_MASK 0170000U + +/* Checks the mode flags indicate a directory (S_ISDIR). */ +#define SHFL_IS_DIRECTORY(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_DIRECTORY) +/* Checks the mode flags indicate a symbolic link (S_ISLNK). */ +#define SHFL_IS_SYMLINK(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_SYMLINK) + +/** The available additional information in a shfl_fsobjattr object. */ +enum shfl_fsobjattr_add { + /** No additional information is available / requested.
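+ * (This is what e.g. vboxsf_dir_create() in dir.c requests through + * params.info.attr.additional when creating objects.)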
*/ + SHFLFSOBJATTRADD_NOTHING = 1, + /** + * The additional unix attributes (shfl_fsobjattr::u::unix_attr) are + * available / requested. + */ + SHFLFSOBJATTRADD_UNIX, + /** + * The additional extended attribute size (shfl_fsobjattr::u::size) is + * available / requested. + */ + SHFLFSOBJATTRADD_EASIZE, + /** + * The last valid item (inclusive). + * The valid range is SHFLFSOBJATTRADD_NOTHING thru + * SHFLFSOBJATTRADD_LAST. + */ + SHFLFSOBJATTRADD_LAST = SHFLFSOBJATTRADD_EASIZE, + + /** The usual 32-bit hack. */ + SHFLFSOBJATTRADD_32BIT_SIZE_HACK = 0x7fffffff +}; + +/** + * Additional unix attributes; these are available when + * shfl_fsobjattr.additional == SHFLFSOBJATTRADD_UNIX. + */ +struct shfl_fsobjattr_unix { + /** + * The user owning the filesystem object (st_uid). + * This field is ~0U if not supported. + */ + u32 uid; + + /** + * The group the filesystem object is assigned (st_gid). + * This field is ~0U if not supported. + */ + u32 gid; + + /** + * Number of hard links to this filesystem object (st_nlink). + * This field is 1 if the filesystem doesn't support hardlinking or + * the information isn't available. + */ + u32 hardlinks; + + /** + * The device number of the device which this filesystem object resides + * on (st_dev). This field is 0 if this information is not available. + */ + u32 inode_id_device; + + /** + * The unique identifier (within the filesystem) of this filesystem + * object (st_ino). Together with inode_id_device, this field can be + * used as an OS-wide unique id, when both their values are not 0. + * This field is 0 if the information is not available. + */ + u64 inode_id; + + /** + * User flags (st_flags). + * This field is 0 if this information is not available. + */ + u32 flags; + + /** + * The current generation number (st_gen). + * This field is 0 if this information is not available. + */ + u32 generation_id; + + /** + * The device number of a char. or block device type object (st_rdev). + * This field is 0 if the file isn't a char. or block device or when + * the OS doesn't use the major+minor device identification scheme. + */ + u32 device; +} __packed; + +/** Extended attribute size. */ +struct shfl_fsobjattr_easize { + /** Size of EAs. */ + s64 cb; +} __packed; + +/** Shared folder filesystem object attributes. */ +struct shfl_fsobjattr { + /** Mode flags (st_mode). SHFL_UNIX_*, SHFL_TYPE_*, and SHFL_DOS_*. */ + u32 mode; + + /** The additional attributes available. */ + enum shfl_fsobjattr_add additional; + + /** + * Additional attributes. + * + * Unless explicitly specified to an API, the API can provide additional + * data as it is provided by the underlying OS. + */ + union { + struct shfl_fsobjattr_unix unix_attr; + struct shfl_fsobjattr_easize size; + } __packed u; +} __packed; +VMMDEV_ASSERT_SIZE(shfl_fsobjattr, 44); + +struct shfl_timespec { + s64 ns_relative_to_unix_epoch; +}; + +/** Filesystem object information structure. */ +struct shfl_fsobjinfo { + /** + * Logical size (st_size). + * For normal files this is the size of the file. + * For symbolic links, this is the length of the path name contained + * in the symbolic link. + * For other objects this field needs to be specified. + */ + s64 size; + + /** Disk allocation size (st_blocks * DEV_BSIZE). */ + s64 allocated; + + /** Time of last access (st_atime). */ + struct shfl_timespec access_time; + + /** Time of last data modification (st_mtime). */ + struct shfl_timespec modification_time; + + /** + * Time of last status change (st_ctime). + * If not available this is set to modification_time.
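+ * (Informative: vboxsf_init_inode() in utils.c maps access_time, + * modification_time and change_time to i_atime, i_mtime and i_ctime + * through ns_to_timespec64(); birth_time is currently left unused.)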
 */ + struct shfl_timespec change_time; + + /** + * Time of file birth (st_birthtime). + * If not available this is set to change_time. + */ + struct shfl_timespec birth_time; + + /** Attributes. */ + struct shfl_fsobjattr attr; + +} __packed; +VMMDEV_ASSERT_SIZE(shfl_fsobjinfo, 92); + +/** + * Result of an open/create request. + * Along with the handle value, the result code + * identifies what has happened while + * trying to open the object. + */ +enum shfl_create_result { + SHFL_NO_RESULT, + /** Specified path does not exist. */ + SHFL_PATH_NOT_FOUND, + /** Path to file exists, but the last component does not. */ + SHFL_FILE_NOT_FOUND, + /** File already exists and either has been opened or not. */ + SHFL_FILE_EXISTS, + /** New file was created. */ + SHFL_FILE_CREATED, + /** Existing file was replaced or overwritten. */ + SHFL_FILE_REPLACED +}; + +/* No flags. Initialization value. */ +#define SHFL_CF_NONE (0x00000000) + +/* + * Only lookup the object, do not return a handle. When this is set all other + * flags are ignored. + */ +#define SHFL_CF_LOOKUP (0x00000001) + +/* + * Open parent directory of specified object. + * Useful for the corresponding Windows FSD flag + * and for opening paths like \\dir\\*.* to search the 'dir'. + */ +#define SHFL_CF_OPEN_TARGET_DIRECTORY (0x00000002) + +/* Create/open a directory. */ +#define SHFL_CF_DIRECTORY (0x00000004) + +/* + * Open/create action to do if object exists + * and if the object does not exist. + * REPLACE file means atomically DELETE and CREATE. + * OVERWRITE file means truncating the file to 0 and + * setting new size. + * When opening an existing directory REPLACE and OVERWRITE + * actions are considered invalid, and cause returning + * FILE_EXISTS with NIL handle. + */ +#define SHFL_CF_ACT_MASK_IF_EXISTS (0x000000f0) +#define SHFL_CF_ACT_MASK_IF_NEW (0x00000f00) + +/* What to do if object exists. */ +#define SHFL_CF_ACT_OPEN_IF_EXISTS (0x00000000) +#define SHFL_CF_ACT_FAIL_IF_EXISTS (0x00000010) +#define SHFL_CF_ACT_REPLACE_IF_EXISTS (0x00000020) +#define SHFL_CF_ACT_OVERWRITE_IF_EXISTS (0x00000030) + +/* What to do if object does not exist. */ +#define SHFL_CF_ACT_CREATE_IF_NEW (0x00000000) +#define SHFL_CF_ACT_FAIL_IF_NEW (0x00000100) + +/* Read/write requested access for the object. */ +#define SHFL_CF_ACCESS_MASK_RW (0x00003000) + +/* No access requested. */ +#define SHFL_CF_ACCESS_NONE (0x00000000) +/* Read access requested. */ +#define SHFL_CF_ACCESS_READ (0x00001000) +/* Write access requested. */ +#define SHFL_CF_ACCESS_WRITE (0x00002000) +/* Read/Write access requested. */ +#define SHFL_CF_ACCESS_READWRITE (0x00003000) + +/* Requested share access for the object. */ +#define SHFL_CF_ACCESS_MASK_DENY (0x0000c000) + +/* Allow any access. */ +#define SHFL_CF_ACCESS_DENYNONE (0x00000000) +/* Do not allow read. */ +#define SHFL_CF_ACCESS_DENYREAD (0x00004000) +/* Do not allow write. */ +#define SHFL_CF_ACCESS_DENYWRITE (0x00008000) +/* Do not allow access. */ +#define SHFL_CF_ACCESS_DENYALL (0x0000c000) + +/* Requested access to attributes of the object. */ +#define SHFL_CF_ACCESS_MASK_ATTR (0x00030000) + +/* No access requested. */ +#define SHFL_CF_ACCESS_ATTR_NONE (0x00000000) +/* Read access requested. */ +#define SHFL_CF_ACCESS_ATTR_READ (0x00010000) +/* Write access requested. */ +#define SHFL_CF_ACCESS_ATTR_WRITE (0x00020000) +/* Read/Write access requested. */ +#define SHFL_CF_ACCESS_ATTR_READWRITE (0x00030000) + +/* + * The file is opened in append mode. + * Ignored if SHFL_CF_ACCESS_WRITE is not set.
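+ * (Informative: vboxsf_file_open() in file.c sets this flag when a + * file is opened with O_APPEND.)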
 */ +#define SHFL_CF_ACCESS_APPEND (0x00040000) + +/** Create parameters buffer struct for SHFL_FN_CREATE call */ +struct shfl_createparms { + /** Returned handle of opened object. */ + u64 handle; + + /** Returned result of the operation */ + enum shfl_create_result result; + + /** SHFL_CF_* */ + u32 create_flags; + + /** + * Attributes of object to create and + * returned actual attributes of opened/created object. + */ + struct shfl_fsobjinfo info; +} __packed; + +/** Shared Folder directory information */ +struct shfl_dirinfo { + /** Full information about the object. */ + struct shfl_fsobjinfo info; + /** + * The length of the short_name field (number of UTF16 chars). + * It is 16-bit for reasons of alignment. + */ + u16 short_name_len; + /** + * The short name for 8.3 compatibility. + * Empty string if not available. + */ + u16 short_name[14]; + struct shfl_string name; +}; + +/** Shared folder filesystem properties. */ +struct shfl_fsproperties { + /** + * The maximum size of a filesystem object name. + * This does not include the '\\0'. + */ + u32 max_component_len; + + /** + * True if the filesystem is remote. + * False if the filesystem is local. + */ + bool remote; + + /** + * True if the filesystem is case sensitive. + * False if the filesystem is case insensitive. + */ + bool case_sensitive; + + /** + * True if the filesystem is mounted read only. + * False if the filesystem is mounted read write. + */ + bool read_only; + + /** + * True if the filesystem can encode unicode object names. + * False if it can't. + */ + bool supports_unicode; + + /** + * True if the filesystem is compressed. + * False if it isn't or we don't know. + */ + bool compressed; + + /** + * True if the filesystem supports compression of individual files. + * False if it doesn't or we don't know. + */ + bool file_compression; +}; +VMMDEV_ASSERT_SIZE(shfl_fsproperties, 12); + +struct shfl_volinfo { + s64 total_allocation_bytes; + s64 available_allocation_bytes; + u32 bytes_per_allocation_unit; + u32 bytes_per_sector; + u32 serial; + struct shfl_fsproperties properties; +}; + + +/** SHFL_FN_MAP_FOLDER Parameters structure. */ +struct shfl_map_folder { + /** + * pointer, in: + * Points to struct shfl_string buffer. + */ + struct vmmdev_hgcm_function_parameter path; + + /** + * pointer, out: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * pointer, in: UTF16 + * Path delimiter + */ + struct vmmdev_hgcm_function_parameter delimiter; + + /** + * pointer, in: SHFLROOT (u32) + * Case sensitive flag + */ + struct vmmdev_hgcm_function_parameter case_sensitive; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_MAP_FOLDER (4) + + +/** SHFL_FN_UNMAP_FOLDER Parameters structure. */ +struct shfl_unmap_folder { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_UNMAP_FOLDER (1) + + +/** SHFL_FN_CREATE Parameters structure. */ +struct shfl_create { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * pointer, in: + * Points to struct shfl_string buffer. + */ + struct vmmdev_hgcm_function_parameter path; + + /** + * pointer, in/out: + * Points to struct shfl_createparms buffer.
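+ * (See vboxsf_create() in vboxsf_wrappers.c for how this structure + * is marshalled into HGCM parameters.)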
+ */ + struct vmmdev_hgcm_function_parameter parms; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_CREATE (3) + + +/** SHFL_FN_CLOSE Parameters structure. */ +struct shfl_close { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * value64, in: + * SHFLHANDLE (u64) of object to close. + */ + struct vmmdev_hgcm_function_parameter handle; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_CLOSE (2) + + +/** SHFL_FN_READ Parameters structure. */ +struct shfl_read { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * value64, in: + * SHFLHANDLE (u64) of object to read from. + */ + struct vmmdev_hgcm_function_parameter handle; + + /** + * value64, in: + * Offset to read from. + */ + struct vmmdev_hgcm_function_parameter offset; + + /** + * value64, in/out: + * Bytes to read/How many were read. + */ + struct vmmdev_hgcm_function_parameter cb; + + /** + * pointer, out: + * Buffer to place data to. + */ + struct vmmdev_hgcm_function_parameter buffer; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_READ (5) + + +/** SHFL_FN_WRITE Parameters structure. */ +struct shfl_write { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * value64, in: + * SHFLHANDLE (u64) of object to write to. + */ + struct vmmdev_hgcm_function_parameter handle; + + /** + * value64, in: + * Offset to write to. + */ + struct vmmdev_hgcm_function_parameter offset; + + /** + * value64, in/out: + * Bytes to write/How many were written. + */ + struct vmmdev_hgcm_function_parameter cb; + + /** + * pointer, in: + * Data to write. + */ + struct vmmdev_hgcm_function_parameter buffer; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_WRITE (5) + + +/* + * SHFL_FN_LIST + * Listing information includes variable length RTDIRENTRY[EX] structures. + */ + +#define SHFL_LIST_NONE 0 +#define SHFL_LIST_RETURN_ONE 1 + +/** SHFL_FN_LIST Parameters structure. */ +struct shfl_list { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * value64, in: + * SHFLHANDLE (u64) of object to be listed. + */ + struct vmmdev_hgcm_function_parameter handle; + + /** + * value32, in: + * List flags SHFL_LIST_*. + */ + struct vmmdev_hgcm_function_parameter flags; + + /** + * value32, in/out: + * Bytes to be used for listing information/How many bytes were used. + */ + struct vmmdev_hgcm_function_parameter cb; + + /** + * pointer, in/optional + * Points to struct shfl_string buffer that specifies a search path. + */ + struct vmmdev_hgcm_function_parameter path; + + /** + * pointer, out: + * Buffer to place listing information to. (struct shfl_dirinfo) + */ + struct vmmdev_hgcm_function_parameter buffer; + + /** + * value32, in/out: + * Indicates a key where the listing must be resumed. + * in: 0 means start from begin of object. + * out: 0 means listing completed. + */ + struct vmmdev_hgcm_function_parameter resume_point; + + /** + * pointer, out: + * Number of files returned + */ + struct vmmdev_hgcm_function_parameter file_count; +}; + +/* Number of parameters */ +#define SHFL_CPARMS_LIST (8) + + +/** SHFL_FN_READLINK Parameters structure. 
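+ * (Informative: vboxsf_get_link() in file.c issues this call with a + * PATH_MAX sized, zero-filled output buffer, so the returned link + * target is always nul-terminated.)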
*/ +struct shfl_readLink { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * pointer, in: + * Points to struct shfl_string buffer. + */ + struct vmmdev_hgcm_function_parameter path; + + /** + * pointer, out: + * Buffer to place data to. + */ + struct vmmdev_hgcm_function_parameter buffer; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_READLINK (3) + + +/* SHFL_FN_INFORMATION */ + +/* Mask of Set/Get bit. */ +#define SHFL_INFO_MODE_MASK (0x1) +/* Get information */ +#define SHFL_INFO_GET (0x0) +/* Set information */ +#define SHFL_INFO_SET (0x1) + +/* Get name of the object. */ +#define SHFL_INFO_NAME (0x2) +/* Set size of object (extend/trucate); only applies to file objects */ +#define SHFL_INFO_SIZE (0x4) +/* Get/Set file object info. */ +#define SHFL_INFO_FILE (0x8) +/* Get volume information. */ +#define SHFL_INFO_VOLUME (0x10) + +/** SHFL_FN_INFORMATION Parameters structure. */ +struct shfl_information { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * value64, in: + * SHFLHANDLE (u64) of object to be listed. + */ + struct vmmdev_hgcm_function_parameter handle; + + /** + * value32, in: + * SHFL_INFO_* + */ + struct vmmdev_hgcm_function_parameter flags; + + /** + * value32, in/out: + * Bytes to be used for information/How many bytes were used. + */ + struct vmmdev_hgcm_function_parameter cb; + + /** + * pointer, in/out: + * Information to be set/get (shfl_fsobjinfo or shfl_string). Do not + * forget to set the shfl_fsobjinfo::attr::additional for a get + * operation as well. + */ + struct vmmdev_hgcm_function_parameter info; + +}; + +/* Number of parameters */ +#define SHFL_CPARMS_INFORMATION (5) + + +/* SHFL_FN_REMOVE */ + +#define SHFL_REMOVE_FILE (0x1) +#define SHFL_REMOVE_DIR (0x2) +#define SHFL_REMOVE_SYMLINK (0x4) + +/** SHFL_FN_REMOVE Parameters structure. */ +struct shfl_remove { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * pointer, in: + * Points to struct shfl_string buffer. + */ + struct vmmdev_hgcm_function_parameter path; + + /** + * value32, in: + * remove flags (file/directory) + */ + struct vmmdev_hgcm_function_parameter flags; + +}; + +#define SHFL_CPARMS_REMOVE (3) + + +/* SHFL_FN_RENAME */ + +#define SHFL_RENAME_FILE (0x1) +#define SHFL_RENAME_DIR (0x2) +#define SHFL_RENAME_REPLACE_IF_EXISTS (0x4) + +/** SHFL_FN_RENAME Parameters structure. */ +struct shfl_rename { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * pointer, in: + * Points to struct shfl_string src. + */ + struct vmmdev_hgcm_function_parameter src; + + /** + * pointer, in: + * Points to struct shfl_string dest. + */ + struct vmmdev_hgcm_function_parameter dest; + + /** + * value32, in: + * rename flags (file/directory) + */ + struct vmmdev_hgcm_function_parameter flags; + +}; + +#define SHFL_CPARMS_RENAME (4) + + +/** SHFL_FN_SYMLINK Parameters structure. */ +struct shfl_symlink { + /** + * pointer, in: SHFLROOT (u32) + * Root handle of the mapping which name is queried. + */ + struct vmmdev_hgcm_function_parameter root; + + /** + * pointer, in: + * Points to struct shfl_string of path for the new symlink. 
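+ * (Informative: the naming mirrors symlink(2): new_path is the link + * being created, old_path is the target it will point to.)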
+ */ + struct vmmdev_hgcm_function_parameter new_path; + + /** + * pointer, in: + * Points to struct shfl_string of destination for symlink. + */ + struct vmmdev_hgcm_function_parameter old_path; + + /** + * pointer, out: + * Information about created symlink. + */ + struct vmmdev_hgcm_function_parameter info; + +}; + +#define SHFL_CPARMS_SYMLINK (4) + +#endif diff --git a/drivers/staging/vboxsf/super.c b/drivers/staging/vboxsf/super.c new file mode 100644 index 000000000000..0bf4d724aefd --- /dev/null +++ b/drivers/staging/vboxsf/super.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: MIT +/* + * VirtualBox Guest Shared Folders support: Virtual File System. + * + * Module initialization/finalization + * File system registration/deregistration + * Superblock reading + * Few utility functions + * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#include <linux/idr.h> +#include <linux/fs_parser.h> +#include <linux/magic.h> +#include <linux/module.h> +#include <linux/nls.h> +#include <linux/statfs.h> +#include <linux/vbox_utils.h> +#include "vfsmod.h" + +#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */ + +#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000') +#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377') +#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376') +#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375') + +static int follow_symlinks; +module_param(follow_symlinks, int, 0444); +MODULE_PARM_DESC(follow_symlinks, + "Let host resolve symlinks rather than showing them"); + +static DEFINE_IDA(vboxsf_bdi_ida); +static DEFINE_MUTEX(vboxsf_setup_mutex); +static bool vboxsf_setup_done; +static struct super_operations vboxsf_super_ops; /* forward declaration */ +static struct kmem_cache *vboxsf_inode_cachep; + +static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT; + +enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode, + opt_dmask, opt_fmask }; + +static const struct fs_parameter_spec vboxsf_param_specs[] = { + fsparam_string ("nls", opt_nls), + fsparam_u32 ("uid", opt_uid), + fsparam_u32 ("gid", opt_gid), + fsparam_u32 ("ttl", opt_ttl), + fsparam_u32oct ("dmode", opt_dmode), + fsparam_u32oct ("fmode", opt_fmode), + fsparam_u32oct ("dmask", opt_dmask), + fsparam_u32oct ("fmask", opt_fmask), + {} +}; + +static const struct fs_parameter_description vboxsf_fs_parameters = { + .name = "vboxsf", + .specs = vboxsf_param_specs, +}; + +static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct vboxsf_fs_context *ctx = fc->fs_private; + struct fs_parse_result result; + kuid_t uid; + kgid_t gid; + int opt; + + opt = fs_parse(fc, &vboxsf_fs_parameters, param, &result); + if (opt < 0) + return opt; + + switch (opt) { + case opt_nls: + if (fc->purpose != FS_CONTEXT_FOR_MOUNT) { + vbg_err("vboxsf: Cannot reconfigure nls option\n"); + return -EINVAL; + } + ctx->nls_name = param->string; + param->string = NULL; + break; + case opt_uid: + uid = make_kuid(current_user_ns(), result.uint_32); + if (!uid_valid(uid)) + return -EINVAL; + ctx->o.uid = uid; + break; + case opt_gid: + gid = make_kgid(current_user_ns(), result.uint_32); + if (!gid_valid(gid)) + return -EINVAL; + ctx->o.gid = gid; + break; + case opt_ttl: + ctx->o.ttl = msecs_to_jiffies(result.uint_32); + break; + case opt_dmode: + if (result.uint_32 & ~0777) + return -EINVAL; + ctx->o.dmode = result.uint_32; + ctx->o.dmode_set = true; + break; + case opt_fmode: + if (result.uint_32 & ~0777) + return -EINVAL; + ctx->o.fmode = result.uint_32; + ctx->o.fmode_set = true; + break; + case opt_dmask: + if 
(result.uint_32 & ~07777) + return -EINVAL; + ctx->o.dmask = result.uint_32; + break; + case opt_fmask: + if (result.uint_32 & ~07777) + return -EINVAL; + ctx->o.fmask = result.uint_32; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc) +{ + struct vboxsf_fs_context *ctx = fc->fs_private; + struct shfl_string *folder_name, root_path; + struct vboxsf_sbi *sbi; + struct dentry *droot; + struct inode *iroot; + char *nls_name; + size_t size; + int err; + + if (!fc->source) + return -EINVAL; + + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + if (!sbi) + return -ENOMEM; + + sbi->o = ctx->o; + idr_init(&sbi->ino_idr); + spin_lock_init(&sbi->ino_idr_lock); + sbi->next_generation = 1; + sbi->bdi_id = -1; + + /* Load nls if not utf8 */ + nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls; + if (strcmp(nls_name, "utf8") != 0) { + if (nls_name == vboxsf_default_nls) + sbi->nls = load_nls_default(); + else + sbi->nls = load_nls(nls_name); + + if (!sbi->nls) { + vbg_err("vboxsf: Could not load '%s' nls\n", nls_name); + err = -EINVAL; + goto fail_free; + } + } + + sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL); + if (sbi->bdi_id < 0) { + err = sbi->bdi_id; + goto fail_free; + } + + err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id); + if (err) + goto fail_free; + + /* Turn source into a shfl_string and map the folder */ + size = strlen(fc->source) + 1; + folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL); + if (!folder_name) { + err = -ENOMEM; + goto fail_free; + } + folder_name->size = size; + folder_name->length = size - 1; + strlcpy(folder_name->string.utf8, fc->source, size); + err = vboxsf_map_folder(folder_name, &sbi->root); + kfree(folder_name); + if (err) { + vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n", + fc->source, err); + goto fail_free; + } + + root_path.length = 1; + root_path.size = 2; + root_path.string.utf8[0] = '/'; + root_path.string.utf8[1] = 0; + err = vboxsf_stat(sbi, &root_path, &sbi->root_info); + if (err) + goto fail_unmap; + + sb->s_magic = VBOXSF_SUPER_MAGIC; + sb->s_blocksize = 1024; + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_op = &vboxsf_super_ops; + sb->s_d_op = &vboxsf_dentry_ops; + + iroot = iget_locked(sb, 0); + if (!iroot) { + err = -ENOMEM; + goto fail_unmap; + } + vboxsf_init_inode(sbi, iroot, &sbi->root_info); + unlock_new_inode(iroot); + + droot = d_make_root(iroot); + if (!droot) { + err = -ENOMEM; + goto fail_unmap; + } + + sb->s_root = droot; + sb->s_fs_info = sbi; + return 0; + +fail_unmap: + vboxsf_unmap_folder(sbi->root); +fail_free: + if (sbi->bdi_id >= 0) + ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id); + if (sbi->nls) + unload_nls(sbi->nls); + idr_destroy(&sbi->ino_idr); + kfree(sbi); + return err; +} + +static void vboxsf_inode_init_once(void *data) +{ + struct vboxsf_inode *sf_i = data; + + mutex_init(&sf_i->handle_list_mutex); + inode_init_once(&sf_i->vfs_inode); +} + +static struct inode *vboxsf_alloc_inode(struct super_block *sb) +{ + struct vboxsf_inode *sf_i; + + sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS); + if (!sf_i) + return NULL; + + sf_i->force_restat = 0; + INIT_LIST_HEAD(&sf_i->handle_list); + + return &sf_i->vfs_inode; +} + +static void vboxsf_free_inode(struct inode *inode) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb); + unsigned long flags; + + spin_lock_irqsave(&sbi->ino_idr_lock, flags); + idr_remove(&sbi->ino_idr, inode->i_ino); +
spin_unlock_irqrestore(&sbi->ino_idr_lock, flags); + kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode)); +} + +static void vboxsf_put_super(struct super_block *sb) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(sb); + + vboxsf_unmap_folder(sbi->root); + if (sbi->bdi_id >= 0) + ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id); + if (sbi->nls) + unload_nls(sbi->nls); + + /* + * vboxsf_free_inode uses the idr, make sure all delayed rcu free + * inodes are flushed. + */ + rcu_barrier(); + idr_destroy(&sbi->ino_idr); + kfree(sbi); +} + +static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat) +{ + struct super_block *sb = dentry->d_sb; + struct shfl_volinfo shfl_volinfo; + struct vboxsf_sbi *sbi; + u32 buf_len; + int err; + + sbi = VBOXSF_SBI(sb); + buf_len = sizeof(shfl_volinfo); + err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME, + &buf_len, &shfl_volinfo); + if (err) + return err; + + stat->f_type = VBOXSF_SUPER_MAGIC; + stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit; + + do_div(shfl_volinfo.total_allocation_bytes, + shfl_volinfo.bytes_per_allocation_unit); + stat->f_blocks = shfl_volinfo.total_allocation_bytes; + + do_div(shfl_volinfo.available_allocation_bytes, + shfl_volinfo.bytes_per_allocation_unit); + stat->f_bfree = shfl_volinfo.available_allocation_bytes; + stat->f_bavail = shfl_volinfo.available_allocation_bytes; + + stat->f_files = 1000; + /* + * Don't return 0 here since the guest may then think that it is not + * possible to create any more files. + */ + stat->f_ffree = 1000000; + stat->f_fsid.val[0] = 0; + stat->f_fsid.val[1] = 0; + stat->f_namelen = 255; + return 0; +} + +static struct super_operations vboxsf_super_ops = { + .alloc_inode = vboxsf_alloc_inode, + .free_inode = vboxsf_free_inode, + .put_super = vboxsf_put_super, + .statfs = vboxsf_statfs, +}; + +static int vboxsf_setup(void) +{ + int err; + + mutex_lock(&vboxsf_setup_mutex); + + if (vboxsf_setup_done) + goto success; + + vboxsf_inode_cachep = + kmem_cache_create("vboxsf_inode_cache", + sizeof(struct vboxsf_inode), 0, + (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | + SLAB_ACCOUNT), + vboxsf_inode_init_once); + if (!vboxsf_inode_cachep) { + err = -ENOMEM; + goto fail_nomem; + } + + err = vboxsf_connect(); + if (err) { + vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err); + vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n"); + vbg_err("vboxsf: and check dmesg for vboxguest errors\n"); + goto fail_free_cache; + } + + err = vboxsf_set_utf8(); + if (err) { + vbg_err("vboxsf_setutf8 error %d\n", err); + goto fail_disconnect; + } + + if (!follow_symlinks) { + err = vboxsf_set_symlinks(); + if (err) + vbg_warn("vboxsf: Unable to show symlinks: %d\n", err); + } + + vboxsf_setup_done = true; +success: + mutex_unlock(&vboxsf_setup_mutex); + return 0; + +fail_disconnect: + vboxsf_disconnect(); +fail_free_cache: + kmem_cache_destroy(vboxsf_inode_cachep); +fail_nomem: + mutex_unlock(&vboxsf_setup_mutex); + return err; +} + +static int vboxsf_parse_monolithic(struct fs_context *fc, void *data) +{ + char *options = data; + + if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 && + options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 && + options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 && + options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) { + vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n"); + return -EINVAL; + } + + return generic_parse_monolithic(fc, data); +} + +static int vboxsf_get_tree(struct fs_context *fc) +{ + int 
err; + + err = vboxsf_setup(); + if (err) + return err; + + return vfs_get_super(fc, vfs_get_independent_super, vboxsf_fill_super); +} + +static int vboxsf_reconfigure(struct fs_context *fc) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb); + struct vboxsf_fs_context *ctx = fc->fs_private; + struct inode *iroot; + + iroot = ilookup(fc->root->d_sb, 0); + if (!iroot) + return -ENOENT; + + /* Apply changed options to the root inode */ + sbi->o = ctx->o; + vboxsf_init_inode(sbi, iroot, &sbi->root_info); + + return 0; +} + +static void vboxsf_free_fc(struct fs_context *fc) +{ + struct vboxsf_fs_context *ctx = fc->fs_private; + + kfree(ctx->nls_name); + kfree(ctx); +} + +static const struct fs_context_operations vboxsf_context_ops = { + .free = vboxsf_free_fc, + .parse_param = vboxsf_parse_param, + .parse_monolithic = vboxsf_parse_monolithic, + .get_tree = vboxsf_get_tree, + .reconfigure = vboxsf_reconfigure, +}; + +static int vboxsf_init_fs_context(struct fs_context *fc) +{ + struct vboxsf_fs_context *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + current_uid_gid(&ctx->o.uid, &ctx->o.gid); + + fc->fs_private = ctx; + fc->ops = &vboxsf_context_ops; + return 0; +} + +static struct file_system_type vboxsf_fs_type = { + .owner = THIS_MODULE, + .name = "vboxsf", + .init_fs_context = vboxsf_init_fs_context, + .parameters = &vboxsf_fs_parameters, + .kill_sb = kill_anon_super +}; + +/* Module initialization/finalization handlers */ +static int __init vboxsf_init(void) +{ + return register_filesystem(&vboxsf_fs_type); +} + +static void __exit vboxsf_fini(void) +{ + unregister_filesystem(&vboxsf_fs_type); + + mutex_lock(&vboxsf_setup_mutex); + if (vboxsf_setup_done) { + vboxsf_disconnect(); + /* + * Make sure all delayed rcu free inodes are flushed + * before we destroy the cache. + */ + rcu_barrier(); + kmem_cache_destroy(vboxsf_inode_cachep); + } + mutex_unlock(&vboxsf_setup_mutex); +} + +module_init(vboxsf_init); +module_exit(vboxsf_fini); + +MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access"); +MODULE_AUTHOR("Oracle Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_FS("vboxsf"); diff --git a/drivers/staging/vboxsf/utils.c b/drivers/staging/vboxsf/utils.c new file mode 100644 index 000000000000..34a49e6f74fc --- /dev/null +++ b/drivers/staging/vboxsf/utils.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: MIT +/* + * VirtualBox Guest Shared Folders support: Utility functions. + * Mainly conversion from/to VirtualBox/Linux data structures. 
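+ * + * Editorial overview of what follows: cyclic IDR based inode-number + * allocation (vboxsf_new_inode), attribute mapping (vboxsf_init_inode), + * dentry-path to shfl_string conversion including nls handling, and + * directory-listing buffer helpers.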
+ * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#include <linux/namei.h> +#include <linux/nls.h> +#include <linux/sizes.h> +#include <linux/vfs.h> +#include "vfsmod.h" + +struct inode *vboxsf_new_inode(struct super_block *sb) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(sb); + struct inode *inode; + unsigned long flags; + int cursor, ret; + u32 gen; + + inode = new_inode(sb); + if (!inode) + return ERR_PTR(-ENOMEM); + + idr_preload(GFP_KERNEL); + spin_lock_irqsave(&sbi->ino_idr_lock, flags); + cursor = idr_get_cursor(&sbi->ino_idr); + ret = idr_alloc_cyclic(&sbi->ino_idr, inode, 1, 0, GFP_ATOMIC); + if (ret >= 0 && ret < cursor) + sbi->next_generation++; + gen = sbi->next_generation; + spin_unlock_irqrestore(&sbi->ino_idr_lock, flags); + idr_preload_end(); + + if (ret < 0) { + iput(inode); + return ERR_PTR(ret); + } + + inode->i_ino = ret; + inode->i_generation = gen; + return inode; +} + +/* set [inode] attributes based on [info], uid/gid based on [sbi] */ +void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode, + const struct shfl_fsobjinfo *info) +{ + const struct shfl_fsobjattr *attr; + s64 allocated; + int mode; + + attr = &info->attr; + +#define mode_set(r) ((attr->mode & (SHFL_UNIX_##r)) ? (S_##r) : 0) + + mode = mode_set(IRUSR); + mode |= mode_set(IWUSR); + mode |= mode_set(IXUSR); + + mode |= mode_set(IRGRP); + mode |= mode_set(IWGRP); + mode |= mode_set(IXGRP); + + mode |= mode_set(IROTH); + mode |= mode_set(IWOTH); + mode |= mode_set(IXOTH); + +#undef mode_set + + /* We use the host-side values for these */ + inode->i_flags |= S_NOATIME | S_NOCMTIME; + inode->i_mapping->a_ops = &vboxsf_reg_aops; + + if (SHFL_IS_DIRECTORY(attr->mode)) { + inode->i_mode = sbi->o.dmode_set ? sbi->o.dmode : mode; + inode->i_mode &= ~sbi->o.dmask; + inode->i_mode |= S_IFDIR; + inode->i_op = &vboxsf_dir_iops; + inode->i_fop = &vboxsf_dir_fops; + /* + * XXX: this probably should be set to the number of entries + * in the directory plus two (. ..) + */ + set_nlink(inode, 1); + } else if (SHFL_IS_SYMLINK(attr->mode)) { + inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode; + inode->i_mode &= ~sbi->o.fmask; + inode->i_mode |= S_IFLNK; + inode->i_op = &vboxsf_lnk_iops; + set_nlink(inode, 1); + } else { + inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode; + inode->i_mode &= ~sbi->o.fmask; + inode->i_mode |= S_IFREG; + inode->i_op = &vboxsf_reg_iops; + inode->i_fop = &vboxsf_reg_fops; + set_nlink(inode, 1); + } + + inode->i_uid = sbi->o.uid; + inode->i_gid = sbi->o.gid; + + inode->i_size = info->size; + inode->i_blkbits = 12; + /* i_blocks always in units of 512 bytes! 
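+ * For example info->allocated = 4096 gives (4096 + 511) / 512 = 8 + * blocks; the + 511 rounds partial blocks up, so 100 bytes -> 1 block.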
*/ + allocated = info->allocated + 511; + do_div(allocated, 512); + inode->i_blocks = allocated; + + inode->i_atime = ns_to_timespec64( + info->access_time.ns_relative_to_unix_epoch); + inode->i_ctime = ns_to_timespec64( + info->change_time.ns_relative_to_unix_epoch); + inode->i_mtime = ns_to_timespec64( + info->modification_time.ns_relative_to_unix_epoch); +} + +int vboxsf_create_at_dentry(struct dentry *dentry, + struct shfl_createparms *params) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb); + struct shfl_string *path; + int err; + + path = vboxsf_path_from_dentry(sbi, dentry); + if (IS_ERR(path)) + return PTR_ERR(path); + + err = vboxsf_create(sbi->root, path, params); + __putname(path); + + return err; +} + +int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path, + struct shfl_fsobjinfo *info) +{ + struct shfl_createparms params = {}; + int err; + + params.handle = SHFL_HANDLE_NIL; + params.create_flags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW; + + err = vboxsf_create(sbi->root, path, &params); + if (err) + return err; + + if (params.result != SHFL_FILE_EXISTS) + return -ENOENT; + + if (info) + *info = params.info; + + return 0; +} + +int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info) +{ + struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb); + struct shfl_string *path; + int err; + + path = vboxsf_path_from_dentry(sbi, dentry); + if (IS_ERR(path)) + return PTR_ERR(path); + + err = vboxsf_stat(sbi, path, info); + __putname(path); + return err; +} + +int vboxsf_inode_revalidate(struct dentry *dentry) +{ + struct vboxsf_sbi *sbi; + struct vboxsf_inode *sf_i; + struct shfl_fsobjinfo info; + struct timespec64 prev_mtime; + struct inode *inode; + int err; + + if (!dentry || !d_really_is_positive(dentry)) + return -EINVAL; + + inode = d_inode(dentry); + prev_mtime = inode->i_mtime; + sf_i = VBOXSF_I(inode); + sbi = VBOXSF_SBI(dentry->d_sb); + if (!sf_i->force_restat) { + if (time_before(jiffies, dentry->d_time + sbi->o.ttl)) + return 0; + } + + err = vboxsf_stat_dentry(dentry, &info); + if (err) + return err; + + dentry->d_time = jiffies; + sf_i->force_restat = 0; + vboxsf_init_inode(sbi, inode, &info); + + /* + * If the file was changed on the host side we need to invalidate the + * page-cache for it. Note this also gets triggered by our own writes; + * this is unavoidable.
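+ * (Note the strict '>' in the comparison below: only a newer mtime + * triggers the invalidation, identical timestamps are treated as + * unchanged.)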
+ */ + if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0) + invalidate_inode_pages2(inode->i_mapping); + + return 0; +} + +int vboxsf_getattr(const struct path *path, struct kstat *kstat, + u32 request_mask, unsigned int flags) +{ + int err; + struct dentry *dentry = path->dentry; + struct inode *inode = d_inode(dentry); + struct vboxsf_inode *sf_i = VBOXSF_I(inode); + + switch (flags & AT_STATX_SYNC_TYPE) { + case AT_STATX_DONT_SYNC: + err = 0; + break; + case AT_STATX_FORCE_SYNC: + sf_i->force_restat = 1; + /* fall-through */ + default: + err = vboxsf_inode_revalidate(dentry); + } + if (err) + return err; + + generic_fillattr(d_inode(dentry), kstat); + return 0; +} + +int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr) +{ + struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry)); + struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb); + struct shfl_createparms params = {}; + struct shfl_fsobjinfo info = {}; + u32 buf_len; + int err; + + params.handle = SHFL_HANDLE_NIL; + params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS | + SHFL_CF_ACT_FAIL_IF_NEW | + SHFL_CF_ACCESS_ATTR_WRITE; + + /* this is at least required for POSIX hosts */ + if (iattr->ia_valid & ATTR_SIZE) + params.create_flags |= SHFL_CF_ACCESS_WRITE; + + err = vboxsf_create_at_dentry(dentry, &params); + if (err || params.result != SHFL_FILE_EXISTS) + return err ? err : -ENOENT; + +#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0) + + /* + * Setting the file size and setting the other attributes have to + * be handled separately. + */ + if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) { + if (iattr->ia_valid & ATTR_MODE) { + info.attr.mode = mode_set(IRUSR); + info.attr.mode |= mode_set(IWUSR); + info.attr.mode |= mode_set(IXUSR); + info.attr.mode |= mode_set(IRGRP); + info.attr.mode |= mode_set(IWGRP); + info.attr.mode |= mode_set(IXGRP); + info.attr.mode |= mode_set(IROTH); + info.attr.mode |= mode_set(IWOTH); + info.attr.mode |= mode_set(IXOTH); + + if (iattr->ia_mode & S_IFDIR) + info.attr.mode |= SHFL_TYPE_DIRECTORY; + else + info.attr.mode |= SHFL_TYPE_FILE; + } + + if (iattr->ia_valid & ATTR_ATIME) + info.access_time.ns_relative_to_unix_epoch = + timespec64_to_ns(&iattr->ia_atime); + + if (iattr->ia_valid & ATTR_MTIME) + info.modification_time.ns_relative_to_unix_epoch = + timespec64_to_ns(&iattr->ia_mtime); + + /* + * Ignore ctime (inode change time) as it can't be set + * from userland anyway. + */ + + buf_len = sizeof(info); + err = vboxsf_fsinfo(sbi->root, params.handle, + SHFL_INFO_SET | SHFL_INFO_FILE, &buf_len, + &info); + if (err) { + vboxsf_close(sbi->root, params.handle); + return err; + } + + /* the host may have given us different attr than requested */ + sf_i->force_restat = 1; + } + +#undef mode_set + + if (iattr->ia_valid & ATTR_SIZE) { + memset(&info, 0, sizeof(info)); + info.size = iattr->ia_size; + buf_len = sizeof(info); + err = vboxsf_fsinfo(sbi->root, params.handle, + SHFL_INFO_SET | SHFL_INFO_SIZE, &buf_len, + &info); + if (err) { + vboxsf_close(sbi->root, params.handle); + return err; + } + + /* the host may have given us different attr than requested */ + sf_i->force_restat = 1; + } + + vboxsf_close(sbi->root, params.handle); + + /* Update the inode with what the host has actually given us. */ + if (sf_i->force_restat) + vboxsf_inode_revalidate(dentry); + + return 0; +} + +/* + * [dentry] contains a string encoded in the coding system that corresponds + * to [sbi]->nls; we must convert it to UTF-8 here.
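+ * For example, with an iso8859-1 nls the single byte 0xe9 ('é') becomes + * the two-byte UTF-8 sequence 0xc3 0xa9, so the UTF-8 path may be longer + * than the nls-encoded one.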
+ * Returns a shfl_string allocated through __getname (must be freed using + * __putname), or an ERR_PTR on error. + */ +struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi, + struct dentry *dentry) +{ + struct shfl_string *shfl_path; + int path_len, out_len, nb; + char *buf, *path; + wchar_t uni; + u8 *out; + + buf = __getname(); + if (!buf) + return ERR_PTR(-ENOMEM); + + path = dentry_path_raw(dentry, buf, PATH_MAX); + if (IS_ERR(path)) { + __putname(buf); + return (struct shfl_string *)path; + } + path_len = strlen(path); + + if (sbi->nls) { + shfl_path = __getname(); + if (!shfl_path) { + __putname(buf); + return ERR_PTR(-ENOMEM); + } + + out = shfl_path->string.utf8; + out_len = PATH_MAX - SHFLSTRING_HEADER_SIZE - 1; + + while (path_len) { + nb = sbi->nls->char2uni(path, path_len, &uni); + if (nb < 0) { + __putname(shfl_path); + __putname(buf); + return ERR_PTR(-EINVAL); + } + path += nb; + path_len -= nb; + + nb = utf32_to_utf8(uni, out, out_len); + if (nb < 0) { + __putname(shfl_path); + __putname(buf); + return ERR_PTR(-ENAMETOOLONG); + } + out += nb; + out_len -= nb; + } + *out = 0; + shfl_path->length = out - shfl_path->string.utf8; + shfl_path->size = shfl_path->length + 1; + __putname(buf); + } else { + if ((SHFLSTRING_HEADER_SIZE + path_len + 1) > PATH_MAX) { + __putname(buf); + return ERR_PTR(-ENAMETOOLONG); + } + /* + * dentry_path stores the name at the end of buf, but the + * shfl_string string we return must be properly aligned. + */ + shfl_path = (struct shfl_string *)buf; + memmove(shfl_path->string.utf8, path, path_len); + shfl_path->string.utf8[path_len] = 0; + shfl_path->length = path_len; + shfl_path->size = path_len + 1; + } + + return shfl_path; +} + +int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len, + const unsigned char *utf8_name, size_t utf8_len) +{ + const char *in; + char *out; + size_t out_len; + size_t out_bound_len; + size_t in_bound_len; + + in = utf8_name; + in_bound_len = utf8_len; + + out = name; + out_len = 0; + /* Reserve space for terminating 0 */ + out_bound_len = name_bound_len - 1; + + while (in_bound_len) { + int nb; + unicode_t uni; + + nb = utf8_to_utf32(in, in_bound_len, &uni); + if (nb < 0) + return -EINVAL; + + in += nb; + in_bound_len -= nb; + + nb = sbi->nls->uni2char(uni, out, out_bound_len); + if (nb < 0) + return nb; + + out += nb; + out_bound_len -= nb; + out_len += nb; + } + + *out = 0; + + return 0; +} + +static struct vboxsf_dir_buf *vboxsf_dir_buf_alloc(struct list_head *list) +{ + struct vboxsf_dir_buf *b; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (!b) + return NULL; + + b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL); + if (!b->buf) { + kfree(b); + return NULL; + } + + b->entries = 0; + b->used = 0; + b->free = DIR_BUFFER_SIZE; + list_add(&b->head, list); + + return b; +} + +static void vboxsf_dir_buf_free(struct vboxsf_dir_buf *b) +{ + list_del(&b->head); + kfree(b->buf); + kfree(b); +} + +struct vboxsf_dir_info *vboxsf_dir_info_alloc(void) +{ + struct vboxsf_dir_info *p; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return NULL; + + INIT_LIST_HEAD(&p->info_list); + return p; +} + +void vboxsf_dir_info_free(struct vboxsf_dir_info *p) +{ + struct list_head *list, *pos, *tmp; + + list = &p->info_list; + list_for_each_safe(pos, tmp, list) { + struct vboxsf_dir_buf *b; + + b = list_entry(pos, struct vboxsf_dir_buf, head); + vboxsf_dir_buf_free(b); + } + kfree(p); +} + +int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d, + u64 handle) +{ + struct 
vboxsf_dir_buf *b; + u32 entries, size; + int err = 0; + void *buf; + + /* vboxsf_dirinfo returns 1 on end of dir */ + while (err == 0) { + b = vboxsf_dir_buf_alloc(&sf_d->info_list); + if (!b) { + err = -ENOMEM; + break; + } + + buf = b->buf; + size = b->free; + + err = vboxsf_dirinfo(sbi->root, handle, NULL, 0, 0, + &size, buf, &entries); + if (err < 0) + break; + + b->entries += entries; + b->free -= size; + b->used += size; + } + + if (b && b->used == 0) + vboxsf_dir_buf_free(b); + + /* -EILSEQ means the host could not translate a filename, ignore */ + if (err > 0 || err == -EILSEQ) + err = 0; + + return err; +} diff --git a/drivers/staging/vboxsf/vboxsf_wrappers.c b/drivers/staging/vboxsf/vboxsf_wrappers.c new file mode 100644 index 000000000000..bfc78a097dae --- /dev/null +++ b/drivers/staging/vboxsf/vboxsf_wrappers.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: MIT +/* + * Wrapper functions for the shfl host calls. + * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vbox_err.h> +#include <linux/vbox_utils.h> +#include "vfsmod.h" + +#define SHFL_REQUEST \ + (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER | \ + VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN) + +static u32 vboxsf_client_id; + +int vboxsf_connect(void) +{ + struct vbg_dev *gdev; + struct vmmdev_hgcm_service_location loc; + int err, vbox_status; + + loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING; + strcpy(loc.u.localhost.service_name, "VBoxSharedFolders"); + + gdev = vbg_get_gdev(); + if (IS_ERR(gdev)) + return -ENODEV; /* No guest-device */ + + err = vbg_hgcm_connect(gdev, SHFL_REQUEST, &loc, + &vboxsf_client_id, &vbox_status); + vbg_put_gdev(gdev); + + return err ? err : vbg_status_code_to_errno(vbox_status); +} + +void vboxsf_disconnect(void) +{ + struct vbg_dev *gdev; + int vbox_status; + + gdev = vbg_get_gdev(); + if (IS_ERR(gdev)) + return; /* guest-device is gone, already disconnected */ + + vbg_hgcm_disconnect(gdev, SHFL_REQUEST, vboxsf_client_id, &vbox_status); + vbg_put_gdev(gdev); +} + +static int vboxsf_call(u32 function, void *parms, u32 parm_count, int *status) +{ + struct vbg_dev *gdev; + int err, vbox_status; + + gdev = vbg_get_gdev(); + if (IS_ERR(gdev)) + return -ESHUTDOWN; /* guest-dev removed underneath us */ + + err = vbg_hgcm_call(gdev, SHFL_REQUEST, vboxsf_client_id, function, + U32_MAX, parms, parm_count, &vbox_status); + vbg_put_gdev(gdev); + + if (err < 0) + return err; + + if (status) + *status = vbox_status; + + return vbg_status_code_to_errno(vbox_status); +} + +int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root) +{ + struct shfl_map_folder parms; + int err, status; + + parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL; + parms.path.u.pointer.size = shfl_string_buf_size(folder_name); + parms.path.u.pointer.u.linear_addr = (uintptr_t)folder_name; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = 0; + + parms.delimiter.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.delimiter.u.value32 = '/'; + + parms.case_sensitive.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.case_sensitive.u.value32 = 1; + + err = vboxsf_call(SHFL_FN_MAP_FOLDER, &parms, SHFL_CPARMS_MAP_FOLDER, + &status); + if (err == -ENOSYS && status == VERR_NOT_IMPLEMENTED) + vbg_err("%s: Error host is too old\n", __func__); + + *root = parms.root.u.value32; + return err; +} + +int vboxsf_unmap_folder(u32 root) +{ + struct shfl_unmap_folder parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + 
parms.root.u.value32 = root; + + return vboxsf_call(SHFL_FN_UNMAP_FOLDER, &parms, + SHFL_CPARMS_UNMAP_FOLDER, NULL); +} + +/** + * vboxsf_create - Create a new file or folder + * @root: Root of the shared folder in which to create the file + * @parsed_path: The path of the file or folder relative to the shared folder + * @param: create_parms Parameters for file/folder creation. + * + * Create a new file or folder or open an existing one in a shared folder. + * Note this function always returns 0 / success unless an exceptional condition + * occurs - out of memory, invalid arguments, etc. If the file or folder could + * not be opened or created, create_parms->handle will be set to + * SHFL_HANDLE_NIL on return. In this case the value in create_parms->result + * provides information as to why (e.g. SHFL_FILE_EXISTS), create_parms->result + * is also set on success as additional information. + * + * Returns: + * 0 or negative errno value. + */ +int vboxsf_create(u32 root, struct shfl_string *parsed_path, + struct shfl_createparms *create_parms) +{ + struct shfl_create parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL; + parms.path.u.pointer.size = shfl_string_buf_size(parsed_path); + parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path; + + parms.parms.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL; + parms.parms.u.pointer.size = sizeof(struct shfl_createparms); + parms.parms.u.pointer.u.linear_addr = (uintptr_t)create_parms; + + return vboxsf_call(SHFL_FN_CREATE, &parms, SHFL_CPARMS_CREATE, NULL); +} + +int vboxsf_close(u32 root, u64 handle) +{ + struct shfl_close parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.handle.u.value64 = handle; + + return vboxsf_call(SHFL_FN_CLOSE, &parms, SHFL_CPARMS_CLOSE, NULL); +} + +int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags) +{ + struct shfl_remove parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.path.u.pointer.size = shfl_string_buf_size(parsed_path); + parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path; + + parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.flags.u.value32 = flags; + + return vboxsf_call(SHFL_FN_REMOVE, &parms, SHFL_CPARMS_REMOVE, NULL); +} + +int vboxsf_rename(u32 root, struct shfl_string *src_path, + struct shfl_string *dest_path, u32 flags) +{ + struct shfl_rename parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.src.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.src.u.pointer.size = shfl_string_buf_size(src_path); + parms.src.u.pointer.u.linear_addr = (uintptr_t)src_path; + + parms.dest.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.dest.u.pointer.size = shfl_string_buf_size(dest_path); + parms.dest.u.pointer.u.linear_addr = (uintptr_t)dest_path; + + parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.flags.u.value32 = flags; + + return vboxsf_call(SHFL_FN_RENAME, &parms, SHFL_CPARMS_RENAME, NULL); +} + +int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf) +{ + struct shfl_read parms; + int err; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.handle.u.value64 = handle; + parms.offset.type = 
VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.offset.u.value64 = offset; + parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.cb.u.value32 = *buf_len; + parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT; + parms.buffer.u.pointer.size = *buf_len; + parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf; + + err = vboxsf_call(SHFL_FN_READ, &parms, SHFL_CPARMS_READ, NULL); + + *buf_len = parms.cb.u.value32; + return err; +} + +int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf) +{ + struct shfl_write parms; + int err; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.handle.u.value64 = handle; + parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.offset.u.value64 = offset; + parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.cb.u.value32 = *buf_len; + parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.buffer.u.pointer.size = *buf_len; + parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf; + + err = vboxsf_call(SHFL_FN_WRITE, &parms, SHFL_CPARMS_WRITE, NULL); + + *buf_len = parms.cb.u.value32; + return err; +} + +/* Returns 0 on success, 1 on end-of-dir, negative errno otherwise */ +int vboxsf_dirinfo(u32 root, u64 handle, + struct shfl_string *parsed_path, u32 flags, u32 index, + u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count) +{ + struct shfl_list parms; + int err, status; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.handle.u.value64 = handle; + parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.flags.u.value32 = flags; + parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.cb.u.value32 = *buf_len; + if (parsed_path) { + parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.path.u.pointer.size = shfl_string_buf_size(parsed_path); + parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path; + } else { + parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_IN; + parms.path.u.pointer.size = 0; + parms.path.u.pointer.u.linear_addr = 0; + } + + parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT; + parms.buffer.u.pointer.size = *buf_len; + parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf; + + parms.resume_point.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.resume_point.u.value32 = index; + parms.file_count.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.file_count.u.value32 = 0; /* out parameter only */ + + err = vboxsf_call(SHFL_FN_LIST, &parms, SHFL_CPARMS_LIST, &status); + if (err == -ENODATA && status == VERR_NO_MORE_FILES) + err = 1; + + *buf_len = parms.cb.u.value32; + *file_count = parms.file_count.u.value32; + return err; +} + +int vboxsf_fsinfo(u32 root, u64 handle, u32 flags, + u32 *buf_len, void *buf) +{ + struct shfl_information parms; + int err; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT; + parms.handle.u.value64 = handle; + parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.flags.u.value32 = flags; + parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.cb.u.value32 = *buf_len; + parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL; + parms.info.u.pointer.size = *buf_len; + parms.info.u.pointer.u.linear_addr = (uintptr_t)buf; + + err = vboxsf_call(SHFL_FN_INFORMATION, &parms, SHFL_CPARMS_INFORMATION, + NULL); + + *buf_len = parms.cb.u.value32; + return err; +} + +int vboxsf_readlink(u32 root, struct shfl_string 
*parsed_path, + u32 buf_len, u8 *buf) +{ + struct shfl_readLink parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.path.u.pointer.size = shfl_string_buf_size(parsed_path); + parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path; + + parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT; + parms.buffer.u.pointer.size = buf_len; + parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf; + + return vboxsf_call(SHFL_FN_READLINK, &parms, SHFL_CPARMS_READLINK, + NULL); +} + +int vboxsf_symlink(u32 root, struct shfl_string *new_path, + struct shfl_string *old_path, struct shfl_fsobjinfo *buf) +{ + struct shfl_symlink parms; + + parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT; + parms.root.u.value32 = root; + + parms.new_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.new_path.u.pointer.size = shfl_string_buf_size(new_path); + parms.new_path.u.pointer.u.linear_addr = (uintptr_t)new_path; + + parms.old_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN; + parms.old_path.u.pointer.size = shfl_string_buf_size(old_path); + parms.old_path.u.pointer.u.linear_addr = (uintptr_t)old_path; + + parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT; + parms.info.u.pointer.size = sizeof(struct shfl_fsobjinfo); + parms.info.u.pointer.u.linear_addr = (uintptr_t)buf; + + return vboxsf_call(SHFL_FN_SYMLINK, &parms, SHFL_CPARMS_SYMLINK, NULL); +} + +int vboxsf_set_utf8(void) +{ + return vboxsf_call(SHFL_FN_SET_UTF8, NULL, 0, NULL); +} + +int vboxsf_set_symlinks(void) +{ + return vboxsf_call(SHFL_FN_SET_SYMLINKS, NULL, 0, NULL); +} diff --git a/drivers/staging/vboxsf/vfsmod.h b/drivers/staging/vboxsf/vfsmod.h new file mode 100644 index 000000000000..18f95b00fc33 --- /dev/null +++ b/drivers/staging/vboxsf/vfsmod.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT */ +/* + * VirtualBox Guest Shared Folders support: module header. 
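+ *
+ * Gathers the per-shared-folder state (struct vboxsf_sbi: the mapped
+ * folder's root handle, idr-based inode numbering, nls table and mount
+ * options), the per-inode state (struct vboxsf_inode: the list of open
+ * host handles and a revalidation flag), and the prototypes for the
+ * host-call wrappers.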
+ * + * Copyright (C) 2006-2018 Oracle Corporation + */ + +#ifndef VFSMOD_H +#define VFSMOD_H + +#include <linux/backing-dev.h> +#include <linux/idr.h> +#include "shfl_hostintf.h" + +#define DIR_BUFFER_SIZE SZ_16K + +/* The cast is to prevent assignment of void * to pointers of arbitrary type */ +#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info) +#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode) + +struct vboxsf_options { + unsigned long ttl; + kuid_t uid; + kgid_t gid; + bool dmode_set; + bool fmode_set; + umode_t dmode; + umode_t fmode; + umode_t dmask; + umode_t fmask; +}; + +struct vboxsf_fs_context { + struct vboxsf_options o; + char *nls_name; +}; + +/* per-shared folder information */ +struct vboxsf_sbi { + struct vboxsf_options o; + struct shfl_fsobjinfo root_info; + struct idr ino_idr; + spinlock_t ino_idr_lock; /* This protects ino_idr */ + struct nls_table *nls; + u32 next_generation; + u32 root; + int bdi_id; +}; + +/* per-inode information */ +struct vboxsf_inode { + /* some information was changed, update data on next revalidate */ + int force_restat; + /* list of open handles for this inode + lock protecting it */ + struct list_head handle_list; + /* This mutex protects handle_list accesses */ + struct mutex handle_list_mutex; + /* The VFS inode struct */ + struct inode vfs_inode; +}; + +struct vboxsf_dir_info { + struct list_head info_list; +}; + +struct vboxsf_dir_buf { + size_t entries; + size_t free; + size_t used; + void *buf; + struct list_head head; +}; + +/* globals */ +extern const struct inode_operations vboxsf_dir_iops; +extern const struct inode_operations vboxsf_lnk_iops; +extern const struct inode_operations vboxsf_reg_iops; +extern const struct file_operations vboxsf_dir_fops; +extern const struct file_operations vboxsf_reg_fops; +extern const struct address_space_operations vboxsf_reg_aops; +extern const struct dentry_operations vboxsf_dentry_ops; + +/* from utils.c */ +struct inode *vboxsf_new_inode(struct super_block *sb); +void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode, + const struct shfl_fsobjinfo *info); +int vboxsf_create_at_dentry(struct dentry *dentry, + struct shfl_createparms *params); +int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path, + struct shfl_fsobjinfo *info); +int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info); +int vboxsf_inode_revalidate(struct dentry *dentry); +int vboxsf_getattr(const struct path *path, struct kstat *kstat, + u32 request_mask, unsigned int query_flags); +int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr); +struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi, + struct dentry *dentry); +int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len, + const unsigned char *utf8_name, size_t utf8_len); +struct vboxsf_dir_info *vboxsf_dir_info_alloc(void); +void vboxsf_dir_info_free(struct vboxsf_dir_info *p); +int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d, + u64 handle); + +/* from vboxsf_wrappers.c */ +int vboxsf_connect(void); +void vboxsf_disconnect(void); + +int vboxsf_create(u32 root, struct shfl_string *parsed_path, + struct shfl_createparms *create_parms); + +int vboxsf_close(u32 root, u64 handle); +int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags); +int vboxsf_rename(u32 root, struct shfl_string *src_path, + struct shfl_string *dest_path, u32 flags); + +int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf); +int 
vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf); + +int vboxsf_dirinfo(u32 root, u64 handle, + struct shfl_string *parsed_path, u32 flags, u32 index, + u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count); +int vboxsf_fsinfo(u32 root, u64 handle, u32 flags, + u32 *buf_len, void *buf); + +int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root); +int vboxsf_unmap_folder(u32 root); + +int vboxsf_readlink(u32 root, struct shfl_string *parsed_path, + u32 buf_len, u8 *buf); +int vboxsf_symlink(u32 root, struct shfl_string *new_path, + struct shfl_string *old_path, struct shfl_fsobjinfo *buf); + +int vboxsf_set_utf8(void); +int vboxsf_set_symlinks(void); + +#endif diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c index bc1eaa3a0773..826016c3431a 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c @@ -12,7 +12,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR), + SNDRV_PCM_INFO_SYNC_APPLPTR), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, @@ -29,7 +29,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = { static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR), + SNDRV_PCM_INFO_SYNC_APPLPTR), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c index 23fba01107b9..c6f9cf1913d2 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c @@ -289,6 +289,7 @@ int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream) VC_AUDIO_MSG_TYPE_STOP, false); } +/* FIXME: this doesn't seem working as expected for "draining" */ int bcm2835_audio_drain(struct bcm2835_alsa_stream *alsa_stream) { struct vc_audio_msg m = { diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index c6bb4aaf9bd0..082302944c37 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1748,8 +1748,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) priv->hw->max_signal = 100; - if (vnt_init(priv)) + if (vnt_init(priv)) { + device_free_info(priv); return -ENODEV; + } device_print_info(priv); pci_set_drvdata(pcid, priv); diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index eee1998c4b18..fac38c842ac5 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev, /* Set the encryption - we only support wep */ if (is_wep) { if (sme->key) { - if (sme->key_idx >= NUM_WEPKEYS) { - err = -EINVAL; - goto exit; - } + if (sme->key_idx >= NUM_WEPKEYS) + return -EINVAL; result = prism2_domibset_uint32(wlandev, 
DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID, diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index c70caf4ea490..a2b5c796bbc4 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -1831,7 +1831,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) while (credits) { struct sk_buff *p = cxgbit_sock_peek_wr(csk); - const u32 csum = (__force u32)p->csum; + u32 csum; if (unlikely(!p)) { pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", @@ -1840,6 +1840,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) break; } + csum = (__force u32)p->csum; if (unlikely(credits < csum)) { pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", csk, csk->tid, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 04bf2acd3800..2d19f0e332b0 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1075,27 +1075,6 @@ passthrough_parse_cdb(struct se_cmd *cmd, unsigned int size; /* - * Clear a lun set in the cdb if the initiator talking to use spoke - * and old standards version, as we can't assume the underlying device - * won't choke up on it. - */ - switch (cdb[0]) { - case READ_10: /* SBC - RDProtect */ - case READ_12: /* SBC - RDProtect */ - case READ_16: /* SBC - RDProtect */ - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ - case VERIFY: /* SBC - VRProtect */ - case VERIFY_16: /* SBC - VRProtect */ - case WRITE_VERIFY: /* SBC - VRProtect */ - case WRITE_VERIFY_12: /* SBC - VRProtect */ - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ - break; - default: - cdb[1] &= 0x1f; /* clear logical unit number */ - break; - } - - /* * For REPORT LUNS we always need to emulate the response, for everything * else, pass it up. 
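 *
 * (The switch removed above dates back to SCSI-2 initiators, which
 * encoded the LUN in the top three bits of CDB byte 1; on current
 * commands those bits carry RDProtect/VRProtect and similar fields,
 * which is why clearing them ever needed a whitelist at all.)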
*/ diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 391f39776c6a..6b9865c786ba 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -88,7 +88,7 @@ struct cpufreq_cooling_device { struct cpufreq_policy *policy; struct list_head node; struct time_in_idle *idle_time; - struct dev_pm_qos_request qos_req; + struct freq_qos_request qos_req; }; static DEFINE_IDA(cpufreq_ida); @@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, cpufreq_cdev->cpufreq_state = state; - return dev_pm_qos_update_request(&cpufreq_cdev->qos_req, + return freq_qos_update_request(&cpufreq_cdev->qos_req, cpufreq_cdev->freq_table[state].frequency); } @@ -615,9 +615,9 @@ __cpufreq_cooling_register(struct device_node *np, cooling_ops = &cpufreq_cooling_ops; } - ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req, - DEV_PM_QOS_MAX_FREQUENCY, - cpufreq_cdev->freq_table[0].frequency); + ret = freq_qos_add_request(&policy->constraints, + &cpufreq_cdev->qos_req, FREQ_QOS_MAX, + cpufreq_cdev->freq_table[0].frequency); if (ret < 0) { pr_err("%s: Failed to add freq constraint (%d)\n", __func__, ret); @@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np, return cdev; remove_qos_req: - dev_pm_qos_remove_request(&cpufreq_cdev->qos_req); + freq_qos_remove_request(&cpufreq_cdev->qos_req); remove_ida: ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); free_table: @@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&cooling_list_lock); thermal_cooling_device_unregister(cdev); - dev_pm_qos_remove_request(&cpufreq_cdev->qos_req); + freq_qos_remove_request(&cpufreq_cdev->qos_req); ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); kfree(cpufreq_cdev->idle_time); kfree(cpufreq_cdev->freq_table); diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c index 61cd09cef943..6795851aac95 100644 --- a/drivers/thunderbolt/nhi_ops.c +++ b/drivers/thunderbolt/nhi_ops.c @@ -80,7 +80,6 @@ static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd c { u32 data; - pci_read_config_dword(nhi->pdev, VS_CAP_19, &data); data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK; pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID); } diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 410bf1bceeee..5ea8db667e83 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -896,12 +896,13 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, */ bool tb_dp_port_is_enabled(struct tb_port *port) { - u32 data; + u32 data[2]; - if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1)) + if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap, + ARRAY_SIZE(data))) return false; - return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN)); + return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN)); } /** @@ -914,19 +915,21 @@ bool tb_dp_port_is_enabled(struct tb_port *port) */ int tb_dp_port_enable(struct tb_port *port, bool enable) { - u32 data; + u32 data[2]; int ret; - ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1); + ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap, + ARRAY_SIZE(data)); if (ret) return ret; if (enable) - data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN; + data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN; else - data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN); + data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN); - return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1); + 
return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap, + ARRAY_SIZE(data)); } /* switch utility functions */ @@ -1031,13 +1034,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) if (sw->authorized) goto unlock; - /* - * Make sure there is no PCIe rescan ongoing when a new PCIe - * tunnel is created. Otherwise the PCIe rescan code might find - * the new tunnel too early. - */ - pci_lock_rescan_remove(); - switch (val) { /* Approve switch */ case 1: @@ -1057,8 +1053,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) break; } - pci_unlock_rescan_remove(); - if (!ret) { sw->authorized = val; /* Notify status change to the userspace */ diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index e55c79eb6430..98361acd3053 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -968,6 +968,11 @@ static int __init n_hdlc_init(void) } /* end of init_module() */ +#ifdef CONFIG_SPARC +#undef __exitdata +#define __exitdata +#endif + static const char hdlc_unregister_ok[] __exitdata = KERN_INFO "N_HDLC: line discipline unregistered\n"; static const char hdlc_unregister_fail[] __exitdata = diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c index 02c5aff58a74..8df89e9cd254 100644 --- a/drivers/tty/serial/8250/8250_men_mcb.c +++ b/drivers/tty/serial/8250/8250_men_mcb.c @@ -72,8 +72,8 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev, { struct serial_8250_men_mcb_data *data; struct resource *mem; - unsigned int num_ports; - unsigned int i; + int num_ports; + int i; void __iomem *membase; mem = mcb_get_resource(mdev, IORESOURCE_MEM); @@ -88,7 +88,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev, dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n", mdev->id, num_ports); - if (num_ports == 0 || num_ports > 4) { + if (num_ports <= 0 || num_ports > 4) { dev_err(&mdev->dev, "unexpected number of ports: %u\n", num_ports); return -ENODEV; @@ -133,7 +133,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev, static void serial_8250_men_mcb_remove(struct mcb_device *mdev) { - unsigned int num_ports, i; + int num_ports, i; struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev); if (!data) diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index c68e2b3a1634..836e736ae188 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -141,7 +141,7 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) serial8250_do_set_mctrl(port, mctrl); - if (!up->gpios) { + if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) { /* * Turn off autoRTS if RTS is lowered and restore autoRTS * setting if RTS is raised @@ -456,7 +456,8 @@ static void omap_8250_set_termios(struct uart_port *port, up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW && - !up->gpios) { + !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) && + !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) { /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; priv->efr |= UART_EFR_CTS; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 4789b5d62f63..67a9eb3f94ce 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1032,6 +1032,7 @@ config SERIAL_SIFIVE_CONSOLE bool "Console on SiFive UART" depends on SERIAL_SIFIVE=y select 
SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON help Select this option if you would like to use a SiFive UART as the system console. diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c index 68d74f2b5106..a32f0d2afd59 100644 --- a/drivers/tty/serial/fsl_linflexuart.c +++ b/drivers/tty/serial/fsl_linflexuart.c @@ -3,7 +3,7 @@ * Freescale linflexuart serial port driver * * Copyright 2012-2016 Freescale Semiconductor, Inc. - * Copyright 2017-2018 NXP + * Copyright 2017-2019 NXP */ #if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \ @@ -246,12 +246,14 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id) struct tty_port *port = &sport->state->port; unsigned long flags, status; unsigned char rx; + bool brk; spin_lock_irqsave(&sport->lock, flags); status = readl(sport->membase + UARTSR); while (status & LINFLEXD_UARTSR_RMB) { rx = readb(sport->membase + BDRM); + brk = false; flg = TTY_NORMAL; sport->icount.rx++; @@ -261,8 +263,11 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id) status |= LINFLEXD_UARTSR_SZF; if (status & LINFLEXD_UARTSR_BOF) status |= LINFLEXD_UARTSR_BOF; - if (status & LINFLEXD_UARTSR_FEF) + if (status & LINFLEXD_UARTSR_FEF) { + if (!rx) + brk = true; status |= LINFLEXD_UARTSR_FEF; + } if (status & LINFLEXD_UARTSR_PE) status |= LINFLEXD_UARTSR_PE; } @@ -271,13 +276,15 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id) sport->membase + UARTSR); status = readl(sport->membase + UARTSR); - if (uart_handle_sysrq_char(sport, (unsigned char)rx)) - continue; - + if (brk) { + uart_handle_break(sport); + } else { #ifdef SUPPORT_SYSRQ - sport->sysrq = 0; + if (uart_handle_sysrq_char(sport, (unsigned char)rx)) + continue; #endif - tty_insert_flip_char(port, rx, flg); + tty_insert_flip_char(port, rx, flg); + } } spin_unlock_irqrestore(&sport->lock, flags); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 3e17bb8a0b16..537896c4d887 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -548,7 +548,7 @@ static void lpuart_flush_buffer(struct uart_port *port) val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; lpuart32_write(&sport->port, val, UARTFIFO); } else { - val = readb(sport->port.membase + UARTPFIFO); + val = readb(sport->port.membase + UARTCFIFO); val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH; writeb(val, sport->port.membase + UARTCFIFO); } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 87c58f9f6390..5e08f2657b90 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2222,8 +2222,8 @@ static int imx_uart_probe(struct platform_device *pdev) return PTR_ERR(base); rxirq = platform_get_irq(pdev, 0); - txirq = platform_get_irq(pdev, 1); - rtsirq = platform_get_irq(pdev, 2); + txirq = platform_get_irq_optional(pdev, 1); + rtsirq = platform_get_irq_optional(pdev, 2); sport->port.dev = &pdev->dev; sport->port.mapbase = res->start; diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c index 03963af77b15..d2d8b3494685 100644 --- a/drivers/tty/serial/owl-uart.c +++ b/drivers/tty/serial/owl-uart.c @@ -740,7 +740,7 @@ static int __init owl_uart_init(void) return ret; } -static void __init owl_uart_exit(void) +static void __exit owl_uart_exit(void) { platform_driver_unregister(&owl_uart_platform_driver); uart_unregister_driver(&owl_uart_driver); diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index c1b0d7662ef9..ff9a27d48bca 100644 --- a/drivers/tty/serial/rda-uart.c +++ 
b/drivers/tty/serial/rda-uart.c @@ -815,7 +815,7 @@ static int __init rda_uart_init(void) return ret; } -static void __init rda_uart_exit(void) +static void __exit rda_uart_exit(void) { platform_driver_unregister(&rda_uart_platform_driver); uart_unregister_driver(&rda_uart_driver); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 6e713be1d4e9..c4a414a46c7f 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1964,8 +1964,10 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co) * console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options> * * The optional form + * * earlycon=<name>,0x<addr>,<options> * console=<name>,0x<addr>,<options> + * * is also accepted; the returned @iotype will be UPIO_MEM. * * Returns 0 on success or -EINVAL on failure diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index d9074303c88e..fb4781292d40 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -66,6 +66,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set); struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, enum mctrl_gpio_idx gidx) { + if (gpios == NULL) + return NULL; + return gpios->gpio[gidx]; } EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 4e754a4850e6..22e5d4e13714 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2894,8 +2894,12 @@ static int sci_init_single(struct platform_device *dev, port->mapbase = res->start; sci_port->reg_size = resource_size(res); - for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) - sci_port->irqs[i] = platform_get_irq(dev, i); + for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) { + if (i) + sci_port->irqs[i] = platform_get_irq_optional(dev, i); + else + sci_port->irqs[i] = platform_get_irq(dev, i); + } /* The SCI generates several interrupts. They can be muxed together or * connected to different interrupt lines. 
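 * (Hence only irqs[0] is mandatory above; the remaining vectors are
 * fetched with platform_get_irq_optional() so a missing entry does not
 * log a spurious error.)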
In the muxed case only one diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index b8b912b5a8b9..06e79c11141d 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -897,7 +897,8 @@ static int __init ulite_init(void) static void __exit ulite_exit(void) { platform_driver_unregister(&ulite_platform_driver); - uart_unregister_driver(&ulite_uart_driver); + if (ulite_uart_driver.state) + uart_unregister_driver(&ulite_uart_driver); } module_init(ulite_init); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index da4563aaaf5c..4e55bc327a54 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1550,7 +1550,6 @@ static int cdns_uart_probe(struct platform_device *pdev) goto err_out_id; } - uartps_major = cdns_uart_uart_driver->tty_driver->major; cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver; /* @@ -1680,6 +1679,7 @@ static int cdns_uart_probe(struct platform_device *pdev) console_port = NULL; #endif + uartps_major = cdns_uart_uart_driver->tty_driver->major; cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, "cts-override"); return 0; @@ -1741,6 +1741,12 @@ static int cdns_uart_remove(struct platform_device *pdev) console_port = NULL; #endif + /* If this is last instance major number should be initialized */ + mutex_lock(&bitmap_lock); + if (bitmap_empty(bitmap, MAX_UART_INSTANCES)) + uartps_major = 0; + mutex_unlock(&bitmap_lock); + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); return rc; } diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c index c41ddb61b857..b0a29efe7d31 100644 --- a/drivers/usb/cdns3/cdns3-pci-wrap.c +++ b/drivers/usb/cdns3/cdns3-pci-wrap.c @@ -159,8 +159,9 @@ static int cdns3_pci_probe(struct pci_dev *pdev, wrap->plat_dev = platform_device_register_full(&plat_info); if (IS_ERR(wrap->plat_dev)) { pci_disable_device(pdev); + err = PTR_ERR(wrap->plat_dev); kfree(wrap); - return PTR_ERR(wrap->plat_dev); + return err; } } diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 06f1e105be4e..c2123ef8d8a3 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -160,10 +160,30 @@ static int cdns3_core_init_role(struct cdns3 *cdns) if (ret) goto err; - if (cdns->dr_mode != USB_DR_MODE_OTG) { + /* Initialize idle role to start with */ + ret = cdns3_role_start(cdns, USB_ROLE_NONE); + if (ret) + goto err; + + switch (cdns->dr_mode) { + case USB_DR_MODE_OTG: ret = cdns3_hw_role_switch(cdns); if (ret) goto err; + break; + case USB_DR_MODE_PERIPHERAL: + ret = cdns3_role_start(cdns, USB_ROLE_DEVICE); + if (ret) + goto err; + break; + case USB_DR_MODE_HOST: + ret = cdns3_role_start(cdns, USB_ROLE_HOST); + if (ret) + goto err; + break; + default: + ret = -EINVAL; + goto err; } return ret; diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index 44f652e8b5a2..e71240b386b4 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -234,9 +234,11 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev, static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, struct usb_ctrlrequest *ctrl) { + struct cdns3_endpoint *priv_ep; __le16 *response_pkt; u16 usb_status = 0; u32 recip; + u8 index; recip = ctrl->bRequestType & USB_RECIP_MASK; @@ -262,9 +264,13 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, case USB_RECIP_INTERFACE: return cdns3_ep0_delegate_req(priv_dev, ctrl); case USB_RECIP_ENDPOINT: - /* 
check if endpoint is stalled */ + index = cdns3_ep_addr_to_index(ctrl->wIndex); + priv_ep = priv_dev->eps[index]; + + /* check if endpoint is stalled or stall is pending */ cdns3_select_ep(priv_dev, ctrl->wIndex); - if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts))) + if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || + (priv_ep->flags & EP_STALL_PENDING)) usb_status = BIT(USB_ENDPOINT_HALT); break; default: @@ -332,7 +338,7 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, * for sending status stage. * This time should be less then 3ms. */ - usleep_range(1000, 2000); + mdelay(1); cdns3_set_register_bit(&priv_dev->regs->usb_cmd, USB_CMD_STMODE | USB_STS_TMODE_SEL(tmode - 1)); diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 228cdc4ab886..4c1e75509303 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1145,6 +1145,14 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, request = cdns3_next_request(&priv_ep->pending_req_list); priv_req = to_cdns3_request(request); + trb = priv_ep->trb_pool + priv_ep->dequeue; + + /* Request was dequeued and TRB was changed to TRB_LINK. */ + if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) { + trace_cdns3_complete_trb(priv_ep, trb); + cdns3_move_deq_to_next_trb(priv_req); + } + /* Re-select endpoint. It could be changed by other CPU during * handling usb_gadget_giveback_request. */ @@ -2067,6 +2075,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *req, *req_temp; struct cdns3_request *priv_req; struct cdns3_trb *link_trb; + u8 req_on_hw_ring = 0; unsigned long flags; int ret = 0; @@ -2083,8 +2092,10 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list, list) { - if (request == req) + if (request == req) { + req_on_hw_ring = 1; goto found; + } } list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list, @@ -2096,27 +2107,21 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, goto not_found; found: - - if (priv_ep->wa1_trb == priv_req->trb) - cdns3_wa1_restore_cycle_bit(priv_ep); - link_trb = priv_req->trb; - cdns3_move_deq_to_next_trb(priv_req); - cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); - - /* Update ring */ - request = cdns3_next_request(&priv_ep->deferred_req_list); - if (request) { - priv_req = to_cdns3_request(request); + /* Update ring only if removed request is on pending_req_list list */ + if (req_on_hw_ring) { link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + (priv_req->start_trb * TRB_SIZE)); link_trb->control = (link_trb->control & TRB_CYCLE) | - TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE; - } else { - priv_ep->flags |= EP_UPDATE_EP_TRBADDR; + TRB_TYPE(TRB_LINK) | TRB_CHAIN; + + if (priv_ep->wa1_trb == priv_req->trb) + cdns3_wa1_restore_cycle_bit(priv_ep); } + cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); + not_found: spin_unlock_irqrestore(&priv_dev->lock, flags); return ret; @@ -2324,8 +2329,6 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev) writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, ®s->usb_conf); cdns3_configure_dmult(priv_dev, NULL); - - cdns3_gadget_pullup(&priv_dev->gadget, 1); } /** @@ -2340,9 +2343,35 @@ static int cdns3_gadget_udc_start(struct usb_gadget *gadget, { struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); unsigned long flags; + enum usb_device_speed max_speed = driver->max_speed; spin_lock_irqsave(&priv_dev->lock, flags); priv_dev->gadget_driver = driver; + + /* limit speed if 
necessary */ + max_speed = min(driver->max_speed, gadget->max_speed); + + switch (max_speed) { + case USB_SPEED_FULL: + writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); + writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); + break; + case USB_SPEED_HIGH: + writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); + break; + case USB_SPEED_SUPER: + break; + default: + dev_err(priv_dev->dev, + "invalid maximum_speed parameter %d\n", + max_speed); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default to superspeed */ + max_speed = USB_SPEED_SUPER; + break; + } + cdns3_gadget_config(priv_dev); spin_unlock_irqrestore(&priv_dev->lock, flags); return 0; @@ -2376,6 +2405,8 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, !(val & EP_CMD_EPRST), 1, 100); + + priv_ep->flags &= ~EP_CLAIMED; } /* disable interrupt for device */ @@ -2570,11 +2601,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns) /* Check the maximum_speed parameter */ switch (max_speed) { case USB_SPEED_FULL: - writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); - break; case USB_SPEED_HIGH: - writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); - break; case USB_SPEED_SUPER: break; default: @@ -2662,6 +2689,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns) { int ret = 0; + /* Ensure 32-bit DMA Mask in case we switched back from Host mode */ + ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret); + return ret; + } + cdns3_drd_switch_gadget(cdns, 1); pm_runtime_get_sync(cdns->dev); @@ -2700,8 +2734,6 @@ static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup) /* disable interrupt for device */ writel(0, &priv_dev->regs->usb_ien); - cdns3_gadget_pullup(&priv_dev->gadget, 0); - return 0; } diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h index b498a170b7e8..ae11810f8826 100644 --- a/drivers/usb/cdns3/host-export.h +++ b/drivers/usb/cdns3/host-export.h @@ -12,7 +12,6 @@ #ifdef CONFIG_USB_CDNS3_HOST int cdns3_host_init(struct cdns3 *cdns); -void cdns3_host_exit(struct cdns3 *cdns); #else diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index 2733a8f71fcd..ad788bf3fe4f 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -12,6 +12,7 @@ #include <linux/platform_device.h> #include "core.h" #include "drd.h" +#include "host-export.h" static int __cdns3_host_init(struct cdns3 *cdns) { diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 7fea4999d352..0d8e3f3804a3 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp) kfree(usblp->readbuf); kfree(usblp->device_id_string); kfree(usblp->statusbuf); + usb_put_intf(usblp->intf); kfree(usblp); } @@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file) mutex_lock(&usblp_mutex); usblp->used = 0; - if (usblp->present) { + if (usblp->present) usblp_unlink_urbs(usblp); - usb_autopm_put_interface(usblp->intf); - } else /* finish cleanup from disconnect */ + + usb_autopm_put_interface(usblp->intf); + + if (!usblp->present) /* finish cleanup from disconnect */ usblp_cleanup(usblp); mutex_unlock(&usblp_mutex); return 0; @@ -1111,7 +1114,7 @@ static int usblp_probe(struct usb_interface *intf, init_waitqueue_head(&usblp->wwait); init_usb_anchor(&usblp->urbs); usblp->ifnum = 
intf->cur_altsetting->desc.bInterfaceNumber; - usblp->intf = intf; + usblp->intf = usb_get_intf(intf); /* Malloc device ID string buffer to the largest expected length, * since we can re-query it on an ioctl and a dynamic string @@ -1196,6 +1199,7 @@ abort: kfree(usblp->readbuf); kfree(usblp->statusbuf); kfree(usblp->device_id_string); + usb_put_intf(usblp->intf); kfree(usblp); abort_ret: return retval; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 151a74a54386..1ac1095bfeac 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -348,6 +348,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, /* Validate the wMaxPacketSize field */ maxp = usb_endpoint_maxp(&endpoint->desc); + if (maxp == 0) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n", + cfgno, inum, asnum, d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; + } /* Find the highest legal maxpacket size for this endpoint */ i = 0; /* additional transactions per microframe */ diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index 89abc6078703..556a876c7896 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -102,6 +102,7 @@ config USB_DWC3_MESON_G12A depends on ARCH_MESON || COMPILE_TEST default USB_DWC3 select USB_ROLE_SWITCH + select REGMAP_MMIO help Support USB2/3 functionality in Amlogic G12A platforms. Say 'Y' or 'M' if you have one such device. diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 999ce5e84d3c..97d6ae3c4df2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -312,8 +312,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); dft = reg & DWC3_GFLADJ_30MHZ_MASK; - if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj, - "request value same as default, ignoring\n")) { + if (dft != dwc->fladj) { reg &= ~DWC3_GFLADJ_30MHZ_MASK; reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index 726100d1ac0d..c946d64142ad 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -139,14 +139,14 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc) struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); int irq; - irq = platform_get_irq_byname(dwc3_pdev, "otg"); + irq = platform_get_irq_byname_optional(dwc3_pdev, "otg"); if (irq > 0) goto out; if (irq == -EPROBE_DEFER) goto out; - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); if (irq > 0) goto out; @@ -157,9 +157,6 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc) if (irq > 0) goto out; - if (irq != -EPROBE_DEFER) - dev_err(dwc->dev, "missing OTG IRQ\n"); - if (!irq) irq = -EINVAL; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 5e8e18222f92..023f0357efd7 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -258,7 +258,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) ret = platform_device_add_properties(dwc->dwc3, p); if (ret < 0) - return ret; + goto err; ret = dwc3_pci_quirks(dwc); if (ret) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8adb59f8e4f1..a9aba716bf80 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -707,6 +707,12 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) 
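/* On teardown the UDC must give back every request it still owns; the
 * added loop below extends the drain to dep->cancelled_list, so requests
 * that were dequeued but never completed are also returned with
 * -ESHUTDOWN instead of being leaked.
 */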
dwc3_gadget_giveback(dep, req, -ESHUTDOWN); } + + while (!list_empty(&dep->cancelled_list)) { + req = next_request(&dep->cancelled_list); + + dwc3_gadget_giveback(dep, req, -ESHUTDOWN); + } } /** @@ -3264,14 +3270,14 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc) struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); int irq; - irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); + irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral"); if (irq > 0) goto out; if (irq == -EPROBE_DEFER) goto out; - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); if (irq > 0) goto out; @@ -3282,9 +3288,6 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc) if (irq > 0) goto out; - if (irq != -EPROBE_DEFER) - dev_err(dwc->dev, "missing peripheral IRQ\n"); - if (!irq) irq = -EINVAL; diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index 8deea8c91e03..5567ed2cddbe 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -16,14 +16,14 @@ static int dwc3_host_get_irq(struct dwc3 *dwc) struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); int irq; - irq = platform_get_irq_byname(dwc3_pdev, "host"); + irq = platform_get_irq_byname_optional(dwc3_pdev, "host"); if (irq > 0) goto out; if (irq == -EPROBE_DEFER) goto out; - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3"); if (irq > 0) goto out; @@ -34,9 +34,6 @@ static int dwc3_host_get_irq(struct dwc3 *dwc) if (irq > 0) goto out; - if (irq != -EPROBE_DEFER) - dev_err(dwc->dev, "missing host IRQ\n"); - if (!irq) irq = -EINVAL; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index d516e8d6cd7f..5ec54b69c29c 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2170,14 +2170,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev) usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req); kfree(cdev->os_desc_req->buf); + cdev->os_desc_req->buf = NULL; usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req); + cdev->os_desc_req = NULL; } if (cdev->req) { if (cdev->setup_pending) usb_ep_dequeue(cdev->gadget->ep0, cdev->req); kfree(cdev->req->buf); + cdev->req->buf = NULL; usb_ep_free_request(cdev->gadget->ep0, cdev->req); + cdev->req = NULL; } cdev->next_string_id = 0; device_remove_file(&cdev->gadget->dev, &dev_attr_suspended); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 025129942894..33852c2b29d1 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -61,6 +61,8 @@ struct gadget_info { bool use_os_desc; char b_vendor_code; char qw_sign[OS_STRING_QW_SIGN_LEN]; + spinlock_t spinlock; + bool unbind; }; static inline struct gadget_info *to_gadget_info(struct config_item *item) @@ -1244,6 +1246,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget, int ret; /* the gi->lock is hold by the caller */ + gi->unbind = 0; cdev->gadget = gadget; set_gadget_data(gadget, cdev); ret = composite_dev_prepare(composite, cdev); @@ -1376,31 +1379,128 @@ static void configfs_composite_unbind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; struct gadget_info *gi; + unsigned long flags; /* the gi->lock is hold by the caller */ cdev = get_gadget_data(gadget); gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + gi->unbind = 1; + spin_unlock_irqrestore(&gi->spinlock, flags); kfree(otg_desc[0]); 
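/* gi->unbind was just raised under gi->spinlock; the wrapper callbacks
 * added below re-take that lock and re-read the gadget data, bailing
 * out once unbind has begun, so no setup/disconnect/suspend/resume
 * callback can race with the teardown that follows.
 */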
otg_desc[0] = NULL; purge_configs_funcs(gi); composite_dev_cleanup(cdev); usb_ep_autoconfig_reset(cdev->gadget); + spin_lock_irqsave(&gi->spinlock, flags); cdev->gadget = NULL; set_gadget_data(gadget, NULL); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static int configfs_composite_setup(struct usb_gadget *gadget, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + int ret; + + cdev = get_gadget_data(gadget); + if (!cdev) + return 0; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return 0; + } + + ret = composite_setup(gadget, ctrl); + spin_unlock_irqrestore(&gi->spinlock, flags); + return ret; +} + +static void configfs_composite_disconnect(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_disconnect(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static void configfs_composite_suspend(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_suspend(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); +} + +static void configfs_composite_resume(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev; + struct gadget_info *gi; + unsigned long flags; + + cdev = get_gadget_data(gadget); + if (!cdev) + return; + + gi = container_of(cdev, struct gadget_info, cdev); + spin_lock_irqsave(&gi->spinlock, flags); + cdev = get_gadget_data(gadget); + if (!cdev || gi->unbind) { + spin_unlock_irqrestore(&gi->spinlock, flags); + return; + } + + composite_resume(gadget); + spin_unlock_irqrestore(&gi->spinlock, flags); } static const struct usb_gadget_driver configfs_driver_template = { .bind = configfs_composite_bind, .unbind = configfs_composite_unbind, - .setup = composite_setup, - .reset = composite_disconnect, - .disconnect = composite_disconnect, + .setup = configfs_composite_setup, + .reset = configfs_composite_disconnect, + .disconnect = configfs_composite_disconnect, - .suspend = composite_suspend, - .resume = composite_resume, + .suspend = configfs_composite_suspend, + .resume = configfs_composite_resume, .max_speed = USB_SPEED_SUPER, .driver = { diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index d7e611645533..d354036ff6c8 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -45,7 +45,7 @@ config USB_AT91 config USB_LPC32XX tristate "LPC32XX USB Peripheral Controller" - depends on ARCH_LPC32XX + depends on ARCH_LPC32XX || COMPILE_TEST depends on I2C select USB_ISP1301 help diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 86ffc8307864..1d0d8952a74b 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ 
b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -449,9 +449,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req) next_fifo_transaction(ep, req); if (req->last_transaction) { usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY); - usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); + if (ep_is_control(ep)) + usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE); } else { - usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); + if (ep_is_control(ep)) + usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE); usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); } } diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 92af8dc98c3d..51fa614b4079 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -98,6 +98,17 @@ int usb_ep_enable(struct usb_ep *ep) if (ep->enabled) goto out; + /* UDC drivers can't handle endpoints with maxpacket size 0 */ + if (usb_endpoint_maxp(ep->desc) == 0) { + /* + * We should log an error message here, but we can't call + * dev_err() because there's no way to find the gadget + * given only ep. + */ + ret = -EINVAL; + goto out; + } + ret = ep->ops->enable(ep, ep->desc); if (ret) goto out; diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 8414fac74493..3d499d93c083 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -48,6 +48,7 @@ #define DRIVER_VERSION "02 May 2005" #define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */ +#define POWER_BUDGET_3 900 /* in mA */ static const char driver_name[] = "dummy_hcd"; static const char driver_desc[] = "USB Host+Gadget Emulator"; @@ -2432,7 +2433,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd) dum_hcd->rh_state = DUMMY_RH_RUNNING; dum_hcd->stream_en_ep = 0; INIT_LIST_HEAD(&dum_hcd->urbp_list); - dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET; + dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3; dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING; dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1; #ifdef CONFIG_USB_OTG diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 20141c3096f6..9a05863b2876 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -2576,7 +2576,7 @@ static int fsl_udc_remove(struct platform_device *pdev) dma_pool_destroy(udc_controller->td_pool); free_irq(udc_controller->irq, udc_controller); iounmap(dr_regs); - if (pdata->operating_mode == FSL_USB2_DR_DEVICE) + if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE)) release_mem_region(res->start, resource_size(res)); /* free udc --wait for the release() finished */ diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index b3e073fb88c6..bf6c81e2f8cc 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1151,7 +1151,7 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes) u32 *p32, tmp, cbytes; /* Use optimal data transfer method based on source address and size */ - switch (((u32) data) & 0x3) { + switch (((uintptr_t) data) & 0x3) { case 0: /* 32-bit aligned */ p32 = (u32 *) data; cbytes = (bytes & ~0x3); @@ -1177,11 +1177,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes) tmp = readl(USBD_RXDATA(udc->udp_baseaddr)); bl = bytes - n; - if (bl > 3) - bl = 3; + if (bl > 4) + bl = 4; for (i = 0; i < bl; i++) - data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF); + data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF); } 
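/* Worked example for the fix above: with tmp == 0xDDCCBBAA and two
 * bytes left at n == 0, the old shift of (n * 8) stored AA AA, while
 * shifting by (i * 8) stores AA BB; raising the byte cap from 3 to 4
 * likewise preserves the last byte of a full trailing word.
 */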
break; @@ -1252,7 +1252,7 @@ static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes) u32 *p32, tmp, cbytes; /* Use optimal data transfer method based on source address and size */ - switch (((u32) data) & 0x3) { + switch (((uintptr_t) data) & 0x3) { case 0: /* 32-bit aligned */ p32 = (u32 *) data; cbytes = (bytes & ~0x3); diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index e098f16c01cb..33703140233a 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1544,10 +1544,10 @@ static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr) static bool usb3_std_req_set_address(struct renesas_usb3 *usb3, struct usb_ctrlrequest *ctrl) { - if (ctrl->wValue >= 128) + if (le16_to_cpu(ctrl->wValue) >= 128) return true; /* stall */ - usb3_set_device_address(usb3, ctrl->wValue); + usb3_set_device_address(usb3, le16_to_cpu(ctrl->wValue)); usb3_set_p0_con_for_no_data(usb3); return false; @@ -1582,6 +1582,7 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3, struct renesas_usb3_ep *usb3_ep; int num; u16 status = 0; + __le16 tx_data; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: @@ -1604,10 +1605,10 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3, } if (!stall) { - status = cpu_to_le16(status); + tx_data = cpu_to_le16(status); dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n", usb_req_to_usb3_req(usb3->ep0_req)); - usb3_pipe0_internal_xfer(usb3, &status, sizeof(status), + usb3_pipe0_internal_xfer(usb3, &tx_data, sizeof(tx_data), usb3_pipe0_get_status_completion); } @@ -1772,7 +1773,7 @@ static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3, static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3, struct usb_ctrlrequest *ctrl) { - if (ctrl->wValue > 0) + if (le16_to_cpu(ctrl->wValue) > 0) usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON); else usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON); diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 7ba6afc7ef23..76c3f29562d2 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s, trb = &seg->trbs[i]; dma = seg->dma + i * sizeof(*trb); seq_printf(s, "%pad: %s\n", &dma, - xhci_decode_trb(trb->generic.field[0], - trb->generic.field[1], - trb->generic.field[2], - trb->generic.field[3])); + xhci_decode_trb(le32_to_cpu(trb->generic.field[0]), + le32_to_cpu(trb->generic.field[1]), + le32_to_cpu(trb->generic.field[2]), + le32_to_cpu(trb->generic.field[3]))); } } @@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma, - xhci_decode_slot_context(slot_ctx->dev_info, - slot_ctx->dev_info2, - slot_ctx->tt_info, - slot_ctx->dev_state)); + xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info), + le32_to_cpu(slot_ctx->dev_info2), + le32_to_cpu(slot_ctx->tt_info), + le32_to_cpu(slot_ctx->dev_state))); return 0; } @@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, - xhci_decode_ep_context(ep_ctx->ep_info, - ep_ctx->ep_info2, - ep_ctx->deq, - ep_ctx->tx_info)); + 
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), + le32_to_cpu(ep_ctx->ep_info2), + le64_to_cpu(ep_ctx->deq), + le32_to_cpu(ep_ctx->tx_info))); } return 0; diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c index f498160df969..3351d07c431f 100644 --- a/drivers/usb/host/xhci-ext-caps.c +++ b/drivers/usb/host/xhci-ext-caps.c @@ -57,6 +57,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset) ret = platform_device_add_properties(pdev, role_switch_props); if (ret) { dev_err(dev, "failed to register device properties\n"); + platform_device_put(pdev); return ret; } } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9741cdeea9d7..e7aab31fd9a5 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3202,10 +3202,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, if (usb_urb_dir_out(urb)) { len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, new_buff_len, enqd_len); - if (len != seg->bounce_len) + if (len != new_buff_len) xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", - len, seg->bounce_len); + len, new_buff_len); seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { @@ -3330,6 +3330,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, if (xhci_urb_suitable_for_idt(urb)) { memcpy(&send_addr, urb->transfer_buffer, trb_buff_len); + le64_to_cpus(&send_addr); field |= TRB_IDT; } } @@ -3475,6 +3476,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, if (xhci_urb_suitable_for_idt(urb)) { memcpy(&addr, urb->transfer_buffer, urb->transfer_buffer_length); + le64_to_cpus(&addr); field |= TRB_IDT; } else { addr = (u64) urb->transfer_dma; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 500865975687..6c17e3fe181a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1032,7 +1032,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) writel(command, &xhci->op_regs->command); xhci->broken_suspend = 0; if (xhci_handshake(&xhci->op_regs->status, - STS_SAVE, 0, 10 * 1000)) { + STS_SAVE, 0, 20 * 1000)) { /* * AMD SNPS xHC 3.0 occasionally does not clear the * SSS bit of USBSTS and when driver tries to poll @@ -1108,6 +1108,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) hibernated = true; if (!hibernated) { + /* + * Some controllers might lose power during suspend, so wait + * for controller not ready bit to clear, just as in xHC init. 
+ */ + retval = xhci_handshake(&xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); + if (retval) { + xhci_warn(xhci, "Controller not ready at resume %d\n", + retval); + spin_unlock_irq(&xhci->lock); + return retval; + } /* step 1: restore register */ xhci_restore_registers(xhci); /* step 2: initialize command ring buffer */ @@ -3059,6 +3071,48 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, } } +static void xhci_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *host_ep) +{ + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_virt_ep *ep; + struct usb_device *udev; + unsigned long flags; + unsigned int ep_index; + + xhci = hcd_to_xhci(hcd); +rescan: + spin_lock_irqsave(&xhci->lock, flags); + + udev = (struct usb_device *)host_ep->hcpriv; + if (!udev || !udev->slot_id) + goto done; + + vdev = xhci->devs[udev->slot_id]; + if (!vdev) + goto done; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = &vdev->eps[ep_index]; + if (!ep) + goto done; + + /* wait for hub_tt_work to finish clearing hub TT */ + if (ep->ep_state & EP_CLEARING_TT) { + spin_unlock_irqrestore(&xhci->lock, flags); + schedule_timeout_uninterruptible(1); + goto rescan; + } + + if (ep->ep_state) + xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", + ep->ep_state); +done: + host_ep->hcpriv = NULL; + spin_unlock_irqrestore(&xhci->lock, flags); +} + /* * Called after usb core issues a clear halt control message. * The host side of the halt should already be cleared by a reset endpoint @@ -3083,6 +3137,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, unsigned int ep_index; unsigned long flags; u32 ep_flag; + int err; xhci = hcd_to_xhci(hcd); if (!host_ep->hcpriv) @@ -3142,7 +3197,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, xhci_free_command(xhci, cfg_cmd); goto cleanup; } - xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0); + + err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, + ep_index, 0); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", + __func__, err); + goto cleanup; + } + xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3156,8 +3221,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, ctrl_ctx, ep_flag, ep_flag); xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); - xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, + err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, udev->slot_id, false); + if (err < 0) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_free_command(xhci, cfg_cmd); + xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", + __func__, err); + goto cleanup; + } + xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -4674,12 +4747,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, desc, state, timeout); - /* If we found we can't enable hub-initiated LPM, or + /* If we found we can't enable hub-initiated LPM, and * the U1 or U2 exit latency was too high to allow - * device-initiated LPM as well, just stop searching. + * device-initiated LPM as well, then we will disable LPM + * for this device, so stop searching any further. 
*/ - if (alt_timeout == USB3_LPM_DISABLED || - alt_timeout == USB3_LPM_DEVICE_INITIATED) { + if (alt_timeout == USB3_LPM_DISABLED) { *timeout = alt_timeout; return -E2BIG; } @@ -4790,10 +4863,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, if (intf->dev.driver) { driver = to_usb_driver(intf->dev.driver); if (driver && driver->disable_hub_initiated_lpm) { - dev_dbg(&udev->dev, "Hub-initiated %s disabled " - "at request of driver %s\n", - state_name, driver->name); - return xhci_get_timeout_no_hub_lpm(udev, state); + dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", + state_name, driver->name); + timeout = xhci_get_timeout_no_hub_lpm(udev, + state); + if (timeout == USB3_LPM_DISABLED) + return timeout; } } @@ -5077,11 +5152,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) hcd->has_tt = 1; } else { /* - * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol - * minor revision instead of sbrn. Minor revision is a two digit - * BCD containing minor and sub-minor numbers, only show minor. + * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts + * should return 0x31 for sbrn, or that the minor revision + * is a two digit BCD containig minor and sub-minor numbers. + * This was later clarified in xHCI 1.2. + * + * Some USB 3.1 capable hosts therefore have sbrn 0x30, and + * minor revision set to 0x1 instead of 0x10. */ - minor_rev = xhci->usb3_rhub.min_rev / 0x10; + if (xhci->usb3_rhub.min_rev == 0x1) + minor_rev = 1; + else + minor_rev = xhci->usb3_rhub.min_rev / 0x10; switch (minor_rev) { case 2: @@ -5199,11 +5281,12 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, unsigned long flags; xhci = hcd_to_xhci(hcd); + + spin_lock_irqsave(&xhci->lock, flags); udev = (struct usb_device *)ep->hcpriv; slot_id = udev->slot_id; ep_index = xhci_get_endpoint_index(&ep->desc); - spin_lock_irqsave(&xhci->lock, flags); xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); @@ -5240,6 +5323,7 @@ static const struct hc_driver xhci_hc_driver = { .free_streams = xhci_free_streams, .add_endpoint = xhci_add_endpoint, .drop_endpoint = xhci_drop_endpoint, + .endpoint_disable = xhci_endpoint_disable, .endpoint_reset = xhci_endpoint_reset, .check_bandwidth = xhci_check_bandwidth, .reset_bandwidth = xhci_reset_bandwidth, diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 0a57c2cc8e5a..7a6b122c833f 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -716,6 +716,10 @@ static int mts_usb_probe(struct usb_interface *intf, } + if (ep_in_current != &ep_in_set[2]) { + MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n"); + return -ENODEV; + } if ( ep_out == -1 ) { MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" ); diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index bdae62b2ffe0..9bce583aada3 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -47,16 +47,6 @@ config USB_SEVSEG To compile this driver as a module, choose M here: the module will be called usbsevseg. -config USB_RIO500 - tristate "USB Diamond Rio500 support" - help - Say Y here if you want to connect a USB Rio500 mp3 player to your - computer's USB port. Please read <file:Documentation/usb/rio.rst> - for more information. - - To compile this driver as a module, choose M here: the - module will be called rio500. 
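On the xhci_gen_setup() hunk above: the minor revision is a two-digit BCD value, high nibble minor and low nibble sub-minor, so a conforming USB 3.1 host reports 0x10 and the code keeps only the high digit. The quirk handles hosts that report 0x1 instead. A small standalone sketch of that decode, under those assumptions (not driver code):

	#include <stdio.h>

	/* Two-digit BCD: 0x10 -> minor 1 (USB 3.1), 0x20 -> minor 2 (USB 3.2).
	 * Some pre-xHCI-1.2 hosts report 0x1, which also means minor 1. */
	static unsigned int decode_minor_rev(unsigned int min_rev)
	{
		if (min_rev == 0x1)
			return 1;          /* quirky host, per the comment above */
		return min_rev / 0x10;     /* keep the high BCD digit */
	}

	int main(void)
	{
		printf("%u %u %u\n", decode_minor_rev(0x10),
		       decode_minor_rev(0x20), decode_minor_rev(0x1)); /* 1 2 1 */
		return 0;
	}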
- config USB_LEGOTOWER tristate "USB Lego Infrared Tower support" help diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 109f54f5b9aa..0d416eb624bb 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o obj-$(CONFIG_USB_LCD) += usblcd.o obj-$(CONFIG_USB_LD) += ldusb.o obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o -obj-$(CONFIG_USB_RIO500) += rio500.o obj-$(CONFIG_USB_TEST) += usbtest.o obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 344d523b0502..6f5edb9fc61e 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -75,6 +75,7 @@ struct adu_device { char serial_number[8]; int open_count; /* number of times this port has been opened */ + unsigned long disconnected:1; char *read_buffer_primary; int read_buffer_length; @@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev) { unsigned long flags; - if (dev->udev == NULL) + if (dev->disconnected) return; /* shutdown transfer */ @@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev) kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree(dev); } @@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file) } dev = usb_get_intfdata(interface); - if (!dev || !dev->udev) { + if (!dev) { retval = -ENODEV; goto exit_no_device; } @@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file) } adu_release_internal(dev); - if (dev->udev == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ if (!dev->open_count) /* ... 
and we're the last user */ adu_delete(dev); @@ -354,7 +356,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, return -ERESTARTSYS; /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; @@ -518,7 +520,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer, goto exit_nolock; /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; @@ -663,7 +665,7 @@ static int adu_probe(struct usb_interface *interface, mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); @@ -762,14 +764,18 @@ static void adu_disconnect(struct usb_interface *interface) dev = usb_get_intfdata(interface); - mutex_lock(&dev->mtx); /* not interruptible */ - dev->udev = NULL; /* poison */ usb_deregister_dev(interface, &adu_class); - mutex_unlock(&dev->mtx); + + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); mutex_lock(&adutux_mutex); usb_set_intfdata(interface, NULL); + mutex_lock(&dev->mtx); /* not interruptible */ + dev->disconnected = 1; + mutex_unlock(&dev->mtx); + /* if the device is not opened, then we clean up right now */ if (!dev->open_count) adu_delete(dev); diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index cf5828ce927a..34e6cd6f40d3 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev) usb_free_urb(dev->urb); kfree(dev->name); kfree(dev->buf); + usb_put_intf(dev->interface); kfree(dev); } } @@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface, if (dev == NULL) goto out; + dev->interface = usb_get_intf(interface); + dev->buf = kmalloc(size, GFP_KERNEL); if (dev->buf == NULL) @@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface, goto out; } - dev->interface = interface; - dev->in_ep = in_ep; if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index f5bed9f29e56..dce44fbf031f 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -54,11 +54,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); -/* Module parameters */ -static DEFINE_MUTEX(iowarrior_mutex); - static struct usb_driver iowarrior_driver; -static DEFINE_MUTEX(iowarrior_open_disc_lock); /*--------------*/ /* data */ @@ -87,6 +83,7 @@ struct iowarrior { char chip_serial[9]; /* the serial number string of the chip connected */ int report_size; /* number of bytes in a report */ u16 product_id; + struct usb_anchor submitted; }; /*--------------*/ @@ -243,6 +240,7 @@ static inline void iowarrior_delete(struct iowarrior *dev) kfree(dev->int_in_buffer); usb_free_urb(dev->int_in_urb); kfree(dev->read_queue); + usb_put_intf(dev->interface); kfree(dev); } @@ -424,11 +422,13 @@ static ssize_t iowarrior_write(struct file *file, retval = -EFAULT; goto error; } + usb_anchor_urb(int_out_urb, &dev->submitted); retval = usb_submit_urb(int_out_urb, GFP_KERNEL); if (retval) { dev_dbg(&dev->interface->dev, "submit error %d for urb nr.%d\n", retval, atomic_read(&dev->write_busy)); + usb_unanchor_urb(int_out_urb); 
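The iowarrior changes above adopt the USB core's anchor mechanism so in-flight write URBs can be cancelled as a group at disconnect. A minimal sketch of the pattern; the helper name is illustrative, while the usb_* calls are the real core API:

	#include <linux/usb.h>

	/* Anchor before submit; on failure the URB must be unanchored again,
	 * otherwise it would linger on the anchor list forever. */
	static int submit_tracked(struct usb_anchor *anchor, struct urb *urb)
	{
		int ret;

		usb_anchor_urb(urb, anchor);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret)
			usb_unanchor_urb(urb);
		return ret;
	}

At disconnect time, usb_kill_anchored_urbs(&dev->submitted) then synchronously cancels whatever is still anchored, which is exactly what the reworked iowarrior_disconnect() relies on.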
goto error; } /* submit was ok */ @@ -477,8 +477,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, if (!buffer) return -ENOMEM; - /* lock this object */ - mutex_lock(&iowarrior_mutex); mutex_lock(&dev->mutex); /* verify that the device wasn't unplugged */ @@ -571,7 +569,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, error_out: /* unlock the device */ mutex_unlock(&dev->mutex); - mutex_unlock(&iowarrior_mutex); kfree(buffer); return retval; } @@ -586,27 +583,20 @@ static int iowarrior_open(struct inode *inode, struct file *file) int subminor; int retval = 0; - mutex_lock(&iowarrior_mutex); subminor = iminor(inode); interface = usb_find_interface(&iowarrior_driver, subminor); if (!interface) { - mutex_unlock(&iowarrior_mutex); - printk(KERN_ERR "%s - error, can't find device for minor %d\n", + pr_err("%s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } - mutex_lock(&iowarrior_open_disc_lock); dev = usb_get_intfdata(interface); - if (!dev) { - mutex_unlock(&iowarrior_open_disc_lock); - mutex_unlock(&iowarrior_mutex); + if (!dev) return -ENODEV; - } mutex_lock(&dev->mutex); - mutex_unlock(&iowarrior_open_disc_lock); /* Only one process can open each device, no sharing. */ if (dev->opened) { @@ -628,7 +618,6 @@ static int iowarrior_open(struct inode *inode, struct file *file) out: mutex_unlock(&dev->mutex); - mutex_unlock(&iowarrior_mutex); return retval; } @@ -764,11 +753,13 @@ static int iowarrior_probe(struct usb_interface *interface, init_waitqueue_head(&dev->write_wait); dev->udev = udev; - dev->interface = interface; + dev->interface = usb_get_intf(interface); iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); + init_usb_anchor(&dev->submitted); + res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint); if (res) { dev_err(&interface->dev, "no interrupt-in endpoint found\n"); @@ -836,7 +827,6 @@ static int iowarrior_probe(struct usb_interface *interface, if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); - usb_set_intfdata(interface, NULL); goto error; } @@ -860,26 +850,15 @@ error: */ static void iowarrior_disconnect(struct usb_interface *interface) { - struct iowarrior *dev; - int minor; - - dev = usb_get_intfdata(interface); - mutex_lock(&iowarrior_open_disc_lock); - usb_set_intfdata(interface, NULL); - /* prevent device read, write and ioctl */ - dev->present = 0; - - minor = dev->minor; - mutex_unlock(&iowarrior_open_disc_lock); - /* give back our minor - this will call close() locks need to be dropped at this point*/ + struct iowarrior *dev = usb_get_intfdata(interface); + int minor = dev->minor; usb_deregister_dev(interface, &iowarrior_class); mutex_lock(&dev->mutex); /* prevent device read, write and ioctl */ - - mutex_unlock(&dev->mutex); + dev->present = 0; if (dev->opened) { /* There is a process that holds a filedescriptor to the device , @@ -887,10 +866,13 @@ static void iowarrior_disconnect(struct usb_interface *interface) Deleting the device is postponed until close() was called. 
*/ usb_kill_urb(dev->int_in_urb); + usb_kill_anchored_urbs(&dev->submitted); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); + mutex_unlock(&dev->mutex); } else { /* no process is using the device, cleanup now */ + mutex_unlock(&dev->mutex); iowarrior_delete(dev); } diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 6581774bdfa4..8f86b4ebca89 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in struct ld_usb { struct mutex mutex; /* locks this structure */ struct usb_interface *intf; /* save off the usb interface pointer */ + unsigned long disconnected:1; int open_count; /* number of times this port has been opened */ @@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) /* shutdown transfer */ if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; - if (dev->intf) - usb_kill_urb(dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } if (dev->interrupt_out_busy) - if (dev->intf) - usb_kill_urb(dev->interrupt_out_urb); + usb_kill_urb(dev->interrupt_out_urb); } /** @@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) */ static void ld_usb_delete(struct ld_usb *dev) { - ld_usb_abort_transfers(dev); - /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); @@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) { + if (dev->interrupt_in_running && !dev->buffer_overflow) { retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, @@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file) goto exit; } - if (mutex_lock_interruptible(&dev->mutex)) { - retval = -ERESTARTSYS; - goto exit; - } + mutex_lock(&dev->mutex); if (dev->open_count != 1) { retval = -ENODEV; goto unlock_exit; } - if (dev->intf == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ mutex_unlock(&dev->mutex); /* unlock here as ld_usb_delete frees dev */ @@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->intf) + if (dev->disconnected) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, /* wait for data */ spin_lock_irq(&dev->rbsl); - if (dev->ring_head == dev->ring_tail) { + while (dev->ring_head == dev->ring_tail) { dev->interrupt_in_done = 0; spin_unlock_irq(&dev->rbsl); if (file->f_flags & O_NONBLOCK) { @@ -480,15 +474,20 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done); if (retval < 0) goto unlock_exit; - } else { - spin_unlock_irq(&dev->rbsl); + + spin_lock_irq(&dev->rbsl); } + spin_unlock_irq(&dev->rbsl); /* actual_buffer contains actual_length + interrupt_in_buffer */ actual_buffer = (size_t 
*)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size)); + if (*actual_buffer > dev->interrupt_in_endpoint_size) { + retval = -EIO; + goto unlock_exit; + } bytes_to_read = min(count, *actual_buffer); if (bytes_to_read < *actual_buffer) - dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n", + dev_warn(&dev->intf->dev, "Read buffer overflow, %zu bytes dropped\n", *actual_buffer-bytes_to_read); /* copy one interrupt_in_buffer from ring_buffer into userspace */ @@ -496,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, retval = -EFAULT; goto unlock_exit; } - dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size; - retval = bytes_to_read; spin_lock_irq(&dev->rbsl); + dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size; + if (dev->buffer_overflow) { dev->buffer_overflow = 0; spin_unlock_irq(&dev->rbsl); @@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -563,8 +562,9 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, /* write the data into interrupt_out_buffer from userspace */ bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size); if (bytes_to_write < count) - dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n", count-bytes_to_write); - dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", + dev_warn(&dev->intf->dev, "Write buffer overflow, %zu bytes dropped\n", + count - bytes_to_write); + dev_dbg(&dev->intf->dev, "%s: count = %zu, bytes_to_write = %zu\n", __func__, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) { @@ -581,7 +581,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, 1 << 8, 0, dev->interrupt_out_buffer, bytes_to_write, - USB_CTRL_SET_TIMEOUT * HZ); + USB_CTRL_SET_TIMEOUT); if (retval < 0) dev_err(&dev->intf->dev, "Couldn't submit HID_REQ_SET_REPORT %d\n", @@ -696,10 +696,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id * dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n"); dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint); - dev->ring_buffer = - kmalloc_array(ring_buffer_size, - sizeof(size_t) + dev->interrupt_in_endpoint_size, - GFP_KERNEL); + dev->ring_buffer = kcalloc(ring_buffer_size, + sizeof(size_t) + dev->interrupt_in_endpoint_size, + GFP_KERNEL); if (!dev->ring_buffer) goto error; dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL); @@ -764,6 +763,9 @@ static void ld_usb_disconnect(struct usb_interface *intf) /* give back our minor */ usb_deregister_dev(intf, &ld_usb_class); + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->mutex); /* if the device is not opened, then we clean up right now */ @@ -771,7 +773,7 @@ static void ld_usb_disconnect(struct usb_interface *intf) mutex_unlock(&dev->mutex); ld_usb_delete(dev); } else { - dev->intf = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 
006cf13b2199..23061f1526b4 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = { }; MODULE_DEVICE_TABLE (usb, tower_table); -static DEFINE_MUTEX(open_disc_mutex); #define LEGO_USB_TOWER_MINOR_BASE 160 @@ -191,6 +190,7 @@ struct lego_usb_tower { unsigned char minor; /* the starting minor number for this device */ int open_count; /* number of times this port has been opened */ + unsigned long disconnected:1; char* read_buffer; size_t read_buffer_length; /* this much came in */ @@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev, */ static inline void tower_delete (struct lego_usb_tower *dev) { - tower_abort_transfers (dev); - /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree (dev->read_buffer); kfree (dev->interrupt_in_buffer); kfree (dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree (dev); } @@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file) goto exit; } - mutex_lock(&open_disc_mutex); dev = usb_get_intfdata(interface); - if (!dev) { - mutex_unlock(&open_disc_mutex); retval = -ENODEV; goto exit; } /* lock this device */ if (mutex_lock_interruptible(&dev->lock)) { - mutex_unlock(&open_disc_mutex); retval = -ERESTARTSYS; goto exit; } @@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file) /* allow opening only once */ if (dev->open_count) { - mutex_unlock(&open_disc_mutex); retval = -EBUSY; goto unlock_exit; } - dev->open_count = 1; - mutex_unlock(&open_disc_mutex); /* reset the tower */ result = usb_control_msg (dev->udev, @@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file) dev_err(&dev->udev->dev, "Couldn't submit interrupt_in_urb %d\n", retval); dev->interrupt_in_running = 0; - dev->open_count = 0; goto unlock_exit; } /* save device in the file's private structure */ file->private_data = dev; + dev->open_count = 1; + unlock_exit: mutex_unlock(&dev->lock); @@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file) if (dev == NULL) { retval = -ENODEV; - goto exit_nolock; - } - - mutex_lock(&open_disc_mutex); - if (mutex_lock_interruptible(&dev->lock)) { - retval = -ERESTARTSYS; goto exit; } + mutex_lock(&dev->lock); + if (dev->open_count != 1) { dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n", __func__); retval = -ENODEV; goto unlock_exit; } - if (dev->udev == NULL) { + + if (dev->disconnected) { /* the device was unplugged before the file was released */ /* unlock here as tower_delete frees dev */ @@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file) unlock_exit: mutex_unlock(&dev->lock); - exit: - mutex_unlock(&open_disc_mutex); -exit_nolock: return retval; } @@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev) if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; mb(); - if (dev->udev) - usb_kill_urb (dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } - if (dev->interrupt_out_busy && dev->udev) + if (dev->interrupt_out_busy) usb_kill_urb(dev->interrupt_out_urb); } @@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->udev) + if (dev->disconnected) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user 
*buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; @@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t } /* verify that the device wasn't unplugged */ - if (dev->udev == NULL) { + if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; @@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && dev->udev) { + if (dev->interrupt_in_running) { retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC); if (retval) dev_err(&dev->udev->dev, @@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device mutex_init(&dev->lock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); dev->open_count = 0; + dev->disconnected = 0; dev->read_buffer = NULL; dev->read_buffer_length = 0; @@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device get_version_reply, sizeof(*get_version_reply), 1000); - if (result < 0) { - dev_err(idev, "LEGO USB Tower get version control request failed\n"); + if (result != sizeof(*get_version_reply)) { + if (result >= 0) + result = -EIO; + dev_err(idev, "get version request failed: %d\n", result); retval = result; goto error; } @@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device if (retval) { /* something prevented us from registering this driver */ dev_err(idev, "Not able to get a minor for this device.\n"); - usb_set_intfdata (interface, NULL); goto error; } dev->minor = interface->minor; @@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface) int minor; dev = usb_get_intfdata (interface); - mutex_lock(&open_disc_mutex); - usb_set_intfdata (interface, NULL); minor = dev->minor; - /* give back our minor */ + /* give back our minor and prevent further open() */ usb_deregister_dev (interface, &tower_class); + /* stop I/O */ + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->lock); - mutex_unlock(&open_disc_mutex); /* if the device is not opened, then we clean up right now */ if (!dev->open_count) { mutex_unlock(&dev->lock); tower_delete (dev); } else { - dev->udev = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); wake_up_interruptible_all(&dev->write_wait); diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c deleted file mode 100644 index 30cae5e1954d..000000000000 --- a/drivers/usb/misc/rio500.c +++ /dev/null @@ -1,554 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* -*- linux-c -*- */ - -/* - * Driver for USB Rio 500 - * - * Cesar Miquel (miquel@df.uba.ar) - * - * based on hp_scanner.c by David E. Nelson (dnelson@jump.net) - * - * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee). 
- * - * Changelog: - * 30/05/2003 replaced lock/unlock kernel with up/down - * Daniele Bellucci bellucda@tiscali.it - * */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/signal.h> -#include <linux/sched/signal.h> -#include <linux/mutex.h> -#include <linux/errno.h> -#include <linux/random.h> -#include <linux/poll.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/usb.h> -#include <linux/wait.h> - -#include "rio500_usb.h" - -#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>" -#define DRIVER_DESC "USB Rio 500 driver" - -#define RIO_MINOR 64 - -/* stall/wait timeout for rio */ -#define NAK_TIMEOUT (HZ) - -#define IBUF_SIZE 0x1000 - -/* Size of the rio buffer */ -#define OBUF_SIZE 0x10000 - -struct rio_usb_data { - struct usb_device *rio_dev; /* init: probe_rio */ - unsigned int ifnum; /* Interface number of the USB device */ - int isopen; /* nz if open */ - int present; /* Device is present on the bus */ - char *obuf, *ibuf; /* transfer buffers */ - char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ - wait_queue_head_t wait_q; /* for timeouts */ -}; - -static DEFINE_MUTEX(rio500_mutex); -static struct rio_usb_data rio_instance; - -static int open_rio(struct inode *inode, struct file *file) -{ - struct rio_usb_data *rio = &rio_instance; - - /* against disconnect() */ - mutex_lock(&rio500_mutex); - - if (rio->isopen || !rio->present) { - mutex_unlock(&rio500_mutex); - return -EBUSY; - } - rio->isopen = 1; - - init_waitqueue_head(&rio->wait_q); - - - dev_info(&rio->rio_dev->dev, "Rio opened.\n"); - mutex_unlock(&rio500_mutex); - - return 0; -} - -static int close_rio(struct inode *inode, struct file *file) -{ - struct rio_usb_data *rio = &rio_instance; - - /* against disconnect() */ - mutex_lock(&rio500_mutex); - - rio->isopen = 0; - if (!rio->present) { - /* cleanup has been delayed */ - kfree(rio->ibuf); - kfree(rio->obuf); - rio->ibuf = NULL; - rio->obuf = NULL; - } else { - dev_info(&rio->rio_dev->dev, "Rio closed.\n"); - } - mutex_unlock(&rio500_mutex); - return 0; -} - -static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct RioCommand rio_cmd; - struct rio_usb_data *rio = &rio_instance; - void __user *data; - unsigned char *buffer; - int result, requesttype; - int retries; - int retval=0; - - mutex_lock(&rio500_mutex); - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - retval = -ENODEV; - goto err_out; - } - - switch (cmd) { - case RIO_RECV_COMMAND: - data = (void __user *) arg; - if (data == NULL) - break; - if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) { - retval = -EFAULT; - goto err_out; - } - if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) { - retval = -EINVAL; - goto err_out; - } - buffer = (unsigned char *) __get_free_page(GFP_KERNEL); - if (buffer == NULL) { - retval = -ENOMEM; - goto err_out; - } - if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) { - retval = -EFAULT; - free_page((unsigned long) buffer); - goto err_out; - } - - requesttype = rio_cmd.requesttype | USB_DIR_IN | - USB_TYPE_VENDOR | USB_RECIP_DEVICE; - dev_dbg(&rio->rio_dev->dev, - "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n", - requesttype, rio_cmd.request, rio_cmd.value, - rio_cmd.index, rio_cmd.length); - /* Send rio control message */ - retries = 3; - while (retries) { - result = usb_control_msg(rio->rio_dev, - usb_rcvctrlpipe(rio-> rio_dev, 0), - rio_cmd.request, - requesttype, - rio_cmd.value, - 
rio_cmd.index, buffer, - rio_cmd.length, - jiffies_to_msecs(rio_cmd.timeout)); - if (result == -ETIMEDOUT) - retries--; - else if (result < 0) { - dev_err(&rio->rio_dev->dev, - "Error executing ioctrl. code = %d\n", - result); - retries = 0; - } else { - dev_dbg(&rio->rio_dev->dev, - "Executed ioctl. Result = %d (data=%02x)\n", - result, buffer[0]); - if (copy_to_user(rio_cmd.buffer, buffer, - rio_cmd.length)) { - free_page((unsigned long) buffer); - retval = -EFAULT; - goto err_out; - } - retries = 0; - } - - /* rio_cmd.buffer contains a raw stream of single byte - data which has been returned from rio. Data is - interpreted at application level. For data that - will be cast to data types longer than 1 byte, data - will be little_endian and will potentially need to - be swapped at the app level */ - - } - free_page((unsigned long) buffer); - break; - - case RIO_SEND_COMMAND: - data = (void __user *) arg; - if (data == NULL) - break; - if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) { - retval = -EFAULT; - goto err_out; - } - if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) { - retval = -EINVAL; - goto err_out; - } - buffer = (unsigned char *) __get_free_page(GFP_KERNEL); - if (buffer == NULL) { - retval = -ENOMEM; - goto err_out; - } - if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) { - free_page((unsigned long)buffer); - retval = -EFAULT; - goto err_out; - } - - requesttype = rio_cmd.requesttype | USB_DIR_OUT | - USB_TYPE_VENDOR | USB_RECIP_DEVICE; - dev_dbg(&rio->rio_dev->dev, - "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n", - requesttype, rio_cmd.request, rio_cmd.value, - rio_cmd.index, rio_cmd.length); - /* Send rio control message */ - retries = 3; - while (retries) { - result = usb_control_msg(rio->rio_dev, - usb_sndctrlpipe(rio-> rio_dev, 0), - rio_cmd.request, - requesttype, - rio_cmd.value, - rio_cmd.index, buffer, - rio_cmd.length, - jiffies_to_msecs(rio_cmd.timeout)); - if (result == -ETIMEDOUT) - retries--; - else if (result < 0) { - dev_err(&rio->rio_dev->dev, - "Error executing ioctrl. code = %d\n", - result); - retries = 0; - } else { - dev_dbg(&rio->rio_dev->dev, - "Executed ioctl. Result = %d\n", result); - retries = 0; - - } - - } - free_page((unsigned long) buffer); - break; - - default: - retval = -ENOTTY; - break; - } - - -err_out: - mutex_unlock(&rio500_mutex); - return retval; -} - -static ssize_t -write_rio(struct file *file, const char __user *buffer, - size_t count, loff_t * ppos) -{ - DEFINE_WAIT(wait); - struct rio_usb_data *rio = &rio_instance; - - unsigned long copy_size; - unsigned long bytes_written = 0; - unsigned int partial; - - int result = 0; - int maxretry; - int errn = 0; - int intr; - - intr = mutex_lock_interruptible(&rio500_mutex); - if (intr) - return -EINTR; - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&rio500_mutex); - return -ENODEV; - } - - - - do { - unsigned long thistime; - char *obuf = rio->obuf; - - thistime = copy_size = - (count >= OBUF_SIZE) ? OBUF_SIZE : count; - if (copy_from_user(rio->obuf, buffer, copy_size)) { - errn = -EFAULT; - goto error; - } - maxretry = 5; - while (thistime) { - if (!rio->rio_dev) { - errn = -ENODEV; - goto error; - } - if (signal_pending(current)) { - mutex_unlock(&rio500_mutex); - return bytes_written ? 
bytes_written : -EINTR; - } - - result = usb_bulk_msg(rio->rio_dev, - usb_sndbulkpipe(rio->rio_dev, 2), - obuf, thistime, &partial, 5000); - - dev_dbg(&rio->rio_dev->dev, - "write stats: result:%d thistime:%lu partial:%u\n", - result, thistime, partial); - - if (result == -ETIMEDOUT) { /* NAK - so hold for a while */ - if (!maxretry--) { - errn = -ETIME; - goto error; - } - prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE); - schedule_timeout(NAK_TIMEOUT); - finish_wait(&rio->wait_q, &wait); - continue; - } else if (!result && partial) { - obuf += partial; - thistime -= partial; - } else - break; - } - if (result) { - dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n", - result); - errn = -EIO; - goto error; - } - bytes_written += copy_size; - count -= copy_size; - buffer += copy_size; - } while (count > 0); - - mutex_unlock(&rio500_mutex); - - return bytes_written ? bytes_written : -EIO; - -error: - mutex_unlock(&rio500_mutex); - return errn; -} - -static ssize_t -read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) -{ - DEFINE_WAIT(wait); - struct rio_usb_data *rio = &rio_instance; - ssize_t read_count; - unsigned int partial; - int this_read; - int result; - int maxretry = 10; - char *ibuf; - int intr; - - intr = mutex_lock_interruptible(&rio500_mutex); - if (intr) - return -EINTR; - /* Sanity check to make sure rio is connected, powered, etc */ - if (rio->present == 0 || rio->rio_dev == NULL) { - mutex_unlock(&rio500_mutex); - return -ENODEV; - } - - ibuf = rio->ibuf; - - read_count = 0; - - - while (count > 0) { - if (signal_pending(current)) { - mutex_unlock(&rio500_mutex); - return read_count ? read_count : -EINTR; - } - if (!rio->rio_dev) { - mutex_unlock(&rio500_mutex); - return -ENODEV; - } - this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; - - result = usb_bulk_msg(rio->rio_dev, - usb_rcvbulkpipe(rio->rio_dev, 1), - ibuf, this_read, &partial, - 8000); - - dev_dbg(&rio->rio_dev->dev, - "read stats: result:%d this_read:%u partial:%u\n", - result, this_read, partial); - - if (partial) { - count = this_read = partial; - } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? 
*/ - if (!maxretry--) { - mutex_unlock(&rio500_mutex); - dev_err(&rio->rio_dev->dev, - "read_rio: maxretry timeout\n"); - return -ETIME; - } - prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE); - schedule_timeout(NAK_TIMEOUT); - finish_wait(&rio->wait_q, &wait); - continue; - } else if (result != -EREMOTEIO) { - mutex_unlock(&rio500_mutex); - dev_err(&rio->rio_dev->dev, - "Read Whoops - result:%d partial:%u this_read:%u\n", - result, partial, this_read); - return -EIO; - } else { - mutex_unlock(&rio500_mutex); - return (0); - } - - if (this_read) { - if (copy_to_user(buffer, ibuf, this_read)) { - mutex_unlock(&rio500_mutex); - return -EFAULT; - } - count -= this_read; - read_count += this_read; - buffer += this_read; - } - } - mutex_unlock(&rio500_mutex); - return read_count; -} - -static const struct file_operations usb_rio_fops = { - .owner = THIS_MODULE, - .read = read_rio, - .write = write_rio, - .unlocked_ioctl = ioctl_rio, - .open = open_rio, - .release = close_rio, - .llseek = noop_llseek, -}; - -static struct usb_class_driver usb_rio_class = { - .name = "rio500%d", - .fops = &usb_rio_fops, - .minor_base = RIO_MINOR, -}; - -static int probe_rio(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct usb_device *dev = interface_to_usbdev(intf); - struct rio_usb_data *rio = &rio_instance; - int retval = -ENOMEM; - char *ibuf, *obuf; - - if (rio->present) { - dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum); - return -EBUSY; - } - dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum); - - obuf = kmalloc(OBUF_SIZE, GFP_KERNEL); - if (!obuf) { - dev_err(&dev->dev, - "probe_rio: Not enough memory for the output buffer\n"); - goto err_obuf; - } - dev_dbg(&intf->dev, "obuf address: %p\n", obuf); - - ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL); - if (!ibuf) { - dev_err(&dev->dev, - "probe_rio: Not enough memory for the input buffer\n"); - goto err_ibuf; - } - dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf); - - mutex_lock(&rio500_mutex); - rio->rio_dev = dev; - rio->ibuf = ibuf; - rio->obuf = obuf; - rio->present = 1; - mutex_unlock(&rio500_mutex); - - retval = usb_register_dev(intf, &usb_rio_class); - if (retval) { - dev_err(&dev->dev, - "Not able to get a minor for this device.\n"); - goto err_register; - } - - usb_set_intfdata(intf, rio); - return retval; - - err_register: - mutex_lock(&rio500_mutex); - rio->present = 0; - mutex_unlock(&rio500_mutex); - err_ibuf: - kfree(obuf); - err_obuf: - return retval; -} - -static void disconnect_rio(struct usb_interface *intf) -{ - struct rio_usb_data *rio = usb_get_intfdata (intf); - - usb_set_intfdata (intf, NULL); - if (rio) { - usb_deregister_dev(intf, &usb_rio_class); - - mutex_lock(&rio500_mutex); - if (rio->isopen) { - rio->isopen = 0; - /* better let it finish - the release will do whats needed */ - rio->rio_dev = NULL; - mutex_unlock(&rio500_mutex); - return; - } - kfree(rio->ibuf); - kfree(rio->obuf); - - dev_info(&intf->dev, "USB Rio disconnected.\n"); - - rio->present = 0; - mutex_unlock(&rio500_mutex); - } -} - -static const struct usb_device_id rio_table[] = { - { USB_DEVICE(0x0841, 1) }, /* Rio 500 */ - { } /* Terminating entry */ -}; - -MODULE_DEVICE_TABLE (usb, rio_table); - -static struct usb_driver rio_driver = { - .name = "rio500", - .probe = probe_rio, - .disconnect = disconnect_rio, - .id_table = rio_table, -}; - -module_usb_driver(rio_driver); - -MODULE_AUTHOR( DRIVER_AUTHOR ); -MODULE_DESCRIPTION( DRIVER_DESC ); -MODULE_LICENSE("GPL"); - diff --git 
a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h deleted file mode 100644 index 6db7a5863496..000000000000 --- a/drivers/usb/misc/rio500_usb.h +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* ---------------------------------------------------------------------- - Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar) - ---------------------------------------------------------------------- */ - -#define RIO_SEND_COMMAND 0x1 -#define RIO_RECV_COMMAND 0x2 - -#define RIO_DIR_OUT 0x0 -#define RIO_DIR_IN 0x1 - -struct RioCommand { - short length; - int request; - int requesttype; - int value; - int index; - void __user *buffer; - int timeout; -}; diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 9ba4a4e68d91..61e9e987fe4a 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/mutex.h> +#include <linux/rwsem.h> #include <linux/uaccess.h> #include <linux/usb.h> @@ -29,16 +30,12 @@ #define IOCTL_GET_DRV_VERSION 2 -static DEFINE_MUTEX(lcd_mutex); static const struct usb_device_id id_table[] = { { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); -static DEFINE_MUTEX(open_disc_mutex); - - struct usb_lcd { struct usb_device *udev; /* init: probe_lcd */ struct usb_interface *interface; /* the interface for @@ -57,6 +54,8 @@ struct usb_lcd { using up all RAM */ struct usb_anchor submitted; /* URBs to wait for before suspend */ + struct rw_semaphore io_rwsem; + unsigned long disconnected:1; }; #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref) @@ -81,40 +80,29 @@ static int lcd_open(struct inode *inode, struct file *file) struct usb_interface *interface; int subminor, r; - mutex_lock(&lcd_mutex); subminor = iminor(inode); interface = usb_find_interface(&lcd_driver, subminor); if (!interface) { - mutex_unlock(&lcd_mutex); - printk(KERN_ERR "USBLCD: %s - error, can't find device for minor %d\n", + pr_err("USBLCD: %s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } - mutex_lock(&open_disc_mutex); dev = usb_get_intfdata(interface); - if (!dev) { - mutex_unlock(&open_disc_mutex); - mutex_unlock(&lcd_mutex); - return -ENODEV; - } /* increment our usage count for the device */ kref_get(&dev->kref); - mutex_unlock(&open_disc_mutex); /* grab a power reference */ r = usb_autopm_get_interface(interface); if (r < 0) { kref_put(&dev->kref, lcd_delete); - mutex_unlock(&lcd_mutex); return r; } /* save our object in the file's private structure */ file->private_data = dev; - mutex_unlock(&lcd_mutex); return 0; } @@ -142,6 +130,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, dev = file->private_data; + down_read(&dev->io_rwsem); + + if (dev->disconnected) { + retval = -ENODEV; + goto out_up_io; + } + /* do a blocking bulk read to get data from the device */ retval = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, @@ -158,6 +153,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer, retval = bytes_read; } +out_up_io: + up_read(&dev->io_rwsem); + return retval; } @@ -173,14 +171,12 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case IOCTL_GET_HARD_VERSION: - mutex_lock(&lcd_mutex); bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice); sprintf(buf, "%1d%1d.%1d%1d", (bcdDevice & 0xF000)>>12, (bcdDevice & 0xF00)>>8, (bcdDevice & 0xF0)>>4, (bcdDevice & 0xF)); - 
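The IOCTL_GET_HARD_VERSION path above formats bcdDevice, which is binary-coded decimal with one digit per nibble. A standalone sketch of the same nibble split (the sample value is ours, not from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bcd = 0x0121; /* hypothetical bcdDevice, prints "01.21" */

		printf("%1u%1u.%1u%1u\n",
		       (bcd & 0xF000) >> 12, (bcd & 0x0F00) >> 8,
		       (bcd & 0x00F0) >> 4,  (bcd & 0x000F));
		return 0;
	}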
mutex_unlock(&lcd_mutex); if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0) return -EFAULT; break; @@ -237,11 +233,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, if (r < 0) return -EINTR; + down_read(&dev->io_rwsem); + + if (dev->disconnected) { + retval = -ENODEV; + goto err_up_io; + } + /* create a urb, and a buffer for it, and copy the data to the urb */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; - goto err_no_buf; + goto err_up_io; } buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, @@ -278,6 +281,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, the USB core will eventually free it entirely */ usb_free_urb(urb); + up_read(&dev->io_rwsem); exit: return count; error_unanchor: @@ -285,7 +289,8 @@ error_unanchor: error: usb_free_coherent(dev->udev, count, buf, urb->transfer_dma); usb_free_urb(urb); -err_no_buf: +err_up_io: + up_read(&dev->io_rwsem); up(&dev->limit_sem); return retval; } @@ -325,6 +330,7 @@ static int lcd_probe(struct usb_interface *interface, kref_init(&dev->kref); sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES); + init_rwsem(&dev->io_rwsem); init_usb_anchor(&dev->submitted); dev->udev = usb_get_dev(interface_to_usbdev(interface)); @@ -365,7 +371,6 @@ static int lcd_probe(struct usb_interface *interface, /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); - usb_set_intfdata(interface, NULL); goto error; } @@ -411,17 +416,18 @@ static int lcd_resume(struct usb_interface *intf) static void lcd_disconnect(struct usb_interface *interface) { - struct usb_lcd *dev; + struct usb_lcd *dev = usb_get_intfdata(interface); int minor = interface->minor; - mutex_lock(&open_disc_mutex); - dev = usb_get_intfdata(interface); - usb_set_intfdata(interface, NULL); - mutex_unlock(&open_disc_mutex); - /* give back our minor */ usb_deregister_dev(interface, &lcd_class); + down_write(&dev->io_rwsem); + dev->disconnected = 1; + up_write(&dev->io_rwsem); + + usb_kill_anchored_urbs(&dev->submitted); + /* decrement our usage count */ kref_put(&dev->kref, lcd_delete); diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 6715a128e6c8..be0505b8b5d4 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -60,6 +60,7 @@ struct usb_yurex { struct kref kref; struct mutex io_mutex; + unsigned long disconnected:1; struct fasync_struct *async_queue; wait_queue_head_t waitq; @@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref) dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } + usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev); } @@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb) switch (status) { case 0: /*success*/ break; + /* The device is terminated or messed up, give up */ case -EOVERFLOW: dev_err(&dev->interface->dev, "%s - overflow with length %d, actual length is %d\n", @@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb) case -ENOENT: case -ESHUTDOWN: case -EILSEQ: - /* The device is terminated, clean up */ + case -EPROTO: + case -ETIME: return; default: dev_err(&dev->interface->dev, "%s - unknown status received: %d\n", __func__, status); - goto exit; + return; } /* handle received message */ @@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb) break; } -exit: retval = usb_submit_urb(dev->urb, GFP_ATOMIC); if (retval) { dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n", @@ -204,7 
+207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ init_waitqueue_head(&dev->waitq); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ iface_desc = interface->cur_altsetting; @@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface) /* prevent more I/O from starting */ usb_poison_urb(dev->urb); + usb_poison_urb(dev->cntl_urb); mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); /* wakeup waiters */ @@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, dev = file->private_data; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); return -ENODEV; } @@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, goto error; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index c3d5c1206eec..9dd02160cca9 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include "mtu3.h" +#include "mtu3_dr.h" #include "mtu3_debug.h" #include "mtu3_trace.h" diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 4c3de777ef6c..a3c30b609433 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -162,17 +162,17 @@ void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req) req->bRequest = (val >> 8) & 0xFF; req->bRequestType = (val >> 0) & 0xFF; - req->wValue = usbhs_read(priv, USBVAL); - req->wIndex = usbhs_read(priv, USBINDX); - req->wLength = usbhs_read(priv, USBLENG); + req->wValue = cpu_to_le16(usbhs_read(priv, USBVAL)); + req->wIndex = cpu_to_le16(usbhs_read(priv, USBINDX)); + req->wLength = cpu_to_le16(usbhs_read(priv, USBLENG)); } void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req) { usbhs_write(priv, USBREQ, (req->bRequest << 8) | req->bRequestType); - usbhs_write(priv, USBVAL, req->wValue); - usbhs_write(priv, USBINDX, req->wIndex); - usbhs_write(priv, USBLENG, req->wLength); + usbhs_write(priv, USBVAL, le16_to_cpu(req->wValue)); + usbhs_write(priv, USBINDX, le16_to_cpu(req->wIndex)); + usbhs_write(priv, USBLENG, le16_to_cpu(req->wLength)); usbhs_bset(priv, DCPCTR, SUREQ, SUREQ); } diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h index d1a0a35ecfff..0824099b905e 100644 --- a/drivers/usb/renesas_usbhs/common.h +++ b/drivers/usb/renesas_usbhs/common.h @@ -211,6 +211,7 @@ struct usbhs_priv; /* DCPCTR */ #define BSTS (1 << 15) /* Buffer Status */ #define SUREQ (1 << 14) /* Sending SETUP Token */ +#define INBUFM (1 << 14) /* (PIPEnCTR) Transfer Buffer Monitor */ #define CSSTS (1 << 12) /* CSSTS Status */ #define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */ #define SQCLR (1 << 8) /* Toggle Bit Clear */ diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 2a01ceb71641..86637cd066cf 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -89,7 +89,7 @@ static void 
__usbhsf_pkt_del(struct usbhs_pkt *pkt) list_del_init(&pkt->node); } -static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) +struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) { return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node); } diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index 88d1816bcda2..c3d3cc35cee0 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt, void *buf, int len, int zero, int sequence); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt); void usbhs_pkt_start(struct usbhs_pipe *pipe); +struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe); #endif /* RENESAS_USB_FIFO_H */ diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 4d571a5205e2..cd38d74b3223 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -265,7 +265,7 @@ static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv, case USB_DEVICE_TEST_MODE: usbhsg_recip_handler_std_control_done(priv, uep, ctrl); udelay(100); - usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex >> 8)); + usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8); break; default: usbhsg_recip_handler_std_control_done(priv, uep, ctrl); @@ -315,7 +315,7 @@ static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv, struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usb_request *req; - unsigned short *buf; + __le16 *buf; /* alloc new usb_request for recip */ req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC); @@ -722,8 +722,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); unsigned long flags; - - usbhsg_pipe_disable(uep); + int ret = 0; dev_dbg(dev, "set halt %d (pipe %d)\n", halt, usbhs_pipe_number(pipe)); @@ -731,6 +730,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) /******************** spin lock ********************/ usbhs_lock(priv, flags); + /* + * According to usb_ep_set_halt()'s description, this function should + * return -EAGAIN if the IN endpoint has any queue or data. Note + * that the usbhs_pipe_is_dir_in() returns false if the pipe is an + * IN endpoint in the gadget mode. 
+ */ + if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) || + usbhs_pipe_contains_transmittable_data(pipe))) { + ret = -EAGAIN; + goto out; + } + if (halt) usbhs_pipe_stall(pipe); else @@ -741,10 +752,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) else usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); +out: usbhs_unlock(priv, flags); /******************** spin unlock ******************/ - return 0; + return ret; } static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index c4922b96c93b..9e5afdde1adb 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe) return -EBUSY; } +bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe) +{ + u16 val; + + /* Do not support for DCP pipe */ + if (usbhs_pipe_is_dcp(pipe)) + return false; + + val = usbhsp_pipectrl_get(pipe); + if (val & INBUFM) + return true; + + return false; +} + /* * PID ctrl */ diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h index 3080423e600c..3b130529408b 100644 --- a/drivers/usb/renesas_usbhs/pipe.h +++ b/drivers/usb/renesas_usbhs/pipe.h @@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe); void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe, int needs_bfre, int bfre_enable); int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe); +bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe); void usbhs_pipe_enable(struct usbhs_pipe *pipe); void usbhs_pipe_disable(struct usbhs_pipe *pipe); void usbhs_pipe_stall(struct usbhs_pipe *pipe); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index f0688c44b04c..25e81faf4c24 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1030,6 +1030,9 @@ static const struct usb_device_id id_table_combined[] = { /* EZPrototypes devices */ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) }, { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) }, + /* Sienna devices */ + { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, + { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index f12d806220b4..22d66217cb41 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -39,6 +39,9 @@ #define FTDI_LUMEL_PD12_PID 0x6002 +/* Sienna Serial Interface by Secyourit GmbH */ +#define FTDI_SIENNA_PID 0x8348 + /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ #define CYBER_CORTEX_AV_PID 0x8698 @@ -689,6 +692,12 @@ #define BANDB_ZZ_PROG1_USB_PID 0xBA02 /* + * Echelon USB Serial Interface + */ +#define ECHELON_VID 0x0920 +#define ECHELON_U20_PID 0x7500 + +/* * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI */ #define INTREPID_VID 0x093C diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index d34779fe4a8d..e66a59ef43a1 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint, ep_desc = find_ep(serial, endpoint); if (!ep_desc) { - /* leak the urb, something's wrong and the callers don't care */ - return urb; + usb_free_urb(urb); + return NULL; } if (usb_endpoint_xfer_int(ep_desc)) { ep_type_name = "INT"; diff --git 
a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 38e920ac7f82..06ab016be0b6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 +#define CINTERION_PRODUCT_CLS8 0x00b0 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -1154,6 +1155,14 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */ + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1847,6 +1856,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), + .driver_info = RSVD(0) | RSVD(4) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index dd0ad67aa71e..ef23acc9b9ce 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port) struct ti_port *tport; int port_number; int status; - int do_unlock; unsigned long flags; tdev = usb_get_serial_data(port->serial); @@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port) "%s - cannot send close port command, %d\n" , __func__, status); - /* if mutex_lock is interrupted, continue anyway */ - do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock); - --tport->tp_tdev->td_open_port_count; - if (tport->tp_tdev->td_open_port_count <= 0) { + mutex_lock(&tdev->td_open_close_lock); + --tdev->td_open_port_count; + if (tdev->td_open_port_count == 0) { /* last port is closed, shut down interrupt urb */ usb_kill_urb(port->serial->port[0]->interrupt_in_urb); - tport->tp_tdev->td_open_port_count = 0; } - if (do_unlock) - mutex_unlock(&tdev->td_open_close_lock); + mutex_unlock(&tdev->td_open_close_lock); } diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index a3179fea38c8..8f066bb55d7d 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -314,10 +314,7 @@ static void serial_cleanup(struct tty_struct *tty) serial = port->serial; owner = serial->type->driver.owner; - mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) - 
usb_autopm_put_interface(serial->interface); - mutex_unlock(&serial->disc_mutex); + usb_autopm_put_interface(serial->interface); usb_serial_put(serial); module_put(owner); diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 79314d8c94a4..ca3bd58f2025 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -559,6 +559,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, command_port = port->serial->port[COMMAND_PORT]; command_info = usb_get_serial_port_data(command_port); + + if (command_port->bulk_out_size < datasize + 1) + return -EIO; + mutex_lock(&command_info->mutex); command_info->command_finished = false; @@ -632,6 +636,7 @@ static void firm_setup_port(struct tty_struct *tty) struct device *dev = &port->dev; struct whiteheat_port_settings port_settings; unsigned int cflag = tty->termios.c_cflag; + speed_t baud; port_settings.port = port->port_number + 1; @@ -692,11 +697,13 @@ static void firm_setup_port(struct tty_struct *tty) dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff); /* get the baud rate wanted */ - port_settings.baud = tty_get_baud_rate(tty); - dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud); + baud = tty_get_baud_rate(tty); + port_settings.baud = cpu_to_le32(baud); + dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud); /* fixme: should set validated settings */ - tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud); + tty_encode_baud_rate(tty, baud, baud); + /* handle any settings that aren't specified in the tty structure */ port_settings.lloop = 0; diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h index 00398149cd8d..269e727a92f9 100644 --- a/drivers/usb/serial/whiteheat.h +++ b/drivers/usb/serial/whiteheat.h @@ -87,7 +87,7 @@ struct whiteheat_simple { struct whiteheat_port_settings { __u8 port; /* port number (1 to N) */ - __u32 baud; /* any value 7 - 460800, firmware calculates + __le32 baud; /* any value 7 - 460800, firmware calculates best fit; arrives little endian */ __u8 bits; /* 5, 6, 7, or 8 */ __u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */ diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 6737fab94959..54a3c8195c96 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -68,7 +68,6 @@ static const char* host_info(struct Scsi_Host *host) static int slave_alloc (struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); - int maxp; /* * Set the INQUIRY transfer length to 36. We don't use any of @@ -78,15 +77,6 @@ static int slave_alloc (struct scsi_device *sdev) sdev->inquiry_len = 36; /* - * USB has unusual scatter-gather requirements: the length of each - * scatterlist element except the last must be divisible by the - * Bulk maxpacket value. Fortunately this value is always a - * power of 2. Inform the block layer about this requirement. - */ - maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0); - blk_queue_virt_boundary(sdev->request_queue, maxp - 1); - - /* * Some host controllers may have alignment requirements. * We'll play it safe by requiring 512-byte alignment always. 
*/ diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index bf80d6f81f58..34538253f12c 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -789,30 +789,10 @@ static int uas_slave_alloc(struct scsi_device *sdev) { struct uas_dev_info *devinfo = (struct uas_dev_info *)sdev->host->hostdata; - int maxp; sdev->hostdata = devinfo; /* - * We have two requirements here. We must satisfy the requirements - * of the physical HC and the demands of the protocol, as we - * definitely want no additional memory allocation in this path - * ruling out using bounce buffers. - * - * For a transmission on USB to continue we must never send - * a package that is smaller than maxpacket. Hence the length of each - * scatterlist element except the last must be divisible by the - * Bulk maxpacket value. - * If the HC does not ensure that through SG, - * the upper layer must do that. We must assume nothing - * about the capabilities off the HC, so we use the most - * pessimistic requirement. - */ - - maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0); - blk_queue_virt_boundary(sdev->request_queue, maxp - 1); - - /* * The protocol has no requirements on alignment in the strict sense. * Controllers may or may not have alignment restrictions. * As this is not exported, we use an extremely conservative guess. diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 96562744101c..5f61d9977a15 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -4409,18 +4409,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, /* USB data support is optional */ ret = fwnode_property_read_string(fwnode, "data-role", &cap_str); if (ret == 0) { - port->typec_caps.data = typec_find_port_data_role(cap_str); - if (port->typec_caps.data < 0) - return -EINVAL; + ret = typec_find_port_data_role(cap_str); + if (ret < 0) + return ret; + port->typec_caps.data = ret; } ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); if (ret < 0) return ret; - port->typec_caps.type = typec_find_port_power_role(cap_str); - if (port->typec_caps.type < 0) - return -EINVAL; + ret = typec_find_port_power_role(cap_str); + if (ret < 0) + return ret; + port->typec_caps.type = ret; port->port_type = port->typec_caps.type; if (port->port_type == TYPEC_PORT_SNK) diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 6c103697c582..d99700cb4dca 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -75,6 +75,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt) if (cur != 0xff) { mutex_unlock(&dp->con->lock); + if (dp->con->port_altmode[cur] == alt) + return 0; return -EBUSY; } diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 907e20e1a71e..d772fce51905 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -195,7 +195,6 @@ struct ucsi_ccg { /* fw build with vendor information */ u16 fw_build; - bool run_isr; /* flag to call ISR routine during resume */ struct work_struct pm_work; }; @@ -224,18 +223,6 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) if (quirks && quirks->max_read_len) max_read_len = quirks->max_read_len; - if (uc->fw_build == CCG_FW_BUILD_NVIDIA && - uc->fw_version <= CCG_OLD_FW_VERSION) { - mutex_lock(&uc->lock); - /* - * Do not schedule pm_work to run ISR in - * ucsi_ccg_runtime_resume() after pm_runtime_get_sync() - * since we are already in ISR 
path. - */ - uc->run_isr = false; - mutex_unlock(&uc->lock); - } - pm_runtime_get_sync(uc->dev); while (rem_len > 0) { msgs[1].buf = &data[len - rem_len]; @@ -278,18 +265,6 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len) msgs[0].len = len + sizeof(rab); msgs[0].buf = buf; - if (uc->fw_build == CCG_FW_BUILD_NVIDIA && - uc->fw_version <= CCG_OLD_FW_VERSION) { - mutex_lock(&uc->lock); - /* - * Do not schedule pm_work to run ISR in - * ucsi_ccg_runtime_resume() after pm_runtime_get_sync() - * since we are already in ISR path. - */ - uc->run_isr = false; - mutex_unlock(&uc->lock); - } - pm_runtime_get_sync(uc->dev); status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (status < 0) { @@ -1130,7 +1105,6 @@ static int ucsi_ccg_probe(struct i2c_client *client, uc->ppm.sync = ucsi_ccg_sync; uc->dev = dev; uc->client = client; - uc->run_isr = true; mutex_init(&uc->lock); INIT_WORK(&uc->work, ccg_update_firmware); INIT_WORK(&uc->pm_work, ccg_pm_workaround_work); @@ -1188,6 +1162,8 @@ static int ucsi_ccg_probe(struct i2c_client *client, pm_runtime_set_active(uc->dev); pm_runtime_enable(uc->dev); + pm_runtime_use_autosuspend(uc->dev); + pm_runtime_set_autosuspend_delay(uc->dev, 5000); pm_runtime_idle(uc->dev); return 0; @@ -1229,7 +1205,6 @@ static int ucsi_ccg_runtime_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ucsi_ccg *uc = i2c_get_clientdata(client); - bool schedule = true; /* * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue @@ -1237,17 +1212,8 @@ static int ucsi_ccg_runtime_resume(struct device *dev) * Schedule a work to call ISR as a workaround. */ if (uc->fw_build == CCG_FW_BUILD_NVIDIA && - uc->fw_version <= CCG_OLD_FW_VERSION) { - mutex_lock(&uc->lock); - if (!uc->run_isr) { - uc->run_isr = true; - schedule = false; - } - mutex_unlock(&uc->lock); - - if (schedule) - schedule_work(&uc->pm_work); - } + uc->fw_version <= CCG_OLD_FW_VERSION) + schedule_work(&uc->pm_work); return 0; } diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index c31d17d05810..2dc58766273a 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -61,6 +61,7 @@ struct usb_skel { spinlock_t err_lock; /* lock for errors */ struct kref kref; struct mutex io_mutex; /* synchronize I/O with disconnect */ + unsigned long disconnected:1; wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */ }; #define to_skel_dev(d) container_of(d, struct usb_skel, kref) @@ -73,6 +74,7 @@ static void skel_delete(struct kref *kref) struct usb_skel *dev = to_skel_dev(kref); usb_free_urb(dev->bulk_in_urb); + usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev->bulk_in_buffer); kfree(dev); @@ -124,10 +126,7 @@ static int skel_release(struct inode *inode, struct file *file) return -ENODEV; /* allow the device to be autosuspended */ - mutex_lock(&dev->io_mutex); - if (dev->interface) - usb_autopm_put_interface(dev->interface); - mutex_unlock(&dev->io_mutex); + usb_autopm_put_interface(dev->interface); /* decrement the count on our device */ kref_put(&dev->kref, skel_delete); @@ -231,8 +230,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count, dev = file->private_data; - /* if we cannot read at all, return EOF */ - if (!dev->bulk_in_urb || !count) + if (!count) return 0; /* no concurrent readers */ @@ -240,7 +238,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count, if (rv < 0) return rv; - if (!dev->interface) { /* disconnect() was called */ + if 
(dev->disconnected) { /* disconnect() was called */ rv = -ENODEV; goto exit; } @@ -422,7 +420,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, /* this lock makes sure we don't submit URBs to gone devices */ mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; @@ -507,7 +505,7 @@ static int skel_probe(struct usb_interface *interface, init_waitqueue_head(&dev->bulk_in_wait); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ @@ -573,9 +571,10 @@ static void skel_disconnect(struct usb_interface *interface) /* prevent more I/O from starting */ mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); + usb_kill_urb(dev->bulk_in_urb); usb_kill_anchored_urbs(&dev->submitted); /* decrement our usage count */ diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 585a84d319bd..65850e9c7190 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -1195,12 +1195,12 @@ static int vhci_start(struct usb_hcd *hcd) if (id == 0 && usb_hcd_is_primary_hcd(hcd)) { err = vhci_init_attr_group(); if (err) { - pr_err("init attr group\n"); + dev_err(hcd_dev(hcd), "init attr group failed, err = %d\n", err); return err; } err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group); if (err) { - pr_err("create sysfs files\n"); + dev_err(hcd_dev(hcd), "create sysfs files failed, err = %d\n", err); vhci_finish_attr_group(); return err; } diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index c3803785f6ef..0ae40a13a9fe 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -147,7 +147,10 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) } kfree(iov); + /* This is only for isochronous case */ kfree(iso_buffer); + iso_buffer = NULL; + usbip_dbg_vhci_tx("send txdata\n"); total_size += txsize; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 96fddc1dafc3..d864277ea16f 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1658,7 +1658,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, struct bus_type *bus = NULL; int ret; bool resv_msi, msi_remap; - phys_addr_t resv_msi_base; + phys_addr_t resv_msi_base = 0; struct iommu_domain_geometry geo; LIST_HEAD(iova_copy); LIST_HEAD(group_resv_regions); diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 7804869c6a31..056308008288 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -161,6 +161,7 @@ static int vhost_test_release(struct inode *inode, struct file *f) vhost_test_stop(n, &private); vhost_test_flush(n); + vhost_dev_stop(&n->dev); vhost_dev_cleanup(&n->dev); /* We do an extra flush before freeing memory, * since jobs can re-queue themselves. 
*/ @@ -237,6 +238,7 @@ static long vhost_test_reset_owner(struct vhost_test *n) } vhost_test_stop(n, &priv); vhost_test_flush(n); + vhost_dev_stop(&n->dev); vhost_dev_reset_owner(&n->dev, umem); done: mutex_unlock(&n->dev.mutex); diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 08ad0d1f0476..a0a2d74967ef 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -852,6 +852,12 @@ static inline int xfer_kern(void *src, void *dst, size_t len) return 0; } +static inline int kern_xfer(void *dst, void *src, size_t len) +{ + memcpy(dst, src, len); + return 0; +} + /** * vringh_init_kern - initialize a vringh for a kernelspace vring. * @vrh: the vringh to initialize. @@ -958,7 +964,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern); ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, const void *src, size_t len) { - return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern); + return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer); } EXPORT_SYMBOL(vringh_iov_push_kern); diff --git a/drivers/video/fbdev/c2p_core.h b/drivers/video/fbdev/c2p_core.h index e1035a865fb9..45a6d895a7d7 100644 --- a/drivers/video/fbdev/c2p_core.h +++ b/drivers/video/fbdev/c2p_core.h @@ -29,7 +29,7 @@ static inline void _transp(u32 d[], unsigned int i1, unsigned int i2, extern void c2p_unsupported(void); -static inline u32 get_mask(unsigned int n) +static __always_inline u32 get_mask(unsigned int n) { switch (n) { case 1: @@ -57,7 +57,7 @@ static inline u32 get_mask(unsigned int n) * Transpose operations on 8 32-bit words */ -static inline void transp8(u32 d[], unsigned int n, unsigned int m) +static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m) { u32 mask = get_mask(n); @@ -99,7 +99,7 @@ static inline void transp8(u32 d[], unsigned int n, unsigned int m) * Transpose operations on 4 32-bit words */ -static inline void transp4(u32 d[], unsigned int n, unsigned int m) +static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m) { u32 mask = get_mask(n); @@ -126,7 +126,7 @@ static inline void transp4(u32 d[], unsigned int n, unsigned int m) * Transpose operations on 4 32-bit words (reverse order) */ -static inline void transp4x(u32 d[], unsigned int n, unsigned int m) +static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m) { u32 mask = get_mask(n); diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 75fd140b02ff..43c391626a00 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr( if (!bounce_buf) return -ENOMEM; + *bounce_buf_ret = bounce_buf; + if (copy_in) { ret = copy_from_user(bounce_buf, (void __user *)buf, len); if (ret) @@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr( memset(bounce_buf, 0, len); } - *bounce_buf_ret = bounce_buf; hgcm_call_add_pagelist_size(bounce_buf, len, extra); return 0; } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index bdc08244a648..a8041e451e9e 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1499,9 +1499,6 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) * counter first before updating event flags. 
*/ virtio_wmb(vq->weak_barriers); - } else { - used_idx = vq->last_used_idx; - wrap_counter = vq->packed.used_wrap_counter; } if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { @@ -1518,7 +1515,9 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) */ virtio_mb(vq->weak_barriers); - if (is_used_desc_packed(vq, used_idx, wrap_counter)) { + if (is_used_desc_packed(vq, + vq->last_used_idx, + vq->packed.used_wrap_counter)) { END_USE(vq); return false; } diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig index ebed495b9e69..b7847636501d 100644 --- a/drivers/w1/slaves/Kconfig +++ b/drivers/w1/slaves/Kconfig @@ -103,6 +103,7 @@ config W1_SLAVE_DS2438 config W1_SLAVE_DS250X tristate "512b/1kb/16kb EPROM family support" + select CRC16 help Say Y here if you want to use a 1-wire 512b/1kb/16kb EPROM family device (DS250x). diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c index b0152fef4fc7..bc60e036627a 100644 --- a/drivers/watchdog/bd70528_wdt.c +++ b/drivers/watchdog/bd70528_wdt.c @@ -288,3 +288,4 @@ module_platform_driver(bd70528_wdt); MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); MODULE_DESCRIPTION("BD70528 watchdog driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd70528-wdt"); diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 9393be584e72..808eeb4779e4 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -26,6 +26,7 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/timer.h> +#include <linux/compat.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/io.h> @@ -473,6 +474,11 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; } +static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + return cpwd_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} + static ssize_t cpwd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -497,7 +503,7 @@ static ssize_t cpwd_read(struct file *file, char __user *buffer, static const struct file_operations cpwd_fops = { .owner = THIS_MODULE, .unlocked_ioctl = cpwd_ioctl, - .compat_ioctl = compat_ptr_ioctl, + .compat_ioctl = cpwd_compat_ioctl, .open = cpwd_open, .write = cpwd_write, .read = cpwd_read, diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c index 7ea5cf54e94a..8ed89f032ebf 100644 --- a/drivers/watchdog/imx_sc_wdt.c +++ b/drivers/watchdog/imx_sc_wdt.c @@ -99,8 +99,14 @@ static int imx_sc_wdt_set_pretimeout(struct watchdog_device *wdog, { struct arm_smccc_res res; + /* + * SCU firmware calculates pretimeout based on current time + * stamp instead of watchdog timeout stamp, need to convert + * the pretimeout to SCU firmware's timeout value. 
+ */ arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_PRETIME_WDOG, - pretimeout * 1000, 0, 0, 0, 0, 0, &res); + (wdog->timeout - pretimeout) * 1000, 0, 0, 0, + 0, 0, &res); if (res.a0) return -EACCES; diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c index d17c1a6ed723..5a9ca10fbcfa 100644 --- a/drivers/watchdog/meson_gxbb_wdt.c +++ b/drivers/watchdog/meson_gxbb_wdt.c @@ -89,8 +89,8 @@ static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev) reg = readl(data->reg_base + GXBB_WDT_TCNT_REG); - return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) - - (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000; + return ((reg & GXBB_WDT_TCNT_SETUP_MASK) - + (reg >> GXBB_WDT_TCNT_CNT_SHIFT)) / 1000; } static const struct watchdog_ops meson_gxbb_wdt_ops = { diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c index 2d3652004e39..1213179f863c 100644 --- a/drivers/watchdog/pm8916_wdt.c +++ b/drivers/watchdog/pm8916_wdt.c @@ -163,9 +163,17 @@ static int pm8916_wdt_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq > 0) { - if (devm_request_irq(dev, irq, pm8916_wdt_isr, 0, "pm8916_wdt", - wdt)) - irq = 0; + err = devm_request_irq(dev, irq, pm8916_wdt_isr, 0, + "pm8916_wdt", wdt); + if (err) + return err; + + wdt->wdev.info = &pm8916_wdt_pt_ident; + } else { + if (irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + wdt->wdev.info = &pm8916_wdt_ident; } /* Configure watchdog to hard-reset mode */ @@ -177,7 +185,6 @@ static int pm8916_wdt_probe(struct platform_device *pdev) return err; } - wdt->wdev.info = (irq > 0) ? &pm8916_wdt_pt_ident : &pm8916_wdt_ident, wdt->wdev.ops = &pm8916_wdt_ops, wdt->wdev.parent = dev; wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index a446a7221e13..81401f386c9c 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -22,6 +22,7 @@ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt +#include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -34,9 +35,6 @@ #include <linux/slab.h> #include <linux/highmem.h> #include <linux/refcount.h> -#ifdef CONFIG_XEN_GRANT_DMA_ALLOC -#include <linux/of_device.h> -#endif #include <xen/xen.h> #include <xen/grant_table.h> @@ -625,14 +623,7 @@ static int gntdev_open(struct inode *inode, struct file *flip) flip->private_data = priv; #ifdef CONFIG_XEN_GRANT_DMA_ALLOC priv->dma_dev = gntdev_miscdev.this_device; - - /* - * The device is not spawn from a device tree, so arch_setup_dma_ops - * is not called, thus leaving the device with dummy DMA ops. - * Fix this by calling of_dma_configure() with a NULL node to set - * default DMA ops. 
- */ - of_dma_configure(priv->dma_dev, NULL, true); + dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64)); #endif pr_debug("priv %p\n", priv); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7ea6fb6a2e5d..49b381e104ef 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1363,8 +1363,7 @@ static int gnttab_setup(void) if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) { gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr; if (gnttab_shared.addr == NULL) { - pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n", - (unsigned long)xen_auto_xlat_grant_frames.vaddr); + pr_warn("gnttab share frames is not mapped!\n"); return -ENOMEM; } } diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 69a626b0e594..c57c71b7d53d 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev, mappass->reqcopy = *req; icsk = inet_csk(mappass->sock->sk); queue = &icsk->icsk_accept_queue; - data = queue->rskq_accept_head != NULL; + data = READ_ONCE(queue->rskq_accept_head) != NULL; if (data) { mappass->reqcopy.cmd = 0; ret = 0;
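
The yurex and usb-skeleton hunks above converge on the same lifetime fix: pin the interface with usb_get_intf() at probe time, mark disconnection with a dedicated flag instead of clearing dev->interface, and drop the reference only from the kref release routine. Because the interface pointer then stays valid for the whole lifetime of the device structure, release paths such as serial_cleanup() and skel_release() can call usb_autopm_put_interface() without taking io_mutex. A minimal sketch of that pattern follows; the demo_* names are hypothetical, not taken from the tree.

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* Hypothetical device structure following the yurex/usb-skeleton pattern. */
struct demo_dev {
        struct usb_device       *udev;
        struct usb_interface    *interface;     /* valid until demo_delete() */
        struct mutex            io_mutex;       /* serializes I/O with disconnect */
        unsigned long           disconnected:1; /* replaces "interface == NULL" tests */
        struct kref             kref;
};

static void demo_delete(struct kref *kref)
{
        struct demo_dev *dev = container_of(kref, struct demo_dev, kref);

        usb_put_intf(dev->interface);   /* pairs with usb_get_intf() in probe */
        usb_put_dev(dev->udev);
        kfree(dev);
}

static int demo_probe(struct usb_interface *interface,
                      const struct usb_device_id *id)
{
        struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return -ENOMEM;
        kref_init(&dev->kref);
        mutex_init(&dev->io_mutex);
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
        dev->interface = usb_get_intf(interface);       /* pin the interface */
        usb_set_intfdata(interface, dev);
        return 0;
}

static void demo_disconnect(struct usb_interface *interface)
{
        struct demo_dev *dev = usb_get_intfdata(interface);

        mutex_lock(&dev->io_mutex);
        dev->disconnected = 1;  /* I/O paths now test this flag, not the pointer */
        mutex_unlock(&dev->io_mutex);

        kref_put(&dev->kref, demo_delete);
}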

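Two kinds of endianness change also recur above: the renesas_usbhs common.c hunk adds the missing cpu_to_le16()/le16_to_cpu() conversions on the wValue/wIndex/wLength register traffic, and the mod_gadget.c hunk fixes an operator-precedence bug in which the high-byte shift was applied to the raw little-endian value before conversion. A hypothetical helper (illustration only, not from the patch) showing why the fixed ordering matters:

#include <asm/byteorder.h>
#include <linux/usb/ch9.h>

/*
 * For USB_DEVICE_TEST_MODE the test selector sits in the high byte of
 * wIndex, which is __le16 (little-endian on the wire). The byte swap
 * must happen before the shift.
 */
static u8 demo_test_selector(const struct usb_ctrlrequest *ctrl)
{
        /*
         * The old "le16_to_cpu(ctrl->wIndex >> 8)" shifted the raw
         * little-endian value first and so extracted the wrong byte
         * on big-endian hosts; converting first is correct everywhere.
         */
        return le16_to_cpu(ctrl->wIndex) >> 8;
}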