author     Dave Airlie <airlied@redhat.com>  2023-06-19 11:57:25 +1000
committer  Dave Airlie <airlied@redhat.com>  2023-06-19 11:57:26 +1000
commit     bcbede6fbeb0e1eb85ccbb532faf06d3b31f0e73 (patch)
tree       53062368790eb070705726f24a49beb34b8737d0 /drivers/gpu/drm/amd/amdgpu
parent     Merge tag 'drm-misc-next-fixes-2023-06-15' of git://anongit.freedesktop.org/d... (diff)
parent     drm/dp_mst: Clear MSG_RDY flag before sending new message (diff)
download   linux-bcbede6fbeb0e1eb85ccbb532faf06d3b31f0e73.tar.gz
           linux-bcbede6fbeb0e1eb85ccbb532faf06d3b31f0e73.zip
Merge tag 'amd-drm-next-6.5-2023-06-16' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.5-2023-06-16:

amdgpu:
- Misc display fixes
- W=1 fixes
- Improve scheduler naming
- DCN 3.1.4 fixes
- kdoc fixes
- Enable W=1
- VCN 4.0 fix
- xgmi fixes
- TOPDOWN fix for large BAR systems
- eDP fix
- PSR fixes
- SubVP fixes
- Freesync fix
- DPIA fix
- SMU 13.0.5 fixes
- vblflash fix
- RAS fixes
- SDMA 4 fix
- BO locking fix
- BO backing store fix
- NBIO 7.9 fixes
- GC 9.4.3 fixes
- GPU reset recovery fixes
- HMM fix

amdkfd:
- Fix NULL check
- Trap fixes
- Queue count fix
- Add event age tracking

radeon:
- fbdev client fix

scheduler:
- Avoid an infinite loop

UAPI:
- Add KFD event age tracking
  Proposed ROCT-Thunk-Interface:
  https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/commit/efdbf6cfbc026bd68ac3c35d00dacf84370eb81e
  https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/commit/1820ae0a2db85b6f584611dc0cde1a00e7c22915
  Proposed ROCR-Runtime:
  https://github.com/RadeonOpenCompute/ROCR-Runtime/compare/master...zhums:ROCR-Runtime:new_event_wait_review
  https://github.com/RadeonOpenCompute/ROCR-Runtime/commit/e1f5bdb88eb882ac798aeca2c00ea3fbb2dba459
  https://github.com/RadeonOpenCompute/ROCR-Runtime/commit/7d26afd14107b5c2a754c1a3f415d89f3aabb503

drm:
- DP MST fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230616163548.7706-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 176
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_10.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 6
37 files changed, 388 insertions, 151 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 86b833085f19..8d16f280b695 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -40,7 +40,18 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/amdkfd
subdir-ccflags-y := -Wextra
-subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
+subdir-ccflags-y += -Wunused
+subdir-ccflags-y += -Wmissing-prototypes
+subdir-ccflags-y += -Wmissing-declarations
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-format-attribute
+# Need this to avoid recursive variable evaluation issues
+cond-flags := $(call cc-option, -Wunused-but-set-variable) \
+ $(call cc-option, -Wunused-const-variable) \
+ $(call cc-option, -Wstringop-truncation) \
+ $(call cc-option, -Wpacked-not-aligned)
+subdir-ccflags-y += $(cond-flags)
subdir-ccflags-y += -Wno-unused-parameter
subdir-ccflags-y += -Wno-type-limits
subdir-ccflags-y += -Wno-sign-compare
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6e1d331af01f..d9503882ea97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -309,7 +309,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
p->gang_leader = p->jobs[p->gang_leader_idx];
- if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
+ if (p->ctx->generation != p->gang_leader->generation) {
ret = -ECANCELED;
goto free_all_kdata;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 410acdd4554c..0dc9c655c4fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -333,7 +333,7 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
- ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
+ ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
ctx->init_priority = priority;
ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
@@ -432,6 +432,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
int r;
+ struct drm_sched_entity *ctx_entity;
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
@@ -455,7 +456,14 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
return r;
}
- *entity = &ctx->entities[hw_ip][ring]->entity;
+ ctx_entity = &ctx->entities[hw_ip][ring]->entity;
+ r = drm_sched_entity_error(ctx_entity);
+ if (r) {
+ DRM_DEBUG("error entity %p\n", ctx_entity);
+ return r;
+ }
+
+ *entity = ctx_entity;
return 0;
}
@@ -586,7 +594,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
- if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
if (atomic_read(&ctx->guilty))
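
The hunk above makes amdgpu_ctx_get_entity() refuse to hand out an entity whose last scheduled job ended in error. A minimal sketch of the same pattern, assuming only the drm scheduler API (example_submit and its error policy are illustrative, not part of this patch; the -ENODATA tolerance mirrors the soft-recovery marker used later in amdgpu_job.c):

#include <drm/gpu_scheduler.h>

/* Illustrative: refuse new jobs on a dead entity, but tolerate the
 * -ENODATA that amdgpu uses to mark soft-recovered fences. */
static int example_submit(struct drm_sched_entity *entity,
			  struct drm_sched_job *job)
{
	int r = drm_sched_entity_error(entity);

	if (r && r != -ENODATA)
		return r;		/* hard error: entity is dead */

	drm_sched_entity_push_job(job);
	return 0;
}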
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index f1e27b6e16f4..85376baaa92f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -47,7 +47,7 @@ struct amdgpu_ctx {
struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
unsigned reset_counter_query;
- uint32_t vram_lost_counter;
+ uint64_t generation;
spinlock_t ring_lock;
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 859882109f55..8e1cfc87122d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1515,6 +1515,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
mall_size += mall_size_per_umc;
}
adev->gmc.mall_size = mall_size;
+ adev->gmc.m_half_use = half_use;
break;
default:
dev_err(adev->dev,
@@ -1896,6 +1897,8 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
break;
case IP_VERSION(9, 4, 3):
+ if (!amdgpu_exp_hw_support)
+ return -EINVAL;
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 876ec35b8f83..c694b41f6461 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -692,6 +692,30 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_fence_driver_set_error - set error code on fences
+ * @ring: the ring which contains the fences
+ * @error: the error code to set
+ *
+ * Set an error code to all the fences pending on the ring.
+ */
+void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference_protected(drv->fences[i],
+ lockdep_is_held(&drv->lock));
+ if (fence && !dma_fence_is_signaled_locked(fence))
+ dma_fence_set_error(fence, error);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
* @ring: fence of the ring to signal
@@ -699,6 +723,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
*/
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
+ amdgpu_fence_driver_set_error(ring, -ECANCELED);
amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
amdgpu_fence_process(ring);
}
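
With the new helper, every still-pending fence on a ring carries -ECANCELED before forced completion signals it. A consumer-side sketch of what a waiter then observes, using only the stock dma-fence API (example_wait_and_check is illustrative, not part of the patch):

#include <linux/dma-fence.h>
#include <linux/jiffies.h>

static int example_wait_and_check(struct dma_fence *fence)
{
	long r;

	/* Interruptible wait with a 1s budget. */
	r = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
	if (r < 0)
		return r;		/* interrupted */
	if (r == 0)
		return -ETIMEDOUT;

	/* Signaled: after amdgpu_fence_driver_force_completion() this
	 * reports -ECANCELED instead of 1 (signaled without error). */
	return dma_fence_get_status(fence);
}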
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6794edd1d2d2..56d73fade568 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -301,6 +301,8 @@ struct amdgpu_gmc {
/* MALL size */
u64 mall_size;
+ uint32_t m_half_use;
+
/* number of UMC instances */
int num_umc;
/* mode2 save restore */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 2dadcfe43d03..081267161d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -190,8 +190,8 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
hmm_range->start, hmm_range->end);
- /* Assuming 512MB takes maxmium 1 second to fault page address */
- timeout = max((hmm_range->end - hmm_range->start) >> 29, 1UL);
+ /* Assuming 128MB takes maximum 1 second to fault page address */
+ timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL);
timeout *= HMM_RANGE_DEFAULT_TIMEOUT;
timeout = jiffies + msecs_to_jiffies(timeout);
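
The shift change above quadruples the fault-timeout budget: the range size is now divided by 128 MiB (>> 27) instead of 512 MiB (>> 29) before being scaled by HMM_RANGE_DEFAULT_TIMEOUT (1000 msec in include/linux/hmm.h). A worked sketch of the arithmetic (example_hmm_timeout_msec is illustrative):

#include <linux/minmax.h>

static unsigned long example_hmm_timeout_msec(unsigned long start,
					      unsigned long end)
{
	/* One second per 128 MiB, at least one second. */
	unsigned long timeout = max((end - start) >> 27, 1UL);

	return timeout * 1000;	/* HMM_RANGE_DEFAULT_TIMEOUT */
}

/* example_hmm_timeout_msec(0, 1UL << 30) == 8000 for a 1 GiB range;
 * the old >> 29 shift allowed only 2000 msec for the same range. */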
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 3481d2808ce5..5273decc5753 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -467,7 +467,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
handled = true;
} else {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
+ DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ src_id, client_id);
}
/* Send it to amdkfd as well if it isn't already handled */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index aca3a2bfe8d2..78476bc75b4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -109,7 +109,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
- (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ (*job)->generation = amdgpu_vm_generation(adev, vm);
(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
if (!entity)
@@ -258,16 +258,27 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
struct dma_fence *fence = NULL;
int r;
+ /* Ignore soft recovered fences here */
+ r = drm_sched_entity_error(s_entity);
+ if (r && r != -ENODATA)
+ goto error;
+
if (!fence && job->gang_submit)
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
while (!fence && job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
- if (r)
+ if (r) {
DRM_ERROR("Error getting VM ID (%d)\n", r);
+ goto error;
+ }
}
return fence;
+
+error:
+ dma_fence_set_error(&job->base.s_fence->finished, r);
+ return NULL;
}
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
@@ -284,7 +295,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
/* Skip job if VRAM is lost and never resubmit gangs */
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+ if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
(job->job_run_counter && job->gang_submit))
dma_fence_set_error(finished, -ECANCELED);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 3f9804f956c9..a963a25ddd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -61,7 +61,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
- uint32_t vram_lost_counter;
+ uint64_t generation;
/* user fence handling */
uint64_t uf_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 095aecfb201e..8ab8ae01f87c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -99,8 +99,6 @@ struct amdgpu_nbio_funcs {
int (*get_compute_partition_mode)(struct amdgpu_device *adev);
u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
u32 *supp_modes);
- void (*set_compute_partition_mode)(struct amdgpu_device *adev,
- enum amdgpu_gfx_partition mode);
};
struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f76649e523a0..f7905bce0de1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -150,7 +150,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
- else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
+ else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
@@ -1282,8 +1282,12 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
- unsigned int domain;
uint64_t size = amdgpu_bo_size(bo);
+ unsigned int domain;
+
+ /* Abort if the BO doesn't currently have a backing store */
+ if (!bo->tbo.resource)
+ return;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index dd865beb39a8..e15c27e05564 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -569,6 +569,26 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
return -ETIME;
}
+int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
+ uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
+{
+ uint32_t val;
+ int i;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp->adev->no_hw_access)
+ return 0;
+
+ for (i = 0; i < msec_timeout; i++) {
+ val = RREG32(reg_index);
+ if ((val & mask) == reg_val)
+ return 0;
+ msleep(1);
+ }
+
+ return -ETIME;
+}
+
static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
switch (cmd_id) {
@@ -1653,10 +1673,11 @@ int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_ras_is_poison_mode_supported(adev))
ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
- if (!adev->gmc.xgmi.connected_to_cpu)
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
ras_cmd->ras_in_message.init_flags.xcc_mask =
adev->gfx.xcc_mask;
+ ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
ret = psp_ta_load(psp, &psp->ras_context.context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index d84323923a3f..2cae0b1a0b8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -455,6 +455,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);
+extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index a6c3265cdbc4..4769a18304d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -171,8 +171,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
err_data.err_addr = &err_rec;
- amdgpu_umc_fill_error_record(&err_data, address,
- (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
+ amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -490,7 +489,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size) ||
+ if ((data.inject.address >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
(data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
dev_warn(adev->dev, "RAS WARN: input address "
"0x%llx is invalid.",
@@ -1491,6 +1491,7 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *dir;
@@ -1501,6 +1502,7 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
&amdgpu_ras_debugfs_eeprom_ops);
debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
&con->bad_page_cnt_threshold);
+ debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
@@ -1685,8 +1687,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
}
}
- if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_poison_handler(adev, false);
+ amdgpu_umc_poison_handler(adev, false);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -2451,11 +2452,10 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
adev->ras_hw_enabled = adev->ras_enabled = 0;
- if (!adev->is_atom_fw ||
- !amdgpu_ras_asic_supported(adev))
+ if (!amdgpu_ras_asic_supported(adev))
return;
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
dev_info(adev->dev, "MEM ECC is active.\n");
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c2e8f6491ac6..0648dfe559af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -72,7 +72,7 @@
/* Bad GPU tag ‘BADG’ */
#define RAS_TABLE_HDR_BAD 0x42414447
-/**
+/*
* EEPROM Table structure v1
* ---------------------------------
* | |
@@ -94,7 +94,7 @@
#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
/ RAS_TABLE_RECORD_SIZE)
-/**
+/*
* EEPROM Table structrue v2.1
* ---------------------------------
* | |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index da26c555af24..80d6e132e409 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -433,11 +433,18 @@ void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
struct dma_fence *fence)
{
+ unsigned long flags;
+
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
+ spin_lock_irqsave(fence->lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ dma_fence_set_error(fence, -ENODATA);
+ spin_unlock_irqrestore(fence->lock, flags);
+
atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
@@ -561,6 +568,17 @@ static const struct file_operations amdgpu_debugfs_mqd_fops = {
.llseek = default_llseek
};
+static int amdgpu_debugfs_ring_error(void *data, u64 val)
+{
+ struct amdgpu_ring *ring = data;
+
+ amdgpu_fence_driver_set_error(ring, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
+ amdgpu_debugfs_ring_error, "%lld\n");
+
#endif
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
@@ -582,6 +600,11 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
&amdgpu_debugfs_mqd_fops,
ring->mqd_size);
}
+
+ sprintf(name, "amdgpu_error_%s", ring->name);
+ debugfs_create_file(name, 0200, root, ring,
+ &amdgpu_debugfs_error_fops);
+
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 21ffb9ce32ce..028ff075db51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -126,6 +126,7 @@ struct amdgpu_fence_driver {
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d2d0d27f9053..0534ab716809 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
@@ -1456,7 +1456,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
memcpy(adev->mman.sdma_access_ptr, buf, len);
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
&job);
@@ -2032,7 +2032,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
- r = drm_sched_entity_init(&adev->mman.entity,
+ r = drm_sched_entity_init(&adev->mman.high_pr,
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
@@ -2041,7 +2041,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
- r = drm_sched_entity_init(&adev->mman.delayed,
+ r = drm_sched_entity_init(&adev->mman.low_pr,
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
@@ -2050,8 +2050,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
goto error_free_entity;
}
} else {
- drm_sched_entity_destroy(&adev->mman.entity);
- drm_sched_entity_destroy(&adev->mman.delayed);
+ drm_sched_entity_destroy(&adev->mman.high_pr);
+ drm_sched_entity_destroy(&adev->mman.low_pr);
dma_fence_put(man->move);
man->move = NULL;
}
@@ -2067,7 +2067,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
error_free_entity:
- drm_sched_entity_destroy(&adev->mman.entity);
+ drm_sched_entity_destroy(&adev->mman.high_pr);
}
static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
@@ -2082,8 +2082,8 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED;
int r;
- struct drm_sched_entity *entity = delayed ? &adev->mman.delayed :
- &adev->mman.entity;
+ struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+ &adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, pool, job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e82b1edee7a4..6d0d66e40db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -59,10 +59,10 @@ struct amdgpu_mman {
bool buffer_funcs_enabled;
struct mutex gtt_window_lock;
- /* Scheduler entity for buffer moves */
- struct drm_sched_entity entity;
- /* Scheduler entity for VRAM clearing */
- struct drm_sched_entity delayed;
+ /* High priority scheduler entity for buffer moves */
+ struct drm_sched_entity high_pr;
+ /* Low priority scheduler entity for VRAM clearing */
+ struct drm_sched_entity low_pr;
struct amdgpu_vram_mgr vram_mgr;
struct amdgpu_gtt_mgr gtt_mgr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 1edf8e6aeb16..db0d94ca4ffc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -169,27 +169,31 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
- if (!amdgpu_sriov_vf(adev)) {
- if (!adev->gmc.xgmi.connected_to_cpu) {
- struct ras_err_data err_data = {0, 0, 0, NULL};
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
-
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
-
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- }
- } else if (reset) {
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu) {
+ if (reset) {
/* MCA poison handler is only responsible for GPU reset,
* let MCA notifier do page retirement.
*/
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
amdgpu_ras_reset_gpu(adev);
}
+ return ret;
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
adev->virt.ops->ras_poison_handler(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dc80c9c8fd14..143d11afe0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -267,6 +267,32 @@ static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
+ * @vm: the VM which state machine to reset
+ *
+ * Move all vm_bo object in the VM into a state where they will be updated
+ * again during validation.
+ */
+static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_bo_base *vm_bo, *tmp;
+
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->done, &vm->invalidated);
+ list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
+ vm_bo->moved = true;
+ list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
+ struct amdgpu_bo *bo = vm_bo->bo;
+
+ if (!bo || bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ else if (bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ }
+ spin_unlock(&vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
* @base: base structure for tracking BO usage in a VM
@@ -351,6 +377,58 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
spin_unlock(&adev->mman.bdev.lru_lock);
}
+/* Create scheduler entities for page table updates */
+static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ int r;
+
+ r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
+ if (r)
+ goto error;
+
+ return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
+
+error:
+ drm_sched_entity_destroy(&vm->immediate);
+ return r;
+}
+
+/* Destroy the entities for page table updates again */
+static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
+{
+ drm_sched_entity_destroy(&vm->immediate);
+ drm_sched_entity_destroy(&vm->delayed);
+}
+
+/**
+ * amdgpu_vm_generation - return the page table re-generation counter
+ * @adev: the amdgpu_device
+ * @vm: optional VM to check, might be NULL
+ *
+ * Returns a page table re-generation token to allow checking if submissions
+ * are still valid to use this VM. The VM parameter might be NULL in which case
+ * just the VRAM lost counter will be used.
+ */
+uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
+
+ if (!vm)
+ return result;
+
+ result += vm->generation;
+ /* Add one if the page tables will be re-generated on next CS */
+ if (drm_sched_entity_error(&vm->delayed))
+ ++result;
+
+ return result;
+}
+
/**
* amdgpu_vm_validate_pt_bos - validate the page table BOs
*
@@ -373,6 +451,15 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo;
int r;
+ if (drm_sched_entity_error(&vm->delayed)) {
+ ++vm->generation;
+ amdgpu_vm_bo_reset_state_machine(vm);
+ amdgpu_vm_fini_entities(vm);
+ r = amdgpu_vm_init_entities(adev, vm);
+ if (r)
+ return r;
+ }
+
spin_lock(&vm->status_lock);
while (!list_empty(&vm->evicted)) {
bo_base = list_first_entry(&vm->evicted,
@@ -920,42 +1007,51 @@ error_unlock:
return r;
}
+static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
+ struct amdgpu_mem_stats *stats)
+{
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+
+ if (!bo)
+ return;
+
+ /*
+ * For now ignore BOs which are currently locked and potentially
+ * changing their location.
+ */
+ if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+ !dma_resv_trylock(bo->tbo.base.resv))
+ return;
+
+ amdgpu_bo_get_memory(bo, stats);
+ if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+ dma_resv_unlock(bo->tbo.base.resv);
+}
+
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats *stats)
{
struct amdgpu_bo_va *bo_va, *tmp;
spin_lock(&vm->status_lock);
- list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
+ list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
spin_unlock(&vm->status_lock);
}
@@ -2039,19 +2135,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->pt_freed);
INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
- /* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
- adev->vm_manager.vm_pte_scheds,
- adev->vm_manager.vm_pte_num_scheds, NULL);
+ r = amdgpu_vm_init_entities(adev, vm);
if (r)
return r;
- r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- adev->vm_manager.vm_pte_scheds,
- adev->vm_manager.vm_pte_num_scheds, NULL);
- if (r)
- goto error_free_immediate;
-
vm->pte_support_ats = false;
vm->is_compute_context = false;
@@ -2072,6 +2159,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->last_update = dma_fence_get_stub();
vm->last_unlocked = dma_fence_get_stub();
vm->last_tlb_flush = dma_fence_get_stub();
+ vm->generation = 0;
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2112,10 +2200,7 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
- drm_sched_entity_destroy(&vm->delayed);
-
-error_free_immediate:
- drm_sched_entity_destroy(&vm->immediate);
+ amdgpu_vm_fini_entities(vm);
return r;
}
@@ -2268,8 +2353,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&root);
WARN_ON(vm->root.bo);
- drm_sched_entity_destroy(&vm->immediate);
- drm_sched_entity_destroy(&vm->delayed);
+ amdgpu_vm_fini_entities(vm);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
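
The new amdgpu_vm_generation() packs two counters into a single u64 token: the device-wide VRAM-lost counter in the upper 32 bits and the per-VM page-table generation (plus one when the delayed entity is in error, i.e. the tables will be rebuilt on the next CS) in the lower bits. An illustrative restatement of that token logic (names prefixed example_ are not in the tree):

#include <linux/types.h>

static inline u64 example_vm_generation(u32 vram_lost_counter,
					u64 vm_generation,
					bool delayed_entity_errored)
{
	u64 token = (u64)vram_lost_counter << 32;	/* upper 32 bits */

	token += vm_generation;			/* lower bits */
	if (delayed_entity_errored)
		++token;	/* tables re-generated on next CS */
	return token;
}

/* A job records the token at allocation time and stays runnable only
 * while the recomputed token still matches (cf. amdgpu_job_run()). */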
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 14f9a2bf3acb..9c85d494f2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -295,6 +295,9 @@ struct amdgpu_vm {
atomic64_t tlb_seq;
struct dma_fence *last_tlb_flush;
+ /* How many times we had to re-generate the page tables */
+ uint64_t generation;
+
/* Last unlocked submission to the scheduler entities */
struct dma_fence *last_unlocked;
@@ -397,6 +400,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
+uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 85ee1af963dd..03dc59cbe8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -325,6 +325,36 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
}
+static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0; i < top->num_nodes; i++)
+ sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_hops);
+
+ return sysfs_emit(buf, "%s\n", buf);
+}
+
+static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0; i < top->num_nodes; i++)
+ sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_links);
+
+ return sysfs_emit(buf, "%s\n", buf);
+}
+
#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
struct device_attribute *attr,
@@ -361,6 +391,8 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
+static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
+static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
@@ -380,6 +412,15 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
if (ret)
pr_err("failed to create xgmi_error\n");
+ /* Create xgmi num hops file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
+ if (ret)
+ pr_err("failed to create xgmi_num_hops\n");
+
+ /* Create xgmi num links file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (ret)
+ pr_err("failed to create xgmi_num_links\n");
/* Create sysfs link to hive info folder on the first device */
if (hive->kobj.parent != (&adev->dev->kobj)) {
@@ -407,6 +448,9 @@ remove_link:
remove_file:
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
success:
return ret;
@@ -420,6 +464,8 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
device_remove_file(adev->dev, &dev_attr_xgmi_error);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
if (hive->kobj.parent != (&adev->dev->kobj))
sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
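
The two new show() callbacks above sprintf() into buf and then pass buf back through sysfs_emit(buf, "%s\n", buf). An equivalent formulation (illustrative only, not part of this patch) accumulates with sysfs_emit_at(), which keeps the PAGE_SIZE bounds checking of the sysfs helpers for every append:

#include <linux/sysfs.h>

/* num_hops is a stand-in for the per-node field read from
 * psp_xgmi_topology_info in the real callbacks. */
static ssize_t example_show_hops(const unsigned char *num_hops,
				 int num_nodes, char *buf)
{
	int i, len = 0;

	for (i = 0; i < num_nodes; i++)
		len += sysfs_emit_at(buf, len, "%02x ", num_hops[i]);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}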
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
index a595bb958215..16471b81a1f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
@@ -518,9 +518,6 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
num_xcc_per_xcp);
- if (adev->nbio.funcs->set_compute_partition_mode)
- adev->nbio.funcs->set_compute_partition_mode(adev, mode);
-
/* Init info about new xcps */
*num_xcps = num_xcc / num_xcc_per_xcp;
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index f5b8d3f388ff..c1ee54d4c3d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -623,22 +623,16 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
int num_xccs_per_xcp)
{
- int i, num_xcc;
- u32 tmp = 0;
-
- num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ int ret;
- for (i = 0; i < num_xcc; i++) {
- tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
- num_xccs_per_xcp);
- tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
- i % num_xccs_per_xcp);
- WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp);
- }
+ ret = psp_spatial_partition(&adev->psp, NUM_XCC(adev->gfx.xcc_mask) /
+ num_xccs_per_xcp);
+ if (ret)
+ return ret;
adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
- return 0;
+ return ret;
}
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index b2e42f1b0f12..0c8a47989576 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -382,7 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
&job);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3ed286b72cae..67e669e0141c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -2107,6 +2107,7 @@ static int gmc_v9_0_sw_init(void *handle)
bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index d19325476752..cd1a02d30420 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -390,22 +390,7 @@ static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
px = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS,
PARTITION_MODE);
- return ffs(px);
-}
-
-static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev,
- enum amdgpu_gfx_partition mode)
-{
- u32 tmp;
-
- /* Each bit represents DPX,TPX,QPX,CPX mode. No bit set means default
- * SPX mode.
- */
- tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS);
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS,
- PARTITION_MODE, mode ? BIT(mode - 1) : mode);
-
- WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS, tmp);
+ return px;
}
static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
@@ -463,7 +448,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.ih_control = nbio_v7_9_ih_control,
.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
- .set_compute_partition_mode = nbio_v7_9_set_compute_partition_mode,
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.init_registers = nbio_v7_9_init_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index caee76ab7110..f9cb0d2c89d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -624,10 +624,11 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
- return 0;
-
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
index b2414a729ca1..de5677ce4330 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
@@ -25,6 +25,8 @@
#include "amdgpu_psp.h"
+#define PSP_SPIROM_UPDATE_TIMEOUT 60000 /* 60s */
+
void psp_v13_0_set_psp_funcs(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 1f83eebfc8a7..cd37f45e01a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2312,7 +2312,7 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
@@ -2344,7 +2344,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
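
align_mask controls how far the ring code pads a submission with NOPs: the write pointer is rounded up to a multiple of align_mask + 1 dwords, so the change from 0xf to 0xff grows the alignment from 16 to 256 dwords. A minimal sketch of that rounding (illustrative, assuming the usual power-of-two mask):

static inline unsigned int example_pad_wptr(unsigned int wptr,
					    unsigned int align_mask)
{
	/* Round up to the next multiple of (align_mask + 1). */
	return (wptr + align_mask) & ~align_mask;
}

/* example_pad_wptr(100, 0xf) == 112; example_pad_wptr(100, 0xff) == 256 */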
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 8eebf9c2bbcd..ea5e12390d18 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -1434,9 +1434,11 @@ static int sdma_v4_4_2_hw_fini(void *handle)
return 0;
inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
@@ -1823,7 +1825,7 @@ const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.get_rptr = sdma_v4_4_2_ring_get_rptr,
@@ -1854,7 +1856,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.get_rptr = sdma_v4_4_2_ring_get_rptr,
@@ -2073,9 +2075,11 @@ static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask)
uint32_t tmp_mask = inst_mask;
int i;
- for_each_inst(i, tmp_mask) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for_each_inst(i, tmp_mask) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index be2984ac00a5..879bb7af297c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -130,6 +130,7 @@ struct ta_ras_init_flags {
uint8_t poison_mode_en;
uint8_t dgpu_mode;
uint16_t xcc_mask;
+ uint8_t channel_dis_num;
};
struct ta_ras_output_flags {
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
index c6dfd433fec7..dc12e0af5451 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
@@ -33,7 +33,8 @@
/* Total channel instances for all available umc nodes */
#define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
- (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)
+ (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * \
+ (adev)->gmc.num_umc - hweight32((adev)->gmc.m_half_use) * 2)
/* UMC regiser per channel offset */
#define UMC_V8_10_PER_CHANNEL_OFFSET 0x400
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 8d371faaa2b3..b48bb5212488 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -129,7 +129,11 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->vcn.harvest_config & (1 << i))
continue;
- atomic_set(&adev->vcn.inst[i].sched_score, 0);
+ /* Init instance 0 sched_score to 1, so it's scheduled after other instances */
+ if (i == 0)
+ atomic_set(&adev->vcn.inst[i].sched_score, 1);
+ else
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],