linux-zen-desktop/drivers/gpu/drm/i915/i915_gem.c

/*
* Copyright © 2008-2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
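/*
* Reserve a node in the CPU-mappable range of the GGTT under the vm mutex.
* The GTT pread/pwrite paths use this as a fallback window when the object
* cannot be pinned into the aperture directly; remove_mappable_node() below
* releases it again.
*/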
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
int err;
err = mutex_lock_interruptible(&ggtt->vm.mutex);
if (err)
return err;
memset(node, 0, sizeof(*node));
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
mutex_unlock(&ggtt->vm.mutex);
return err;
}
static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(node);
mutex_unlock(&ggtt->vm.mutex);
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
u64 pinned;
if (mutex_lock_interruptible(&ggtt->vm.mutex))
return -EINTR;
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&ggtt->vm.mutex);
args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
}
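/*
* Unbind every vma belonging to this object. The I915_GEM_OBJECT_UNBIND_*
* flags select whether to merely test for bound vmas, unbind asynchronously,
* unbind even active vmas, trylock the vm mutex, or retry after an RCU
* barrier has flushed concurrent i915_vm_release() calls.
*/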
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags)
{
struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
LIST_HEAD(still_in_list);
intel_wakeref_t wakeref;
struct i915_vma *vma;
int ret;
assert_object_held(obj);
if (list_empty(&obj->vma.list))
return 0;
/*
* As some machines use ACPI to handle runtime-resume callbacks, and
* ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
* as they are required by the shrinker. Ergo, we wake the device up
* first just in case.
*/
wakeref = intel_runtime_pm_get(rpm);
try_again:
ret = 0;
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
list_move_tail(&vma->obj_link, &still_in_list);
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
ret = -EBUSY;
break;
}
/*
* Requiring the vm destructor to take the object lock
* before destroying a vma would help us eliminate the
* i915_vm_tryget() here, AND thus also the barrier stuff
* at the end. That's an easy fix, but sleeping locks in
* a kthread should generally be avoided.
*/
ret = -EAGAIN;
if (!i915_vm_tryget(vma->vm))
break;
spin_unlock(&obj->vma.lock);
/*
* Since i915_vma_parked() takes the object lock
* before vma destruction, it won't race us here,
* and destroy the vma from under us.
*/
ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
assert_object_held(vma->obj);
ret = i915_vma_unbind_async(vma, vm_trylock);
}
if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))) {
if (vm_trylock) {
if (mutex_trylock(&vma->vm->mutex)) {
ret = __i915_vma_unbind(vma);
mutex_unlock(&vma->vm->mutex);
}
} else {
ret = i915_vma_unbind(vma);
}
}
i915_vm_put(vma->vm);
spin_lock(&obj->vma.lock);
}
list_splice_init(&still_in_list, &obj->vma.list);
spin_unlock(&obj->vma.lock);
if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
rcu_barrier(); /* flush the i915_vm_release() */
goto try_again;
}
intel_runtime_pm_put(rpm, wakeref);
return ret;
}
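/*
* Per-page copy function for the shmem pread slowpath: flushes stale
* cachelines before copying to userspace if needs_clflush is set.
*/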
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
char *vaddr;
int ret;
vaddr = kmap(page);
if (needs_clflush)
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_to_user(user_data, vaddr + offset, len);
kunmap(page);
return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
{
unsigned int needs_clflush;
char __user *user_data;
unsigned long offset;
pgoff_t idx;
u64 remain;
int ret;
ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err_unlock;
ret = i915_gem_object_prepare_read(obj, &needs_clflush);
if (ret)
goto err_unpin;
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
needs_clflush);
if (ret)
break;
remain -= length;
user_data += length;
offset = 0;
}
i915_gem_object_unpin_pages(obj);
return ret;
err_unpin:
i915_gem_object_unpin_pages(obj);
err_unlock:
i915_gem_object_unlock(obj);
return ret;
}
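/*
* Read through a WC mapping of the aperture: try the atomic fast path first
* and fall back to a non-atomic mapping if the copy to userspace faults.
*/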
static inline bool
gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap(vaddr);
}
return unwritten;
}
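/*
* Prepare for GTT pread/pwrite: try to pin the object into the mappable
* aperture, and if that is not possible (e.g. the object is tiled) fall back
* to a single-page scratch node into which pages are inserted one at a time.
*/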
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
struct drm_mm_node *node,
bool write)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
struct i915_gem_ww_ctx ww;
int ret;
i915_gem_ww_ctx_init(&ww, true);
retry:
vma = ERR_PTR(-ENODEV);
ret = i915_gem_object_lock(obj, &ww);
if (ret)
goto err_ww;
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto err_ww;
if (!i915_gem_object_is_tiled(obj))
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
if (vma == ERR_PTR(-EDEADLK)) {
ret = -EDEADLK;
goto err_ww;
} else if (!IS_ERR(vma)) {
node->start = i915_ggtt_offset(vma);
node->flags = 0;
} else {
ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
if (ret)
goto err_ww;
GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = NULL;
}
ret = i915_gem_object_pin_pages(obj);
if (ret) {
if (drm_mm_node_allocated(node)) {
ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
remove_mappable_node(ggtt, node);
} else {
i915_vma_unpin(vma);
}
}
err_ww:
if (ret == -EDEADLK) {
ret = i915_gem_ww_ctx_backoff(&ww);
if (!ret)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
return ret ? ERR_PTR(ret) : vma;
}
static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
struct drm_mm_node *node,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
i915_gem_object_unpin_pages(obj);
if (drm_mm_node_allocated(node)) {
ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
remove_mappable_node(ggtt, node);
} else {
i915_vma_unpin(vma);
}
}
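/*
* Slowpath pread through the GGTT aperture, used when the shmem path fails
* with -EFAULT or is not available for this object.
*/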
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
unsigned long remain, offset;
intel_wakeref_t wakeref;
struct drm_mm_node node;
void __user *user_data;
struct i915_vma *vma;
int ret = 0;
if (overflows_type(args->size, remain) ||
overflows_type(args->offset, offset))
return -EINVAL;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = i915_gem_gtt_prepare(obj, &node, false);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_rpm;
}
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = args->offset;
while (remain > 0) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj,
offset >> PAGE_SHIFT),
node.start,
i915_gem_get_pat_index(i915,
I915_CACHE_NONE), 0);
} else {
page_base += offset & PAGE_MASK;
}
if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
}
/**
* i915_gem_pread_ioctl - Reads data from the object referenced by handle.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*
* On error, the contents of *data are undefined.
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
int ret;
/* PREAD is disallowed for all platforms after TGL-LP. This also
* covers all platforms with local memory.
*/
if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
return -EOPNOTSUPP;
if (args->size == 0)
return 0;
if (!access_ok(u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Bounds check source. */
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
ret = -ENODEV;
if (obj->ops->pread)
ret = obj->ops->pread(obj, args);
if (ret != -ENODEV)
goto out;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
ret = i915_gem_shmem_pread(obj, args);
if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(obj, args);
out:
i915_gem_object_put(obj);
return ret;
}
/* This is the fast write path which cannot handle
* page faults in the source data
*/
static inline bool
ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap(vaddr);
}
return unwritten;
}
/**
* i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
* @obj: i915 GEM object
* @args: pwrite arguments structure
*/
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
unsigned long remain, offset;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
void __user *user_data;
int ret = 0;
if (overflows_type(args->size, remain) ||
overflows_type(args->offset, offset))
return -EINVAL;
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
* waking/resuming is very slow (worst-case 10-100 ms
* depending on PCI sleeps and our own resume time).
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (!wakeref)
return -EFAULT;
} else {
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(rpm);
}
vma = i915_gem_gtt_prepare(obj, &node, true);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_rpm;
}
i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
remain = args->size;
while (remain) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
u32 page_base = node.start;
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (drm_mm_node_allocated(&node)) {
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj,
offset >> PAGE_SHIFT),
node.start,
i915_gem_get_pat_index(i915,
I915_CACHE_NONE), 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
}
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
* If the object is not backed by shmem, we retry with the
* path that handles page faults.
*/
if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
}
remain -= page_length;
user_data += page_length;
offset += page_length;
}
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
* Flushes invalid cachelines before writing to the target if
* needs_clflush_before is set and flushes out any written cachelines after
* writing if needs_clflush is set.
*/
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush_before,
bool needs_clflush_after)
{
char *vaddr;
int ret;
vaddr = kmap(page);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_from_user(vaddr + offset, user_data, len);
if (!ret && needs_clflush_after)
drm_clflush_virt_range(vaddr + offset, len);
kunmap(page);
return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
void __user *user_data;
unsigned long offset;
pgoff_t idx;
u64 remain;
int ret;
ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err_unlock;
ret = i915_gem_object_prepare_write(obj, &needs_clflush);
if (ret)
goto err_unpin;
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire write.
*/
partial_cacheline_write = 0;
if (needs_clflush & CLFLUSH_BEFORE)
partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
(offset | length) & partial_cacheline_write,
needs_clflush & CLFLUSH_AFTER);
if (ret)
break;
remain -= length;
user_data += length;
offset = 0;
}
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unpin_pages(obj);
return ret;
err_unpin:
i915_gem_object_unpin_pages(obj);
err_unlock:
i915_gem_object_unlock(obj);
return ret;
}
/**
* i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
/* PWRITE is disallowed for all platforms after TGL-LP. This also
* covers all platforms with local memory.
*/
if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
return -EOPNOTSUPP;
if (args->size == 0)
return 0;
if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
return -EFAULT;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Bounds check destination. */
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto err;
}
/* Writes not allowed into this read-only object */
if (i915_gem_object_is_readonly(obj)) {
ret = -EINVAL;
goto err;
}
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -ENODEV;
if (obj->ops->pwrite)
ret = obj->ops->pwrite(obj, args);
if (ret != -ENODEV)
goto err;
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err;
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
if (!i915_gem_object_has_struct_page(obj) ||
i915_gem_cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
* textures). Fallback to the shmem path in that case.
*/
ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(obj, args);
}
err:
i915_gem_object_put(obj);
return ret;
}
/**
* i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_i915_gem_object *obj;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/*
* Proxy objects are barred from CPU access, so there is no
* need to ban sw_finish as it is a nop.
*/
/* Pinned buffers may be scanout, so flush the cache */
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
return 0;
}
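/*
* On runtime suspend, revoke the userspace CPU mmaps of GGTT and local-memory
* objects and mark any fence register still backed by a vma as dirty, since
* the register contents are lost while the device is powered down.
*/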
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
int i;
/*
* Only called during RPM suspend. All users of the userfault_list
* must be holding an RPM wakeref to ensure that this can not
* run concurrently with themselves (and use the struct_mutex for
* protection between themselves).
*/
list_for_each_entry_safe(obj, on,
&to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
list_for_each_entry_safe(obj, on,
&i915->runtime_pm.lmem_userfault_list, userfault_link)
i915_gem_object_runtime_pm_release_mmap_offset(obj);
/*
* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
/*
* Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
* trying to write through fence + GTT, as that not only violates
* our tracking of activity and associated locking/barriers,
* but is also illegal given that the hw is powered down).
*
* Previously we used reg->pin_count as a "liveness" indicator.
* That is not sufficient, and we need a more fine-grained
* tool if we want to have a sanity check here.
*/
if (!reg->vma)
continue;
GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
reg->dirty = true;
}
}
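/* Drop a misplaced vma from the object's vma tree so a fresh one can be instantiated. */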
static void discard_ggtt_vma(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->vma.lock);
if (!RB_EMPTY_NODE(&vma->obj_node)) {
rb_erase(&vma->obj_node, &obj->vma.tree);
RB_CLEAR_NODE(&vma->obj_node);
}
spin_unlock(&obj->vma.lock);
}
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
int ret;
GEM_WARN_ON(!ww);
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GTT_VIEW_NORMAL)) {
/*
* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
* vain. Worse, doing so may cause us to ping-pong
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
if (obj->base.size > ggtt->mappable_end)
return ERR_PTR(-E2BIG);
/*
* If NONBLOCK is set the caller is optimistically
* trying to cache the full object within the mappable
* aperture, and *must* have a fallback in place for
* situations where we cannot bind the object. We
* can be a little more lax here and use the fallback
* more often to avoid costly migrations of ourselves
* and other objects within the aperture.
*
* Half-the-aperture is used as a simple heuristic.
* More interesting would be to search for a free
* block prior to making the commitment to unbind.
* That caters for the self-harm case, and with a
* little more heuristics (e.g. NOFAULT, NOEVICT)
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
obj->base.size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
new_vma:
vma = i915_vma_instance(obj, &ggtt->vm, view);
if (IS_ERR(vma))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
if (flags & PIN_NONBLOCK) {
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
return ERR_PTR(-ENOSPC);
/*
* If this misplaced vma is too big (i.e. at least
* half the size of the aperture) or hasn't been pinned
* mappable before, we ignore the misplacement when
* PIN_NONBLOCK is set in order to avoid the ping-pong
* issue described above. In other words, we try to
* avoid the costly operation of unbinding this vma
* from the GGTT and rebinding it back because there
* may not be enough space for this vma in the aperture.
*/
if (flags & PIN_MAPPABLE &&
(vma->fence_size > ggtt->mappable_end / 2 ||
!i915_vma_is_map_and_fenceable(vma)))
return ERR_PTR(-ENOSPC);
}
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
discard_ggtt_vma(vma);
goto new_vma;
}
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
}
ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
mutex_lock(&ggtt->vm.mutex);
i915_vma_revoke_fence(vma);
mutex_unlock(&ggtt->vm.mutex);
}
ret = i915_vma_wait_for_bind(vma);
if (ret) {
i915_vma_unpin(vma);
return ERR_PTR(ret);
}
return vma;
}
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct i915_gem_ww_ctx ww;
struct i915_vma *ret;
int err;
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(obj, &ww);
if (err)
continue;
ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
alignment, flags);
if (IS_ERR(ret))
err = PTR_ERR(ret);
}
return err ? ERR_PTR(err) : ret;
}
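/*
* Update an object's madvise state: I915_MADV_DONTNEED marks the backing
* storage as purgeable by the shrinker (or truncates it immediately if the
* pages are already released), while I915_MADV_WILLNEED reverses that.
*/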
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int err;
switch (args->madv) {
case I915_MADV_DONTNEED:
case I915_MADV_WILLNEED:
break;
default:
return -EINVAL;
}
obj = i915_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out;
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
i915_gem_object_make_shrinkable(obj);
}
if (args->madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_make_unshrinkable(obj);
i915_gem_object_set_tiling_quirk(obj);
}
}
if (obj->mm.madv != __I915_MADV_PURGED) {
obj->mm.madv = args->madv;
if (obj->ops->adjust_lru)
obj->ops->adjust_lru(obj);
}
if (i915_gem_object_has_pages(obj) ||
i915_gem_object_has_self_managed_shrink_list(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
if (!list_empty(&obj->mm.link)) {
struct list_head *list;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
else
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
/* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
i915_gem_object_unlock(obj);
out:
i915_gem_object_put(obj);
return err;
}
/*
* A single pass should suffice to release all the freed objects (along most
* call paths), but be a little more paranoid in that freeing the objects does
* take a small amount of time, during which the RCU callbacks could have added
* new objects into the freed list, and armed the work again.
*/
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
while (atomic_read(&i915->mm.free_count)) {
flush_work(&i915->mm.free_work);
drain_workqueue(i915->bdev.wq);
rcu_barrier();
}
}
/*
* Similar to objects above (see i915_gem_drain_freed_objects()), in general we
* have workers that are armed by RCU and then rearm themselves in their
* callbacks. To be paranoid, we need to drain the workqueue a second time after
* waiting for the RCU grace period so that we catch work queued via RCU from
* the first pass. As neither drain_workqueue() nor flush_workqueue() report a
* result, we assume that we don't require more than 3 passes to catch all
* _recursive_ RCU delayed work.
*/
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
int i;
for (i = 0; i < 3; i++) {
flush_workqueue(i915->wq);
rcu_barrier();
i915_gem_drain_freed_objects(i915);
}
drain_workqueue(i915->wq);
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
struct intel_gt *gt;
unsigned int i;
int ret;
/*
* In the process of replacing cache_level with pat_index, a tricky
* dependency is created on the definition of the enum i915_cache_level.
* In case this enum is changed, the PTE encoding would be broken.
* Add a warning here, and remove it once we completely quit using this
* enum.
*/
BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
I915_CACHE_LLC != 1 ||
I915_CACHE_L3_LLC != 2 ||
I915_CACHE_WT != 3 ||
I915_MAX_CACHE_LEVEL != 4);
/* We need to fall back to 4K pages if the host doesn't support huge GTT. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
for_each_gt(gt, dev_priv, i) {
intel_uc_fetch_firmwares(&gt->uc);
intel_wopcm_init(&gt->wopcm);
if (GRAPHICS_VER(dev_priv) >= 8)
setup_private_pat(gt);
}
ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
/*
* Despite its name, intel_clock_gating_init applies display clock gating
* workarounds, GT mmio workarounds and the occasional
* GT power context workaround. Worse, sometimes it includes a context
* register workaround which we need to apply before we record the
* default HW state for all contexts.
*
* FIXME: break up the workarounds and apply them at the right time!
*/
intel_clock_gating_init(dev_priv);
for_each_gt(gt, dev_priv, i) {
ret = intel_gt_init(gt);
if (ret)
goto err_unlock;
}
return 0;
/*
* Unwinding is complicated by that we want to handle -EIO to mean
* disable GPU submission but keep KMS alive. We want to mark the
* HW as irreversibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
err_unlock:
i915_gem_drain_workqueue(dev_priv);
if (ret != -EIO) {
for_each_gt(gt, dev_priv, i) {
intel_gt_driver_remove(gt);
intel_gt_driver_release(gt);
intel_uc_cleanup_firmwares(&gt->uc);
}
}
if (ret == -EIO) {
/*
* Allow engines or uC initialisation to fail by marking the GPU
* as wedged. But we only want to do this when the GPU is angry;
* for all other failures, such as an allocation failure, we bail.
*/
for_each_gt(gt, dev_priv, i) {
if (!intel_gt_is_wedged(gt)) {
i915_probe_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
intel_gt_set_wedged(gt);
}
}
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_ggtt_resume(to_gt(dev_priv)->ggtt);
intel_clock_gating_init(dev_priv);
}
i915_gem_drain_freed_objects(dev_priv);
return ret;
}
void i915_gem_driver_register(struct drm_i915_private *i915)
{
i915_gem_driver_register__shrinker(i915);
intel_engines_driver_register(i915);
}
void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
}
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
struct intel_gt *gt;
unsigned int i;
i915_gem_suspend_late(dev_priv);
for_each_gt(gt, dev_priv, i)
intel_gt_driver_remove(gt);
dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
struct intel_gt *gt;
unsigned int i;
for_each_gt(gt, dev_priv, i) {
intel_gt_driver_release(gt);
intel_uc_cleanup_firmwares(&gt->uc);
}
/* Flush any outstanding work, including i915_gem_context.release_work. */
i915_gem_drain_workqueue(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.obj_lock);
init_llist_head(&i915->mm.free_list);
INIT_LIST_HEAD(&i915->mm.purge_list);
INIT_LIST_HEAD(&i915->mm.shrink_list);
i915_gem_init__objects(i915);
}
void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
i915_gem_drain_workqueue(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
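/*
* Per-file open hook: allocate the drm_i915_file_private, attach an
* i915_drm_client and open the per-file GEM context state.
*/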
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
struct i915_drm_client *client;
int ret = -ENOMEM;
drm_dbg(&i915->drm, "\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
goto err_alloc;
client = i915_drm_client_alloc();
if (!client)
goto err_client;
file->driver_priv = file_priv;
file_priv->i915 = i915;
file_priv->file = file;
file_priv->client = client;
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
goto err_context;
return 0;
err_context:
i915_drm_client_put(client);
err_client:
kfree(file_priv);
err_alloc:
return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif