/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */
#ifndef _LINUX_USER_EVENTS_H
#define _LINUX_USER_EVENTS_H

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/mm_types.h>
#include <linux/workqueue.h>
#include <uapi/linux/user_events.h>

#ifdef CONFIG_USER_EVENTS
struct user_event_mm {
|
|
|
|
struct list_head mms_link;
|
|
|
|
struct list_head enablers;
|
|
|
|
struct mm_struct *mm;
|
|
|
|
/* Used for one-shot lists, protected by event_mutex */
|
|
|
|
struct user_event_mm *next;
|
|
|
|
refcount_t refcnt;
|
|
|
|
refcount_t tasks;
|
|
|
|
struct rcu_work put_rwork;
|
|
|
|
};
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
extern void user_event_mm_dup(struct task_struct *t,
|
|
|
|
struct user_event_mm *old_mm);
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
extern void user_event_mm_remove(struct task_struct *t);
static inline void user_events_fork(struct task_struct *t,
|
|
|
|
unsigned long clone_flags)
|
|
|
|
{
|
|
|
|
struct user_event_mm *old_mm;
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
if (!t || !current->user_event_mm)
|
|
|
|
return;
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
old_mm = current->user_event_mm;
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
if (clone_flags & CLONE_VM) {
|
|
|
|
t->user_event_mm = old_mm;
|
|
|
|
refcount_inc(&old_mm->tasks);
|
|
|
|
return;
|
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
user_event_mm_dup(t, old_mm);
|
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
static inline void user_events_execve(struct task_struct *t)
|
|
|
|
{
|
|
|
|
if (!t || !t->user_event_mm)
|
|
|
|
return;
|
|
|
|
|
|
|
|
user_event_mm_remove(t);
|
|
|
|
}
static inline void user_events_exit(struct task_struct *t)
|
|
|
|
{
|
|
|
|
if (!t || !t->user_event_mm)
|
|
|
|
return;
|
|
|
|
|
|
|
|
user_event_mm_remove(t);
|
|
|
|
}
#else
static inline void user_events_fork(struct task_struct *t,
|
|
|
|
unsigned long clone_flags)
|
|
|
|
{
|
|
|
|
}
static inline void user_events_execve(struct task_struct *t)
|
|
|
|
{
|
|
|
|
}
static inline void user_events_exit(struct task_struct *t)
|
|
|
|
{
|
|
|
|
}
#endif /* CONFIG_USER_EVENTS */

#endif /* _LINUX_USER_EVENTS_H */