// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */
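
/*
 * For reference, a minimal usage sketch from a caller's point of view.
 * Illustrative only - the work function and work item names below are
 * made up and do not exist elsewhere in the tree.  queue_work() queues
 * the item on a workqueue (schedule_work() is shorthand for system_wq)
 * and flush_work() waits for it to finish:
 *
 *	static void frob_workfn(struct work_struct *work)
 *	{
 *		pr_info("running in process context on a kworker\n");
 *	}
 *	static DECLARE_WORK(frob_work, frob_workfn);
 *
 *	queue_work(system_wq, &frob_work);
 *	flush_work(&frob_work);
 */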

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};
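
/*
 * To make the timeouts above concrete (illustrative, assuming HZ == 1000):
 * MAYDAY_INITIAL_TIMEOUT evaluates to max(HZ / 100, 2) == 10 ticks (10ms),
 * MAYDAY_INTERVAL to 100 ticks (100ms), IDLE_WORKER_TIMEOUT to 300000
 * ticks (5 minutes) and CREATE_COOLDOWN to 1000 ticks (1 second).
 */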

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works.  Some of them are ready to
	 * run in pool->worklist or worker->scheduled.  Those work items are
	 * only struct wq_barrier which is used for flush_work() and should
	 * not participate in pwq->nr_active.  For a non-barrier work item, it
	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
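
/*
 * Illustration of the alignment requirement above (values are only an
 * example): if WORK_STRUCT_FLAG_BITS were 8, every pool_workqueue would be
 * 256-byte aligned, so the low 8 bits of a pwq pointer are guaranteed to
 * be zero and can carry WORK_STRUCT_* flags while the work item is queued.
 */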

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))
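
/*
 * A minimal sketch of how the iterators above are meant to be used
 * (illustrative only; do_something_with() is a placeholder, not a real
 * helper in this file):
 *
 *	mutex_lock(&wq->mutex);
 *	for_each_pwq(pwq, wq)
 *		do_something_with(pwq);
 *	mutex_unlock(&wq->mutex);
 *
 * or, for read-only access, inside rcu_read_lock()/rcu_read_unlock().
 */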
|
|
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_OBJECTS_WORK
|
|
|
|
|
|
|
|
static const struct debug_obj_descr work_debug_descr;
|
|
|
|
|
|
|
|
static void *work_debug_hint(void *addr)
|
|
|
|
{
|
|
|
|
return ((struct work_struct *) addr)->func;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool work_is_static_object(void *addr)
|
|
|
|
{
|
|
|
|
struct work_struct *work = addr;
|
|
|
|
|
|
|
|
return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* fixup_init is called when:
|
|
|
|
* - an active object is initialized
|
|
|
|
*/
|
|
|
|
static bool work_fixup_init(void *addr, enum debug_obj_state state)
|
|
|
|
{
|
|
|
|
struct work_struct *work = addr;
|
|
|
|
|
|
|
|
switch (state) {
|
|
|
|
case ODEBUG_STATE_ACTIVE:
|
|
|
|
cancel_work_sync(work);
|
|
|
|
debug_object_init(work, &work_debug_descr);
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* fixup_free is called when:
|
|
|
|
* - an active object is freed
|
|
|
|
*/
|
|
|
|
static bool work_fixup_free(void *addr, enum debug_obj_state state)
|
|
|
|
{
|
|
|
|
struct work_struct *work = addr;
|
|
|
|
|
|
|
|
switch (state) {
|
|
|
|
case ODEBUG_STATE_ACTIVE:
|
|
|
|
cancel_work_sync(work);
|
|
|
|
debug_object_free(work, &work_debug_descr);
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct debug_obj_descr work_debug_descr = {
|
|
|
|
.name = "work_struct",
|
|
|
|
.debug_hint = work_debug_hint,
|
|
|
|
.is_static_object = work_is_static_object,
|
|
|
|
.fixup_init = work_fixup_init,
|
|
|
|
.fixup_free = work_fixup_free,
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline void debug_work_activate(struct work_struct *work)
|
|
|
|
{
|
|
|
|
debug_object_activate(work, &work_debug_descr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void debug_work_deactivate(struct work_struct *work)
|
|
|
|
{
|
|
|
|
debug_object_deactivate(work, &work_debug_descr);
|
|
|
|
}
|
|
|
|
|
|
|
|
void __init_work(struct work_struct *work, int onstack)
|
|
|
|
{
|
|
|
|
if (onstack)
|
|
|
|
debug_object_init_on_stack(work, &work_debug_descr);
|
|
|
|
else
|
|
|
|
debug_object_init(work, &work_debug_descr);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(__init_work);
|
|
|
|
|
|
|
|
void destroy_work_on_stack(struct work_struct *work)
|
|
|
|
{
|
|
|
|
debug_object_free(work, &work_debug_descr);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
|
|
|
|
|
|
|
|
void destroy_delayed_work_on_stack(struct delayed_work *work)
|
|
|
|
{
|
|
|
|
destroy_timer_on_stack(&work->timer);
|
|
|
|
debug_object_free(&work->work, &work_debug_descr);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
|
|
|
|
|
|
|
|
#else
|
|
|
|
static inline void debug_work_activate(struct work_struct *work) { }
|
|
|
|
static inline void debug_work_deactivate(struct work_struct *work) { }
|
|
|
|
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
						  int node)
{
	assert_rcu_or_wq_mutex_or_pool_mutex(wq);

	/*
	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
	 * delayed item is pending.  The plan is to keep CPU -> NODE
	 * mapping valid and stable across CPU on/offlines.  Once that
	 * happens, this workaround can be removed.
	 */
	if (unlikely(node == NUMA_NO_NODE))
		return wq->dfl_pwq;

	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
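
/*
 * Flush colors cycle through 0..WORK_NR_COLORS-1 via work_next_color().
 * For example, a work item queued while pwq->work_color == 2 gets
 * work_color_to_flags(2) or'd into its data word and is accounted in
 * pwq->nr_in_flight[2] until it finishes (the color value 2 is chosen
 * arbitrarily for illustration).
 */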

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8                                  LOAD event_indicated
	 *                                 }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
|
|
|
|
{
|
|
|
|
return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
|
|
|
|
}
|
|
|
|
|
2023-08-30 17:31:07 +02:00
|
|
|
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
|
|
|
|
{
|
|
|
|
unsigned long data = atomic_long_read(&work->data);
|
|
|
|
|
|
|
|
if (data & WORK_STRUCT_PWQ)
|
2023-10-24 12:59:35 +02:00
|
|
|
return work_struct_pwq(data);
|
2023-08-30 17:31:07 +02:00
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_work_pool - return the worker_pool a given work was associated with
|
|
|
|
* @work: the work item of interest
|
|
|
|
*
|
|
|
|
* Pools are created and destroyed under wq_pool_mutex, and allows read
|
|
|
|
* access under RCU read lock. As such, this function should be
|
|
|
|
* called under wq_pool_mutex or inside of a rcu_read_lock() region.
|
|
|
|
*
|
|
|
|
* All fields of the returned pool are accessible as long as the above
|
|
|
|
* mentioned locking is in effect. If the returned pool needs to be used
|
|
|
|
* beyond the critical section, the caller is responsible for ensuring the
|
|
|
|
* returned pool is and stays online.
|
|
|
|
*
|
|
|
|
* Return: The worker_pool @work was last associated with. %NULL if none.
|
|
|
|
*/
|
|
|
|
static struct worker_pool *get_work_pool(struct work_struct *work)
|
|
|
|
{
|
|
|
|
unsigned long data = atomic_long_read(&work->data);
|
|
|
|
int pool_id;
|
|
|
|
|
|
|
|
assert_rcu_or_pool_mutex();
|
|
|
|
|
|
|
|
if (data & WORK_STRUCT_PWQ)
|
2023-10-24 12:59:35 +02:00
|
|
|
return work_struct_pwq(data)->pool;
|
2023-08-30 17:31:07 +02:00
|
|
|
|
|
|
|
pool_id = data >> WORK_OFFQ_POOL_SHIFT;
|
|
|
|
if (pool_id == WORK_OFFQ_POOL_NONE)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return idr_find(&worker_pool_idr, pool_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_work_pool_id - return the worker pool ID a given work is associated with
|
|
|
|
* @work: the work item of interest
|
|
|
|
*
|
|
|
|
* Return: The worker_pool ID @work was last associated with.
|
|
|
|
* %WORK_OFFQ_POOL_NONE if none.
|
|
|
|
*/
|
|
|
|
static int get_work_pool_id(struct work_struct *work)
|
|
|
|
{
|
|
|
|
unsigned long data = atomic_long_read(&work->data);
|
|
|
|
|
|
|
|
if (data & WORK_STRUCT_PWQ)
|
2023-10-24 12:59:35 +02:00
|
|
|
return work_struct_pwq(data)->pool->id;
|
2023-08-30 17:31:07 +02:00
|
|
|
|
|
|
|
return data >> WORK_OFFQ_POOL_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mark_work_canceling(struct work_struct *work)
|
|
|
|
{
|
|
|
|
unsigned long pool_id = get_work_pool_id(work);
|
|
|
|
|
|
|
|
pool_id <<= WORK_OFFQ_POOL_SHIFT;
|
|
|
|
set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool work_is_canceling(struct work_struct *work)
|
|
|
|
{
|
|
|
|
unsigned long data = atomic_long_read(&work->data);
|
|
|
|
|
|
|
|
return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
|
|
|
|
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !pool->nr_running;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
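
/*
 * Worked example for the check above (numbers are illustrative): with
 * MAX_IDLE_WORKERS_RATIO == 4 and 16 busy workers, the pool is considered
 * to have too many workers once nr_idle reaches 6, since 6 > 2 and
 * (6 - 2) * 4 >= 16; up to 5 idle workers are tolerated.
 */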
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wake up functions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Return the first idle worker. Called with pool->lock held. */
|
|
|
|
static struct worker *first_idle_worker(struct worker_pool *pool)
|
|
|
|
{
|
|
|
|
if (unlikely(list_empty(&pool->idle_list)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return list_first_entry(&pool->idle_list, struct worker, entry);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* wake_up_worker - wake up an idle worker
|
|
|
|
* @pool: worker pool to wake worker from
|
|
|
|
*
|
|
|
|
* Wake up the first idle worker of @pool.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void wake_up_worker(struct worker_pool *pool)
|
|
|
|
{
|
|
|
|
struct worker *worker = first_idle_worker(pool);
|
|
|
|
|
|
|
|
if (likely(worker))
|
|
|
|
wake_up_process(worker->task);
|
|
|
|
}
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
/**
|
|
|
|
* worker_set_flags - set worker flags and adjust nr_running accordingly
|
|
|
|
* @worker: self
|
|
|
|
* @flags: flags to set
|
|
|
|
*
|
|
|
|
* Set @flags in @worker->flags and adjust nr_running accordingly.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock)
|
|
|
|
*/
|
|
|
|
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
WARN_ON_ONCE(worker->task != current);
|
|
|
|
|
|
|
|
/* If transitioning into NOT_RUNNING, adjust nr_running. */
|
|
|
|
if ((flags & WORKER_NOT_RUNNING) &&
|
|
|
|
!(worker->flags & WORKER_NOT_RUNNING)) {
|
|
|
|
pool->nr_running--;
|
|
|
|
}
|
|
|
|
|
|
|
|
worker->flags |= flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* worker_clr_flags - clear worker flags and adjust nr_running accordingly
|
|
|
|
* @worker: self
|
|
|
|
* @flags: flags to clear
|
|
|
|
*
|
|
|
|
* Clear @flags in @worker->flags and adjust nr_running accordingly.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock)
|
|
|
|
*/
|
|
|
|
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
unsigned int oflags = worker->flags;
|
|
|
|
|
|
|
|
WARN_ON_ONCE(worker->task != current);
|
|
|
|
|
|
|
|
worker->flags &= ~flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If transitioning out of NOT_RUNNING, increment nr_running. Note
|
|
|
|
* that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
|
|
|
|
* of multiple flags, not a single flag.
|
|
|
|
*/
|
|
|
|
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
|
|
|
|
if (!(worker->flags & WORKER_NOT_RUNNING))
|
|
|
|
pool->nr_running++;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Concurrency-managed per-cpu work items that hog CPU for longer than
|
|
|
|
* wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
|
|
|
|
* which prevents them from stalling other concurrency-managed work items. If a
|
|
|
|
* work function keeps triggering this mechanism, it's likely that the work item
|
|
|
|
* should be using an unbound workqueue instead.
|
|
|
|
*
|
|
|
|
* wq_cpu_intensive_report() tracks work functions which trigger such conditions
|
|
|
|
* and report them so that they can be examined and converted to use unbound
|
|
|
|
* workqueues as appropriate. To avoid flooding the console, each violating work
|
|
|
|
* function is tracked and reported with exponential backoff.
|
|
|
|
*/
|
|
|
|
#define WCI_MAX_ENTS 128
|
|
|
|
|
|
|
|
struct wci_ent {
|
|
|
|
work_func_t func;
|
|
|
|
atomic64_t cnt;
|
|
|
|
struct hlist_node hash_node;
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct wci_ent wci_ents[WCI_MAX_ENTS];
|
|
|
|
static int wci_nr_ents;
|
|
|
|
static DEFINE_RAW_SPINLOCK(wci_lock);
|
|
|
|
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
|
|
|
|
|
|
|
|
static struct wci_ent *wci_find_ent(work_func_t func)
|
|
|
|
{
|
|
|
|
struct wci_ent *ent;
|
|
|
|
|
|
|
|
hash_for_each_possible_rcu(wci_hash, ent, hash_node,
|
|
|
|
(unsigned long)func) {
|
|
|
|
if (ent->func == func)
|
|
|
|
return ent;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wq_cpu_intensive_report(work_func_t func)
|
|
|
|
{
|
|
|
|
struct wci_ent *ent;
|
|
|
|
|
|
|
|
restart:
|
|
|
|
ent = wci_find_ent(func);
|
|
|
|
if (ent) {
|
|
|
|
u64 cnt;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start reporting from the fourth time and back off
|
|
|
|
* exponentially.
|
|
|
|
*/
|
|
|
|
cnt = atomic64_inc_return_relaxed(&ent->cnt);
|
|
|
|
if (cnt >= 4 && is_power_of_2(cnt))
|
|
|
|
printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
|
|
|
|
ent->func, wq_cpu_intensive_thresh_us,
|
|
|
|
atomic64_read(&ent->cnt));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
|
|
|
|
if (wci_nr_ents >= WCI_MAX_ENTS)
|
|
|
|
return;
|
|
|
|
|
|
|
|
raw_spin_lock(&wci_lock);
|
|
|
|
|
|
|
|
if (wci_nr_ents >= WCI_MAX_ENTS) {
|
|
|
|
raw_spin_unlock(&wci_lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (wci_find_ent(func)) {
|
|
|
|
raw_spin_unlock(&wci_lock);
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
|
|
|
|
ent = &wci_ents[wci_nr_ents++];
|
|
|
|
ent->func = func;
|
|
|
|
atomic64_set(&ent->cnt, 1);
|
|
|
|
hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
|
|
|
|
|
|
|
|
raw_spin_unlock(&wci_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
|
|
|
|
static void wq_cpu_intensive_report(work_func_t func) {}
|
|
|
|
#endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
|
|
|
|
|
2023-08-30 17:31:07 +02:00
|
|
|
/**
|
|
|
|
* wq_worker_running - a worker is running again
|
|
|
|
* @task: task waking up
|
|
|
|
*
|
|
|
|
* This function is called when a worker returns from schedule()
|
|
|
|
*/
|
|
|
|
void wq_worker_running(struct task_struct *task)
|
|
|
|
{
|
|
|
|
struct worker *worker = kthread_data(task);
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
if (!READ_ONCE(worker->sleeping))
|
2023-08-30 17:31:07 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
|
|
|
|
* and the nr_running increment below, we may ruin the nr_running reset
|
|
|
|
* and leave with an unexpected pool->nr_running == 1 on the newly unbound
|
|
|
|
* pool. Protect against such race.
|
|
|
|
*/
|
|
|
|
preempt_disable();
|
|
|
|
if (!(worker->flags & WORKER_NOT_RUNNING))
|
|
|
|
worker->pool->nr_running++;
|
|
|
|
preempt_enable();
|
2023-10-24 12:59:35 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* CPU intensive auto-detection cares about how long a work item hogged
|
|
|
|
* CPU without sleeping. Reset the starting timestamp on wakeup.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_SCHED_ALT
|
|
|
|
worker->current_at = worker->task->sched_time;
|
|
|
|
#else
|
|
|
|
worker->current_at = worker->task->se.sum_exec_runtime;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
WRITE_ONCE(worker->sleeping, 0);
|
2023-08-30 17:31:07 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* wq_worker_sleeping - a worker is going to sleep
|
|
|
|
* @task: task going to sleep
|
|
|
|
*
|
|
|
|
* This function is called from schedule() when a busy worker is
|
|
|
|
* going to sleep.
|
|
|
|
*/
|
|
|
|
void wq_worker_sleeping(struct task_struct *task)
|
|
|
|
{
|
|
|
|
struct worker *worker = kthread_data(task);
|
|
|
|
struct worker_pool *pool;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rescuers, which may not have all the fields set up like normal
|
|
|
|
* workers, also reach here, let's not access anything before
|
|
|
|
* checking NOT_RUNNING.
|
|
|
|
*/
|
|
|
|
if (worker->flags & WORKER_NOT_RUNNING)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pool = worker->pool;
|
|
|
|
|
|
|
|
/* Return if preempted before wq_worker_running() was reached */
|
2023-10-24 12:59:35 +02:00
|
|
|
if (READ_ONCE(worker->sleeping))
|
2023-08-30 17:31:07 +02:00
|
|
|
return;
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
WRITE_ONCE(worker->sleeping, 1);
|
2023-08-30 17:31:07 +02:00
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Recheck in case unbind_workers() preempted us. We don't
|
|
|
|
* want to decrement nr_running after the worker is unbound
|
|
|
|
* and nr_running has been reset.
|
|
|
|
*/
|
|
|
|
if (worker->flags & WORKER_NOT_RUNNING) {
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
pool->nr_running--;
|
2023-10-24 12:59:35 +02:00
|
|
|
if (need_more_worker(pool)) {
|
|
|
|
worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
|
2023-08-30 17:31:07 +02:00
|
|
|
wake_up_worker(pool);
|
2023-10-24 12:59:35 +02:00
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
}
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
/**
|
|
|
|
* wq_worker_tick - a scheduler tick occurred while a kworker is running
|
|
|
|
* @task: task currently running
|
|
|
|
*
|
|
|
|
* Called from scheduler_tick(). We're in the IRQ context and the current
|
|
|
|
* worker's fields which follow the 'K' locking rule can be accessed safely.
|
|
|
|
*/
|
|
|
|
void wq_worker_tick(struct task_struct *task)
|
|
|
|
{
|
|
|
|
struct worker *worker = kthread_data(task);
|
|
|
|
struct pool_workqueue *pwq = worker->current_pwq;
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
if (!pwq)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
|
|
|
|
|
|
|
|
if (!wq_cpu_intensive_thresh_us)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the current worker is concurrency managed and hogged the CPU for
|
|
|
|
* longer than wq_cpu_intensive_thresh_us, it's automatically marked
|
|
|
|
* CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
|
|
|
|
*
|
|
|
|
* Set @worker->sleeping means that @worker is in the process of
|
|
|
|
* switching out voluntarily and won't be contributing to
|
|
|
|
* @pool->nr_running until it wakes up. As wq_worker_sleeping() also
|
|
|
|
* decrements ->nr_running, setting CPU_INTENSIVE here can lead to
|
|
|
|
* double decrements. The task is releasing the CPU anyway. Let's skip.
|
|
|
|
* We probably want to make this prettier in the future.
|
|
|
|
*/
|
|
|
|
if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
|
|
|
|
#ifdef CONFIG_SCHED_ALT
|
|
|
|
worker->task->sched_time - worker->current_at <
|
|
|
|
#else
|
|
|
|
worker->task->se.sum_exec_runtime - worker->current_at <
|
|
|
|
#endif
|
|
|
|
wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
|
|
|
|
return;
|
|
|
|
|
|
|
|
raw_spin_lock(&pool->lock);
|
|
|
|
|
|
|
|
worker_set_flags(worker, WORKER_CPU_INTENSIVE);
|
|
|
|
wq_cpu_intensive_report(worker->current_func);
|
|
|
|
pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
|
|
|
|
|
|
|
|
if (need_more_worker(pool)) {
|
|
|
|
pwq->stats[PWQ_STAT_CM_WAKEUP]++;
|
|
|
|
wake_up_worker(pool);
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock(&pool->lock);
|
|
|
|
}
|
|
|
|
|
2023-08-30 17:31:07 +02:00
|
|
|
/**
|
|
|
|
* wq_worker_last_func - retrieve worker's last work function
|
|
|
|
* @task: Task to retrieve last work function of.
|
|
|
|
*
|
|
|
|
* Determine the last function a worker executed. This is called from
|
|
|
|
* the scheduler to get a worker's last known identity.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(rq->lock)
|
|
|
|
*
|
|
|
|
* This function is called during schedule() when a kworker is going
|
|
|
|
* to sleep. It's used by psi to identify aggregation workers during
|
|
|
|
* dequeuing, to allow periodic aggregation to shut-off when that
|
|
|
|
* worker is the last task in the system or cgroup to go to sleep.
|
|
|
|
*
|
|
|
|
* As this function doesn't involve any workqueue-related locking, it
|
|
|
|
* only returns stable values when called from inside the scheduler's
|
|
|
|
* queuing and dequeuing paths, when @task, which must be a kworker,
|
|
|
|
* is guaranteed to not be processing any works.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* The last work function %current executed as a worker, NULL if it
|
|
|
|
* hasn't executed any work yet.
|
|
|
|
*/
|
|
|
|
work_func_t wq_worker_last_func(struct task_struct *task)
|
|
|
|
{
|
|
|
|
struct worker *worker = kthread_data(task);
|
|
|
|
|
|
|
|
return worker->last_func;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* find_worker_executing_work - find worker which is executing a work
|
|
|
|
* @pool: pool of interest
|
|
|
|
* @work: work to find worker for
|
|
|
|
*
|
|
|
|
* Find a worker which is executing @work on @pool by searching
|
|
|
|
* @pool->busy_hash which is keyed by the address of @work. For a worker
|
|
|
|
* to match, its current execution should match the address of @work and
|
|
|
|
* its work function. This is to avoid unwanted dependency between
|
|
|
|
* unrelated work executions through a work item being recycled while still
|
|
|
|
* being executed.
|
|
|
|
*
|
|
|
|
* This is a bit tricky. A work item may be freed once its execution
|
|
|
|
* starts and nothing prevents the freed area from being recycled for
|
|
|
|
* another work item. If the same work item address ends up being reused
|
|
|
|
* before the original execution finishes, workqueue will identify the
|
|
|
|
* recycled work item as currently executing and make it wait until the
|
|
|
|
* current execution finishes, introducing an unwanted dependency.
|
|
|
|
*
|
|
|
|
* This function checks the work item address and work function to avoid
|
|
|
|
* false positives. Note that this isn't complete as one may construct a
|
|
|
|
* work function which can introduce dependency onto itself through a
|
|
|
|
* recycled work item. Well, if somebody wants to shoot oneself in the
|
|
|
|
* foot that badly, there's only so much we can do, and if such deadlock
|
|
|
|
* actually occurs, it should be easy to locate the culprit work function.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* Pointer to worker which is executing @work if found, %NULL
|
|
|
|
* otherwise.
|
|
|
|
*/
|
|
|
|
static struct worker *find_worker_executing_work(struct worker_pool *pool,
|
|
|
|
struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct worker *worker;
|
|
|
|
|
|
|
|
hash_for_each_possible(pool->busy_hash, worker, hentry,
|
|
|
|
(unsigned long)work)
|
|
|
|
if (worker->current_work == work &&
|
|
|
|
worker->current_func == work->func)
|
|
|
|
return worker;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* move_linked_works - move linked works to a list
|
|
|
|
* @work: start of series of works to be scheduled
|
|
|
|
* @head: target list to append @work to
|
|
|
|
* @nextp: out parameter for nested worklist walking
|
|
|
|
*
|
|
|
|
* Schedule linked works starting from @work to @head. Work series to
|
|
|
|
* be scheduled starts at @work and includes any consecutive work with
|
|
|
|
* WORK_STRUCT_LINKED set in its predecessor.
|
|
|
|
*
|
|
|
|
* If @nextp is not NULL, it's updated to point to the next work of
|
|
|
|
* the last scheduled work. This allows move_linked_works() to be
|
|
|
|
* nested inside outer list_for_each_entry_safe().
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void move_linked_works(struct work_struct *work, struct list_head *head,
|
|
|
|
struct work_struct **nextp)
|
|
|
|
{
|
|
|
|
struct work_struct *n;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Linked worklist will always end before the end of the list,
|
|
|
|
* use NULL for list head.
|
|
|
|
*/
|
|
|
|
list_for_each_entry_safe_from(work, n, NULL, entry) {
|
|
|
|
list_move_tail(&work->entry, head);
|
|
|
|
if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're already inside safe list traversal and have moved
|
|
|
|
* multiple works to the scheduled queue, the next position
|
|
|
|
* needs to be updated.
|
|
|
|
*/
|
|
|
|
if (nextp)
|
|
|
|
*nextp = n;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_pwq - get an extra reference on the specified pool_workqueue
|
|
|
|
* @pwq: pool_workqueue to get
|
|
|
|
*
|
|
|
|
* Obtain an extra reference on @pwq. The caller should guarantee that
|
|
|
|
* @pwq has positive refcnt and be holding the matching pool->lock.
|
|
|
|
*/
|
|
|
|
static void get_pwq(struct pool_workqueue *pwq)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&pwq->pool->lock);
|
|
|
|
WARN_ON_ONCE(pwq->refcnt <= 0);
|
|
|
|
pwq->refcnt++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* put_pwq - put a pool_workqueue reference
|
|
|
|
* @pwq: pool_workqueue to put
|
|
|
|
*
|
|
|
|
* Drop a reference of @pwq. If its refcnt reaches zero, schedule its
|
|
|
|
* destruction. The caller should be holding the matching pool->lock.
|
|
|
|
*/
|
|
|
|
static void put_pwq(struct pool_workqueue *pwq)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&pwq->pool->lock);
|
|
|
|
if (likely(--pwq->refcnt))
|
|
|
|
return;
|
|
|
|
if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
* @pwq can't be released under pool->lock, bounce to
|
|
|
|
* pwq_unbound_release_workfn(). This never recurses on the same
|
|
|
|
* pool->lock as this path is taken only for unbound workqueues and
|
|
|
|
* the release work item is scheduled on a per-cpu workqueue. To
|
|
|
|
* avoid lockdep warning, unbound pool->locks are given lockdep
|
|
|
|
* subclass of 1 in get_unbound_pool().
|
|
|
|
*/
|
|
|
|
schedule_work(&pwq->unbound_release_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
|
|
|
|
* @pwq: pool_workqueue to put (can be %NULL)
|
|
|
|
*
|
|
|
|
* put_pwq() with locking. This function also allows %NULL @pwq.
|
|
|
|
*/
|
|
|
|
static void put_pwq_unlocked(struct pool_workqueue *pwq)
|
|
|
|
{
|
|
|
|
if (pwq) {
|
|
|
|
/*
|
|
|
|
* As both pwqs and pools are RCU protected, the
|
|
|
|
* following lock operations are safe.
|
|
|
|
*/
|
|
|
|
raw_spin_lock_irq(&pwq->pool->lock);
|
|
|
|
put_pwq(pwq);
|
|
|
|
raw_spin_unlock_irq(&pwq->pool->lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pwq_activate_inactive_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct pool_workqueue *pwq = get_work_pwq(work);
|
|
|
|
|
|
|
|
trace_workqueue_activate_work(work);
|
|
|
|
if (list_empty(&pwq->pool->worklist))
|
|
|
|
pwq->pool->watchdog_ts = jiffies;
|
|
|
|
move_linked_works(work, &pwq->pool->worklist, NULL);
|
|
|
|
__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
|
|
|
|
pwq->nr_active++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
|
|
|
|
{
|
|
|
|
struct work_struct *work = list_first_entry(&pwq->inactive_works,
|
|
|
|
struct work_struct, entry);
|
|
|
|
|
|
|
|
pwq_activate_inactive_work(work);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
|
|
|
|
* @pwq: pwq of interest
|
|
|
|
* @work_data: work_data of work which left the queue
|
|
|
|
*
|
|
|
|
* A work either has completed or is removed from pending queue,
|
|
|
|
* decrement nr_in_flight of its pwq and handle workqueue flushing.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
|
|
|
|
{
|
|
|
|
int color = get_work_color(work_data);
|
|
|
|
|
|
|
|
if (!(work_data & WORK_STRUCT_INACTIVE)) {
|
|
|
|
pwq->nr_active--;
|
|
|
|
if (!list_empty(&pwq->inactive_works)) {
|
|
|
|
/* one down, submit an inactive one */
|
|
|
|
if (pwq->nr_active < pwq->max_active)
|
|
|
|
pwq_activate_first_inactive(pwq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pwq->nr_in_flight[color]--;
|
|
|
|
|
|
|
|
/* is flush in progress and are we at the flushing tip? */
|
|
|
|
if (likely(pwq->flush_color != color))
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
/* are there still in-flight works? */
|
|
|
|
if (pwq->nr_in_flight[color])
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
/* this pwq is done, clear flush_color */
|
|
|
|
pwq->flush_color = -1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this was the last pwq, wake up the first flusher. It
|
|
|
|
* will handle the rest.
|
|
|
|
*/
|
|
|
|
if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
|
|
|
|
complete(&pwq->wq->first_flusher->done);
|
|
|
|
out_put:
|
|
|
|
put_pwq(pwq);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *
 *  ========	================================================================
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *  ========	================================================================
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
|
|
|
|
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
|
|
|
|
unsigned long *flags)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool;
|
|
|
|
struct pool_workqueue *pwq;
|
|
|
|
|
|
|
|
local_irq_save(*flags);
|
|
|
|
|
|
|
|
/* try to steal the timer if it exists */
|
|
|
|
if (is_dwork) {
|
|
|
|
struct delayed_work *dwork = to_delayed_work(work);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dwork->timer is irqsafe. If del_timer() fails, it's
|
|
|
|
* guaranteed that the timer is not queued anywhere and not
|
|
|
|
* running on the local CPU.
|
|
|
|
*/
|
|
|
|
if (likely(del_timer(&dwork->timer)))
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* try to claim PENDING the normal way */
|
|
|
|
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
/*
|
|
|
|
* The queueing is in progress, or it is already queued. Try to
|
|
|
|
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
|
|
|
|
*/
|
|
|
|
pool = get_work_pool(work);
|
|
|
|
if (!pool)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
raw_spin_lock(&pool->lock);
|
|
|
|
/*
|
|
|
|
* work->data is guaranteed to point to pwq only while the work
|
|
|
|
* item is queued on pwq->wq, and both updating work->data to point
|
|
|
|
* to pwq on queueing and to pool on dequeueing are done under
|
|
|
|
* pwq->pool->lock. This in turn guarantees that, if work->data
|
|
|
|
* points to pwq which is associated with a locked pool, the work
|
|
|
|
* item is currently queued on that pool.
|
|
|
|
*/
|
|
|
|
pwq = get_work_pwq(work);
|
|
|
|
if (pwq && pwq->pool == pool) {
|
|
|
|
debug_work_deactivate(work);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A cancelable inactive work item must be in the
|
|
|
|
* pwq->inactive_works since a queued barrier can't be
|
|
|
|
* canceled (see the comments in insert_wq_barrier()).
|
|
|
|
*
|
|
|
|
* An inactive work item cannot be grabbed directly because
|
|
|
|
* it might have linked barrier work items which, if left
|
|
|
|
* on the inactive_works list, will confuse pwq->nr_active
|
|
|
|
* management later on and cause stall. Make sure the work
|
|
|
|
* item is activated before grabbing.
|
|
|
|
*/
|
|
|
|
if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
|
|
|
|
pwq_activate_inactive_work(work);
|
|
|
|
|
|
|
|
list_del_init(&work->entry);
|
|
|
|
pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
|
|
|
|
|
|
|
|
/* work->data points to pwq iff queued, point to pool */
|
|
|
|
set_work_pool_and_keep_pending(work, pool->id);
|
|
|
|
|
|
|
|
raw_spin_unlock(&pool->lock);
|
|
|
|
rcu_read_unlock();
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
raw_spin_unlock(&pool->lock);
|
|
|
|
fail:
|
|
|
|
rcu_read_unlock();
|
|
|
|
local_irq_restore(*flags);
|
|
|
|
if (work_is_canceling(work))
|
|
|
|
return -ENOENT;
|
|
|
|
cpu_relax();
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
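/*
 * Example: the typical try_to_grab_pending() caller busy-retries on
 * -EAGAIN and, once a >= 0 value is returned, owns PENDING with IRQs
 * disabled (illustrative sketch; mod_delayed_work_on() below is the
 * in-tree instance of this pattern):
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(&dwork->work, true, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		// PENDING is owned here: requeue or drop the work item,
 *		// then restore interrupts.
 *		local_irq_restore(flags);
 *	}
 */
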
/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	struct worker_pool *pool = pwq->pool;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack_noalloc(work);

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);

	if (__need_more_worker(pool))
		wake_up_worker(pool);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	struct worker *worker;

	worker = current_wq_worker();
	/*
	 * Return %true iff I'm a worker executing a work item on @wq. If
	 * I'm @worker, it's safe to dereference it without locking.
	 */
	return worker && worker->current_pwq->wq == wq;
}

/*
 * When queueing an unbound work item to a wq, prefer local CPU if allowed
 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
 * avoid perturbing sensitive tasks.
 */
static int wq_select_unbound_cpu(int cpu)
{
	int new_cpu;

	if (likely(!wq_debug_force_rr_cpu)) {
		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
			return cpu;
	} else {
		pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
	}

	if (cpumask_empty(wq_unbound_cpumask))
		return cpu;

	new_cpu = __this_cpu_read(wq_rr_cpu_last);
	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
	if (unlikely(new_cpu >= nr_cpu_ids)) {
		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
		if (unlikely(new_cpu >= nr_cpu_ids))
			return cpu;
	}
	__this_cpu_write(wq_rr_cpu_last, new_cpu);

	return new_cpu;
}

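/*
 * wq_debug_force_rr_cpu is wired up to the workqueue.debug_force_rr_cpu
 * boot/module parameter. Booting with e.g. "workqueue.debug_force_rr_cpu=1"
 * forces the round-robin path above, which helps flush out callers that
 * wrongly assume an unbound work item always runs on the submitting CPU.
 */
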
static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING. Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * For a draining wq, only works from the same workqueue are
	 * allowed. The __WQ_DESTROYING helps to spot the issue that
	 * queues a new work item to a wq after destroy_workqueue(wq).
	 */
	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
		     WARN_ON_ONCE(!is_chained_work(wq))))
		return;
	rcu_read_lock();
retry:
	/* pwq which will be used unless @work is executing elsewhere */
	if (wq->flags & WQ_UNBOUND) {
		if (req_cpu == WORK_CPU_UNBOUND)
			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
	} else {
		if (req_cpu == WORK_CPU_UNBOUND)
			cpu = raw_smp_processor_id();
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	}

	/*
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
	 */
	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pwq->pool) {
		struct worker *worker;

		raw_spin_lock(&last_pool->lock);

		worker = find_worker_executing_work(last_pool, work);

		if (worker && worker->current_pwq->wq == wq) {
			pwq = worker->current_pwq;
		} else {
			/* meh... not running there, queue here */
			raw_spin_unlock(&last_pool->lock);
			raw_spin_lock(&pwq->pool->lock);
		}
	} else {
		raw_spin_lock(&pwq->pool->lock);
	}

	/*
	 * pwq is determined and locked. For unbound pools, we could have
	 * raced with pwq release and it could already be dead. If its
	 * refcnt is zero, repeat pwq selection. Note that pwqs never die
	 * without another pwq replacing it in the numa_pwq_tbl or while
	 * work items are executing on it, so the retrying is guaranteed to
	 * make forward-progress.
	 */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			raw_spin_unlock(&pwq->pool->lock);
			cpu_relax();
			goto retry;
		}
		/* oops */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}

	/* pwq determined, queue */
	trace_workqueue_queue_work(req_cpu, pwq, work);

	if (WARN_ON(!list_empty(&work->entry)))
		goto out;

	pwq->nr_in_flight[pwq->work_color]++;
	work_flags = work_color_to_flags(pwq->work_color);

	if (likely(pwq->nr_active < pwq->max_active)) {
		trace_workqueue_activate_work(work);
		pwq->nr_active++;
		worklist = &pwq->pool->worklist;
		if (list_empty(worklist))
			pwq->pool->watchdog_ts = jiffies;
	} else {
		work_flags |= WORK_STRUCT_INACTIVE;
		worklist = &pwq->inactive_works;
	}

	debug_work_activate(work);
	insert_work(pwq, work, worklist, work_flags);

out:
	raw_spin_unlock(&pwq->pool->lock);
	rcu_read_unlock();
}

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away. Callers that fail to ensure that the specified
 * CPU cannot go away will execute on a randomly chosen CPU.
 * But note well that callers specifying a CPU that never has been
 * online will get a splat.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_work_on);

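/*
 * Example usage (illustrative; my_work_fn and my_work are caller-defined):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running on CPU %d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	// Typically done under cpus_read_lock() or from a hotplug callback
 *	// so that the target CPU cannot go away underneath us.
 *	queue_work_on(cpu, system_wq, &my_work);
 */
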
/**
 * workqueue_select_cpu_near - Select a CPU based on NUMA node
 * @node: NUMA node ID that we want to select a CPU from
 *
 * This function will attempt to find a "random" cpu available on a given
 * node. If there are no CPUs available on the given node it will return
 * WORK_CPU_UNBOUND indicating that we should just schedule to any
 * available CPU if we need to schedule this work.
 */
static int workqueue_select_cpu_near(int node)
{
	int cpu;

	/* No point in doing this if NUMA isn't enabled for workqueues */
	if (!wq_numa_enabled)
		return WORK_CPU_UNBOUND;

	/* Delay binding to CPU if node is not valid or online */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
		return WORK_CPU_UNBOUND;

	/* Use local node/cpu if we are already there */
	cpu = raw_smp_processor_id();
	if (node == cpu_to_node(cpu))
		return cpu;

	/* Use "random", otherwise known as "first", online CPU of node */
	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	/* If CPU is valid return that, otherwise just defer */
	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
}

/**
 * queue_work_node - queue work on a "random" cpu for a given NUMA node
 * @node: NUMA node that we are targeting the work for
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a "random" CPU within a given NUMA node. The basic
 * idea here is to provide a way to somehow associate work with a given
 * NUMA node.
 *
 * This function will only make a best effort attempt at getting this onto
 * the right NUMA node. If no node is requested or the requested node is
 * offline then we just fall back to standard queue_work behavior.
 *
 * Currently the "random" CPU ends up being the first available CPU in the
 * intersection of cpu_online_mask and the cpumask of the node, unless we
 * are running on the node. In that case we just use the current CPU.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_node(int node, struct workqueue_struct *wq,
		     struct work_struct *work)
{
	unsigned long flags;
	bool ret = false;

	/*
	 * This current implementation is specific to unbound workqueues.
	 * Specifically we only return the first available CPU for a given
	 * node instead of cycling through individual CPUs within the node.
	 *
	 * If this is used with a per-cpu workqueue then the logic in
	 * workqueue_select_cpu_near would need to be updated to allow for
	 * some round robin type logic.
	 */
	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		int cpu = workqueue_select_cpu_near(node);

		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_node);

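/*
 * Example usage (illustrative; nic_wq, rx_refill_fn and pdev are
 * caller-defined): queue onto an unbound workqueue near a device's node.
 *
 *	struct workqueue_struct *nic_wq = alloc_workqueue("nic_wq", WQ_UNBOUND, 0);
 *	static DECLARE_WORK(rx_refill, rx_refill_fn);
 *
 *	queue_work_node(dev_to_node(&pdev->dev), nic_wq, &rx_refill);
 */
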
void delayed_work_timer_fn(struct timer_list *t)
{
	struct delayed_work *dwork = from_timer(dwork, t, timer);

	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);

static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	WARN_ON_ONCE(!wq);
	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	dwork->wq = wq;
	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	if (unlikely(cpu != WORK_CPU_UNBOUND))
		add_timer_on(timer, cpu);
	else
		add_timer(timer);
}

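/*
 * Note: the WARN_ON_ONCE(timer->function != delayed_work_timer_fn) above
 * means the delayed_work must have been set up through the regular
 * helpers, e.g. (my_dwork_fn is caller-defined):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *	// or, for embedded/dynamically allocated objects:
 *	INIT_DELAYED_WORK(&obj->dwork, my_dwork_fn);
 */
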
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Return: %false if @work was already on a queue, %true otherwise. If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);

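/*
 * Example usage (illustrative, reusing my_dwork from above): run the
 * handler on @cpu roughly 100ms from now.
 *
 *	queue_delayed_work_on(cpu, system_wq, &my_dwork, msecs_to_jiffies(100));
 */
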
/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Return: %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);

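/*
 * Example usage (illustrative): debounce a burst of events so the handler
 * runs once, HZ jiffies after the last event, no matter how many times
 * this is called in between. mod_delayed_work() is the WORK_CPU_UNBOUND
 * wrapper around this function.
 *
 *	mod_delayed_work(system_wq, &my_dwork, HZ);
 */
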
static void rcu_work_rcufn(struct rcu_head *rcu)
{
	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);

	/* read the comment in __queue_work() */
	local_irq_disable();
	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
	local_irq_enable();
}

/**
 * queue_rcu_work - queue work after a RCU grace period
 * @wq: workqueue to use
 * @rwork: work to queue
 *
 * Return: %false if @rwork was already pending, %true otherwise. Note
 * that a full RCU grace period is guaranteed only after a %true return.
 * While @rwork is guaranteed to be executed after a %false return, the
 * execution may happen before a full RCU grace period has passed.
 */
bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
{
	struct work_struct *work = &rwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		rwork->wq = wq;
		call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(queue_rcu_work);

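/*
 * Example usage (illustrative; obj and free_obj_workfn are caller-defined):
 * defer freeing of an RCU-protected object to process context after a
 * grace period.
 *
 *	INIT_RCU_WORK(&obj->free_rwork, free_obj_workfn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */
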
/**
|
|
|
|
* worker_enter_idle - enter idle state
|
|
|
|
* @worker: worker which is entering idle state
|
|
|
|
*
|
|
|
|
* @worker is entering idle state. Update stats and idle timer if
|
|
|
|
* necessary.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void worker_enter_idle(struct worker *worker)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
|
|
|
|
WARN_ON_ONCE(!list_empty(&worker->entry) &&
|
|
|
|
(worker->hentry.next || worker->hentry.pprev)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* can't use worker_set_flags(), also called from create_worker() */
|
|
|
|
worker->flags |= WORKER_IDLE;
|
|
|
|
pool->nr_idle++;
|
|
|
|
worker->last_active = jiffies;
|
|
|
|
|
|
|
|
/* idle_list is LIFO */
|
|
|
|
list_add(&worker->entry, &pool->idle_list);
|
|
|
|
|
|
|
|
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
|
|
|
|
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
|
|
|
|
|
|
|
|
/* Sanity check nr_running. */
|
|
|
|
WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* worker_leave_idle - leave idle state
|
|
|
|
* @worker: worker which is leaving idle state
|
|
|
|
*
|
|
|
|
* @worker is leaving idle state. Update stats.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void worker_leave_idle(struct worker *worker)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
|
|
|
|
return;
|
|
|
|
worker_clr_flags(worker, WORKER_IDLE);
|
|
|
|
pool->nr_idle--;
|
|
|
|
list_del_init(&worker->entry);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct worker *alloc_worker(int node)
|
|
|
|
{
|
|
|
|
struct worker *worker;
|
|
|
|
|
|
|
|
worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
|
|
|
|
if (worker) {
|
|
|
|
INIT_LIST_HEAD(&worker->entry);
|
|
|
|
INIT_LIST_HEAD(&worker->scheduled);
|
|
|
|
INIT_LIST_HEAD(&worker->node);
|
|
|
|
/* on creation a worker is in !idle && prep state */
|
|
|
|
worker->flags = WORKER_PREP;
|
|
|
|
}
|
|
|
|
return worker;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* worker_attach_to_pool() - attach a worker to a pool
|
|
|
|
* @worker: worker to be attached
|
|
|
|
* @pool: the target pool
|
|
|
|
*
|
|
|
|
* Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
|
|
|
|
* cpu-binding of @worker are kept coordinated with the pool across
|
|
|
|
* cpu-[un]hotplugs.
|
|
|
|
*/
|
|
|
|
static void worker_attach_to_pool(struct worker *worker,
|
|
|
|
struct worker_pool *pool)
|
|
|
|
{
|
|
|
|
mutex_lock(&wq_pool_attach_mutex);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
|
|
|
|
* stable across this function. See the comments above the flag
|
|
|
|
* definition for details.
|
|
|
|
*/
|
|
|
|
if (pool->flags & POOL_DISASSOCIATED)
|
|
|
|
worker->flags |= WORKER_UNBOUND;
|
|
|
|
else
|
|
|
|
kthread_set_per_cpu(worker->task, pool->cpu);
|
|
|
|
|
|
|
|
if (worker->rescue_wq)
|
|
|
|
set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
|
|
|
|
|
|
|
|
list_add_tail(&worker->node, &pool->workers);
|
|
|
|
worker->pool = pool;
|
|
|
|
|
|
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* worker_detach_from_pool() - detach a worker from its pool
|
|
|
|
* @worker: worker which is attached to its pool
|
|
|
|
*
|
|
|
|
* Undo the attaching which had been done in worker_attach_to_pool(). The
|
|
|
|
* caller worker shouldn't access the pool after detaching unless it holds
* another reference to the pool.
|
|
|
|
*/
|
|
|
|
static void worker_detach_from_pool(struct worker *worker)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
struct completion *detach_completion = NULL;
|
|
|
|
|
|
|
|
mutex_lock(&wq_pool_attach_mutex);
|
|
|
|
|
|
|
|
kthread_set_per_cpu(worker->task, -1);
|
|
|
|
list_del(&worker->node);
|
|
|
|
worker->pool = NULL;
|
|
|
|
|
|
|
|
if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
|
|
|
|
detach_completion = pool->detach_completion;
|
|
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
|
|
|
|
/* clear leftover flags without pool->lock after it is detached */
|
|
|
|
worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
|
|
|
|
|
|
|
|
if (detach_completion)
|
|
|
|
complete(detach_completion);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* create_worker - create a new workqueue worker
|
|
|
|
* @pool: pool the new worker will belong to
|
|
|
|
*
|
|
|
|
* Create and start a new worker which is attached to @pool.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* Might sleep. Does GFP_KERNEL allocations.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* Pointer to the newly created worker.
|
|
|
|
*/
|
|
|
|
static struct worker *create_worker(struct worker_pool *pool)
|
|
|
|
{
|
|
|
|
struct worker *worker;
|
|
|
|
int id;
|
|
|
|
char id_buf[16];
|
|
|
|
|
|
|
|
/* ID is needed to determine kthread name */
|
|
|
|
id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
|
2023-10-24 12:59:35 +02:00
|
|
|
if (id < 0) {
|
|
|
|
pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
|
|
|
|
ERR_PTR(id));
|
2023-08-30 17:31:07 +02:00
|
|
|
return NULL;
|
2023-10-24 12:59:35 +02:00
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
|
|
|
|
worker = alloc_worker(pool->node);
|
2023-10-24 12:59:35 +02:00
|
|
|
if (!worker) {
|
|
|
|
pr_err_once("workqueue: Failed to allocate a worker\n");
|
2023-08-30 17:31:07 +02:00
|
|
|
goto fail;
|
2023-10-24 12:59:35 +02:00
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
|
|
|
|
worker->id = id;
|
|
|
|
|
|
|
|
if (pool->cpu >= 0)
|
|
|
|
snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
|
|
|
|
pool->attrs->nice < 0 ? "H" : "");
|
|
|
|
else
|
|
|
|
snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
|
|
|
|
|
|
|
|
worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
|
|
|
|
"kworker/%s", id_buf);
|
2023-10-24 12:59:35 +02:00
|
|
|
if (IS_ERR(worker->task)) {
|
|
|
|
if (PTR_ERR(worker->task) == -EINTR) {
|
|
|
|
pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
|
|
|
|
id_buf);
|
|
|
|
} else {
|
|
|
|
pr_err_once("workqueue: Failed to create a worker thread: %pe",
|
|
|
|
worker->task);
|
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
goto fail;
|
2023-10-24 12:59:35 +02:00
|
|
|
}
|
2023-08-30 17:31:07 +02:00
|
|
|
|
|
|
|
set_user_nice(worker->task, pool->attrs->nice);
|
|
|
|
kthread_bind_mask(worker->task, pool->attrs->cpumask);
|
|
|
|
|
|
|
|
/* successful, attach the worker to the pool */
|
|
|
|
worker_attach_to_pool(worker, pool);
|
|
|
|
|
|
|
|
/* start the newly created worker */
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
worker->pool->nr_workers++;
|
|
|
|
worker_enter_idle(worker);
|
|
|
|
wake_up_process(worker->task);
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
return worker;
|
|
|
|
|
|
|
|
fail:
|
|
|
|
ida_free(&pool->worker_ida, id);
|
|
|
|
kfree(worker);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void unbind_worker(struct worker *worker)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&wq_pool_attach_mutex);
|
|
|
|
|
|
|
|
kthread_set_per_cpu(worker->task, -1);
|
|
|
|
if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
|
|
|
|
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
|
|
|
|
else
|
|
|
|
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wake_dying_workers(struct list_head *cull_list)
|
|
|
|
{
|
|
|
|
struct worker *worker, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(worker, tmp, cull_list, entry) {
|
|
|
|
list_del_init(&worker->entry);
|
|
|
|
unbind_worker(worker);
|
|
|
|
/*
|
|
|
|
* If the worker was somehow already running, then it had to be
|
|
|
|
* in pool->idle_list when set_worker_dying() happened or we
|
|
|
|
* wouldn't have gotten here.
|
|
|
|
*
|
|
|
|
* Thus, the worker must either have observed the WORKER_DIE
|
|
|
|
* flag, or have set its state to TASK_IDLE. Either way, the
|
|
|
|
* below will be observed by the worker and is safe to do
|
|
|
|
* outside of pool->lock.
|
|
|
|
*/
|
|
|
|
wake_up_process(worker->task);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* set_worker_dying - Tag a worker for destruction
|
|
|
|
* @worker: worker to be destroyed
|
|
|
|
* @list: transfer worker away from its pool->idle_list and into list
|
|
|
|
*
|
|
|
|
* Tag @worker for destruction and adjust @pool stats accordingly. The worker
|
|
|
|
* should be idle.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void set_worker_dying(struct worker *worker, struct list_head *list)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
lockdep_assert_held(&pool->lock);
|
|
|
|
lockdep_assert_held(&wq_pool_attach_mutex);
|
|
|
|
|
|
|
|
/* sanity check frenzy */
|
|
|
|
if (WARN_ON(worker->current_work) ||
|
|
|
|
WARN_ON(!list_empty(&worker->scheduled)) ||
|
|
|
|
WARN_ON(!(worker->flags & WORKER_IDLE)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
pool->nr_workers--;
|
|
|
|
pool->nr_idle--;
|
|
|
|
|
|
|
|
worker->flags |= WORKER_DIE;
|
|
|
|
|
|
|
|
list_move(&worker->entry, list);
|
|
|
|
list_move(&worker->node, &pool->dying_workers);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* idle_worker_timeout - check if some idle workers can now be deleted.
|
|
|
|
* @t: The pool's idle_timer that just expired
|
|
|
|
*
|
|
|
|
* The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
|
|
|
|
* worker_leave_idle(), as a worker flicking between idle and active while its
|
|
|
|
* pool is at the too_many_workers() tipping point would cause too much timer
|
|
|
|
* housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
|
|
|
|
* it expire and re-evaluate things from there.
|
|
|
|
*/
|
|
|
|
static void idle_worker_timeout(struct timer_list *t)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = from_timer(pool, t, idle_timer);
|
|
|
|
bool do_cull = false;
|
|
|
|
|
|
|
|
if (work_pending(&pool->idle_cull_work))
|
|
|
|
return;
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
|
|
|
if (too_many_workers(pool)) {
|
|
|
|
struct worker *worker;
|
|
|
|
unsigned long expires;
|
|
|
|
|
|
|
|
/* idle_list is kept in LIFO order, check the last one */
|
|
|
|
worker = list_entry(pool->idle_list.prev, struct worker, entry);
|
|
|
|
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
|
|
|
|
do_cull = !time_before(jiffies, expires);
|
|
|
|
|
|
|
|
if (!do_cull)
|
|
|
|
mod_timer(&pool->idle_timer, expires);
|
|
|
|
}
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
if (do_cull)
|
|
|
|
queue_work(system_unbound_wq, &pool->idle_cull_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* idle_cull_fn - cull workers that have been idle for too long.
|
|
|
|
* @work: the pool's work for handling these idle workers
|
|
|
|
*
|
|
|
|
* This goes through a pool's idle workers and gets rid of those that have been
|
|
|
|
* idle for at least IDLE_WORKER_TIMEOUT seconds.
|
|
|
|
*
|
|
|
|
* We don't want to disturb isolated CPUs because of a pcpu kworker being
|
|
|
|
* culled, so this also resets worker affinity. This requires a sleepable
|
|
|
|
* context, hence the split between timer callback and work item.
|
|
|
|
*/
|
|
|
|
static void idle_cull_fn(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
|
|
|
|
struct list_head cull_list;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&cull_list);
|
|
|
|
/*
|
|
|
|
* Grabbing wq_pool_attach_mutex here ensures an already-running worker
|
|
|
|
* cannot proceed beyond worker_detach_from_pool() in its self-destruct
|
|
|
|
* path. This is required as a previously-preempted worker could run after
|
|
|
|
* set_worker_dying() has happened but before wake_dying_workers() did.
|
|
|
|
*/
|
|
|
|
mutex_lock(&wq_pool_attach_mutex);
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
|
|
|
while (too_many_workers(pool)) {
|
|
|
|
struct worker *worker;
|
|
|
|
unsigned long expires;
|
|
|
|
|
|
|
|
worker = list_entry(pool->idle_list.prev, struct worker, entry);
|
|
|
|
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
|
|
|
|
|
|
|
|
if (time_before(jiffies, expires)) {
|
|
|
|
mod_timer(&pool->idle_timer, expires);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
set_worker_dying(worker, &cull_list);
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
wake_dying_workers(&cull_list);
|
|
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void send_mayday(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct pool_workqueue *pwq = get_work_pwq(work);
|
|
|
|
struct workqueue_struct *wq = pwq->wq;
|
|
|
|
|
|
|
|
lockdep_assert_held(&wq_mayday_lock);
|
|
|
|
|
|
|
|
if (!wq->rescuer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* mayday mayday mayday */
|
|
|
|
if (list_empty(&pwq->mayday_node)) {
|
|
|
|
/*
|
|
|
|
* If @pwq is for an unbound wq, its base ref may be put at
|
|
|
|
* any time due to an attribute change. Pin @pwq until the
|
|
|
|
* rescuer is done with it.
|
|
|
|
*/
|
|
|
|
get_pwq(pwq);
|
|
|
|
list_add_tail(&pwq->mayday_node, &wq->maydays);
|
|
|
|
wake_up_process(wq->rescuer->task);
|
2023-10-24 12:59:35 +02:00
|
|
|
pwq->stats[PWQ_STAT_MAYDAY]++;
|
2023-08-30 17:31:07 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pool_mayday_timeout(struct timer_list *t)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
|
|
|
|
struct work_struct *work;
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
|
|
|
|
|
|
|
|
if (need_to_create_worker(pool)) {
|
|
|
|
/*
|
|
|
|
* We've been trying to create a new worker but
|
|
|
|
* haven't been successful. We might be hitting an
|
|
|
|
* allocation deadlock. Send distress signals to
|
|
|
|
* rescuers.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(work, &pool->worklist, entry)
|
|
|
|
send_mayday(work);
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock(&wq_mayday_lock);
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* maybe_create_worker - create a new worker if necessary
|
|
|
|
* @pool: pool to create a new worker for
|
|
|
|
*
|
|
|
|
* Create a new worker for @pool if necessary. @pool is guaranteed to
|
|
|
|
* have at least one idle worker on return from this function. If
|
|
|
|
* creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
|
|
|
|
* sent to all rescuers with works scheduled on @pool to resolve
|
|
|
|
* possible allocation deadlock.
|
|
|
|
*
|
|
|
|
* On return, need_to_create_worker() is guaranteed to be %false and
|
|
|
|
* may_start_working() %true.
|
|
|
|
*
|
|
|
|
* LOCKING:
|
|
|
|
* raw_spin_lock_irq(pool->lock) which may be released and regrabbed
|
|
|
|
* multiple times. Does GFP_KERNEL allocations. Called only from
|
|
|
|
* manager.
|
|
|
|
*/
|
|
|
|
static void maybe_create_worker(struct worker_pool *pool)
|
|
|
|
__releases(&pool->lock)
|
|
|
|
__acquires(&pool->lock)
|
|
|
|
{
|
|
|
|
restart:
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
|
|
|
|
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
if (create_worker(pool) || !need_to_create_worker(pool))
|
|
|
|
break;
|
|
|
|
|
|
|
|
schedule_timeout_interruptible(CREATE_COOLDOWN);
|
|
|
|
|
|
|
|
if (!need_to_create_worker(pool))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
del_timer_sync(&pool->mayday_timer);
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
/*
|
|
|
|
* This is necessary even after a new worker was just successfully
|
|
|
|
* created as @pool->lock was dropped and the new worker might have
|
|
|
|
* already become busy.
|
|
|
|
*/
|
|
|
|
if (need_to_create_worker(pool))
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* manage_workers - manage worker pool
|
|
|
|
* @worker: self
|
|
|
|
*
|
|
|
|
* Assume the manager role and manage the worker pool @worker belongs
|
|
|
|
* to. At any given time, there can be only zero or one manager per
|
|
|
|
* pool. The exclusion is handled automatically by this function.
|
|
|
|
*
|
|
|
|
* The caller can safely start processing works on false return. On
|
|
|
|
* true return, it's guaranteed that need_to_create_worker() is false
|
|
|
|
* and may_start_working() is true.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock) which may be released and regrabbed
|
|
|
|
* multiple times. Does GFP_KERNEL allocations.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %false if the pool doesn't need management and the caller can safely
|
|
|
|
* start processing works, %true if management function was performed and
|
|
|
|
* the conditions that the caller verified before calling the function may
|
|
|
|
* no longer be true.
|
|
|
|
*/
|
|
|
|
static bool manage_workers(struct worker *worker)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
if (pool->flags & POOL_MANAGER_ACTIVE)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
pool->flags |= POOL_MANAGER_ACTIVE;
|
|
|
|
pool->manager = worker;
|
|
|
|
|
|
|
|
maybe_create_worker(pool);
|
|
|
|
|
|
|
|
pool->manager = NULL;
|
|
|
|
pool->flags &= ~POOL_MANAGER_ACTIVE;
|
|
|
|
rcuwait_wake_up(&manager_wait);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* process_one_work - process single work
|
|
|
|
* @worker: self
|
|
|
|
* @work: work to process
|
|
|
|
*
|
|
|
|
* Process @work. This function contains all the logic necessary to
|
|
|
|
* process a single work including synchronization against and
|
|
|
|
* interaction with other workers on the same cpu, queueing and
|
|
|
|
* flushing. As long as context requirement is met, any worker can
|
|
|
|
* call this function to process a work.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock) which is released and regrabbed.
|
|
|
|
*/
|
|
|
|
static void process_one_work(struct worker *worker, struct work_struct *work)
|
|
|
|
__releases(&pool->lock)
|
|
|
|
__acquires(&pool->lock)
|
|
|
|
{
|
|
|
|
struct pool_workqueue *pwq = get_work_pwq(work);
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
unsigned long work_data;
|
|
|
|
struct worker *collision;
|
|
|
|
#ifdef CONFIG_LOCKDEP
|
|
|
|
/*
|
|
|
|
* It is permissible to free the struct work_struct from
|
|
|
|
* inside the function that is called from it, this we need to
|
|
|
|
* take into account for lockdep too. To avoid bogus "held
|
|
|
|
* lock freed" warnings as well as problems when looking into
|
|
|
|
* work->lockdep_map, make a copy and use that here.
|
|
|
|
*/
|
|
|
|
struct lockdep_map lockdep_map;
|
|
|
|
|
|
|
|
lockdep_copy_map(&lockdep_map, &work->lockdep_map);
|
|
|
|
#endif
|
|
|
|
/* ensure we're on the correct CPU */
|
|
|
|
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
|
|
|
|
raw_smp_processor_id() != pool->cpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A single work shouldn't be executed concurrently by
|
|
|
|
* multiple workers on a single cpu. Check whether anyone is
|
|
|
|
* already processing the work. If so, defer the work to the
|
|
|
|
* currently executing one.
|
|
|
|
*/
|
|
|
|
collision = find_worker_executing_work(pool, work);
|
|
|
|
if (unlikely(collision)) {
|
|
|
|
move_linked_works(work, &collision->scheduled, NULL);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* claim and dequeue */
|
|
|
|
debug_work_deactivate(work);
|
|
|
|
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
|
|
|
|
worker->current_work = work;
|
|
|
|
worker->current_func = work->func;
|
|
|
|
worker->current_pwq = pwq;
|
2023-10-24 12:59:35 +02:00
|
|
|
#ifdef CONFIG_SCHED_ALT
|
|
|
|
worker->current_at = worker->task->sched_time;
|
|
|
|
#else
|
|
|
|
worker->current_at = worker->task->se.sum_exec_runtime;
|
|
|
|
#endif
|
2023-08-30 17:31:07 +02:00
|
|
|
work_data = *work_data_bits(work);
|
|
|
|
worker->current_color = get_work_color(work_data);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record wq name for cmdline and debug reporting, may get
|
|
|
|
* overridden through set_worker_desc().
|
|
|
|
*/
|
|
|
|
strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
|
|
|
|
|
|
|
|
list_del_init(&work->entry);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* CPU intensive works don't participate in concurrency management.
|
|
|
|
* They're the scheduler's responsibility. This takes @worker out
|
|
|
|
* of concurrency management and the next code block will chain
|
|
|
|
* execution of the pending work items.
|
|
|
|
*/
|
2023-10-24 12:59:35 +02:00
|
|
|
if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
|
2023-08-30 17:31:07 +02:00
|
|
|
worker_set_flags(worker, WORKER_CPU_INTENSIVE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wake up another worker if necessary. The condition is always
|
|
|
|
* false for normal per-cpu workers since nr_running would always
|
|
|
|
* be >= 1 at this point. This is used to chain execution of the
|
|
|
|
* pending work items for WORKER_NOT_RUNNING workers such as the
|
|
|
|
* UNBOUND and CPU_INTENSIVE ones.
|
|
|
|
*/
|
|
|
|
if (need_more_worker(pool))
|
|
|
|
wake_up_worker(pool);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record the last pool and clear PENDING which should be the last
|
|
|
|
* update to @work. Also, do this inside @pool->lock so that
|
|
|
|
* PENDING and queued state changes happen together while IRQ is
|
|
|
|
* disabled.
|
|
|
|
*/
|
|
|
|
set_work_pool_and_clear_pending(work, pool->id);
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
pwq->stats[PWQ_STAT_STARTED]++;
|
2023-08-30 17:31:07 +02:00
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
lock_map_acquire(&pwq->wq->lockdep_map);
|
|
|
|
lock_map_acquire(&lockdep_map);
|
|
|
|
/*
|
|
|
|
* Strictly speaking we should mark the invariant state without holding
|
|
|
|
* any locks, that is, before these two lock_map_acquire()'s.
|
|
|
|
*
|
|
|
|
* However, that would result in:
|
|
|
|
*
|
|
|
|
* A(W1)
|
|
|
|
* WFC(C)
|
|
|
|
* A(W1)
|
|
|
|
* C(C)
|
|
|
|
*
|
|
|
|
* Which would create W1->C->W1 dependencies, even though there is no
|
|
|
|
* actual deadlock possible. There are two solutions, using a
|
|
|
|
* read-recursive acquire on the work(queue) 'locks', but this will then
|
|
|
|
* hit the lockdep limitation on recursive locks, or simply discard
|
|
|
|
* these locks.
|
|
|
|
*
|
|
|
|
* AFAICT there is no possible deadlock scenario between the
|
|
|
|
* flush_work() and complete() primitives (except for single-threaded
|
|
|
|
* workqueues), so hiding them isn't a problem.
|
|
|
|
*/
|
|
|
|
lockdep_invariant_state(true);
|
|
|
|
trace_workqueue_execute_start(work);
|
|
|
|
worker->current_func(work);
|
|
|
|
/*
|
|
|
|
* While we must be careful to not use "work" after this, the trace
|
|
|
|
* point will only record its address.
|
|
|
|
*/
|
|
|
|
trace_workqueue_execute_end(work, worker->current_func);
|
2023-10-24 12:59:35 +02:00
|
|
|
pwq->stats[PWQ_STAT_COMPLETED]++;
|
2023-08-30 17:31:07 +02:00
|
|
|
lock_map_release(&lockdep_map);
|
|
|
|
lock_map_release(&pwq->wq->lockdep_map);
|
|
|
|
|
|
|
|
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
|
|
|
|
pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
|
|
|
|
" last function: %ps\n",
|
|
|
|
current->comm, preempt_count(), task_pid_nr(current),
|
|
|
|
worker->current_func);
|
|
|
|
debug_show_held_locks(current);
|
|
|
|
dump_stack();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The following prevents a kworker from hogging CPU on !PREEMPTION
|
|
|
|
* kernels, where a requeueing work item waiting for something to
|
|
|
|
* happen could deadlock with stop_machine as such work item could
|
|
|
|
* indefinitely requeue itself while all other CPUs are trapped in
|
|
|
|
* stop_machine. At the same time, report a quiescent RCU state so
|
|
|
|
* the same condition doesn't freeze RCU.
|
|
|
|
*/
|
|
|
|
cond_resched();
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
2023-10-24 12:59:35 +02:00
|
|
|
/*
|
|
|
|
* In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
|
|
|
|
* CPU intensive by wq_worker_tick() if @work hogged CPU longer than
|
|
|
|
* wq_cpu_intensive_thresh_us. Clear it.
|
|
|
|
*/
|
|
|
|
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
|
2023-08-30 17:31:07 +02:00
|
|
|
|
|
|
|
/* tag the worker for identification in schedule() */
|
|
|
|
worker->last_func = worker->current_func;
|
|
|
|
|
|
|
|
/* we're done with it, release */
|
|
|
|
hash_del(&worker->hentry);
|
|
|
|
worker->current_work = NULL;
|
|
|
|
worker->current_func = NULL;
|
|
|
|
worker->current_pwq = NULL;
|
|
|
|
worker->current_color = INT_MAX;
|
|
|
|
pwq_dec_nr_in_flight(pwq, work_data);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* process_scheduled_works - process scheduled works
|
|
|
|
* @worker: self
|
|
|
|
*
|
|
|
|
* Process all scheduled works. Please note that the scheduled list
|
|
|
|
* may change while processing a work, so this function repeatedly
|
|
|
|
* fetches a work from the top and executes it.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock) which may be released and regrabbed
|
|
|
|
* multiple times.
|
|
|
|
*/
|
|
|
|
static void process_scheduled_works(struct worker *worker)
|
|
|
|
{
|
|
|
|
while (!list_empty(&worker->scheduled)) {
|
|
|
|
struct work_struct *work = list_first_entry(&worker->scheduled,
|
|
|
|
struct work_struct, entry);
|
|
|
|
process_one_work(worker, work);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void set_pf_worker(bool val)
|
|
|
|
{
|
|
|
|
mutex_lock(&wq_pool_attach_mutex);
|
|
|
|
if (val)
|
|
|
|
current->flags |= PF_WQ_WORKER;
|
|
|
|
else
|
|
|
|
current->flags &= ~PF_WQ_WORKER;
|
|
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* worker_thread - the worker thread function
|
|
|
|
* @__worker: self
|
|
|
|
*
|
|
|
|
* The worker thread function. All workers belong to a worker_pool -
|
|
|
|
* either a per-cpu one or dynamic unbound one. These workers process all
|
|
|
|
* work items regardless of their specific target workqueue. The only
|
|
|
|
* exception is work items which belong to workqueues with a rescuer which
|
|
|
|
* will be explained in rescuer_thread().
|
|
|
|
*
|
|
|
|
* Return: 0
|
|
|
|
*/
|
|
|
|
static int worker_thread(void *__worker)
|
|
|
|
{
|
|
|
|
struct worker *worker = __worker;
|
|
|
|
struct worker_pool *pool = worker->pool;
|
|
|
|
|
|
|
|
/* tell the scheduler that this is a workqueue worker */
|
|
|
|
set_pf_worker(true);
|
|
|
|
woke_up:
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
|
|
|
/* am I supposed to die? */
|
|
|
|
if (unlikely(worker->flags & WORKER_DIE)) {
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
set_pf_worker(false);
|
|
|
|
|
|
|
|
set_task_comm(worker->task, "kworker/dying");
|
|
|
|
ida_free(&pool->worker_ida, worker->id);
|
|
|
|
worker_detach_from_pool(worker);
|
|
|
|
WARN_ON_ONCE(!list_empty(&worker->entry));
|
|
|
|
kfree(worker);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
worker_leave_idle(worker);
|
|
|
|
recheck:
|
|
|
|
/* no more worker necessary? */
|
|
|
|
if (!need_more_worker(pool))
|
|
|
|
goto sleep;
|
|
|
|
|
|
|
|
/* do we need to manage? */
|
|
|
|
if (unlikely(!may_start_working(pool)) && manage_workers(worker))
|
|
|
|
goto recheck;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ->scheduled list can only be filled while a worker is
|
|
|
|
* preparing to process a work or actually processing it.
|
|
|
|
* Make sure nobody diddled with it while I was sleeping.
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(!list_empty(&worker->scheduled));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Finish PREP stage. We're guaranteed to have at least one idle
|
|
|
|
* worker or that someone else has already assumed the manager
|
|
|
|
* role. This is where @worker starts participating in concurrency
|
|
|
|
* management if applicable and concurrency management is restored
|
|
|
|
* after being rebound. See rebind_workers() for details.
|
|
|
|
*/
|
|
|
|
worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
|
|
|
|
|
|
|
|
do {
|
|
|
|
struct work_struct *work =
|
|
|
|
list_first_entry(&pool->worklist,
|
|
|
|
struct work_struct, entry);
|
|
|
|
|
|
|
|
pool->watchdog_ts = jiffies;
|
|
|
|
|
|
|
|
if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
|
|
|
|
/* optimization path, not strictly necessary */
|
|
|
|
process_one_work(worker, work);
|
|
|
|
if (unlikely(!list_empty(&worker->scheduled)))
|
|
|
|
process_scheduled_works(worker);
|
|
|
|
} else {
|
|
|
|
move_linked_works(work, &worker->scheduled, NULL);
|
|
|
|
process_scheduled_works(worker);
|
|
|
|
}
|
|
|
|
} while (keep_working(pool));
|
|
|
|
|
|
|
|
worker_set_flags(worker, WORKER_PREP);
|
|
|
|
sleep:
|
|
|
|
/*
|
|
|
|
* pool->lock is held and there's no work to process and no need to
|
|
|
|
* manage, sleep. Workers are woken up only while holding
|
|
|
|
* pool->lock or from local cpu, so setting the current state
|
|
|
|
* before releasing pool->lock is enough to prevent losing any
|
|
|
|
* event.
|
|
|
|
*/
|
|
|
|
worker_enter_idle(worker);
|
|
|
|
__set_current_state(TASK_IDLE);
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
schedule();
|
|
|
|
goto woke_up;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* rescuer_thread - the rescuer thread function
|
|
|
|
* @__rescuer: self
|
|
|
|
*
|
|
|
|
* Workqueue rescuer thread function. There's one rescuer for each
|
|
|
|
* workqueue which has WQ_MEM_RECLAIM set.
|
|
|
|
*
|
|
|
|
* Regular work processing on a pool may block trying to create a new
|
|
|
|
* worker which uses GFP_KERNEL allocation which has slight chance of
|
|
|
|
* developing into deadlock if some works currently on the same queue
|
|
|
|
* need to be processed to satisfy the GFP_KERNEL allocation. This is
|
|
|
|
* the problem rescuer solves.
|
|
|
|
*
|
|
|
|
* When such condition is possible, the pool summons rescuers of all
|
|
|
|
* workqueues which have works queued on the pool and let them process
|
|
|
|
* those works so that forward progress can be guaranteed.
|
|
|
|
*
|
|
|
|
* This should happen rarely.
|
|
|
|
*
|
|
|
|
* Return: 0
|
|
|
|
*/
|
|
|
|
static int rescuer_thread(void *__rescuer)
|
|
|
|
{
|
|
|
|
struct worker *rescuer = __rescuer;
|
|
|
|
struct workqueue_struct *wq = rescuer->rescue_wq;
|
|
|
|
struct list_head *scheduled = &rescuer->scheduled;
|
|
|
|
bool should_stop;
|
|
|
|
|
|
|
|
set_user_nice(current, RESCUER_NICE_LEVEL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark rescuer as worker too. As WORKER_PREP is never cleared, it
|
|
|
|
* doesn't participate in concurrency management.
|
|
|
|
*/
|
|
|
|
set_pf_worker(true);
|
|
|
|
repeat:
|
|
|
|
set_current_state(TASK_IDLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* By the time the rescuer is requested to stop, the workqueue
|
|
|
|
* shouldn't have any work pending, but @wq->maydays may still have
|
|
|
|
* pwq(s) queued. This can happen by non-rescuer workers consuming
|
|
|
|
* all the work items before the rescuer got to them. Go through
|
|
|
|
* @wq->maydays processing before acting on should_stop so that the
|
|
|
|
* list is always empty on exit.
|
|
|
|
*/
|
|
|
|
should_stop = kthread_should_stop();
|
|
|
|
|
|
|
|
/* see whether any pwq is asking for help */
|
|
|
|
raw_spin_lock_irq(&wq_mayday_lock);
|
|
|
|
|
|
|
|
while (!list_empty(&wq->maydays)) {
|
|
|
|
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
|
|
|
|
struct pool_workqueue, mayday_node);
|
|
|
|
struct worker_pool *pool = pwq->pool;
|
|
|
|
struct work_struct *work, *n;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
__set_current_state(TASK_RUNNING);
|
|
|
|
list_del_init(&pwq->mayday_node);
|
|
|
|
|
|
|
|
raw_spin_unlock_irq(&wq_mayday_lock);
|
|
|
|
|
|
|
|
worker_attach_to_pool(rescuer, pool);
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Slurp in all works issued via this workqueue and
|
|
|
|
* process'em.
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(!list_empty(scheduled));
|
|
|
|
list_for_each_entry_safe(work, n, &pool->worklist, entry) {
|
|
|
|
if (get_work_pwq(work) == pwq) {
|
|
|
|
if (first)
|
|
|
|
pool->watchdog_ts = jiffies;
|
|
|
|
move_linked_works(work, scheduled, &n);
|
2023-10-24 12:59:35 +02:00
|
|
|
pwq->stats[PWQ_STAT_RESCUED]++;
|
2023-08-30 17:31:07 +02:00
|
|
|
}
|
|
|
|
first = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!list_empty(scheduled)) {
|
|
|
|
process_scheduled_works(rescuer);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The above execution of rescued work items could
|
|
|
|
* have created more to rescue through
|
|
|
|
* pwq_activate_first_inactive() or chained
|
|
|
|
* queueing. Let's put @pwq back on mayday list so
|
|
|
|
* that such back-to-back work items, which may be
|
|
|
|
* being used to relieve memory pressure, don't
|
|
|
|
* incur MAYDAY_INTERVAL delay in between.
|
|
|
|
*/
|
|
|
|
if (pwq->nr_active && need_to_create_worker(pool)) {
|
|
|
|
raw_spin_lock(&wq_mayday_lock);
|
|
|
|
/*
|
|
|
|
* Queue iff we aren't racing destruction
|
|
|
|
* and somebody else hasn't queued it already.
|
|
|
|
*/
|
|
|
|
if (wq->rescuer && list_empty(&pwq->mayday_node)) {
|
|
|
|
get_pwq(pwq);
|
|
|
|
list_add_tail(&pwq->mayday_node, &wq->maydays);
|
|
|
|
}
|
|
|
|
raw_spin_unlock(&wq_mayday_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Put the reference grabbed by send_mayday(). @pool won't
|
|
|
|
* go away while we're still attached to it.
|
|
|
|
*/
|
|
|
|
put_pwq(pwq);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Leave this pool. If need_more_worker() is %true, notify a
|
|
|
|
* regular worker; otherwise, we end up with 0 concurrency
|
|
|
|
* and stalling the execution.
|
|
|
|
*/
|
|
|
|
if (need_more_worker(pool))
|
|
|
|
wake_up_worker(pool);
|
|
|
|
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
worker_detach_from_pool(rescuer);
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&wq_mayday_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock_irq(&wq_mayday_lock);
|
|
|
|
|
|
|
|
if (should_stop) {
|
|
|
|
__set_current_state(TASK_RUNNING);
|
|
|
|
set_pf_worker(false);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* rescuers should never participate in concurrency management */
|
|
|
|
WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
|
|
|
|
schedule();
|
|
|
|
goto repeat;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* check_flush_dependency - check for flush dependency sanity
|
|
|
|
* @target_wq: workqueue being flushed
|
|
|
|
* @target_work: work item being flushed (NULL for workqueue flushes)
|
|
|
|
*
|
|
|
|
* %current is trying to flush the whole @target_wq or @target_work on it.
|
|
|
|
* If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
|
|
|
|
* reclaiming memory or running on a workqueue which doesn't have
|
|
|
|
* %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
|
|
|
|
* a deadlock.
|
|
|
|
*/
|
|
|
|
static void check_flush_dependency(struct workqueue_struct *target_wq,
|
|
|
|
struct work_struct *target_work)
|
|
|
|
{
|
|
|
|
work_func_t target_func = target_work ? target_work->func : NULL;
|
|
|
|
struct worker *worker;
|
|
|
|
|
|
|
|
if (target_wq->flags & WQ_MEM_RECLAIM)
|
|
|
|
return;
|
|
|
|
|
|
|
|
worker = current_wq_worker();
|
|
|
|
|
|
|
|
WARN_ONCE(current->flags & PF_MEMALLOC,
|
|
|
|
"workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
|
|
|
|
current->pid, current->comm, target_wq->name, target_func);
|
|
|
|
WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
|
|
|
|
(WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
|
|
|
|
"workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
|
|
|
|
worker->current_pwq->wq->name, worker->current_func,
|
|
|
|
target_wq->name, target_func);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct wq_barrier {
|
|
|
|
struct work_struct work;
|
|
|
|
struct completion done;
|
|
|
|
struct task_struct *task; /* purely informational */
|
|
|
|
};
|
|
|
|
|
|
|
|
static void wq_barrier_func(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
|
|
|
|
complete(&barr->done);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* insert_wq_barrier - insert a barrier work
|
|
|
|
* @pwq: pwq to insert barrier into
|
|
|
|
* @barr: wq_barrier to insert
|
|
|
|
* @target: target work to attach @barr to
|
|
|
|
* @worker: worker currently executing @target, NULL if @target is not executing
|
|
|
|
*
|
|
|
|
* @barr is linked to @target such that @barr is completed only after
|
|
|
|
* @target finishes execution. Please note that the ordering
|
|
|
|
* guarantee is observed only with respect to @target and on the local
|
|
|
|
* cpu.
|
|
|
|
*
|
|
|
|
* Currently, a queued barrier can't be canceled. This is because
|
|
|
|
* try_to_grab_pending() can't determine whether the work to be
|
|
|
|
* grabbed is at the head of the queue and thus can't clear LINKED
|
|
|
|
* flag of the previous work while there must be a valid next work
|
|
|
|
* after a work with LINKED flag set.
|
|
|
|
*
|
|
|
|
* Note that when @worker is non-NULL, @target may be modified
|
|
|
|
* underneath us, so we can't reliably determine pwq from @target.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* raw_spin_lock_irq(pool->lock).
|
|
|
|
*/
|
|
|
|
static void insert_wq_barrier(struct pool_workqueue *pwq,
|
|
|
|
struct wq_barrier *barr,
|
|
|
|
struct work_struct *target, struct worker *worker)
|
|
|
|
{
|
|
|
|
unsigned int work_flags = 0;
|
|
|
|
unsigned int work_color;
|
|
|
|
struct list_head *head;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* debugobject calls are safe here even with pool->lock locked
|
|
|
|
* as we know for sure that this will not trigger any of the
|
|
|
|
* checks and call back into the fixup functions where we
|
|
|
|
* might deadlock.
|
|
|
|
*/
|
|
|
|
INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
|
|
|
|
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
|
|
|
|
|
|
|
|
init_completion_map(&barr->done, &target->lockdep_map);
|
|
|
|
|
|
|
|
barr->task = current;
|
|
|
|
|
|
|
|
/* The barrier work item does not participate in pwq->nr_active. */
|
|
|
|
work_flags |= WORK_STRUCT_INACTIVE;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If @target is currently being executed, schedule the
|
|
|
|
* barrier to the worker; otherwise, put it after @target.
|
|
|
|
*/
|
|
|
|
if (worker) {
|
|
|
|
head = worker->scheduled.next;
|
|
|
|
work_color = worker->current_color;
|
|
|
|
} else {
|
|
|
|
unsigned long *bits = work_data_bits(target);
|
|
|
|
|
|
|
|
head = target->entry.next;
|
|
|
|
/* there can already be other linked works, inherit and set */
|
|
|
|
work_flags |= *bits & WORK_STRUCT_LINKED;
|
|
|
|
work_color = get_work_color(*bits);
|
|
|
|
__set_bit(WORK_STRUCT_LINKED_BIT, bits);
|
|
|
|
}
|
|
|
|
|
|
|
|
pwq->nr_in_flight[work_color]++;
|
|
|
|
work_flags |= work_color_to_flags(work_color);
|
|
|
|
|
|
|
|
debug_work_activate(&barr->work);
|
|
|
|
insert_work(pwq, &barr->work, head, work_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
|
|
|
|
* @wq: workqueue being flushed
|
|
|
|
* @flush_color: new flush color, < 0 for no-op
|
|
|
|
* @work_color: new work color, < 0 for no-op
|
|
|
|
*
|
|
|
|
* Prepare pwqs for workqueue flushing.
|
|
|
|
*
|
|
|
|
* If @flush_color is non-negative, flush_color on all pwqs should be
|
|
|
|
* -1. If no pwq has in-flight commands at the specified color, all
|
|
|
|
* pwq->flush_color's stay at -1 and %false is returned. If any pwq
|
|
|
|
* has in flight commands, its pwq->flush_color is set to
|
|
|
|
* @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
|
|
|
|
* wakeup logic is armed and %true is returned.
|
|
|
|
*
|
|
|
|
* The caller should have initialized @wq->first_flusher prior to
|
|
|
|
* calling this function with non-negative @flush_color. If
|
|
|
|
* @flush_color is negative, no flush color update is done and %false
|
|
|
|
* is returned.
|
|
|
|
*
|
|
|
|
* If @work_color is non-negative, all pwqs should have the same
|
|
|
|
* work_color which is previous to @work_color and all will be
|
|
|
|
* advanced to @work_color.
|
|
|
|
*
|
|
|
|
* CONTEXT:
|
|
|
|
* mutex_lock(wq->mutex).
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %true if @flush_color >= 0 and there's something to flush. %false
|
|
|
|
* otherwise.
|
|
|
|
*/
|
|
|
|
static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
|
|
|
|
int flush_color, int work_color)
|
|
|
|
{
|
|
|
|
bool wait = false;
|
|
|
|
struct pool_workqueue *pwq;
|
|
|
|
|
|
|
|
if (flush_color >= 0) {
|
|
|
|
WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
|
|
|
|
atomic_set(&wq->nr_pwqs_to_flush, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
for_each_pwq(pwq, wq) {
|
|
|
|
struct worker_pool *pool = pwq->pool;
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
|
|
|
|
if (flush_color >= 0) {
|
|
|
|
WARN_ON_ONCE(pwq->flush_color != -1);
|
|
|
|
|
|
|
|
if (pwq->nr_in_flight[flush_color]) {
|
|
|
|
pwq->flush_color = flush_color;
|
|
|
|
atomic_inc(&wq->nr_pwqs_to_flush);
|
|
|
|
wait = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (work_color >= 0) {
|
|
|
|
WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
|
|
|
|
pwq->work_color = work_color;
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
|
|
|
|
complete(&wq->first_flusher->done);
|
|
|
|
|
|
|
|
return wait;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* __flush_workqueue - ensure that any scheduled work has run to completion.
|
|
|
|
* @wq: workqueue to flush
|
|
|
|
*
|
|
|
|
* This function sleeps until all work items which were queued on entry
|
|
|
|
* have finished execution, but it is not livelocked by new incoming ones.
|
|
|
|
*/
|
|
|
|
void __flush_workqueue(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
struct wq_flusher this_flusher = {
|
|
|
|
.list = LIST_HEAD_INIT(this_flusher.list),
|
|
|
|
.flush_color = -1,
|
|
|
|
.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
|
|
|
|
};
|
|
|
|
int next_color;
|
|
|
|
|
|
|
|
if (WARN_ON(!wq_online))
|
|
|
|
return;
|
|
|
|
|
|
|
|
lock_map_acquire(&wq->lockdep_map);
|
|
|
|
lock_map_release(&wq->lockdep_map);
|
|
|
|
|
|
|
|
mutex_lock(&wq->mutex);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start-to-wait phase
|
|
|
|
*/
|
|
|
|
next_color = work_next_color(wq->work_color);
|
|
|
|
|
|
|
|
if (next_color != wq->flush_color) {
|
|
|
|
/*
|
|
|
|
* Color space is not full. The current work_color
|
|
|
|
* becomes our flush_color and work_color is advanced
|
|
|
|
* by one.
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
|
|
|
|
this_flusher.flush_color = wq->work_color;
|
|
|
|
wq->work_color = next_color;
|
|
|
|
|
|
|
|
if (!wq->first_flusher) {
|
|
|
|
/* no flush in progress, become the first flusher */
|
|
|
|
WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
|
|
|
|
|
|
|
|
wq->first_flusher = &this_flusher;
|
|
|
|
|
|
|
|
if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
|
|
|
|
wq->work_color)) {
|
|
|
|
/* nothing to flush, done */
|
|
|
|
wq->flush_color = next_color;
|
|
|
|
wq->first_flusher = NULL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* wait in queue */
|
|
|
|
WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
|
|
|
|
list_add_tail(&this_flusher.list, &wq->flusher_queue);
|
|
|
|
flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Oops, color space is full, wait on overflow queue.
|
|
|
|
* The next flush completion will assign us
|
|
|
|
* flush_color and transfer to flusher_queue.
|
|
|
|
*/
|
|
|
|
list_add_tail(&this_flusher.list, &wq->flusher_overflow);
|
|
|
|
}
|
|
|
|
|
|
|
|
check_flush_dependency(wq, NULL);
|
|
|
|
|
|
|
|
mutex_unlock(&wq->mutex);
|
|
|
|
|
|
|
|
wait_for_completion(&this_flusher.done);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wake-up-and-cascade phase
|
|
|
|
*
|
|
|
|
* First flushers are responsible for cascading flushes and
|
|
|
|
* handling overflow. Non-first flushers can simply return.
|
|
|
|
*/
|
|
|
|
if (READ_ONCE(wq->first_flusher) != &this_flusher)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mutex_lock(&wq->mutex);
|
|
|
|
|
|
|
|
/* we might have raced, check again with mutex held */
|
|
|
|
if (wq->first_flusher != &this_flusher)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
WRITE_ONCE(wq->first_flusher, NULL);
|
|
|
|
|
|
|
|
WARN_ON_ONCE(!list_empty(&this_flusher.list));
|
|
|
|
WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
struct wq_flusher *next, *tmp;
|
|
|
|
|
|
|
|
/* complete all the flushers sharing the current flush color */
|
|
|
|
list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
|
|
|
|
if (next->flush_color != wq->flush_color)
|
|
|
|
break;
|
|
|
|
list_del_init(&next->list);
|
|
|
|
complete(&next->done);
|
|
|
|
}
|
|
|
|
|
|
|
|
WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
|
|
|
|
wq->flush_color != work_next_color(wq->work_color));
|
|
|
|
|
|
|
|
/* this flush_color is finished, advance by one */
|
|
|
|
wq->flush_color = work_next_color(wq->flush_color);
|
|
|
|
|
|
|
|
/* one color has been freed, handle overflow queue */
|
|
|
|
if (!list_empty(&wq->flusher_overflow)) {
|
|
|
|
/*
|
|
|
|
* Assign the same color to all overflowed
|
|
|
|
* flushers, advance work_color and append to
|
|
|
|
* flusher_queue. This is the start-to-wait
|
|
|
|
* phase for these overflowed flushers.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(tmp, &wq->flusher_overflow, list)
|
|
|
|
tmp->flush_color = wq->work_color;
|
|
|
|
|
|
|
|
wq->work_color = work_next_color(wq->work_color);
|
|
|
|
|
|
|
|
list_splice_tail_init(&wq->flusher_overflow,
|
|
|
|
&wq->flusher_queue);
|
|
|
|
flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (list_empty(&wq->flusher_queue)) {
|
|
|
|
WARN_ON_ONCE(wq->flush_color != wq->work_color);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Need to flush more colors. Make the next flusher
|
|
|
|
* the new first flusher and arm pwqs.
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(wq->flush_color == wq->work_color);
|
|
|
|
WARN_ON_ONCE(wq->flush_color != next->flush_color);
|
|
|
|
|
|
|
|
list_del_init(&next->list);
|
|
|
|
wq->first_flusher = next;
|
|
|
|
|
|
|
|
if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Meh... this color is already done, clear first
|
|
|
|
* flusher and repeat cascading.
|
|
|
|
*/
|
|
|
|
wq->first_flusher = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&wq->mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(__flush_workqueue);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* drain_workqueue - drain a workqueue
|
|
|
|
* @wq: workqueue to drain
|
|
|
|
*
|
|
|
|
* Wait until the workqueue becomes empty. While draining is in progress,
|
|
|
|
* only chain queueing is allowed. IOW, only currently pending or running
|
|
|
|
* work items on @wq can queue further work items on it. @wq is flushed
|
|
|
|
* repeatedly until it becomes empty. The number of flushing is determined
|
|
|
|
* by the depth of chaining and should be relatively short. Whine if it
|
|
|
|
* takes too long.
|
|
|
|
*/
|
|
|
|
void drain_workqueue(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
unsigned int flush_cnt = 0;
|
|
|
|
struct pool_workqueue *pwq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* __queue_work() needs to test whether there are drainers, is much
|
|
|
|
* hotter than drain_workqueue() and already looks at @wq->flags.
|
|
|
|
* Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
|
|
|
|
*/
|
|
|
|
mutex_lock(&wq->mutex);
|
|
|
|
if (!wq->nr_drainers++)
|
|
|
|
wq->flags |= __WQ_DRAINING;
|
|
|
|
mutex_unlock(&wq->mutex);
|
|
|
|
reflush:
|
|
|
|
__flush_workqueue(wq);
|
|
|
|
|
|
|
|
mutex_lock(&wq->mutex);
|
|
|
|
|
|
|
|
for_each_pwq(pwq, wq) {
|
|
|
|
bool drained;
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pwq->pool->lock);
|
|
|
|
drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
|
|
|
|
raw_spin_unlock_irq(&pwq->pool->lock);
|
|
|
|
|
|
|
|
if (drained)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (++flush_cnt == 10 ||
|
|
|
|
(flush_cnt % 100 == 0 && flush_cnt <= 1000))
|
|
|
|
pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
|
|
|
|
wq->name, __func__, flush_cnt);
|
|
|
|
|
|
|
|
mutex_unlock(&wq->mutex);
|
|
|
|
goto reflush;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!--wq->nr_drainers)
|
|
|
|
wq->flags &= ~__WQ_DRAINING;
|
|
|
|
mutex_unlock(&wq->mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(drain_workqueue);
|
|
|
|
|
|
|
|
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
|
|
|
|
bool from_cancel)
|
|
|
|
{
|
|
|
|
struct worker *worker = NULL;
|
|
|
|
struct worker_pool *pool;
|
|
|
|
struct pool_workqueue *pwq;
|
|
|
|
|
|
|
|
might_sleep();
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
pool = get_work_pool(work);
|
|
|
|
if (!pool) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
/* see the comment in try_to_grab_pending() with the same code */
|
|
|
|
pwq = get_work_pwq(work);
|
|
|
|
if (pwq) {
|
|
|
|
if (unlikely(pwq->pool != pool))
|
|
|
|
goto already_gone;
|
|
|
|
} else {
|
|
|
|
worker = find_worker_executing_work(pool, work);
|
|
|
|
if (!worker)
|
|
|
|
goto already_gone;
|
|
|
|
pwq = worker->current_pwq;
|
|
|
|
}
|
|
|
|
|
|
|
|
check_flush_dependency(pwq->wq, work);
|
|
|
|
|
|
|
|
insert_wq_barrier(pwq, barr, work, worker);
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Force a lock recursion deadlock when using flush_work() inside a
|
|
|
|
* single-threaded or rescuer equipped workqueue.
|
|
|
|
*
|
|
|
|
* For single threaded workqueues the deadlock happens when the work
|
|
|
|
* is after the work issuing the flush_work(). For rescuer equipped
|
|
|
|
* workqueues the deadlock happens when the rescuer stalls, blocking
|
|
|
|
* forward progress.
|
|
|
|
*/
|
|
|
|
if (!from_cancel &&
|
|
|
|
(pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
|
|
|
|
lock_map_acquire(&pwq->wq->lockdep_map);
|
|
|
|
lock_map_release(&pwq->wq->lockdep_map);
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
return true;
|
|
|
|
already_gone:
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
rcu_read_unlock();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool __flush_work(struct work_struct *work, bool from_cancel)
|
|
|
|
{
|
|
|
|
struct wq_barrier barr;
|
|
|
|
|
|
|
|
if (WARN_ON(!wq_online))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (WARN_ON(!work->func))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
lock_map_acquire(&work->lockdep_map);
|
|
|
|
lock_map_release(&work->lockdep_map);
|
|
|
|
|
|
|
|
if (start_flush_work(work, &barr, from_cancel)) {
|
|
|
|
wait_for_completion(&barr.done);
|
|
|
|
destroy_work_on_stack(&barr.work);
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* flush_work - wait for a work to finish executing the last queueing instance
|
|
|
|
* @work: the work to flush
|
|
|
|
*
|
|
|
|
* Wait until @work has finished execution. @work is guaranteed to be idle
|
|
|
|
* on return if it hasn't been requeued since flush started.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %true if flush_work() waited for the work to finish execution,
|
|
|
|
* %false if it was already idle.
|
|
|
|
*/
|
|
|
|
bool flush_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
return __flush_work(work, false);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(flush_work);
|
|
|
|
|
|
|
|
struct cwt_wait {
|
|
|
|
wait_queue_entry_t wait;
|
|
|
|
struct work_struct *work;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
|
|
|
|
{
|
|
|
|
struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
|
|
|
|
|
|
|
|
if (cwait->work != key)
|
|
|
|
return 0;
|
|
|
|
return autoremove_wake_function(wait, mode, sync, key);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
|
|
|
|
{
|
|
|
|
static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
do {
|
|
|
|
ret = try_to_grab_pending(work, is_dwork, &flags);
|
|
|
|
/*
|
|
|
|
* If someone else is already canceling, wait for it to
|
|
|
|
* finish. flush_work() doesn't work for PREEMPT_NONE
|
|
|
|
* because we may get scheduled between @work's completion
|
|
|
|
* and the other canceling task resuming and clearing
|
|
|
|
* CANCELING - flush_work() will return false immediately
|
|
|
|
* as @work is no longer busy, try_to_grab_pending() will
|
|
|
|
* return -ENOENT as @work is still being canceled and the
|
|
|
|
* other canceling task won't be able to clear CANCELING as
|
|
|
|
* we're hogging the CPU.
|
|
|
|
*
|
|
|
|
* Let's wait for completion using a waitqueue. As this
|
|
|
|
* may lead to the thundering herd problem, use a custom
|
|
|
|
* wake function which matches @work along with exclusive
|
|
|
|
* wait and wakeup.
|
|
|
|
*/
|
|
|
|
if (unlikely(ret == -ENOENT)) {
|
|
|
|
struct cwt_wait cwait;
|
|
|
|
|
|
|
|
init_wait(&cwait.wait);
|
|
|
|
cwait.wait.func = cwt_wakefn;
|
|
|
|
cwait.work = work;
|
|
|
|
|
|
|
|
prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
|
|
|
|
TASK_UNINTERRUPTIBLE);
|
|
|
|
if (work_is_canceling(work))
|
|
|
|
schedule();
|
|
|
|
finish_wait(&cancel_waitq, &cwait.wait);
|
|
|
|
}
|
|
|
|
} while (unlikely(ret < 0));
|
|
|
|
|
|
|
|
/* tell other tasks trying to grab @work to back off */
|
|
|
|
mark_work_canceling(work);
|
|
|
|
local_irq_restore(flags);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This allows canceling during early boot. We know that @work
|
|
|
|
* isn't executing.
|
|
|
|
*/
|
|
|
|
if (wq_online)
|
|
|
|
__flush_work(work, true);
|
|
|
|
|
|
|
|
clear_work_data(work);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Paired with prepare_to_wait() above so that either
|
|
|
|
* waitqueue_active() is visible here or !work_is_canceling() is
|
|
|
|
* visible there.
|
|
|
|
*/
|
|
|
|
smp_mb();
|
|
|
|
if (waitqueue_active(&cancel_waitq))
|
|
|
|
__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cancel_work_sync - cancel a work and wait for it to finish
|
|
|
|
* @work: the work to cancel
|
|
|
|
*
|
|
|
|
* Cancel @work and wait for its execution to finish. This function
|
|
|
|
* can be used even if the work re-queues itself or migrates to
|
|
|
|
* another workqueue. On return from this function, @work is
|
|
|
|
* guaranteed to be not pending or executing on any CPU.
|
|
|
|
*
|
|
|
|
* cancel_work_sync(&delayed_work->work) must not be used for
|
|
|
|
* delayed_work's. Use cancel_delayed_work_sync() instead.
|
|
|
|
*
|
|
|
|
* The caller must ensure that the workqueue on which @work was last
|
|
|
|
* queued can't be destroyed before this function returns.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %true if @work was pending, %false otherwise.
|
|
|
|
*/
|
|
|
|
bool cancel_work_sync(struct work_struct *work)
|
|
|
|
{
|
|
|
|
return __cancel_work_timer(work, false);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(cancel_work_sync);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* flush_delayed_work - wait for a dwork to finish executing the last queueing
|
|
|
|
* @dwork: the delayed work to flush
|
|
|
|
*
|
|
|
|
* Delayed timer is cancelled and the pending work is queued for
|
|
|
|
* immediate execution. Like flush_work(), this function only
|
|
|
|
* considers the last queueing instance of @dwork.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %true if flush_work() waited for the work to finish execution,
|
|
|
|
* %false if it was already idle.
|
|
|
|
*/
|
|
|
|
bool flush_delayed_work(struct delayed_work *dwork)
|
|
|
|
{
|
|
|
|
local_irq_disable();
|
|
|
|
if (del_timer_sync(&dwork->timer))
|
|
|
|
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
|
|
|
|
local_irq_enable();
|
|
|
|
return flush_work(&dwork->work);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(flush_delayed_work);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* flush_rcu_work - wait for a rwork to finish executing the last queueing
|
|
|
|
* @rwork: the rcu work to flush
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %true if flush_rcu_work() waited for the work to finish execution,
|
|
|
|
* %false if it was already idle.
|
|
|
|
*/
|
|
|
|
bool flush_rcu_work(struct rcu_work *rwork)
|
|
|
|
{
|
|
|
|
if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
|
|
|
|
rcu_barrier();
|
|
|
|
flush_work(&rwork->work);
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return flush_work(&rwork->work);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(flush_rcu_work);
|
|
|
|
|
|
|
|
static bool __cancel_work(struct work_struct *work, bool is_dwork)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
do {
|
|
|
|
ret = try_to_grab_pending(work, is_dwork, &flags);
|
|
|
|
} while (unlikely(ret == -EAGAIN));
|
|
|
|
|
|
|
|
if (unlikely(ret < 0))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
|
|
|
|
local_irq_restore(flags);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See cancel_delayed_work()
|
|
|
|
*/
|
|
|
|
bool cancel_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
return __cancel_work(work, false);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cancel_work);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cancel_delayed_work - cancel a delayed work
|
|
|
|
* @dwork: delayed_work to cancel
|
|
|
|
*
|
|
|
|
* Kill off a pending delayed_work.
|
|
|
|
*
|
|
|
|
* Return: %true if @dwork was pending and canceled; %false if it wasn't
|
|
|
|
* pending.
|
|
|
|
*
|
|
|
|
* Note:
|
|
|
|
* The work callback function may still be running on return, unless
|
|
|
|
* it returns %true and the work doesn't re-arm itself. Explicitly flush or
|
|
|
|
* use cancel_delayed_work_sync() to wait on it.
|
|
|
|
*
|
|
|
|
* This function is safe to call from any context including IRQ handler.
|
|
|
|
*/
|
|
|
|
bool cancel_delayed_work(struct delayed_work *dwork)
|
|
|
|
{
|
|
|
|
return __cancel_work(&dwork->work, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cancel_delayed_work);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
|
|
|
|
* @dwork: the delayed work cancel
|
|
|
|
*
|
|
|
|
* This is cancel_work_sync() for delayed works.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* %true if @dwork was pending, %false otherwise.
|
|
|
|
*/
|
|
|
|
bool cancel_delayed_work_sync(struct delayed_work *dwork)
|
|
|
|
{
|
|
|
|
return __cancel_work_timer(&dwork->work, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cancel_delayed_work_sync);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* schedule_on_each_cpu - execute a function synchronously on each online CPU
|
|
|
|
* @func: the function to call
|
|
|
|
*
|
|
|
|
* schedule_on_each_cpu() executes @func on each online CPU using the
|
|
|
|
* system workqueue and blocks until all CPUs have completed.
|
|
|
|
* schedule_on_each_cpu() is very slow.
|
|
|
|
*
|
|
|
|
* Return:
|
|
|
|
* 0 on success, -errno on failure.
|
|
|
|
*/
|
|
|
|
int schedule_on_each_cpu(work_func_t func)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
struct work_struct __percpu *works;
|
|
|
|
|
|
|
|
works = alloc_percpu(struct work_struct);
|
|
|
|
if (!works)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
cpus_read_lock();
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
struct work_struct *work = per_cpu_ptr(works, cpu);
|
|
|
|
|
|
|
|
INIT_WORK(work, func);
|
|
|
|
schedule_work_on(cpu, work);
|
|
|
|
}
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu)
|
|
|
|
flush_work(per_cpu_ptr(works, cpu));
|
|
|
|
|
|
|
|
cpus_read_unlock();
|
|
|
|
free_percpu(works);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* execute_in_process_context - reliably execute the routine with user context
|
|
|
|
* @fn: the function to execute
|
|
|
|
* @ew: guaranteed storage for the execute work structure (must
|
|
|
|
* be available when the work executes)
|
|
|
|
*
|
|
|
|
* Executes the function immediately if process context is available,
|
|
|
|
* otherwise schedules the function for delayed execution.
|
|
|
|
*
|
|
|
|
* Return: 0 - function was executed
|
|
|
|
* 1 - function was scheduled for execution
|
|
|
|
*/
|
|
|
|
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
|
|
|
|
{
|
|
|
|
if (!in_interrupt()) {
|
|
|
|
fn(&ew->work);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
INIT_WORK(&ew->work, fn);
|
|
|
|
schedule_work(&ew->work);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(execute_in_process_context);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* free_workqueue_attrs - free a workqueue_attrs
|
|
|
|
* @attrs: workqueue_attrs to free
|
|
|
|
*
|
|
|
|
* Undo alloc_workqueue_attrs().
|
|
|
|
*/
|
|
|
|
void free_workqueue_attrs(struct workqueue_attrs *attrs)
|
|
|
|
{
|
|
|
|
if (attrs) {
|
|
|
|
free_cpumask_var(attrs->cpumask);
|
|
|
|
kfree(attrs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* alloc_workqueue_attrs - allocate a workqueue_attrs
|
|
|
|
*
|
|
|
|
* Allocate a new workqueue_attrs, initialize with default settings and
|
|
|
|
* return it.
|
|
|
|
*
|
|
|
|
* Return: The allocated new workqueue_attr on success. %NULL on failure.
|
|
|
|
*/
|
|
|
|
struct workqueue_attrs *alloc_workqueue_attrs(void)
|
|
|
|
{
|
|
|
|
struct workqueue_attrs *attrs;
|
|
|
|
|
|
|
|
attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
|
|
|
|
if (!attrs)
|
|
|
|
goto fail;
|
|
|
|
if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
cpumask_copy(attrs->cpumask, cpu_possible_mask);
|
|
|
|
return attrs;
|
|
|
|
fail:
|
|
|
|
free_workqueue_attrs(attrs);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void copy_workqueue_attrs(struct workqueue_attrs *to,
|
|
|
|
const struct workqueue_attrs *from)
|
|
|
|
{
|
|
|
|
to->nice = from->nice;
|
|
|
|
cpumask_copy(to->cpumask, from->cpumask);
|
|
|
|
/*
|
|
|
|
* Unlike hash and equality test, this function doesn't ignore
|
|
|
|
* ->no_numa as it is used for both pool and wq attrs. Instead,
|
|
|
|
* get_unbound_pool() explicitly clears ->no_numa after copying.
|
|
|
|
*/
|
|
|
|
to->no_numa = from->no_numa;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* hash value of the content of @attr */
|
|
|
|
static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
|
|
|
|
{
|
|
|
|
u32 hash = 0;
|
|
|
|
|
|
|
|
hash = jhash_1word(attrs->nice, hash);
|
|
|
|
hash = jhash(cpumask_bits(attrs->cpumask),
|
|
|
|
BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
|
|
|
|
return hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* content equality test */
|
|
|
|
static bool wqattrs_equal(const struct workqueue_attrs *a,
|
|
|
|
const struct workqueue_attrs *b)
|
|
|
|
{
|
|
|
|
if (a->nice != b->nice)
|
|
|
|
return false;
|
|
|
|
if (!cpumask_equal(a->cpumask, b->cpumask))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* init_worker_pool - initialize a newly zalloc'd worker_pool
|
|
|
|
* @pool: worker_pool to initialize
|
|
|
|
*
|
|
|
|
* Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
|
|
|
|
*
|
|
|
|
* Return: 0 on success, -errno on failure. Even on failure, all fields
|
|
|
|
* inside @pool proper are initialized and put_unbound_pool() can be called
|
|
|
|
* on @pool safely to release it.
|
|
|
|
*/
|
|
|
|
static int init_worker_pool(struct worker_pool *pool)
|
|
|
|
{
|
|
|
|
raw_spin_lock_init(&pool->lock);
|
|
|
|
pool->id = -1;
|
|
|
|
pool->cpu = -1;
|
|
|
|
pool->node = NUMA_NO_NODE;
|
|
|
|
pool->flags |= POOL_DISASSOCIATED;
|
|
|
|
pool->watchdog_ts = jiffies;
|
|
|
|
INIT_LIST_HEAD(&pool->worklist);
|
|
|
|
INIT_LIST_HEAD(&pool->idle_list);
|
|
|
|
hash_init(pool->busy_hash);
|
|
|
|
|
|
|
|
timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
|
|
|
|
INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
|
|
|
|
|
|
|
|
timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&pool->workers);
|
|
|
|
INIT_LIST_HEAD(&pool->dying_workers);
|
|
|
|
|
|
|
|
ida_init(&pool->worker_ida);
|
|
|
|
INIT_HLIST_NODE(&pool->hash_node);
|
|
|
|
pool->refcnt = 1;
|
|
|
|
|
|
|
|
/* shouldn't fail above this point */
|
|
|
|
pool->attrs = alloc_workqueue_attrs();
|
|
|
|
if (!pool->attrs)
|
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_LOCKDEP
|
|
|
|
static void wq_init_lockdep(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
char *lock_name;
|
|
|
|
|
|
|
|
lockdep_register_key(&wq->key);
|
|
|
|
lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
|
|
|
|
if (!lock_name)
|
|
|
|
lock_name = wq->name;
|
|
|
|
|
|
|
|
wq->lock_name = lock_name;
|
|
|
|
lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wq_unregister_lockdep(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
lockdep_unregister_key(&wq->key);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wq_free_lockdep(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
if (wq->lock_name != wq->name)
|
|
|
|
kfree(wq->lock_name);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static void wq_init_lockdep(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wq_unregister_lockdep(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wq_free_lockdep(struct workqueue_struct *wq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static void rcu_free_wq(struct rcu_head *rcu)
|
|
|
|
{
|
|
|
|
struct workqueue_struct *wq =
|
|
|
|
container_of(rcu, struct workqueue_struct, rcu);
|
|
|
|
|
|
|
|
wq_free_lockdep(wq);
|
|
|
|
|
|
|
|
if (!(wq->flags & WQ_UNBOUND))
|
|
|
|
free_percpu(wq->cpu_pwqs);
|
|
|
|
else
|
|
|
|
free_workqueue_attrs(wq->unbound_attrs);
|
|
|
|
|
|
|
|
kfree(wq);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_free_pool(struct rcu_head *rcu)
|
|
|
|
{
|
|
|
|
struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
|
|
|
|
|
|
|
|
ida_destroy(&pool->worker_ida);
|
|
|
|
free_workqueue_attrs(pool->attrs);
|
|
|
|
kfree(pool);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* put_unbound_pool - put a worker_pool
|
|
|
|
* @pool: worker_pool to put
|
|
|
|
*
|
|
|
|
* Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
|
|
|
|
* safe manner. get_unbound_pool() calls this function on its failure path
|
|
|
|
* and this function should be able to release pools which went through,
|
|
|
|
* successfully or not, init_worker_pool().
|
|
|
|
*
|
|
|
|
* Should be called with wq_pool_mutex held.
|
|
|
|
*/
|
|
|
|
static void put_unbound_pool(struct worker_pool *pool)
|
|
|
|
{
|
|
|
|
DECLARE_COMPLETION_ONSTACK(detach_completion);
|
|
|
|
struct list_head cull_list;
|
|
|
|
struct worker *worker;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&cull_list);
|
|
|
|
|
|
|
|
lockdep_assert_held(&wq_pool_mutex);
|
|
|
|
|
|
|
|
if (--pool->refcnt)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* sanity checks */
|
|
|
|
if (WARN_ON(!(pool->cpu < 0)) ||
|
|
|
|
WARN_ON(!list_empty(&pool->worklist)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* release id and unhash */
|
|
|
|
if (pool->id >= 0)
|
|
|
|
idr_remove(&worker_pool_idr, pool->id);
|
|
|
|
hash_del(&pool->hash_node);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Become the manager and destroy all workers. This prevents
|
|
|
|
* @pool's workers from blocking on attach_mutex. We're the last
|
|
|
|
* manager and @pool gets freed with the flag set.
|
|
|
|
*
|
|
|
|
* Having a concurrent manager is quite unlikely to happen as we can
|
|
|
|
* only get here with
|
|
|
|
* pwq->refcnt == pool->refcnt == 0
|
|
|
|
* which implies no work queued to the pool, which implies no worker can
|
|
|
|
* become the manager. However a worker could have taken the role of
|
|
|
|
* manager before the refcnts dropped to 0, since maybe_create_worker()
|
|
|
|
* drops pool->lock
|
|
|
|
*/
|
|
|
|
while (true) {
|
|
|
|
rcuwait_wait_event(&manager_wait,
|
|
|
|
!(pool->flags & POOL_MANAGER_ACTIVE),
|
|
|
|
TASK_UNINTERRUPTIBLE);
|
|
|
|
|
|
|
|
mutex_lock(&wq_pool_attach_mutex);
|
|
|
|
raw_spin_lock_irq(&pool->lock);
|
|
|
|
if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
|
|
|
|
pool->flags |= POOL_MANAGER_ACTIVE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
while ((worker = first_idle_worker(pool)))
|
|
|
|
set_worker_dying(worker, &cull_list);
|
|
|
|
WARN_ON(pool->nr_workers || pool->nr_idle);
|
|
|
|
raw_spin_unlock_irq(&pool->lock);
|
|
|
|
|
|
|
|
wake_dying_workers(&cull_list);
|
|
|
|
|
|
|
|
if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
|
|
|
|
pool->detach_completion = &detach_completion;
|
|
|
|
mutex_unlock(&wq_pool_attach_mutex);
|
|
|
|
|
|
|
|
if (pool->detach_completion)
|
|
|
|
wait_for_completion(pool->detach_completion);
|
|
|
|
|
|
|
|
/* shut down the timers */
|
|
|
|
del_timer_sync(&pool->idle_timer);
|
|
|
|
cancel_work_sync(&pool->idle_cull_work);
|
|
|
|
del_timer_sync(&pool->mayday_timer);
|
|
|
|
|
|
|
|
/* RCU protected to allow dereferences from get_work_pool() */
|
|
|
|
call_rcu(&pool->rcu, rcu_free_pool);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_unbound_pool - get a worker_pool with the specified attributes
|
|
|
|
* @attrs: the attributes of the worker_pool to get
|
|
|
|
*
|
|
|
|
* Obtain a worker_pool which has the same attributes as @attrs, bump the
|
|
|
|
* reference count and return it. If there already is a matching
|
|
|
|
* worker_pool, it will be used; otherwise, this function attempts to
|
|
|
|
* create a new one.
|
|
|
|
*
|
|
|
|
* Should be called with wq_pool_mutex held.
|
|
|
|
*
|
|
|
|
* Return: On success, a worker_pool with the same attributes as @attrs.
|
|
|
|
* On failure, %NULL.
|
|
|
|
*/
|
|
|
|
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
|
|
|
|
{
|
|
|
|
u32 hash = wqattrs_hash(attrs);
|
|
|
|
struct worker_pool *pool;
|
|
|
|
int node;
|
|
|
|
int target_node = NUMA_NO_NODE;
|
|
|
|
|
|
|
|
lockdep_assert_held(&wq_pool_mutex);
|
|
|
|
|
|
|
|
/* do we already have a matching pool? */
|
|
|
|
hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
|
|
|
|
if (wqattrs_equal(pool->attrs, attrs)) {
|
|
|
|
pool->refcnt++;
|
|
|
|
return pool;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if cpumask is contained inside a NUMA node, we belong to that node */
|
|
|
|
if (wq_numa_enabled) {
|
|
|
|
for_each_node(node) {
|
|
|
|
if (cpumask_subset(attrs->cpumask,
|
|
|
|
wq_numa_possible_cpumask[node])) {
|
|
|
|
target_node = node;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* nope, create a new one */
|
|
|
|
pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
|
|
|
|
if (!pool || init_worker_pool(pool) < 0)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
|
|
|
|
copy_workqueue_attrs(pool->attrs, attrs);
|
|
|
|
pool->node = target_node;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* no_numa isn't a worker_pool attribute, always clear it. See
|
|
|
|
* 'struct workqueue_attrs' comments for detail.
|
|
|
|
*/
|
|
|
|
pool->attrs->no_numa = false;
|
|
|
|
|
|
|
|
if (worker_pool_assign_id(pool) < 0)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
/* create and start the initial worker */
|
|
|
|
if (wq_online && !create_worker(pool))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
/* install */
|
|
|
|
hash_add(unbound_pool_hash, &pool->hash_node, hash);
|
|
|
|
|
|
|
|
return pool;
|
|
|
|
fail:
|
|
|
|
if (pool)
|
|
|
|
put_unbound_pool(pool);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_free_pwq(struct rcu_head *rcu)
|
|
|
|
{
|
|
|
|
kmem_cache_free(pwq_cache,
|
|
|
|
container_of(rcu, struct pool_workqueue, rcu));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
|
|