2023-08-30 17:31:07 +02:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
|
|
|
|
#define __ASM_ARM_KVM_ARCH_TIMER_H
|
|
|
|
|
|
|
|
#include <linux/clocksource.h>
|
|
|
|
#include <linux/hrtimer.h>
|
|
|
|
|
|
|
|
/*
 * Index of each per-vcpu timer context.  The two EL0-accessible timers
 * (physical and virtual) are deliberately placed first so that code
 * which only deals with them can iterate up to NR_KVM_EL0_TIMERS; the
 * hypervisor timers alias the tail of the same array.
 */
enum kvm_arch_timers {
	TIMER_PTIMER		= 0,	/* physical timer */
	TIMER_VTIMER		= 1,	/* virtual timer */
	NR_KVM_EL0_TIMERS	= 2,	/* count of the EL0-accessible timers above */
	TIMER_HVTIMER		= 2,	/* hyp virtual timer (== NR_KVM_EL0_TIMERS) */
	TIMER_HPTIMER		= 3,	/* hyp physical timer */
	NR_KVM_TIMERS		= 4,	/* total number of timer contexts */
};
|
|
|
|
|
|
|
|
/*
 * Identifiers for the individual registers of one architected timer,
 * used to select a register in the generic timer accessors below
 * (kvm_arm_timer_read_sysreg()/kvm_arm_timer_write_sysreg()).
 */
enum kvm_arch_timer_regs {
	TIMER_REG_CNT	= 0,	/* counter value */
	TIMER_REG_CVAL	= 1,	/* compare value */
	TIMER_REG_TVAL	= 2,	/* timer value */
	TIMER_REG_CTL	= 3,	/* control register */
	TIMER_REG_VOFF	= 4,	/* virtual offset */
};
|
|
|
|
|
|
|
|
/*
 * Describes where the offset applied to a timer's counter lives.
 * Both pointers are optional; a NULL pointer contributes a zero
 * offset.  The pointed-to values are owned by the VM / vcpu
 * structures, not by this struct.
 */
struct arch_timer_offset {
	/*
	 * If set, pointer to one of the offsets in the kvm's offset
	 * structure. If NULL, assume a zero offset.
	 */
	u64 *vm_offset;
	/*
	 * If set, pointer to one of the offsets in the vcpu's sysreg
	 * array. If NULL, assume a zero offset.
	 */
	u64 *vcpu_offset;
};
|
|
|
|
|
|
|
|
/*
 * Timer state that is shared by all vcpus of a VM (the ppi[] comment
 * below makes the VM-wide scope explicit).
 */
struct arch_timer_vm_data {
	/* Offset applied to the virtual timer/counter */
	u64 voffset;
	/* Offset applied to the physical timer/counter */
	u64 poffset;

	/* The PPI for each timer, global to the VM */
	u8 ppi[NR_KVM_TIMERS];
};
|
|
|
|
|
|
|
|
/*
 * Per-timer state for a single timer of a single vcpu; one of the
 * NR_KVM_TIMERS entries of arch_timer_cpu::timers[].
 */
struct arch_timer_context {
	/* Back-pointer to the owning vcpu */
	struct kvm_vcpu			*vcpu;

	/* Emulated Timer (may be unused) */
	struct hrtimer			hrtimer;
	/* NOTE(review): presumably a sub-ns remainder for the emulated
	 * timer's expiry computation — confirm against arch_timer.c */
	u64				ns_frac;

	/* Offset for this counter/timer */
	struct arch_timer_offset	offset;
	/*
	 * We have multiple paths which can save/restore the timer state onto
	 * the hardware, so we need some way of keeping track of where the
	 * latest state is.
	 */
	bool				loaded;

	/* Output level of the timer IRQ */
	struct {
		bool			level;
	} irq;

	/* Duplicated state from arch_timer.c for convenience */
	u32				host_timer_irq;
};
|
|
|
|
|
|
|
|
/*
 * Maps each timer context of a vcpu to how it is realised: backed
 * directly by a hardware timer ("direct") or emulated in software
 * via the context's hrtimer ("emul").
 */
struct timer_map {
	struct arch_timer_context *direct_vtimer;
	struct arch_timer_context *direct_ptimer;
	struct arch_timer_context *emul_vtimer;
	struct arch_timer_context *emul_ptimer;
};
|
|
|
|
|
|
|
|
/*
 * Per-vcpu timer state: all timer contexts plus the background timer
 * that fires while the vcpu is not running.
 */
struct arch_timer_cpu {
	/* One context per timer, indexed by enum kvm_arch_timers */
	struct arch_timer_context timers[NR_KVM_TIMERS];

	/* Background timer used when the guest is not running */
	struct hrtimer			bg_timer;

	/* Is the timer enabled */
	bool			enabled;
};
|
|
|
|
|
|
|
|
/* One-time hypervisor timer setup and per-vcpu lifecycle hooks */
int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);

/* VM-wide timer state initialisation */
void kvm_timer_init_vm(struct kvm *kvm);

/* Userspace register get/set by register id */
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);

/* Timer device-attribute accessors (struct kvm_device_attr based) */
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);

/* Read the host physical counter */
u64 kvm_phys_timer_read(void);

/* vcpu load/put hooks for moving timer state to/from the hardware */
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);

void kvm_timer_init_vhe(void);

/* Accessors for the per-vcpu timer state and the individual contexts */
#define vcpu_timer(v)	(&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t)	(&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
#define vcpu_hvtimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v)	(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

/* Recover the enum kvm_arch_timers index of a context by pointer math */
#define arch_timer_ctx_index(ctx)	((ctx) - vcpu_timer((ctx)->vcpu)->timers)

/* VM-wide timer data and the PPI assigned to a given context */
#define timer_vm_data(ctx)	(&(ctx)->vcpu->kvm->arch.timer_data)
#define timer_irq(ctx)		(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])

/* Generic read/write of one register of one timer (see the two enums) */
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg);
void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val);

/* Needed for tracing */
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);

/* CPU HP callbacks */
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);
|
|
|
|
|
|
|
|
#endif
|