Displaying 8 results from an estimated 8 matches for "ivcpu".
2019 Aug 13
1
[RFC PATCH v6 16/92] kvm: introspection: handle events and event replies
On 09/08/19 17:59, Adalbert Lazăr wrote:
>
> +					  reply->padding2);
> +
> +	ivcpu->reply_waiting = false;
> +	return expected->error;
> +}
> +
> /*
Is this missing a wakeup?
>
> +static bool need_to_wait(struct kvm_vcpu *vcpu)
> +{
> +	struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
> +
> +	return ivcpu->reply_waiting;
> +}
> +
Do you ac...
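The reviewer's wakeup question points at the usual completion pattern for this kind of handshake: the side that receives the reply clears the flag and then explicitly wakes the vCPU thread sleeping on it. A minimal sketch of that pattern (the wait queue used here is an assumption; the excerpt does not show which one the series actually uses):

	ivcpu->reply_waiting = false;
	/* Without an explicit wakeup, the sleeping vCPU only notices the
	 * cleared flag on its next unrelated wakeup. The queue below is
	 * illustrative; the real field is not visible in this excerpt.
	 */
	swake_up_one(&vcpu->wq);
	return expected->error;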
2019 Aug 09
0
[RFC PATCH v6 13/92] kvm: introspection: make the vCPU wait even when its jobs list is empty
...*/
diff --git a/virt/kvm/kvmi.c b/virt/kvm/kvmi.c
index 07ebd1c629b0..3c884dc0e38c 100644
--- a/virt/kvm/kvmi.c
+++ b/virt/kvm/kvmi.c
@@ -135,6 +135,19 @@ static void kvmi_free_job(struct kvmi_job *job)
 	kmem_cache_free(job_cache, job);
 }
 
+static struct kvmi_job *kvmi_pull_job(struct kvmi_vcpu *ivcpu)
+{
+	struct kvmi_job *job = NULL;
+
+	spin_lock(&ivcpu->job_lock);
+	job = list_first_entry_or_null(&ivcpu->job_list, typeof(*job), link);
+	if (job)
+		list_del(&job->link);
+	spin_unlock(&ivcpu->job_lock);
+
+	return job;
+}
+
 static bool alloc_ivcpu(struct kvm_vcpu...
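For context, kvmi_pull_job() above is the consumer half of the pattern: it pops exactly one job under ivcpu->job_lock so the caller can run it without holding the lock. The producer half is not part of this excerpt; a sketch of what it conventionally looks like (the helper name is illustrative, not taken from the patch):

/* Illustrative counterpart to kvmi_pull_job(), not from the patch text. */
static void kvmi_push_job(struct kvmi_vcpu *ivcpu, struct kvmi_job *job)
{
	spin_lock(&ivcpu->job_lock);
	list_add_tail(&job->link, &ivcpu->job_list);	/* FIFO: pull takes the head */
	spin_unlock(&ivcpu->job_lock);
}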
2019 Aug 09
0
[RFC PATCH v6 64/92] kvm: introspection: add single-stepping
...,7 +520,6 @@ bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
 	u32 ctx_size;
 	u64 ctx_addr;
 	u32 action;
-	bool singlestep_ignored;
 	bool ret = false;
 
 	if (!kvm_spt_fault(vcpu))
@@ -533,7 +532,7 @@ bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
 	if (ivcpu->effective_rep_complete)
 		return true;
 
-	action = kvmi_msg_send_pf(vcpu, gpa, gva, access, &singlestep_ignored,
+	action = kvmi_msg_send_pf(vcpu, gpa, gva, access, &ivcpu->ss_requested,
 				  &ivcpu->rep_complete, &ctx_addr,
 				  ivcpu->ctx_data, &ctx_size);...
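The net effect of this hunk is that the introspection tool's single-step request is no longer discarded in a local variable: it is stored in ivcpu->ss_requested so the vCPU can act on it once the page-fault event has been handled. The consumer side is outside this excerpt; a rough sketch of the idea (the helper name is illustrative, not from the patch):

	/* Consumer side, after the PF event reply has been processed: */
	if (ivcpu->ss_requested)
		kvmi_start_single_stepping(vcpu);	/* illustrative name only */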
2019 Aug 12
1
[RFC PATCH v6 64/92] kvm: introspection: add single-stepping
...vm_vcpu *vcpu, gpa_t gpa, gva_t gva,
> 	u32 ctx_size;
> 	u64 ctx_addr;
> 	u32 action;
> -	bool singlestep_ignored;
> 	bool ret = false;
>
> 	if (!kvm_spt_fault(vcpu))
> @@ -533,7 +532,7 @@ bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
> 	if (ivcpu->effective_rep_complete)
> 		return true;
>
> -	action = kvmi_msg_send_pf(vcpu, gpa, gva, access, &singlestep_ignored,
> +	action = kvmi_msg_send_pf(vcpu, gpa, gva, access, &ivcpu->ss_requested,
> 				  &ivcpu->rep_complete, &ctx_addr,
> 				  ivcpu->...
2019 Aug 09
0
[RFC PATCH v6 16/92] kvm: introspection: handle events and event replies
...(KVMI_GET_VERSION, ikvm->cmd_allow_mask);
 	set_bit(KVMI_CHECK_COMMAND, ikvm->cmd_allow_mask);
 	set_bit(KVMI_CHECK_EVENT, ikvm->cmd_allow_mask);
@@ -520,10 +522,20 @@ void kvmi_run_jobs(struct kvm_vcpu *vcpu)
 	}
 }
 
+static bool need_to_wait(struct kvm_vcpu *vcpu)
+{
+	struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
+
+	return ivcpu->reply_waiting;
+}
+
 static bool done_waiting(struct kvm_vcpu *vcpu)
 {
 	struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
 
+	if (!need_to_wait(vcpu))
+		return true;
+
 	return !list_empty(&ivcpu->job_list);
 }
@@ -552,6 +564,9 @@ int kvmi_run_jobs_and_wait(stru...
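The kvmi_run_jobs_and_wait() hunk is cut off in this result. The shape such a loop typically takes with the two helpers above is to drain the job list, stop once the reply has arrived, and otherwise sleep until done_waiting() turns true. A sketch only; the sleeping primitive and the exact body are assumptions, not the patch text:

	for (;;) {
		kvmi_run_jobs(vcpu);		/* drain whatever jobs are queued */

		if (!need_to_wait(vcpu))	/* the event reply has arrived */
			break;

		/* sleep (e.g. on the vCPU's wait queue) until done_waiting()
		 * becomes true: either the reply landed or a new job was queued */
	}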
2019 Aug 09
117
[RFC PATCH v6 00/92] VM introspection
The KVM introspection subsystem provides a facility for applications running
on the host or in a separate VM to control the execution of other VMs
(pause, resume, shutdown), query the state of the vCPUs (GPRs, MSRs, etc.),
alter the page access bits in the shadow page tables (only for the hardware
backed ones, e.g. Intel's EPT) and receive notifications when events of
interest have taken place
2019 Aug 09
0
[RFC PATCH v6 55/92] kvm: introspection: add KVMI_CONTROL_MSR and KVMI_EVENT_MSR
...0..5dba4f87afef 100644
--- a/arch/x86/kvm/kvmi.c
+++ b/arch/x86/kvm/kvmi.c
@@ -9,6 +9,133 @@
 #include <asm/vmx.h>
 #include "../../../virt/kvm/kvmi_int.h"
 
+static unsigned long *msr_mask(struct kvm_vcpu *vcpu, unsigned int *msr)
+{
+	switch (*msr) {
+	case 0 ... 0x1fff:
+		return IVCPU(vcpu)->msr_mask.low;
+	case 0xc0000000 ... 0xc0001fff:
+		*msr &= 0x1fff;
+		return IVCPU(vcpu)->msr_mask.high;
+	}
+
+	return NULL;
+}
+
+static bool test_msr_mask(struct kvm_vcpu *vcpu, unsigned int msr)
+{
+	unsigned long *mask = msr_mask(vcpu, &msr);
+
+	if (!mask)
+		return false...
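test_msr_mask() is cut off above. Given msr_mask(), which returns the low bitmap for MSRs 0-0x1fff and the high bitmap for 0xc0000000-0xc0001fff (folding the index back into 0-0x1fff), the natural way to finish the check is a test_bit() on the folded index. A sketch under that assumption, not the literal patch text:

static bool test_msr_mask(struct kvm_vcpu *vcpu, unsigned int msr)
{
	unsigned long *mask = msr_mask(vcpu, &msr);	/* may fold msr into 0..0x1fff */

	if (!mask)
		return false;			/* MSR outside the tracked ranges */

	return !!test_bit(msr, mask);		/* assumed completion of the truncated body */
}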