search for: hv_hypercall_pg

Displaying 9 results from an estimated 9 matches for "hv_hypercall_pg".

2020 Sep 15
0
[PATCH RFC v1 08/18] x86/hyperv: handling hypercall page setup for root
...> /* Is Linux running as the root partition? */
> bool hv_root_partition;
> @@ -448,8 +449,29 @@ void __init hyperv_init(void)
>
> 	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> 	hypercall_msr.enable = 1;
> -	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
> -	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> +
> +	if (hv_root_partition) {
> +		struct page *pg;
> +		void *src, *dst;
> +
> +		/*
> +		 * Order is important here. We must enable the hypercall page
> +		 * so it is populated with code, then copy the cod...
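For context, a minimal sketch of how the truncated root-partition branch plausibly completes, assuming it follows the shape that later landed in hyperv_init(): write the MSR first so the hypervisor populates its page with code, then copy that code into the kernel's own executable vmalloc page.

	if (hv_root_partition) {
		struct page *pg;
		void *src, *dst;

		/*
		 * Enable the page first; the hypervisor fills it with
		 * hypercall code only after the MSR write.
		 */
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

		/* Copy the populated code into our executable page. */
		pg = vmalloc_to_page(hv_hypercall_pg);
		dst = kmap(pg);
		src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT,
			       PAGE_SIZE, MEMREMAP_WB);
		BUG_ON(!src || !dst);
		memcpy(dst, src, PAGE_SIZE);
		memunmap(src);
		kunmap(pg);
	} else {
		hypercall_msr.guest_physical_address =
			vmalloc_to_pfn(hv_hypercall_pg);
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	}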
2020 Sep 15
0
[PATCH RFC v1 08/18] x86/hyperv: handling hypercall page setup for root
...> > bool hv_root_partition;
>> > @@ -448,8 +449,29 @@ void __init hyperv_init(void)
>> >
>> > 	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>> > 	hypercall_msr.enable = 1;
>> > -	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
>> > -	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>> > +
>> > +	if (hv_root_partition) {
>> > +		struct page *pg;
>> > +		void *src, *dst;
>> > +
>> > +		/*
>> > +		 * Order is important here. We must enable the h...
2020 Sep 15
0
[PATCH RFC v1 07/18] x86/hyperv: extract partition ID from Microsoft Hypervisor if necessary
...ec1ed32023 100644
> --- a/arch/x86/hyperv/hv_init.c
> +++ b/arch/x86/hyperv/hv_init.c
> @@ -30,6 +30,9 @@
>  bool hv_root_partition;
>  EXPORT_SYMBOL_GPL(hv_root_partition);
>
> +u64 hv_current_partition_id;
> +EXPORT_SYMBOL_GPL(hv_current_partition_id);
> +
>  void *hv_hypercall_pg;
>  EXPORT_SYMBOL_GPL(hv_hypercall_pg);
>
> @@ -345,6 +348,26 @@ static struct syscore_ops hv_syscore_ops = {
>  	.resume = hv_resume,
>  };
>
> +void __init hv_get_partition_id(void)
> +{
> +	struct hv_get_partition_id *output_page;
> +	int status;
> +	unsigne...
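The truncated hv_get_partition_id() presumably issues HVCALL_GET_PARTITION_ID with the per-cpu output page and stashes the result in hv_current_partition_id. A sketch of the likely full flow; the status type, result-mask check, and output_page->partition_id field are assumptions based on the surrounding series:

	void __init hv_get_partition_id(void)
	{
		struct hv_get_partition_id *output_page;
		u64 status;
		unsigned long flags;

		/* The hypercall pages are per-cpu; keep us on this CPU. */
		local_irq_save(flags);
		output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
		status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL,
					 output_page);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
			/* No point in proceeding if this fails for the root. */
			pr_err("Failed to get partition ID: 0x%llx\n", status);
			BUG();
		}
		hv_current_partition_id = output_page->partition_id;
		local_irq_restore(flags);
	}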
2020 Sep 15
0
[PATCH RFC v1 08/18] x86/hyperv: handling hypercall page setup for root
...>> > @@ -448,8 +449,29 @@ void __init hyperv_init(void)
>> >> >
>> >> > 	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>> >> > 	hypercall_msr.enable = 1;
>> >> > -	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
>> >> > -	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>> >> > +
>> >> > +	if (hv_root_partition) {
>> >> > +		struct page *pg;
>> >> > +		void *src, *dst;
>> >> > +
>> >> > +		/*
...
2020 Sep 15
0
[PATCH RFC v1 06/18] x86/hyperv: allocate output arg pages if required
...ch/x86/include/asm/mshyperv.h
> index 2a2cc81beac6..f5c62140f28d 100644
> --- a/arch/x86/include/asm/mshyperv.h
> +++ b/arch/x86/include/asm/mshyperv.h
> @@ -63,6 +63,7 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
>  #if IS_ENABLED(CONFIG_HYPERV)
>  extern void *hv_hypercall_pg;
>  extern void __percpu **hyperv_pcpu_input_arg;
> +extern void __percpu **hyperv_pcpu_output_arg;
>
>  static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
>  {

-- 
Vitaly
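A sketch of how the per-cpu output page can be provisioned alongside the existing input page, assuming an hv_cpu_init()-style CPU-online hook and that the root partition allocates an order-1 block so the two pages sit adjacent:

	static int hv_cpu_init(unsigned int cpu)
	{
		void **input_arg, **output_arg;
		struct page *pg;

		/* Root partition needs a second page for hypercall output. */
		pg = alloc_pages(GFP_KERNEL, hv_root_partition ? 1 : 0);
		if (unlikely(!pg))
			return -ENOMEM;

		input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
		*input_arg = page_address(pg);

		if (hv_root_partition) {
			output_arg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
			*output_arg = page_address(pg + 1);
		}

		return 0;
	}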
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...pu, vcpu, gva_n, max_gvas;
 	struct hv_tlb_flush **flush_pcpu;
@@ -59,7 +59,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 	u64 status = U64_MAX;
 	unsigned long flags;

-	trace_hyperv_mmu_flush_tlb_others(cpus, info);
+	trace_hyperv_mmu_flush_tlb_multi(cpus, info);

 	if (!hv_hypercall_pg)
 		goto do_native;
@@ -156,7 +156,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 	if (!(status & HV_HYPERCALL_RESULT_MASK))
 		return;
 do_native:
-	native_flush_tlb_others(cpus, info);
+	native_flush_tlb_multi(cpus, info);
 }

 static u64 hyperv_flush_tlb_others_ex(con...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...pu, vcpu, gva_n, max_gvas;
 	struct hv_tlb_flush **flush_pcpu;
@@ -59,7 +59,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 	u64 status = U64_MAX;
 	unsigned long flags;

-	trace_hyperv_mmu_flush_tlb_others(cpus, info);
+	trace_hyperv_mmu_flush_tlb_multi(cpus, info);

 	if (!hv_hypercall_pg)
 		goto do_native;
@@ -69,6 +69,9 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 	local_irq_save(flags);

+	if (cpumask_test_cpu(smp_processor_id(), cpus))
+		flush_tlb_func_local(info);
+
 	flush_pcpu = (struct hv_tlb_flush **)
 		this_cpu_ptr(hyperv_pcpu_input_arg);...
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for the remote CPUs to finish their work. In addition, there are various small optimizations to avoid
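A hypothetical sketch of the ordering this cover letter describes; the kick/wait helpers below are made up for illustration (the real series plumbs this through the smp_call_function internals), while cpumask_test_cpu() and flush_tlb_func_local() match the hunks quoted above:

	static void flush_tlb_multi_sketch(const struct cpumask *cpus,
					   const struct flush_tlb_info *info)
	{
		/* 1. Ask the remote CPUs to begin flushing; do not wait yet. */
		smp_kick_remote_flush(cpus, info);	/* hypothetical helper */

		/* 2. Flush locally while the remote CPUs work in parallel. */
		if (cpumask_test_cpu(smp_processor_id(), cpus))
			flush_tlb_func_local(info);

		/* 3. Only now wait for the remote CPUs to signal completion. */
		smp_wait_remote_flush(cpus);		/* hypothetical helper */
	}

The win comes from overlapping step 2 with the remote INVLPGs instead of serializing them.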
2019 Jul 19
5
[PATCH v3 0/9] x86: Concurrent TLB flushes
[ Cover-letter is identical to v2, including benchmark results, excluding the change log. ] Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for