search for: local_irq_disable

Displaying 20 results from an estimated 162 matches for "local_irq_disable".

2012 Dec 06
1
Question on local_irq_save/local_irq_restore
Hi, I have some confusion about local_irq_save() and local_irq_restore(). From the definitions, you can see that local_irq_save() calls local_irq_disable(). But why is there no local_irq_enable() in local_irq_restore()? #define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); }) #define local_irq_restore(x) ({ BUILD_BUG_ON(sizeof(x) != sizeof(long)); asm volatile ( "push" __OS " %0 ; popf" __OS...
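
The short answer is that popf restores the entire saved flags word, including the interrupt-enable bit, so an explicit local_irq_enable() would actually be wrong: if interrupts were already off when local_irq_save() ran, the restore must leave them off. Below is a minimal user-space sketch of that semantic; the sim_* names and the eflags_if variable are invented for illustration and are not kernel APIs.

#include <stdio.h>

static unsigned long eflags_if = 1;   /* simulated EFLAGS.IF: 1 = IRQs enabled */

#define sim_irq_save(x)    do { (x) = eflags_if; eflags_if = 0; } while (0)
#define sim_irq_restore(x) do { eflags_if = (x); } while (0)   /* popf-like */

static void inner(void)
{
    unsigned long flags;

    sim_irq_save(flags);      /* IRQs are already off here, flags records IF=0 */
    /* ... critical section ... */
    sim_irq_restore(flags);   /* restores IF=0: interrupts stay off */
}

int main(void)
{
    unsigned long flags;

    sim_irq_save(flags);      /* IF=1 is saved, "interrupts" go off */
    inner();                  /* nesting is safe with save/restore */
    sim_irq_restore(flags);   /* the popf-style restore brings IF=1 back */

    printf("IF after outer restore: %lu (expected 1)\n", eflags_if);
    return 0;
}
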
2011 Nov 23
2
[patch] Initialize xen_vcpu0 before initialize irq_ops
...&= ~(_PAGE_PWT | _PAGE_PCD); __supported_pte_mask |= _PAGE_IOMAP; - /* Don't do the full vcpu_info placement stuff until we have a - possible map and a non-dummy shared_info. */ - per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; local_irq_disable(); early_boot_irqs_disabled = true; -- 1.7.3.4
2007 Apr 18
1
[patch 8/9] Guest page hinting: discarded page list.
...d++; + } + return freed; +} +#endif + /* * Free a 0-order page */ @@ -795,6 +831,16 @@ static void fastcall free_hot_cold_page( struct per_cpu_pages *pcp; unsigned long flags; +#if defined(CONFIG_PAGE_DISCARD_LIST) + if (page_host_discards() && unlikely(PageDiscarded(page))) { + local_irq_disable(); + list_add_tail(&page->lru, + &__get_cpu_var(page_discard_list)); + local_irq_enable(); + return; + } +#endif + arch_free_page(page, 0); if (PageAnon(page)) @@ -2810,6 +2856,10 @@ static int page_alloc_cpu_notify(struct local_irq_disable(); __drain_pages(cpu);...
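
The local_irq_disable()/local_irq_enable() pair around the list_add_tail() is there presumably because the per-CPU discard list can also be reached from interrupt context on the same CPU (page frees can happen from IRQs), so the insertion must not be interrupted halfway. A user-space analogy, purely illustrative, with a blocked signal standing in for a masked interrupt; none of these names are kernel APIs:

#include <signal.h>
#include <stdio.h>

static int list[16];
static int list_len;

static void add_to_list(int v)
{
    list[list_len] = v;      /* two steps: store the element ... */
    list_len++;              /* ... then publish the new length */
}

static void fake_interrupt(int sig)
{
    (void)sig;
    add_to_list(-1);         /* would race with a half-finished insertion */
}

int main(void)
{
    sigset_t irq, old;

    signal(SIGALRM, fake_interrupt);
    sigemptyset(&irq);
    sigaddset(&irq, SIGALRM);

    sigprocmask(SIG_BLOCK, &irq, &old);    /* roughly local_irq_disable() */
    add_to_list(42);                       /* the handler cannot run here */
    sigprocmask(SIG_SETMASK, &old, NULL);  /* roughly local_irq_enable() */

    printf("list_len = %d\n", list_len);
    return 0;
}
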
2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...__volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0) -#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0) -#define local_irq_disable() __asm__ __volatile__("cli": : :"memory") -#define local_irq_enable() __asm__ __volatile__("sti": : :"memory") -/* used in the idle loop; sti takes one instruction cycle to complete */ -#define safe_halt() __asm__ __volatile__("sti; hlt": : :"...
2012 Dec 12
7
[PATCH V5] x86/kexec: Change NMI and MCE handling on kexec path
...d as the hardware NMI latch is currently in effect. + * This means that if NMIs become unlatched (e.g. following a + * non-fatal MCE), the LAPIC will force us back here rather than + * wandering back into regular Xen code. */ - if ( cpu == crashing_cpu ) - return 1; - local_irq_disable(); + switch ( current_local_apic_mode() ) + { + u32 apic_id; - kexec_crash_save_cpu(); + case APIC_MODE_X2APIC: + apic_id = apic_rdmsr(APIC_ID); - __stop_this_cpu(); + apic_wrmsr(APIC_ICR, APIC_DM_NMI | APIC_DEST_PHYSICAL | ((u64)apic_id << 32)); +...
2019 Jul 03
2
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...truct cpumask *cpus, > args = mcs.args; > args->op.arg2.vcpumask = to_cpumask(args->mask); > > - /* Remove us, and any offline CPUS. */ > + /* Flush locally if needed and remove us */ > + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(args->mask))) { > + local_irq_disable(); > + flush_tlb_func_local(info); I think this isn't the correct function for PV guests. In fact it should be much easier: just don't clear our own cpu from the mask; that's all that's needed. The hypervisor is just fine having the current cpu in the mask and it will do the r...
2007 Apr 18
2
[RFC, PATCH 14/24] i386 Vmi reboot fixes
...=============================================================== --- linux-2.6.16-rc5.orig/arch/i386/kernel/process.c 2006-03-08 11:38:06.000000000 -0800 +++ linux-2.6.16-rc5/arch/i386/kernel/process.c 2006-03-08 11:38:09.000000000 -0800 @@ -156,7 +156,7 @@ static inline void play_dead(void) */ local_irq_disable(); while (1) - halt(); + shutdown_halt(); } #else static inline void play_dead(void) Index: linux-2.6.16-rc5/arch/i386/kernel/reboot.c =================================================================== --- linux-2.6.16-rc5.orig/arch/i386/kernel/reboot.c 2006-03-08 11:34:53.000000000 -0800 +...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...mon case in which only + * a local TLB flush is needed. Optimize this use-case by calling + * flush_tlb_func_local() directly in this case. + */ + if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + flush_tlb_multi(mm_cpumask(mm), info); + } else { lockdep_assert_irqs_enabled(); local_irq_disable(); flush_tlb_func_local(info); local_irq_enable(); } - if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) - flush_tlb_others(mm_cpumask(mm), info); - put_flush_tlb_info(); put_cpu(); } @@ -890,16 +917,20 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) {...
2019 Jul 02
2
[PATCH v2 0/9] x86: Concurrent TLB flushes
Currently, local and remote TLB flushes are not performed concurrently, which introduces unnecessary overhead - each INVLPG can take 100s of cycles. This patch-set allows TLB flushes to be run concurrently: first request the remote CPUs to initiate the flush, then run it locally, and finally wait for the remote CPUs to finish their work. In addition, there are various small optimizations to avoid
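
A pthread sketch of the ordering described above, purely as an analogy and not the kernel implementation (thread names, the kicked/done flags, and the delays are invented): kick the remote CPUs first, do the local flush while they work, then wait for them.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_REMOTE 3

static atomic_int kicked[NR_REMOTE];   /* "please flush" request per remote CPU */
static atomic_int done[NR_REMOTE];     /* remote CPU finished its flush */

static void *remote_cpu(void *arg)
{
    int id = (int)(long)arg;

    while (!atomic_load(&kicked[id]))
        ;                               /* wait for the flush request */
    usleep(100);                        /* stand-in for the remote TLB flush */
    atomic_store(&done[id], 1);
    return NULL;
}

int main(void)
{
    pthread_t t[NR_REMOTE];
    int i;

    for (i = 0; i < NR_REMOTE; i++)
        pthread_create(&t[i], NULL, remote_cpu, (void *)(long)i);

    for (i = 0; i < NR_REMOTE; i++)     /* 1. ask the remote CPUs to start flushing */
        atomic_store(&kicked[i], 1);

    usleep(100);                        /* 2. local flush overlaps with remote work */

    for (i = 0; i < NR_REMOTE; i++) {   /* 3. wait for the remote CPUs to finish */
        while (!atomic_load(&done[i]))
            ;
        pthread_join(t[i], NULL);
    }

    printf("all flushes complete\n");
    return 0;
}
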
2019 Jul 03
1
[Xen-devel] [PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...s = mcs.args; >>> args->op.arg2.vcpumask = to_cpumask(args->mask); >>> - /* Remove us, and any offline CPUS. */ >>> + /* Flush locally if needed and remove us */ >>> + if (cpumask_test_cpu(smp_processor_id(), to_cpumask(args->mask))) { >>> + local_irq_disable(); >>> + flush_tlb_func_local(info); >> I think this isn't the correct function for PV guests. >> >> In fact it should be much easier: just don't clear our own cpu from the >> mask; that's all that's needed. The hypervisor is just fine having the ...
2006 Sep 29
1
[PATCH] hvm: clear vmxe if vmxoff
hvm: clear vmxe if vmxoff The current Xen code keeps X86_CR4_VMXE set even if VMXON has not been executed. The stop_vmx() code assumes that it is possible to call VMXOFF if X86_CR4_VMXE is set, which is not always true. Calling VMXOFF without VMXON results in an illegal opcode trap, and to avoid this condition, this patch makes sure that X86_CR4_VMXE is only set when VMXON has been called.
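
A minimal sketch of the guard the changelog describes; the vmxon_done flag and the *_sketch names are invented for illustration and are not the actual Xen code. The idea is simply to remember whether VMXON actually ran, and only then treat VMXOFF (and clearing CR4.VMXE) as legal.

#include <stdbool.h>
#include <stdio.h>

static bool vmxon_done;          /* set only after a successful VMXON */

static void start_vmx_sketch(void)
{
    /* real code: set CR4.VMXE, then execute VMXON */
    vmxon_done = true;
}

static void stop_vmx_sketch(void)
{
    if (!vmxon_done) {
        /* VMXOFF without a prior VMXON would raise an illegal-opcode fault */
        printf("skip VMXOFF: VMXON never ran\n");
        return;
    }
    /* real code: execute VMXOFF, then clear CR4.VMXE */
    vmxon_done = false;
}

int main(void)
{
    stop_vmx_sketch();           /* safe even though VMXON never ran */
    start_vmx_sketch();
    stop_vmx_sketch();
    return 0;
}
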
2008 May 08
0
[PATCH] minios: fix and enforce block_domain atomicity
...008 +0100 @@ -201,10 +201,12 @@ void block_domain(s_time_t until) { struct timeval tv; gettimeofday(&tv, NULL); + ASSERT(irqs_disabled()); if(monotonic_clock() < until) { HYPERVISOR_set_timer_op(until); HYPERVISOR_sched_op(SCHEDOP_block, 0); + local_irq_disable(); } }
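
The ASSERT and the re-disable guard against the classic lost-wakeup race: if an event were delivered between the caller's check for pending work and SCHEDOP_block, that wakeup would be missed and the domain would block anyway. The same shape in a pthread analogy, purely illustrative (the event_pending flag and thread names are invented): holding the mutex across the check and the wait plays the role of keeping interrupts disabled until the block itself.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int event_pending;                 /* the "work already arrived" flag */

static void *event_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);            /* cannot fire between check and wait */
    event_pending = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, event_thread, NULL);

    pthread_mutex_lock(&lock);            /* hold the "no events" state ...       */
    while (!event_pending)                /* ... across the check ...             */
        pthread_cond_wait(&cond, &lock);  /* ... and the wait: check and sleep are
                                             atomic, so the signal cannot slip in
                                             between them                          */
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    printf("woke up without losing the event\n");
    return 0;
}
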
2012 Mar 27
0
[PATCH 2/4] x86/hpet: replace disabling of legacy broadcast
...ted out there: remove CPUs from the online map in __stop_this_cpu() (and hence doing so in stop_this_cpu() is no longer needed). Signed-off-by: Jan Beulich <jbeulich@suse.com> --- a/xen/arch/x86/crash.c +++ b/xen/arch/x86/crash.c @@ -61,9 +61,6 @@ static void nmi_shootdown_cpus(void) local_irq_disable(); - if ( hpet_broadcast_is_available() ) - hpet_disable_legacy_broadcast(); - crashing_cpu = smp_processor_id(); local_irq_count(crashing_cpu) = 0; --- a/xen/arch/x86/hpet.c +++ b/xen/arch/x86/hpet.c @@ -783,7 +783,11 @@ void hpet_disable(void) u32 id; if ( !hpe...
2012 Nov 14
0
[PATCH] x86/nmi: self_nmi() should not unconditionally enable interrupts
...86/nmi.c @@ -482,13 +482,14 @@ void nmi_watchdog_tick(struct cpu_user_r * 8-3 and 8-4 in IA32 Reference Manual Volume 3. We send the IPI to * our own APIC ID explicitly which is valid. */ -void self_nmi(void) +void self_nmi(void) { + unsigned long flags; u32 id = get_apic_id(); - local_irq_disable(); + local_irq_save(flags); apic_wait_icr_idle(); apic_icr_write(APIC_DM_NMI | APIC_DEST_PHYSICAL, id); - local_irq_enable(); + local_irq_restore(flags); } static void do_nmi_trigger(unsigned char key)
2013 Feb 10
0
[PATCH 16/16] xen idle: make xen-specific macro xen-specific
...void default_idle(void) EXPORT_SYMBOL(default_idle); #endif -bool set_pm_idle_to_default(void) +#ifdef CONFIG_XEN +bool xen_set_default_idle(void) { bool ret = !!x86_idle; @@ -398,6 +399,7 @@ bool set_pm_idle_to_default(void) return ret; } +#endif void stop_this_cpu(void *dummy) { local_irq_disable(); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 8971a26..2b73b5c 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -561,7 +561,7 @@ void __init xen_arch_setup(void) #endif disable_cpuidle(); disable_cpufreq(); - WARN_ON(set_pm_idle_to_default()); + WARN_ON(xen_s...
2013 Nov 07
0
Xen PV ABI on FPU doesn't match the pvops kernel FPU code, leading to serious memory corruption
...<<<< Here it might open a schedule window /* * does a slab alloc which can sleep */ if (init_fpu(tsk)) { /* * ran out of memory! */ do_group_exit(SIGKILL); return; } local_irq_disable(); <<<< Here it closes } __thread_fpu_begin(tsk); /* * Paranoid restore. send a SIGSEGV if we fail to restore the state. */ if (unlikely(restore_fpu_checking(tsk))) { drop_init_fpu(tsk); force_sig(SIGSEGV, tsk); re...