Displaying 20 results from an estimated 43 matches for "106,18".
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...e __rw_yield(x) barrier()
> +#define SHARED_PROCESSOR 0
> +#endif
> +
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> +#include <asm/qspinlock.h>
> +#else
>  static __always_inline int arch_spin_value_unlocked(arch_spinlock_t
> lock)
>  {
>  	return lock.slock == 0;
> @@ -106,18 +120,6 @@ static inline int
> arch_spin_trylock(arch_spinlock_t *lock)
>   * held.  Conveniently, we have a word in the paca that holds this
>   * value.
>   */
> -
> -#if defined(CONFIG_PPC_SPLPAR)
> -/* We only yield to the hypervisor if we are in shared processor
> mod...
2016 Jun 03
2
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...e __rw_yield(x) barrier()
> +#define SHARED_PROCESSOR 0
> +#endif
> +
> +#ifdef CONFIG_QUEUED_SPINLOCKS
> +#include <asm/qspinlock.h>
> +#else
>  static __always_inline int arch_spin_value_unlocked(arch_spinlock_t
> lock)
>  {
>  	return lock.slock == 0;
> @@ -106,18 +120,6 @@ static inline int
> arch_spin_trylock(arch_spinlock_t *lock)
>   * held.  Conveniently, we have a word in the paca that holds this
>   * value.
>   */
> -
> -#if defined(CONFIG_PPC_SPLPAR)
> -/* We only yield to the hypervisor if we are in shared processor
> mod...
2018 Sep 14
0
[patch 02/11] x86/time: Implement clocksource_arch_init()
...S_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -105,3 +106,18 @@ void __init time_init(void)
{
late_time_init = x86_late_time_init;
}
+
+/*
+ * Sanity check the vdso related archdata content.
+ */
+void clocksource_arch_init(struct clocksource *cs)
+{
+ if (cs->archdata.vclock_mode == VCLOCK_NONE)
+ return;
+
+ if (cs->archdata.vclock_mode >=...
2018 Sep 14
1
[patch 02/11] x86/time: Implement clocksource_arch_init()
...VIRTUAL
> --- a/arch/x86/kernel/time.c
> +++ b/arch/x86/kernel/time.c
> @@ -10,6 +10,7 @@
> *
> */
>
> +#include <linux/clocksource.h>
> #include <linux/clockchips.h>
> #include <linux/interrupt.h>
> #include <linux/irq.h>
> @@ -105,3 +106,18 @@ void __init time_init(void)
> {
> late_time_init = x86_late_time_init;
> }
> +
> +/*
> + * Sanity check the vdso related archdata content.
> + */
> +void clocksource_arch_init(struct clocksource *cs)
> +{
> + if (cs->archdata.vclock_mode == VCLOCK_NONE)
> ...
2018 Sep 17
0
[patch V2 02/11] x86/time: Implement clocksource_arch_init()
...S_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -105,3 +106,18 @@ void __init time_init(void)
{
late_time_init = x86_late_time_init;
}
+
+/*
+ * Sanity check the vdso related archdata content.
+ */
+void clocksource_arch_init(struct clocksource *cs)
+{
+ if (cs->archdata.vclock_mode == VCLOCK_NONE)
+ return;
+
+ if (cs->archdata.vclock_mode >...
2005 Apr 15
0
[PATCH] add cscope support to xen Makefile
...ist.
Signed-off-by: Chris Wright <chrisw@osdl.org>
---
xen/Makefile | 19 +++++++++++--------
1 files changed, 11 insertions(+), 8 deletions(-)
===== xen/Makefile 1.78 vs edited =====
--- 1.78/xen/Makefile 2005-03-28 13:51:31 -08:00
+++ edited/xen/Makefile 2005-04-14 14:25:33 -07:00
@@ -106,15 +106,18 @@ include/asm-$(TARGET_ARCH)/asm-offsets.h
.PHONY: default debug install dist clean delete-unfresh-files TAGS tags
SUBDIRS = arch/$(TARGET_ARCH) common drivers
+define all_sources
+ ( find include/asm-$(TARGET_ARCH) -name SCCS -prune -o -name '*.h' -print; \
+...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
.../* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +120,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* value.
*/
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_s...
2016 Jun 02
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
.../* SPLPAR */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +120,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* value.
*/
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_s...
2016 Jun 03
0
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...> > +#endif
> > +
> > +#ifdef CONFIG_QUEUED_SPINLOCKS
> > +#include <asm/qspinlock.h>
> > +#else
> >  static __always_inline int
> > arch_spin_value_unlocked(arch_spinlock_t
> > lock)
> >  {
> >  	return lock.slock == 0;
> > @@ -106,18 +120,6 @@ static inline int
> > arch_spin_trylock(arch_spinlock_t *lock)
> >   * held.  Conveniently, we have a word in the paca that holds this
> >   * value.
> >   */
> > -
> > -#if defined(CONFIG_PPC_SPLPAR)
> > -/* We only yield to the hypervisor if...
2007 May 21
2
changing definition of paravirt_ops.iret
...VISOR_iret],
+ .iret = xen_iret,
.irq_enable_sysexit = NULL, /* never called */
.load_tr_desc = paravirt_nop,
diff -r e13ec2ed67aa include/asm-i386/irqflags.h
--- a/include/asm-i386/irqflags.h Mon May 21 16:56:20 2007 +0100
+++ b/include/asm-i386/irqflags.h Mon May 21 17:21:37 2007 +0100
@@ -106,7 +106,18 @@ static inline unsigned long __raw_local_
#define DISABLE_INTERRUPTS(clobbers) cli
#define ENABLE_INTERRUPTS(clobbers) sti
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-#define INTERRUPT_RETURN iret
+#define INTERRUPT_RETURN \
+1: popl %fs; \
+2: iret; \
+.pushsection...
2007 May 21
2
changing definition of paravirt_ops.iret
...VISOR_iret],
+ .iret = xen_iret,
.irq_enable_sysexit = NULL, /* never called */
.load_tr_desc = paravirt_nop,
diff -r e13ec2ed67aa include/asm-i386/irqflags.h
--- a/include/asm-i386/irqflags.h Mon May 21 16:56:20 2007 +0100
+++ b/include/asm-i386/irqflags.h Mon May 21 17:21:37 2007 +0100
@@ -106,7 +106,18 @@ static inline unsigned long __raw_local_
#define DISABLE_INTERRUPTS(clobbers) cli
#define ENABLE_INTERRUPTS(clobbers) sti
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-#define INTERRUPT_RETURN iret
+#define INTERRUPT_RETURN \
+1: popl %fs; \
+2: iret; \
+.pushsection...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...barrier()
+#define __spin_wake_cpu(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* value.
*/
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_s...
2016 Apr 28
0
[PATCH] powerpc: enable qspinlock and its virtualization support
...barrier()
+#define __spin_wake_cpu(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* value.
*/
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_s...
2020 Jul 22
0
Re: [PATCH nbdkit] server: Pass the export name through filter .open calls.
...> - if (next (nxdata, readonly) == -1)
> + if (next (nxdata, readonly, exportname) == -1)
> return NULL;
Pre-existing - the log filter should include the exportname somewhere in
its output log. Well, nothing like the present to fix it ;)
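A rough sketch of the suggestion above (assumptions: struct log_handle, example_open and example_log are hypothetical names, not nbdkit's real log-filter internals): capture the client's exportname in the per-connection handle at open time and emit it with each logged line.

/* Hypothetical illustration, not nbdkit source: shows the general shape of
 * recording the exportname in a handle and including it in log output. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct log_handle {
	char *exportname;            /* copy of the client's export name */
};

/* Hypothetical open: stash the exportname for later log lines. */
static struct log_handle *example_open(const char *exportname)
{
	struct log_handle *h = calloc(1, sizeof *h);
	if (h)
		h->exportname = strdup(exportname ? exportname : "");
	return h;
}

/* Hypothetical log writer: emit export='...' alongside the action. */
static void example_log(FILE *log, const struct log_handle *h, const char *act)
{
	fprintf(log, "%s export='%s'\n", act, h->exportname);
}

int main(void)
{
	struct log_handle *h = example_open("export1");
	example_log(stdout, h, "Connect");   /* prints: Connect export='export1' */
	free(h->exportname);
	free(h);
	return 0;
}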
> +++ b/filters/retry/retry.c
> @@ -106,16 +106,18 @@ retry_config (nbdkit_next_config *next, void *nxdata,
>
> struct retry_handle {
> int readonly; /* Save original readonly setting. */
> + const char *exportname; /* Client exportname. */
> unsigned reopens;
> bool open;
>...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...>>> +#ifdef CONFIG_QUEUED_SPINLOCKS
>>> +#include <asm/qspinlock.h>
>>> +#else
>>> static __always_inline int
>>> arch_spin_value_unlocked(arch_spinlock_t
>>> lock)
>>> {
>>> return lock.slock == 0;
>>> @@ -106,18 +120,6 @@ static inline int
>>> arch_spin_trylock(arch_spinlock_t *lock)
>>> * held. Conveniently, we have a word in the paca that holds this
>>> * value.
>>> */
>>> -
>>> -#if defined(CONFIG_PPC_SPLPAR)
>>> -/* We only yie...
2016 Jun 03
3
[PATCH v5 1/6] qspinlock: powerpc support qspinlock
...>>> +#ifdef CONFIG_QUEUED_SPINLOCKS
>>> +#include <asm/qspinlock.h>
>>> +#else
>>> static __always_inline int
>>> arch_spin_value_unlocked(arch_spinlock_t
>>> lock)
>>> {
>>> return lock.slock == 0;
>>> @@ -106,18 +120,6 @@ static inline int
>>> arch_spin_trylock(arch_spinlock_t *lock)
>>> * held. Conveniently, we have a word in the paca that holds this
>>> * value.
>>> */
>>> -
>>> -#if defined(CONFIG_PPC_SPLPAR)
>>> -/* We only yie...
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
change from v4:
BUG FIX. Thanks to Boqun for reporting this issue.
struct __qspinlock has a different layout on big-endian machines.
native_queued_spin_unlock() may write the value to a wrong address; now fixed.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
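The big-endian layout issue mentioned in the changelog above can be illustrated with a small standalone program (an illustrative sketch only; demo_qspinlock is a made-up stand-in, not the kernel's union): when a 32-bit lock word is also viewed as four bytes, the byte carrying the low 8 bits sits at offset 0 on little-endian but offset 3 on big-endian, so a byte-granular unlock that stores through a fixed offset hits the wrong address on big-endian machines.

/* Illustrative sketch only: shows why the "locked" byte offset is
 * endian-dependent. */
#include <stdint.h>
#include <stdio.h>

union demo_qspinlock {
	uint32_t val;        /* whole lock word; low 8 bits act as "locked" */
	uint8_t  bytes[4];   /* the same word viewed as individual bytes */
};

int main(void)
{
	union demo_qspinlock lock = { .val = 1 };   /* set the locked flag */

	/* Find which byte actually carries the flag. */
	for (int i = 0; i < 4; i++)
		if (lock.bytes[i])
			printf("locked byte is at offset %d\n", i);
	/* Prints offset 0 on little-endian, offset 3 on big-endian, which is
	 * why a store through a hard-coded byte offset corrupts the wrong
	 * byte on big-endian machines. */
	return 0;
}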
2016 Jun 02
8
[PATCH v5 0/6] powerPC/pSeries use pv-qpsinlock as the default spinlock implemention
From: root <root at ltcalpine2-lp13.aus.stglabs.ibm.com>
change from v4:
BUG FIX. Thanks to Boqun for reporting this issue.
struct __qspinlock has a different layout on big-endian machines.
native_queued_spin_unlock() may write the value to a wrong address; now fixed.
change from v3:
a big change in [PATCH v4 4/6] pv-qspinlock: powerpc support pv-qspinlock
no other patch changed.
and the patch
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...barrier()
+#define __spin_wake_cpu(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* value.
*/
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_s...
2016 Apr 28
2
[PATCH resend] powerpc: enable qspinlock and its virtualization support
...barrier()
+#define __spin_wake_cpu(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
@@ -106,18 +124,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* held. Conveniently, we have a word in the paca that holds this
* value.
*/
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_s...