search for: ____cacheline_aligned

Displaying 20 results from an estimated 32 matches for "____cacheline_aligned".

2015 Apr 02
3
[PATCH 8/9] qspinlock: Generic paravirt support
On Thu, Apr 02, 2015 at 12:28:30PM -0400, Waiman Long wrote:
> On 04/01/2015 05:03 PM, Peter Zijlstra wrote:
> > On Wed, Apr 01, 2015 at 03:58:58PM -0400, Waiman Long wrote:
> >> On 04/01/2015 02:48 PM, Peter Zijlstra wrote:
> >> I am sorry that I don't quite get what you mean here. My point is that in
> >> the hashing step, a cpu will need to scan an empty
2015 Mar 19
4
[PATCH 8/9] qspinlock: Generic paravirt support
...using nr_cpu_ids instead...
+ */
+#define PV_LOCK_HASH_BITS	(2 + NR_CPUS_BITS)
+
+#if PV_LOCK_HASH_BITS < 6
+#undef PV_LOCK_HASH_BITS
+#define PV_LOCK_HASH_BITS	6
+#endif
+
+#define PV_LOCK_HASH_SIZE	(1 << PV_LOCK_HASH_BITS)
+
+static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;
+
+#define PV_HB_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+
+static inline u32 hash_align(u32 hash)
+{
+	return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+static struct qspinlock **pv_hash(struct qspinlock *lock)
+{
+	u32 hash = hash_ptr(lock, PV_LOCK_HASH_BITS);
+	struct pv_hash_b...
2015 Apr 02
0
[PATCH 8/9] qspinlock: Generic paravirt support
...using nr_cpu_ids instead...
+ */
+#define PV_LOCK_HASH_BITS	(2 + NR_CPUS_BITS)
+
+#if PV_LOCK_HASH_BITS < 6
+#undef PV_LOCK_HASH_BITS
+#define PV_LOCK_HASH_BITS	6
+#endif
+
+#define PV_LOCK_HASH_SIZE	(1 << PV_LOCK_HASH_BITS)
+
+static struct pv_hash_bucket __pv_lock_hash[PV_LOCK_HASH_SIZE] ____cacheline_aligned;
+
+#define PV_HB_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_bucket))
+
+static inline u32 hash_align(u32 hash)
+{
+	return hash & ~(PV_HB_PER_LINE - 1);
+}
+
+#define for_each_hash_bucket(hb, off, hash)	\
+	for (hash = hash_align(hash), off = 0, hb = &__pv_lock_hash[hash + off]...
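
Both qspinlock excerpts above hinge on one layout trick: the bucket array is
____cacheline_aligned so that PV_HB_PER_LINE buckets sit in a single cache
line, and hash_align() rounds a hash down to the first bucket of its line so a
probe scans exactly one line at a time. A minimal user-space sketch of that
layout, assuming 64-byte cache lines and using stand-in names (struct bucket,
line_align) rather than the kernel's types:

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_LINE_BYTES 64                     /* assumption: 64-byte lines */

    struct bucket {                                 /* stand-in for pv_hash_bucket */
    	uintptr_t lock;
    	uintptr_t node;
    };

    #define HASH_BITS   8
    #define HASH_SIZE   (1u << HASH_BITS)
    #define HB_PER_LINE (CACHE_LINE_BYTES / sizeof(struct bucket))

    /* Align the array itself, as ____cacheline_aligned does in the patch, so a
     * group of HB_PER_LINE consecutive buckets can never straddle a line. */
    static struct bucket hash_table[HASH_SIZE]
    	__attribute__((aligned(CACHE_LINE_BYTES)));

    /* Mirror of hash_align(): round a hash down to its line's first bucket. */
    static inline uint32_t line_align(uint32_t hash)
    {
    	return hash & ~(uint32_t)(HB_PER_LINE - 1);
    }

    int main(void)
    {
    	/* Scanning buckets line_align(h) .. line_align(h) + HB_PER_LINE - 1
    	 * touches exactly one cache line of hash_table. */
    	printf("%zu buckets/line; hash 13 scans from bucket %u\n",
    	       HB_PER_LINE, line_align(13));
    	(void)hash_table;
    	return 0;
    }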
2020 Aug 24
0
[PATCH v6 47/76] x86/dumpstack/64: Add noinstr version of get_stack_info()
...cpu_entry_stack(smp_processor_id());
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index c49cf594714b..5a85730eb0ca 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -85,7 +85,7 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
 	EPAGERANGE(VC2),
 };
 
-static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
+static bool __always_inline in_exception_stack(unsigned long *stack, struct stack_info *info)
 {
 	unsigned long begin, end, stk = (unsigned long)stack;
 	const struct estack_pages *ep;
@@ -...
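
The estack_pages table in this and the next two excerpts is a per-page
descriptor array covering the exception-stack region, kept
____cacheline_aligned so the hot lookup in in_exception_stack() resolves a
stack pointer with a single array index instead of walking every stack. A
hedged sketch of that lookup shape, with illustrative names and sizes (struct
page_desc, AREA_PAGES) that are not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define AREA_PAGES 8                       /* assumption: 8-page stack area */

    struct page_desc {
    	uint16_t offs;                     /* begin of the covering stack */
    	uint16_t size;                     /* size of that stack */
    	uint8_t  type;                     /* which exception stack, 0 = none */
    };

    /* Cacheline-aligned, like estack_pages, so the table costs as few line
     * fills as possible on the stack-unwind hot path. */
    static const struct page_desc area_pages[AREA_PAGES]
    	__attribute__((aligned(64))) = {
    	[0] = { 0x0000, 0x1000, 1 },       /* e.g. a #DB stack page */
    	[2] = { 0x2000, 0x1000, 2 },       /* e.g. an NMI stack page */
    	/* guard pages stay zeroed: type 0 means "not on any stack" */
    };

    static bool classify(uintptr_t area_base, uintptr_t sp, uint8_t *type)
    {
    	uintptr_t off = sp - area_base;

    	if (off >= ((uintptr_t)AREA_PAGES << PAGE_SHIFT))
    		return false;
    	*type = area_pages[off >> PAGE_SHIFT].type;   /* single indexed load */
    	return *type != 0;
    }

    int main(void)
    {
    	uint8_t t = 0;
    	bool hit = classify(0x1000, 0x1010, &t);

    	printf("hit=%d type=%u\n", hit, t);
    	return 0;
    }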
2020 Apr 28
0
[PATCH v3 45/75] x86/dumpstack/64: Handle #VC exception stacks
...; type <= STACK_TYPE_EXCEPTION_LAST)
 		return exception_stack_names[type - STACK_TYPE_EXCEPTION];
 
+	if (type >= STACK_TYPE_VC && type <= STACK_TYPE_VC_LAST)
+		return vc_stack_name(type);
+
 	return NULL;
 }
@@ -84,6 +88,46 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
 	EPAGERANGE(MCE),
 };
 
+static bool in_vc_exception_stack(unsigned long *stack, struct stack_info *info)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	unsigned long begin, end, stk = (unsigned long)stack;
+	struct cea_vmm_exception_stacks *vc_stacks;
+	struct pt_regs *regs;
+	enum stack_type type;
+	in...
2020 Aug 24
0
[PATCH v6 45/76] x86/sev-es: Allocate and Map IST stack for #VC handler
...ACK_VC2 ]	= "#VC2",
 };
 
 const char *stack_type_name(enum stack_type type)
 {
-	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
 	if (type == STACK_TYPE_IRQ)
 		return "IRQ";
@@ -79,6 +81,8 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
 	EPAGERANGE(NMI),
 	EPAGERANGE(DB),
 	EPAGERANGE(MCE),
+	EPAGERANGE(VC),
+	EPAGERANGE(VC2),
 };
 
 static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
@@ -88,7 +92,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 	struct pt_regs *reg...
2007 Apr 28
3
[PATCH] i386: introduce voyager smp_ops, fix voyager build
...c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -28,7 +28,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
-#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -423,7 +422,7 @@ find_smp_config(void)
 			VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	write_pda(cpu_number, boot_cpu_id);
+	x86_write_percpu(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -436,7 +435,7 @@ smp_store_cpu_info(int id)
 	*c =...
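
The Voyager hunk defines its per-CPU TLB state ____cacheline_aligned so each
CPU's copy owns its cache line and concurrent updates from different CPUs
don't false-share. A reduced sketch of that effect, assuming 64-byte lines and
a stand-in struct rather than the real tlb_state and per-CPU machinery:

    #define CACHE_LINE 64                       /* assumption: 64-byte lines */
    #define NR_CPUS    4                        /* assumption for the sketch */

    /* Stand-in for tlb_state; the trailing aligned attribute plays the role
     * of ____cacheline_aligned on the DEFINE_PER_CPU above. */
    struct tlb_state_like {
    	void *active_mm;
    	int   state;
    } __attribute__((aligned(CACHE_LINE)));

    /* One slot per CPU; alignment pads each slot to a full line, so CPU a
     * writing slot[a] never invalidates the line holding slot[b]. */
    static struct tlb_state_like per_cpu_tlbstate[NR_CPUS];

    _Static_assert(sizeof(struct tlb_state_like) == CACHE_LINE,
    	       "each per-CPU slot occupies exactly one cache line");

    int main(void)
    {
    	per_cpu_tlbstate[0].state = 1;      /* touches only CPU 0's line */
    	return per_cpu_tlbstate[0].state - 1;
    }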
2009 Aug 19
1
[PATCHv4 2/2] vhost_net: a kernel-level virtio server
...s, or timeout. */
+	work_func_t handle_kick;
+
+	/* Last available index we saw. */
+	u16 last_avail_idx;
+
+	/* Last index we used. */
+	u16 last_used_idx;
+
+	/* Outstanding buffers */
+	unsigned int inflight;
+
+	/* Is this blocked? */
+	bool blocked;
+
+	struct iovec iov[VHOST_NET_MAX_SG];
+
+} ____cacheline_aligned;
+
+struct vhost_dev {
+	/* Readers use RCU to access memory table pointer.
+	 * Writers use mutex below. */
+	struct vhost_memory *memory;
+	struct mm_struct *mm;
+	struct vhost_virtqueue *vqs;
+	int nvqs;
+	struct mutex mutex;
+};
+
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *...
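
Note where the attribute hangs in the vhost excerpts: `} ____cacheline_aligned;`
on the closing brace aligns the vhost_virtqueue type itself, so each element of
the vqs array starts on a fresh cache line and two virtqueues driven by
different threads don't false-share. A compact sketch of that trailing form,
with reduced stand-in fields rather than the full vhost layout:

    #include <stdint.h>

    #define SMP_CACHE_BYTES 64                  /* assumption: 64-byte lines */

    /* Trailing attribute on the closing brace, as in the excerpt: it applies
     * to the type, so arrays of it inherit per-element alignment. */
    struct virtqueue_like {
    	uint16_t     last_avail_idx;        /* last available index we saw */
    	uint16_t     last_used_idx;         /* last index we used */
    	unsigned int inflight;              /* outstanding buffers */
    } __attribute__((aligned(SMP_CACHE_BYTES)));

    static struct virtqueue_like vqs[2];    /* each element on its own line */

    _Static_assert(sizeof(struct virtqueue_like) % SMP_CACHE_BYTES == 0,
    	       "elements are padded so none straddles a line boundary");

    int main(void)
    {
    	/* RX and TX queues land 64 bytes apart even though the payload
    	 * fields occupy only 8 bytes. */
    	return (int)((char *)&vqs[1] - (char *)&vqs[0]) - SMP_CACHE_BYTES;
    }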
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
On 03/16/2015 09:16 AM, Peter Zijlstra wrote:
> Implement simple paravirt support for the qspinlock.
>
> Provide a separate (second) version of the spin_lock_slowpath for
> paravirt along with a special unlock path.
>
> The second slowpath is generated by adding a few pv hooks to the
> normal slowpath, but where those will compile away for the native
> case, they expand
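
The changelog quoted above relies on pv hooks that vanish in the native build.
A hedged sketch of that pattern, with hook names merely modeled on the series
(this is not its code): under the paravirt config the hooks are real
functions, otherwise they are empty static inlines the compiler deletes, so
the slowpath containing them compiles to the same code as one without the
calls.

    #ifdef CONFIG_PARAVIRT_SPINLOCKS
    void pv_wait_node(void *node);          /* real versions live elsewhere */
    void pv_kick_node(void *node);
    #else
    static inline void pv_wait_node(void *node) { (void)node; }
    static inline void pv_kick_node(void *node) { (void)node; }
    #endif

    void lock_slowpath(void *node)
    {
    	/* ... enqueue on the wait queue ... */
    	pv_wait_node(node);                 /* compiles away when native */
    	/* ... spin, then take the lock ... */
    	pv_kick_node(node);                 /* compiles away when native */
    }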
2009 Aug 10
6
[PATCH 2/2] vhost_net: a kernel-level virtio server
...s, or timeout. */
+	work_func_t handle_kick;
+
+	/* Last available index we saw. */
+	u16 last_avail_idx;
+
+	/* Last index we used. */
+	u16 last_used_idx;
+
+	/* Outstanding buffers */
+	unsigned int inflight;
+
+	/* Is this blocked? */
+	bool blocked;
+
+	struct iovec iov[VHOST_NET_MAX_SG];
+
+} ____cacheline_aligned;
+
+struct vhost_dev {
+	/* Readers use RCU to access memory table pointer.
+	 * Writers use mutex below. */
+	struct vhost_memory *memory;
+	struct mm_struct *mm;
+	struct vhost_virtqueue *vqs;
+	int nvqs;
+	struct mutex mutex;
+};
+
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *...
2009 Aug 11
2
[PATCHv2 2/2] vhost_net: a kernel-level virtio server
...s, or timeout. */
+	work_func_t handle_kick;
+
+	/* Last available index we saw. */
+	u16 last_avail_idx;
+
+	/* Last index we used. */
+	u16 last_used_idx;
+
+	/* Outstanding buffers */
+	unsigned int inflight;
+
+	/* Is this blocked? */
+	bool blocked;
+
+	struct iovec iov[VHOST_NET_MAX_SG];
+
+} ____cacheline_aligned;
+
+struct vhost_dev {
+	/* Readers use RCU to access memory table pointer.
+	 * Writers use mutex below. */
+	struct vhost_memory *memory;
+	struct mm_struct *mm;
+	struct vhost_virtqueue *vqs;
+	int nvqs;
+	struct mutex mutex;
+};
+
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *...
2009 Aug 13
1
[PATCHv3 2/2] vhost_net: a kernel-level virtio server
...s, or timeout. */
+	work_func_t handle_kick;
+
+	/* Last available index we saw. */
+	u16 last_avail_idx;
+
+	/* Last index we used. */
+	u16 last_used_idx;
+
+	/* Outstanding buffers */
+	unsigned int inflight;
+
+	/* Is this blocked? */
+	bool blocked;
+
+	struct iovec iov[VHOST_NET_MAX_SG];
+
+} ____cacheline_aligned;
+
+struct vhost_dev {
+	/* Readers use RCU to access memory table pointer.
+	 * Writers use mutex below. */
+	struct vhost_memory *memory;
+	struct mm_struct *mm;
+	struct vhost_virtqueue *vqs;
+	int nvqs;
+	struct mutex mutex;
+};
+
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *...