2019 May 08
2
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...H__
> +
> +#ifndef __ASSEMBLY__
> +
> +#define sme_me_mask 0ULL
This is rather ugly, but I understand why it's there
> +
> +static inline bool sme_active(void) { return false; }
> +extern bool sev_active(void);
> +
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* S390_MEM_ENCRYPT_H__ */
> +
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3e82f66d5c61..7e3cbd15dcfa 100644
> --- a/arch/s390/mm/init.c
> ...
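These prototypes exist so generic code, most notably swiotlb, can flip bounce-buffer pages between guest-private and shared. A minimal sketch of that calling pattern, assuming the ~v5.2-era swiotlb globals io_tlb_start and io_tlb_nslabs:

    /* Sketch: how the swiotlb core consumes set_memory_decrypted().
     * Once the bounce pool is allocated, every page in it is made
     * shared ("decrypted") so the hypervisor can access it. */
    void __init swiotlb_update_mem_attributes(void)
    {
            void *vaddr = phys_to_virt(io_tlb_start);
            unsigned long bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);

            set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
            memset(vaddr, 0, bytes);        /* shared pool starts zeroed */
    }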
2019 Apr 26
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..7e3cbd15dcfa 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
#i...
2019 Jun 06
0
[PATCH v4 1/8] s390/mm: force swiotlb for protected virtualization
...1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
#i...
2019 Jun 12
0
[PATCH v5 1/8] s390/mm: force swiotlb for protected virtualization
...1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
#i...
2019 May 23
0
[PATCH v2 1/8] s390/mm: force swiotlb for protected virtualization
...1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
#i...
2019 May 29
0
[PATCH v3 1/8] s390/mm: force swiotlb for protected virtualization
...1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,6 +18,7 @@
#i...
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...> +#define sme_me_mask 0ULL
>
> This is rather ugly, but I understand why it's there
>
Nod.
> > +
> > +static inline bool sme_active(void) { return false; }
> > +extern bool sev_active(void);
> > +
> > +int set_memory_encrypted(unsigned long addr, int numpages);
> > +int set_memory_decrypted(unsigned long addr, int numpages);
> > +
> > +#endif /* __ASSEMBLY__ */
> > +
> > +#endif /* S390_MEM_ENCRYPT_H__ */
> > +
> > diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> > index 3e82f66d5c61..7e3cbd15dcfa...
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...bool sme_active(void) { return false; }
> +extern bool sev_active(void);
> +
I noticed this patch always returns false from sme_active(). Is it safe to assume that
whatever fixups are required on x86 to deal with SME do not apply to s390?
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* S390_MEM_ENCRYPT_H__ */
> +
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3e82f66d5c61..7e3cbd15dcfa 100644
> --- a/arch/s390/mm/ini...
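As the rest of the series suggests, SME is an x86-only mechanism, so pinning sme_active() to false just keeps the generic <linux/mem_encrypt.h> users compiling; sev_active() is the hook that actually varies. A sketch of the s390 side, assuming the series' is_prot_virt_guest() helper:

    /* Reconstructed shape of arch/s390/mm/init.c: "SEV active" is
     * reinterpreted as "running as a protected-virtualization guest". */
    bool sev_active(void)
    {
            return is_prot_virt_guest();
    }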
2009 Sep 09
1
[PATCH] SCSI driver for VMware's virtual HBA - V4.
...+ * - _pad should be 0.
> + */
> +
> +typedef struct PVSCSICmdDescAbortCmd {
> + u64 context;
> + u32 target;
> + u32 _pad;
> +} __packed PVSCSICmdDescAbortCmd;
> +
> +/*
> + * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
> + *
> + * Notes:
> + * - reqRingNumPages and cmpRingNumPages must each be a power of two,
> + * - reqRingNumPages and cmpRingNumPages must be non-zero,
> + * - reqRingNumPages and cmpRingNumPages must be less than
> + *   PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
> + */
> +
> +#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES...
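A hedged sketch of the checks those notes imply; the function is illustrative and not part of the driver, and is_power_of_2() is the helper from <linux/log2.h>:

    #include <linux/log2.h>

    /* Validate SETUP_RINGS page counts per the comment above:
     * non-zero, a power of two, and strictly below the maximum. */
    static bool pvscsi_ring_pages_valid(u32 req_pages, u32 cmp_pages)
    {
            if (req_pages == 0 || cmp_pages == 0)
                    return false;
            if (!is_power_of_2(req_pages) || !is_power_of_2(cmp_pages))
                    return false;
            return req_pages < PVSCSI_SETUP_RINGS_MAX_NUM_PAGES &&
                   cmp_pages < PVSCSI_SETUP_RINGS_MAX_NUM_PAGES;
    }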
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...a-mapping.h
> create mode 100644 arch/s390/include/asm/mem_encrypt.h
(...)
> @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
> pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
> }
>
> +int set_memory_encrypted(unsigned long addr, int numpages)
> +{
> + /* also called for the swiotlb bounce buffers, make all pages shared */
> + /* TODO: do ultravisor calls */
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> +
> +int set_memory_decrypted(unsigned long addr, int numpages)
> +{
> + /* also calle...
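The TODO marks where the ultravisor calls later land. A sketch of a plausible final shape, assuming per-page uv_set_shared()/uv_remove_shared() primitives (names patterned on the later postings of this series, so treat them as assumptions here):

    /* "decrypted" = shared with the hypervisor: walk the range one
     * page at a time and ask the ultravisor to share each page. */
    int set_memory_decrypted(unsigned long addr, int numpages)
    {
            int i;

            for (i = 0; i < numpages; i++) {
                    uv_set_shared(addr);
                    addr += PAGE_SIZE;
            }
            return 0;
    }

    /* "encrypted" = guest-private again: unshare each page. */
    int set_memory_encrypted(unsigned long addr, int numpages)
    {
            int i;

            for (i = 0; i < numpages; i++) {
                    uv_remove_shared(addr);
                    addr += PAGE_SIZE;
            }
            return 0;
    }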
2018 Feb 08
0
[PATCH v28 3/4] mm/page_poison: add a function to expose page poison val to kernel modules
...b/include/linux/mm.h
index 1c77d88..d95e5d3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2469,11 +2469,13 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
extern bool page_is_poisoned(struct page *page);
+extern bool page_poison_val_get(u8 *val);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
static inline bool pag...
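A hypothetical consumer of the new hook, assuming the bool return reports whether poisoning is enabled at all (only page_poison_val_get() itself comes from the patch):

    /* Hypothetical module code: fetch the poison byte, e.g. to pass
     * it on to a hypervisor that sanity-checks reported free pages. */
    static int report_poison_val(void)
    {
            u8 poison_val;

            if (!page_poison_val_get(&poison_val))
                    return -ENODEV; /* page poisoning disabled */

            pr_info("page poison value: 0x%02x\n", poison_val);
            return 0;
    }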
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...> > (...)
> >
> > > @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
> > > pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
> > > }
> > >
> > > +int set_memory_encrypted(unsigned long addr, int numpages)
> > > +{
> > > + /* also called for the swiotlb bounce buffers, make all pages shared */
> > > + /* TODO: do ultravisor calls */
> > > + return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> > > +
> > > +int set...
2012 Aug 30
0
[PATCH 08/11] vmci_queue_pair.patch: VMCI queue pair implementation.
...size_t size);
+
+/* The Kernel specific component of the struct vmci_queue structure. */
+struct vmci_queue_kern_if {
+ struct page **page;
+ struct page **headerPage;
+ void *va;
+ struct semaphore __mutex;
+ struct semaphore *mutex;
+ bool host;
+ size_t numPages;
+ bool mapped;
+};
+
+/*
+ * This structure is opaque to the clients.
+ */
+struct vmci_qp {
+ struct vmci_handle handle;
+ struct vmci_queue *produceQ;
+ struct vmci_queue *consumeQ;
+ uint64_t produceQSize;
+ uint64_t consumeQSize;
+ uint32_t peer;
+...
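numPages records how many pages back each queue. A sketch of the arithmetic, assuming the usual VMCI layout of one header page followed by the payload pages (DIV_ROUND_UP is the standard kernel macro):

    /* Illustrative: pages needed for a queue with a queue_size-byte
     * payload, plus one page for the queue header. */
    static u64 vmci_queue_num_pages(u64 queue_size)
    {
            return DIV_ROUND_UP(queue_size, PAGE_SIZE) + 1;
    }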
2019 Apr 26
33
[PATCH 00/10] s390: virtio: support protected virtualization
Enhanced virtualization protection technology may require the use of
bounce buffers for I/O. While support for this was built into the virtio
core, virtio-ccw wasn't changed accordingly.
Some background on technology (not part of this series) and the
terminology used.
* Protected Virtualization (PV):
Protected Virtualization guarantees that non-shared memory of a guest
that operates in PV
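The enforcement itself is small; a sketch assembled from the hunks quoted above (is_prot_virt_guest() and the exact init-time flow are assumptions):

    /* Sketch: a protected-virt guest must bounce all virtio I/O, so
     * force swiotlb on and mark its pool shared before first use. */
    static void __init pv_init(void)
    {
            if (!is_prot_virt_guest())
                    return;

            /* make sure bounce buffers are shared with the hypervisor */
            swiotlb_force = SWIOTLB_FORCE;
            swiotlb_init(1);
            swiotlb_update_mem_attributes();
    }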
2007 Mar 09
5
memory leak in index build?
...field1 field2 field3].each{|fieldname|
fi.add_field fieldname.to_sym,
:index => :yes, :store => :no, :term_vector => :with_positions
}
i = Ferret::Index::IndexWriter.new(:path=>dir, :create=>true,
:field_infos=>fi)
list=Dir["/usr/share/man/*/#{prefix}*.gz"]
numpages=(ARGV.last||list.size).to_i
list[0...numpages].each{|manfile|
all,name,section=*/\A(.*)\.([^.]+)\Z/.match(File.basename(manfile,
".gz"))
tttt=`man #{section} #{name}`.gsub(/.\b/m, '')
i << {
:data=>tttt.to_s,
:name=>name,
:field1=>name...
2018 Feb 08
9
[PATCH v28 0/4] Virtio-balloon: support free page reporting
This patch series is separated from the previous "Virtio-balloon
Enhancement" series. The new feature, VIRTIO_BALLOON_F_FREE_PAGE_HINT,
implemented by this series enables the virtio-balloon driver to report
hints of guest free pages to the host. It can be used to accelerate live
migration of VMs. Here is an introduction of this usage:
Live migration needs to transfer the VM's