Displaying 20 results from an estimated 28 matches for "set_memory_encrypted".
2019 May 08
2
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...M_ENCRYPT_H__
> +#define S390_MEM_ENCRYPT_H__
> +
> +#ifndef __ASSEMBLY__
> +
> +#define sme_me_mask 0ULL
This is rather ugly, but I understand why it's there
> +
> +static inline bool sme_active(void) { return false; }
> +extern bool sev_active(void);
> +
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* S390_MEM_ENCRYPT_H__ */
> +
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3e82f66d5c61..7e3cbd15dcfa 100644...
2019 Apr 26
2
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> +EXPORT_SYMBOL_GPL(sev_active);
Why do you export these? I know x86 exports those as well, but
it shouldn't be needed there either.
2019 Apr 26
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..7e3cbd15dcfa 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s39...
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...> > +#ifndef __ASSEMBLY__
> > +
> > +#define sme_me_mask 0ULL
>
> This is rather ugly, but I understand why it's there
>
Nod.
> > +
> > +static inline bool sme_active(void) { return false; }
> > +extern bool sev_active(void);
> > +
> > +int set_memory_encrypted(unsigned long addr, int numpages);
> > +int set_memory_decrypted(unsigned long addr, int numpages);
> > +
> > +#endif /* __ASSEMBLY__ */
> > +
> > +#endif /* S390_MEM_ENCRYPT_H__ */
> > +
> > diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> >...
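For context (not part of the quoted patch): the otherwise pointless sme_me_mask define is needed because the generic <linux/mem_encrypt.h> helpers reference that symbol once an architecture selects ARCH_HAS_MEM_ENCRYPT. A rough sketch of that generic fallback as it looked around the time of this series, reconstructed from memory and for illustration only:

/* include/linux/mem_encrypt.h, approximate shape circa v5.1 */
#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
#include <asm/mem_encrypt.h>	/* the arch must supply sme_me_mask, sme_active(), sev_active() */
#else
#define sme_me_mask	0ULL
static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }
#endif

/* generic code tests sme_me_mask directly, hence the 0ULL stub on s390 */
static inline bool mem_encrypt_active(void)
{
	return sme_me_mask;
}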
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...e_me_mask 0ULL
> +
> +static inline bool sme_active(void) { return false; }
> +extern bool sev_active(void);
> +
I noticed this patch always returns false for sme_active. Is it safe to assume that
whatever fixups are required on x86 to deal with sme do not apply to s390?
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* S390_MEM_ENCRYPT_H__ */
> +
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 3e82f66d5c61..7e3cbd15dcfa 1...
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...create mode 100644 arch/s390/include/asm/dma-mapping.h
> create mode 100644 arch/s390/include/asm/mem_encrypt.h
(...)
> @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
> pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
> }
>
> +int set_memory_encrypted(unsigned long addr, int numpages)
> +{
> + /* also called for the swiotlb bounce buffers, make all pages shared */
> + /* TODO: do ultravisor calls */
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> +
> +int set_memory_decrypted(unsigned long addr, int numpa...
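The RFC stubs above only mark where the sharing has to happen. A minimal sketch of what a filled-in s390 implementation could look like, assuming per-page ultravisor share/unshare helpers; uv_set_shared()/uv_remove_shared() here stand in for whatever the series ends up providing:

/* sketch only: walk the range page by page and ask the ultravisor to (un)share it */
int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make the pages shared so the hypervisor may access them (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; i++) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* revert the pages to guest-private state (swiotlb teardown, dma_free) */
	for (i = 0; i < numpages; i++) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}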
2019 Apr 29
1
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
On 29.04.19 15:59, Halil Pasic wrote:
> On Fri, 26 Apr 2019 12:27:11 -0700
> Christoph Hellwig <hch at infradead.org> wrote:
>
>> On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
>>> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
>>
>>> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
>>
>>> +EXPORT_SYMBOL_GPL(sev_active);
>>
>> Why do you export these? I know x86 exports those as well, but
>> it shouldn't be needed there either.
>>
>
> I export these to be in line...
2019 Jun 06
0
[PATCH v4 1/8] s390/mm: force swiotlb for protected virtualization
...arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s39...
2019 Jun 12
0
[PATCH v5 1/8] s390/mm: force swiotlb for protected virtualization
...arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s39...
2019 May 23
0
[PATCH v2 1/8] s390/mm: force swiotlb for protected virtualization
...arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s39...
2019 May 29
0
[PATCH v3 1/8] s390/mm: force swiotlb for protected virtualization
...arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask 0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* S390_MEM_ENCRYPT_H__ */
+
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 14d1eae9fe43..f0bee6af3960 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s39...
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...ch/s390/include/asm/mem_encrypt.h
> >
> > (...)
> >
> > > @@ -126,6 +129,45 @@ void mark_rodata_ro(void)
> > > pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
> > > }
> > >
> > > +int set_memory_encrypted(unsigned long addr, int numpages)
> > > +{
> > > + /* also called for the swiotlb bounce buffers, make all pages shared */
> > > + /* TODO: do ultravisor calls */
> > > + return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> >...
2019 Apr 29
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
On Fri, 26 Apr 2019 12:27:11 -0700
Christoph Hellwig <hch at infradead.org> wrote:
> On Fri, Apr 26, 2019 at 08:32:39PM +0200, Halil Pasic wrote:
> > +EXPORT_SYMBOL_GPL(set_memory_encrypted);
>
> > +EXPORT_SYMBOL_GPL(set_memory_decrypted);
>
> > +EXPORT_SYMBOL_GPL(sev_active);
>
> Why do you export these? I know x86 exports those as well, but
> it shouldn't be needed there either.
>
I export these to be in line with the x86 implementation (which...
2019 Apr 26
33
[PATCH 00/10] s390: virtio: support protected virtualization
Enhanced virtualization protection technology may require the use of
bounce buffers for I/O. While support for this was built into the virtio
core, virtio-ccw wasn't changed accordingly.
Some background on technology (not part of this series) and the
terminology used.
* Protected Virtualization (PV):
Protected Virtualization guarantees that non-shared memory of a guest
that operates in PV
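The "force swiotlb" part of the series boils down to: a PV guest must route all DMA through bounce buffers that have been shared with the hypervisor. A hedged sketch of the arch hook that achieves this, using the is_prot_virt_guest() predicate introduced by the series (the exact function name and call site in the merged patch may differ):

#include <linux/swiotlb.h>
#include <asm/uv.h>	/* is_prot_virt_guest(), added by this series */

/* sketch: called early from mem_init() to force DMA through shared swiotlb buffers */
static void __init pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure the swiotlb bounce buffer pool itself is shared (decrypted) */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}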
2020 Sep 15
0
[PATCH 15/18] dma-mapping: add a new dma_alloc_pages API
...nsigned int page_order = get_order(size);
+ void *vaddr = page_address(page);
+
+ /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+ if (dma_should_free_from_pool(dev, 0) &&
+ dma_free_from_pool(dev, vaddr, size))
+ return;
+
+ if (force_dma_unencrypted(dev))
+ set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+
+ dma_free_contiguous(dev, page, size);
+}
+
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index e71...
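The quoted free path re-encrypts (unshares) the pages before handing them back; the allocation path has to do the inverse before the device may touch the memory. A simplified, hypothetical sketch in dma-direct style (error handling omitted; helper names as available inside kernel/dma/direct.c):

/* sketch: allocation-side counterpart of the free path quoted above */
static void *dma_direct_alloc_pages_sketch(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	void *vaddr;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)
		return NULL;

	vaddr = page_address(page);
	/* a protected/encrypted guest must share the buffer before the device uses it */
	if (force_dma_unencrypted(dev))
		set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return vaddr;
}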
2020 Aug 19
0
[PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc, free}_pages
...um dma_data_direction dir)
+{
+ unsigned int page_order = get_order(size);
+
+ /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+ if (dma_should_free_from_pool(dev, 0) &&
+ dma_free_from_pool(dev, cpu_addr, size))
+ return;
+
+ if (force_dma_unencrypted(dev))
+ set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+
+ dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
+}
+
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
diff --git a/kernel/dma/mapping.c...
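Both dma-mapping excerpts key off force_dma_unencrypted(); the whole point of the s390 protected-virtualization work is to make that predicate return true for PV guests. A minimal sketch of such an arch override (the dev-taking signature matches the 2020 snippets above; the original 2019 s390 patch used a parameterless variant):

/* sketch: arch override telling dma-direct that DMA buffers must be made "decrypted" (shared) */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();	/* predicate added by the s390 PV series */
}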
2019 Jun 12
21
[PATCH v5 0/8] s390: virtio: support protected virtualization
Enhanced virtualization protection technology may require the use of
bounce buffers for I/O. While support for this was built into the virtio
core, virtio-ccw wasn't changed accordingly.
Some background on technology (not part of this series) and the
terminology used.
* Protected Virtualization (PV):
Protected Virtualization guarantees that non-shared memory of a guest
that operates in PV