Displaying 20 results from an estimated 61 matches for "540,6".
2020 Jun 23
1
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...++
> 1 file changed, 8 insertions(+)
>
> diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> index cc9993837508..f6a806ba3caa 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
> @@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
> if (!(src & MIGRATE_PFN_MIGRATE))
> goto out;
>
> + if (spage && is_device_private_page(spage)) {
> + paddr = nouveau_dmem_page_addr(spage);
> + *dma_addr = DMA_MAPPI...
2009 Mar 26
1
[PATCH 3/5] lguest: avoid accidental recycling of pgdir pages
...etable(cpu, args->arg1);
+ break;
default:
/* It should be an architecture-specific hypercall. */
if (lguest_arch_do_hcall(cpu, args))
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 65f0b8a..c3bdf0b 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -540,6 +551,13 @@ static void lguest_flush_tlb_kernel(void)
lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}
+/* This routine is called when a process exits, and we're throwing away the
+ * page table. */
+static void lguest_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ lazy_hcall(LHCALL_INVALIDATE_P...
2020 Jun 22
0
[RESEND PATCH 2/3] nouveau: fix mixed normal and device private page migration
...rm/nouveau/nouveau_dmem.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index cc9993837508..f6a806ba3caa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -540,6 +540,12 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
+ if (spage && is_device_private_page(spage)) {
+ paddr = nouveau_dmem_page_addr(spage);
+ *dma_addr = DMA_MAPPING_ERROR;
+ goto done;
+ }
+
d...
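For reference, a minimal sketch of the check this hunk adds: when the migration source is already a device-private page, its device address is reused directly and no DMA mapping is set up. The helper nouveau_dmem_page_addr() and the constants are taken from the diff above; the wrapper function, its signature and the header list are assumptions made for illustration, not the upstream code.

/* Sketch only -- abbreviated from the hunk above, not the upstream function. */
#include <linux/mm.h>           /* is_device_private_page() */
#include <linux/migrate.h>      /* MIGRATE_PFN_MIGRATE */
#include <linux/dma-mapping.h>  /* DMA_MAPPING_ERROR */

static bool source_is_device_private(struct page *spage, unsigned long src,
				     unsigned long *paddr, dma_addr_t *dma_addr)
{
	if (!(src & MIGRATE_PFN_MIGRATE))
		return false;

	/* The source already lives in device memory: reuse its device
	 * address and skip the dma_map_page() path the normal case takes. */
	if (spage && is_device_private_page(spage)) {
		*paddr = nouveau_dmem_page_addr(spage);
		*dma_addr = DMA_MAPPING_ERROR;
		return true;
	}

	return false;
}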
2020 Apr 28
0
[PATCH v3 73/75] x86/sev-es: Support CPU offline/online
..._HOTPLUG_CPU */
+
+#ifdef CONFIG_SMP
+static void __init sev_es_setup_play_dead(void)
+{
+ smp_ops.play_dead = sev_es_play_dead;
+}
+#else
+static inline void sev_es_setup_play_dead(void) { }
+#endif
+
static void __init sev_es_alloc_runtime_data(int cpu)
{
struct sev_es_runtime_data *data;
@@ -540,6 +596,8 @@ void __init sev_es_init_vc_handling(void)
sev_es_setup_vc_stack(cpu);
}
+ sev_es_setup_play_dead();
+
xa_init_flags(&sev_es_cpuid_cache, XA_FLAGS_LOCK_IRQ);
sev_es_cpuid_cache_initialized = true;
--
2.17.1
2020 Apr 28
0
[PATCH v3 38/75] x86/sev-es: Add SEV-ES Feature Detection
...k - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e2b0e2ac07bb..68d75379e06a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -540,6 +540,9 @@ void __init sme_enable(struct boot_params *bp)
if (!(msr & MSR_AMD64_SEV_ENABLED))
return;
+ /* Save SEV_STATUS to avoid reading MSR again */
+ sev_status = msr;
+
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
sev_enabled =...
2020 Jul 24
0
[PATCH v5 38/75] x86/sev-es: Add SEV-ES Feature Detection
...);
}
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e2b0e2ac07bb..68d75379e06a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -540,6 +540,9 @@ void __init sme_enable(struct boot_params *bp)
if (!(msr & MSR_AMD64_SEV_ENABLED))
return;
+ /* Save SEV_STATUS to avoid reading MSR again */
+ sev_status = msr;
+
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
sev_enabled =...
2020 Sep 07
0
[PATCH v7 36/72] x86/sev-es: Add SEV-ES Feature Detection
...D;
}
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e2b0e2ac07bb..68d75379e06a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -540,6 +540,9 @@ void __init sme_enable(struct boot_params *bp)
if (!(msr & MSR_AMD64_SEV_ENABLED))
return;
+ /* Save SEV_STATUS to avoid reading MSR again */
+ sev_status = msr;
+
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
sev_enabled =...
2020 Aug 24
0
[PATCH v6 39/76] x86/sev-es: Add SEV-ES Feature Detection
...);
}
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e2b0e2ac07bb..68d75379e06a 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -540,6 +540,9 @@ void __init sme_enable(struct boot_params *bp)
if (!(msr & MSR_AMD64_SEV_ENABLED))
return;
+ /* Save SEV_STATUS to avoid reading MSR again */
+ sev_status = msr;
+
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
sev_enabled =...
2020 Jun 22
7
[RESEND PATCH 0/3] nouveau: fixes for SVM
These are based on 5.8.0-rc2 and intended for Ben Skeggs' nouveau tree.
I believe the changes can be queued for 5.8-rcX after being reviewed.
These were part of a larger series but I'm resending them separately as
suggested by Jason Gunthorpe.
https://lore.kernel.org/linux-mm/20200619215649.32297-1-rcampbell@nvidia.com/
Note that in order to exercise/test patch 2 here, you will need a
2008 Mar 23
1
[PATCH] allow to change the block size used to handle sparse files
...rintf(F," -n, --dry-run perform a trial run with no changes made\n");
rprintf(F," -W, --whole-file copy files whole (without delta-xfer algorithm)\n");
rprintf(F," -x, --one-file-system don't cross filesystem boundaries\n");
@@ -540,6 +542,7 @@ static struct poptOption long_options[] = {
{"sparse", 'S', POPT_ARG_VAL, &sparse_files, 1, 0, 0 },
{"no-sparse", 0, POPT_ARG_VAL, &sparse_files, 0, 0, 0 },
{"no-S", 0, POPT_ARG_VAL, &spars...
2020 Feb 07
0
[RFC PATCH v7 65/78] KVM: introspection: add KVMI_VCPU_GET_XSAVE
...gt;vcpu, &rpl, &rpl_size);
+
+ err = kvmi_msg_vcpu_reply(job, msg, ec, rpl, rpl_size);
+ kvmi_msg_free(rpl);
+ return err;
+}
+
/*
* These commands are executed on the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_cmd_job'
@@ -540,6 +556,7 @@ static int(*const msg_vcpu[])(const struct kvmi_vcpu_cmd_job *,
[KVMI_VCPU_GET_CPUID] = handle_get_cpuid,
[KVMI_VCPU_GET_INFO] = handle_get_vcpu_info,
[KVMI_VCPU_GET_REGISTERS] = handle_get_registers,
+ [KVMI_VCPU_GET_XSAVE] = handle_vcpu_get_xsave,
[KVM...
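The hunk registers the new handler in a function-pointer dispatch table keyed by command ID, using designated initializers. Below is a minimal, self-contained C illustration of that idiom; the command names and handlers are invented for the example and are not the KVMI API.

/* Illustration of the designated-initializer dispatch-table idiom. */
#include <stdio.h>

enum { CMD_GET_INFO, CMD_GET_XSAVE, CMD_MAX };

typedef int (*cmd_handler)(void);

static int handle_get_info(void)  { puts("get_info");  return 0; }
static int handle_get_xsave(void) { puts("get_xsave"); return 0; }

static cmd_handler const handlers[CMD_MAX] = {
	[CMD_GET_INFO]  = handle_get_info,
	[CMD_GET_XSAVE] = handle_get_xsave,
};

int main(void)
{
	int cmd = CMD_GET_XSAVE;

	/* Commands without a registered handler leave a NULL slot. */
	return handlers[cmd] ? handlers[cmd]() : 1;
}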
2018 Sep 06
2
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...struct vhost_virtqueue *vq = &nvq->vq;
> int ret;
>
> - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
> + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
>
> if (ret < 0 || ret == vq->num)
> return ret;
> @@ -540,6 +574,83 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
> !vhost_vq_avail_empty(vq->dev, vq);
> }
>
> +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
I wonder whether NET_IP_ALIGN makes sense for XDP.
> +
> +static int vhost_net_b...
2023 Jul 07
0
[PATCH drm-next v6 02/13] drm: manager to keep track of GPUs VA mappings
...(&drm_gem_object.resv).
> + * (&drm_gem_object.resv) or a custom lock if one is provided.
> */
> struct {
> struct list_head list;
> +
> +#ifdef CONFIG_LOCKDEP
> + struct lockdep_map *lock_dep_map;
> +#endif
> } gpuva;
>
> /**
> @@ -540,6 +544,26 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
>
> int drm_gem_evict(struct drm_gem_object *obj);
>
> +#ifdef CONFIG_LOCKDEP
> +/*
> + * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
> + * @obj: the &drm_gem_obj...
2000 Oct 24
2
feature request & patch submit: chroot(2) in sshd
...temptypasswords", sEmptyPasswd },
{ "uselogin", sUseLogin },
+ { "usechroot", sUseChroot },
+ { "chrootgroup", sChrootGroup },
{ "randomseed", sRandomSeedFile },
{ "keepalive", sKeepAlives },
{ "allowusers", sAllowUsers },
@@ -540,6 +549,14 @@
case sUseLogin:
intptr = &options->use_login;
goto parse_flag;
+
+ case sUseChroot:
+ intptr = &options->use_chroot;
+ goto parse_flag;
+
+ case sChrootGroup:
+ intptr = &options->chroot_group;
+ goto parse_int;
case sGatewayPorts:
int...
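The excerpt shows the two pieces the sshd patch hooks into: a keyword table mapping config names to opcodes, and a switch that points intptr at the matching options field before jumping to a shared parse_flag or parse_int path. A stripped-down, self-contained sketch of that pattern follows; the structure and field names are simplified stand-ins, not OpenSSH's actual code.

/* Illustration only -- simplified keyword-table + switch dispatch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum opcode { sUseChroot, sChrootGroup, sBadOption };

static const struct {
	const char *name;
	enum opcode code;
} keywords[] = {
	{ "usechroot",   sUseChroot },
	{ "chrootgroup", sChrootGroup },
};

struct options {
	int use_chroot;     /* parsed as a flag */
	int chroot_group;   /* parsed as an int */
};

static void parse_line(struct options *o, const char *key, const char *val)
{
	enum opcode code = sBadOption;
	int *intptr = NULL;
	size_t i;

	for (i = 0; i < sizeof(keywords) / sizeof(keywords[0]); i++)
		if (strcmp(key, keywords[i].name) == 0)
			code = keywords[i].code;

	switch (code) {
	case sUseChroot:
		intptr = &o->use_chroot;
		*intptr = (strcmp(val, "yes") == 0);   /* the "parse_flag" path */
		break;
	case sChrootGroup:
		intptr = &o->chroot_group;
		*intptr = atoi(val);                   /* the "parse_int" path */
		break;
	default:
		fprintf(stderr, "unknown keyword: %s\n", key);
	}
}

int main(void)
{
	struct options o = { 0, 0 };

	parse_line(&o, "usechroot", "yes");
	parse_line(&o, "chrootgroup", "100");
	printf("use_chroot=%d chroot_group=%d\n", o.use_chroot, o.chroot_group);
	return 0;
}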
2018 Sep 06
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...nt get_tx_bufs(struct vhost_net *net,
struct vhost_virtqueue *vq = &nvq->vq;
int ret;
- ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
if (ret < 0 || ret == vq->num)
return ret;
@@ -540,6 +574,83 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
+#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
+
+static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
+ struct iov_iter *from)
+{
+ s...
2018 Sep 12
0
[PATCH net-next V2 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...nt get_tx_bufs(struct vhost_net *net,
struct vhost_virtqueue *vq = &nvq->vq;
int ret;
- ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
if (ret < 0 || ret == vq->num)
return ret;
@@ -540,6 +577,80 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
+#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
+
+static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
+ struct iov_iter *from)
+{
+ s...
2018 Sep 07
0
[PATCH net-next 11/11] vhost_net: batch submitting XDP buffers to underlayer sockets
...->vq;
>> int ret;
>>
>> - ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
>> + ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
>>
>> if (ret < 0 || ret == vq->num)
>> return ret;
>> @@ -540,6 +574,83 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
>> !vhost_vq_avail_empty(vq->dev, vq);
>> }
>>
>> +#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
> I wonder whether NET_IP_ALIGN makes sense for XDP.
XDP is not t...
2014 Apr 30
1
[PATCH 1/2] nouveau: remove cb_dirty, it's never used
...rned was not the real resource's data, this needs to transfer the
* data back to the resource.
*
- * Also marks vbo/cb dirty if the buffer's binding
+ * Also marks vbo dirty based on the buffer's binding
*/
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
@@ -540,8 +540,6 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
/* make sure we invalidate dedicated caches */
if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
nv->vbo_dirty = TRUE;
- if (bind & (PIPE_BIND_CONSTANT_BUFFER))
-...