Displaying 20 results from an estimated 23 matches for "479,9".
2012 Sep 11
2
[PATCH 6/8] ns16550: PCI initialization adjustments
...beulich@suse.com>
--- a/xen/drivers/char/ns16550.c
+++ b/xen/drivers/char/ns16550.c
@@ -468,7 +468,6 @@ static int __init check_existence(struct
static int
pci_uart_config (struct ns16550 *uart, int skip_amt, int bar_idx)
{
- uint16_t class;
uint32_t bar, len;
int b, d, f;
@@ -479,9 +478,15 @@ pci_uart_config (struct ns16550 *uart, i
{
for ( f = 0; f < 0x8; f++ )
{
- class = pci_conf_read16(0, b, d, f, PCI_CLASS_DEVICE);
- if ( class != 0x700 )
+ switch ( pci_conf_read16(0, b, d, f, PCI_CLASS_...
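The archive truncates the hunk at the switch statement. As a hedged, standalone sketch of the visible pattern only (the equality test on the class register rewritten as a switch), with pci_conf_read16() stubbed; any case label beyond 0x0700 is an assumption, not the patch's actual cases:

#include <stdint.h>
#include <stdio.h>

#define PCI_CLASS_DEVICE 0x0a

/* Stub: in Xen this reads a 16-bit PCI config register of seg/bus/dev/func. */
static uint16_t pci_conf_read16(int seg, int b, int d, int f, int reg)
{
    (void)seg; (void)b; (void)d; (void)f; (void)reg;
    return 0x0700;                  /* pretend we found a serial controller */
}

int main(void)
{
    int b = 0, d = 0, f = 0;

    switch ( pci_conf_read16(0, b, d, f, PCI_CLASS_DEVICE) )
    {
    case 0x0700:                    /* simple communication controller (UART) */
        printf("candidate UART at %02x:%02x.%x\n", b, d, f);
        break;
    default:                        /* any other class: not a UART, skip it */
        break;
    }
    return 0;
}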
2010 May 14
4
Dovecot 2.0beta5 Quota/Dict
I cannot use quota/dict; when I configure it, this error comes out.
If the configuration is wrong, nothing happens at all, but if the
fields and names are OK, this happens:
> May 14 17:03:40 auth: Debug: Loading modules from directory:
> /usr/lib/dovecot/auth
> May 14 17:03:40 auth: Debug: master in: USER 1 sfrique at nti.uff.br
> service=lda
> May 14 17:03:40 auth:
2019 Mar 21
0
[PATCH] gpu/nouveau: empty chunk do not have a buffer object associated with them.
.../* FIXME handle pin failure */
WARN_ON(ret);
}
- list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- /* FIXME handle pin failure */
- WARN_ON(ret);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -479,9 +474,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
nouveau_bo_unpin(chunk->bo);
}
- list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
- nouveau_bo_unpin(chunk->bo);
- }
mutex_unlock(&...
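The patch's point is that entries on chunk_empty have no buffer object, so pinning or unpinning them would dereference NULL. A minimal standalone sketch of that invariant, using simplified stand-in types rather than the real nouveau structures:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct demo_bo    { int pinned; };
struct demo_chunk { struct demo_bo *bo; };   /* NULL for an empty chunk */

static void demo_pin(struct demo_chunk *chunk)
{
    assert(chunk->bo != NULL);   /* empty chunks must never reach here */
    chunk->bo->pinned = 1;
}

int main(void)
{
    struct demo_bo bo = { 0 };
    struct demo_chunk populated = { &bo };
    struct demo_chunk empty     = { NULL };

    demo_pin(&populated);        /* fine: the chunk has a buffer object */
    (void)empty;                 /* never handed to demo_pin(), as in the patch */
    printf("pinned=%d\n", bo.pinned);
    return 0;
}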
2020 Oct 26
0
Re: unable to migrate: virPortAllocatorSetUsed:299 : internal error: Failed to reserve port 49153
...the first iteration of the "for (i = 0; i < vm->def->ndisks; i++)" loop.
I think this should be the fix:
diff --git i/src/qemu/qemu_migration.c w/src/qemu/qemu_migration.c
index 2f5d61f8e7..6f764b0c73 100644
--- i/src/qemu/qemu_migration.c
+++ w/src/qemu/qemu_migration.c
@@ -479,9 +479,11 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto cleanup;
- if (!server_started &&
- qemuMonitorNBDServerStart(priv->mon, &server, tls_alias) < 0)
- goto exit_monitor;
+...
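The replacement lines are cut off, but the visible context suggests the fix starts the NBD server at most once across the per-disk loop, tracked by server_started. A hedged, standalone reconstruction of that guard pattern, with qemuMonitorNBDServerStart() stubbed:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for qemuMonitorNBDServerStart(); returns 0 on success. */
static int nbd_server_start(void)
{
    printf("NBD server started\n");
    return 0;
}

int main(void)
{
    bool server_started = false;
    int i;

    for (i = 0; i < 3; i++) {        /* one iteration per disk */
        if (!server_started) {
            if (nbd_server_start() < 0)
                return 1;
            server_started = true;   /* never try to start it twice */
        }
        printf("export disk %d\n", i);
    }
    return 0;
}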
2020 Apr 14
0
[PATCH v2 23/33] iommu/mediatek-v1 Convert to probe/release_device() call-backs
...ev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void mtk_iommu_remove_device(struct device *dev)
+static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
@@ -479,9 +470,6 @@ static void mtk_iommu_remove_device(struct device *dev)
return;
data = dev_iommu_priv_get(dev);
- iommu_device_unlink(&data->iommu, dev);
-
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -534,8 +522,10 @@ static const struct iommu_ops mtk_iommu_ops = {...
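A standalone sketch of the resulting ops shape: the driver exposes probe/release callbacks and leaves linking and group setup to the IOMMU core, so release_device keeps nothing but driver-private teardown. All types here are simplified stand-ins, not the kernel's:

#include <stdio.h>

struct device       { const char *name; };
struct iommu_device { int dummy; };

struct iommu_ops {
    struct iommu_device *(*probe_device)(struct device *dev);
    void                 (*release_device)(struct device *dev);
};

static struct iommu_device mtk_iommu;

/* The driver only reports its iommu_device; the core now does the
 * linking and group setup that remove_device used to undo by hand. */
static struct iommu_device *mtk_probe_device(struct device *dev)
{
    printf("probe %s\n", dev->name);
    return &mtk_iommu;
}

/* Only driver-private teardown remains (the real driver keeps just
 * iommu_fwspec_free(dev) here). */
static void mtk_release_device(struct device *dev)
{
    printf("release %s\n", dev->name);
}

static const struct iommu_ops demo_ops = {
    .probe_device   = mtk_probe_device,
    .release_device = mtk_release_device,
};

int main(void)
{
    struct device dev = { "dev0" };
    demo_ops.probe_device(&dev);
    demo_ops.release_device(&dev);
    return 0;
}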
2010 Jan 26
1
[PATCH] drm/nouveau: Add module options to disable acceleration.
...aph.init(dev);
+ if (ret)
+ goto out_fb;
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- goto out_graph;
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ goto out_graph;
+ }
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
@@ -479,9 +483,11 @@ nouveau_card_init(struct drm_device *dev)
out_irq:
drm_irq_uninstall(dev);
out_fifo:
- engine->fifo.takedown(dev);
+ if (!nouveau_noaccel)
+ engine->fifo.takedown(dev);
out_graph:
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel)
+ engine->graph.takedown(dev);...
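The idea visible in the hunks is that the same nouveau_noaccel flag guards both the engine init calls and the matching takedowns on the error-unwind path, keeping the two paths symmetric. A hedged standalone sketch (in the driver the flag would come from a module parameter; here it is a plain int):

#include <stdio.h>

static int nouveau_noaccel = 0;  /* stand-in for the new module parameter */

static int  graph_init(void)     { printf("PGRAPH init\n"); return 0; }
static void graph_takedown(void) { printf("PGRAPH takedown\n"); }
static int  fifo_init(void)      { printf("PFIFO init\n"); return 0; }

int main(void)
{
    if (!nouveau_noaccel) {
        if (graph_init())
            return 1;            /* nothing to unwind yet */
        if (fifo_init())
            goto out_graph;      /* PFIFO failed: unwind PGRAPH only */
    }
    printf("card up%s\n", nouveau_noaccel ? " (no acceleration)" : "");
    return 0;

out_graph:
    if (!nouveau_noaccel)        /* the unwind path uses the same guard */
        graph_takedown();
    return 1;
}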
2014 May 16
2
[PATCH] clk: allow config option to enable reclocking
...au_clock_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
struct nouveau_clocks *clocks,
+ bool allow_reclock,
int length, void **object)
{
struct nouveau_device *device = nv_device(parent);
@@ -478,6 +479,9 @@ nouveau_clock_create_(struct nouveau_object *parent,
ret = nouveau_pstate_new(clk, idx++);
} while (ret == 0);
+ clk->allow_reclock =
+ nouveau_boolopt(device->cfgopt, "NvReclock", allow_reclock);
+
mode = nouveau_stropt(device->cfgopt, "NvClkMode", &...
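The visible hunk reads the NvReclock config option with the per-chipset allow_reclock value as the default. A standalone sketch of those semantics, with nouveau_boolopt() crudely stubbed via strstr():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Crude stand-in for nouveau_boolopt(): the real helper parses values
 * like "NvReclock=1"; here mere presence of the name enables it. */
static bool nouveau_boolopt(const char *cfgopt, const char *name, bool def)
{
    if (cfgopt && strstr(cfgopt, name))
        return true;
    return def;
}

int main(void)
{
    const char *cfgopt = "NvReclock";   /* e.g. taken from the config option */
    bool allow_reclock = false;         /* per-chipset driver default */

    allow_reclock = nouveau_boolopt(cfgopt, "NvReclock", allow_reclock);
    printf("reclocking %s\n", allow_reclock ? "enabled" : "disabled");
    return 0;
}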
2020 Oct 12
3
unable to migrate: virPortAllocatorSetUsed:299 : internal error: Failed to reserve port 49153
On libvirt 6.8.0 and qemu 5.1.0, when trying to live migrate, an
"internal error: Failed to reserve port" error is received and the
migration does not succeed:
virsh # migrate cartridge qemu+tls://ratchet.lan/system --live
--persistent --undefinesource --copy-storage-all --verbose
error: internal error: Failed to reserve port 49153
virsh #
On target host with debug logs, nothing
2013 Jul 22
0
[RFC PATCH] Support running nested in a Mir compositor
...aster: %s\n", strerror(errno));
+ if (!xorgMir) {
+ ret = drmSetMaster(pNv->dev->fd);
+ if (ret)
+ ErrorF("Unable to get master: %s\n", strerror(errno));
+ }
if (XF86_CRTC_CONFIG_PTR(pScrn)->num_crtc && !xf86SetDesiredModes(pScrn))
return FALSE;
@@ -452,6 +479,9 @@ NVLeaveVT(VT_FUNC_ARGS_DECL)
xf86DrvMsg(pScrn->scrnIndex, X_INFO, "NVLeaveVT is called.\n");
+ if (xorgMir)
+ return;
+
ret = drmDropMaster(pNv->dev->fd);
if (ret && errno != EIO && errno != ENODEV)
ErrorF("Error dropping master: %i(%m)\n&q...
2014 May 17
0
[PATCH] clk: allow config option to enable reclocking
...struct nouveau_oclass *oclass,
> struct nouveau_clocks *clocks,
> + bool allow_reclock,
> int length, void **object)
> {
> struct nouveau_device *device = nv_device(parent);
> @@ -478,6 +479,9 @@ nouveau_clock_create_(struct nouveau_object *parent,
> ret = nouveau_pstate_new(clk, idx++);
> } while (ret == 0);
>
> + clk->allow_reclock =
> + nouveau_boolopt(device->cfgopt, "NvReclock",
allow_reclock);
> +
>...
2019 Mar 21
3
Nouveau dmem NULL Pointer deref (SVM)
Hi,
just for your information, and maybe for some help: with 5.1rc1 and SVM
enabled I see the following backtrace [1] when the nouveau card (reverse
prime) goes to sleep. For now I have papered over it with [2], which leaves
me with userspace hangs. Any pointers where to look for the actual culprit?
PS: Card is: nouveau 0000:01:00.0: NVIDIA GP106 (136000a1)
Greetings,
Tobias
[1]:
BUG: unable
2010 Feb 01
4
[PATCH 1/3] Introduce nouveau_bo_wait for waiting on a BO with a GPU channel
nouveau_bo_wait will make the GPU channel wait for fence if possible,
otherwise falling back to waiting with the CPU using ttm_bo_wait.
The nouveau_fence_sync function currently returns -ENOSYS, and is
the focus of the next patch.
Signed-off-by: Luca Barbieri <luca at luca-barbieri.com>
---
drivers/gpu/drm/nouveau/nouveau_bo.c | 68 ++++++++++++++++++++++++++++++-
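As described above, the helper prefers a GPU-side wait (making the channel wait on the fence) and falls back to a CPU wait when inter-channel sync is unavailable, which at this point in the series means nouveau_fence_sync() returning -ENOSYS. A standalone sketch of that fallback, with all functions stubbed:

#include <errno.h>
#include <stdio.h>

/* GPU-side wait: make the channel wait on the fence. Returns -ENOSYS
 * while inter-channel sync is unimplemented, as the cover letter says. */
static int fence_sync_on_channel(void)
{
    return -ENOSYS;
}

/* CPU-side wait, standing in for the ttm_bo_wait() path. */
static int cpu_wait(void)
{
    printf("falling back to CPU wait\n");
    return 0;
}

static int bo_wait(void)
{
    int ret = fence_sync_on_channel();
    if (ret != -ENOSYS)
        return ret;            /* GPU wait queued, or a real error */
    return cpu_wait();
}

int main(void)
{
    return bo_wait();
}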
2019 Sep 08
7
[PATCH v6 0/5] iommu/amd: Convert the AMD iommu driver to the dma-iommu api
Convert the AMD iommu driver to the dma-iommu api. Remove the iova
handling and reserve region code from the AMD iommu driver.
Change-log:
V6:
-add more details to the description of patch 001-iommu-amd-Remove-unnecessary-locking-from-AMD-iommu-.patch
-rename handle_deferred_device to iommu_dma_deferred_attach
-fix double tabs in 0003-iommu-dma-iommu-Handle-deferred-devices.patch
V5:
-Rebase on
2020 Apr 14
35
[PATCH v2 00/33] iommu: Move iommu_group setup to IOMMU core code
Hi,
here is the second version of this patch-set. The first version with
some more introductory text can be found here:
https://lore.kernel.org/lkml/20200407183742.4344-1-joro at 8bytes.org/
Changes v1->v2:
* Rebased to v5.7-rc1
* Re-wrote the arm-smmu changes as suggested by Robin Murphy
* Re-worked the Exynos patches to hopefully not break the
driver anymore
* Fixed a missing
2020 Apr 29
35
[PATCH v3 00/34] iommu: Move iommu_group setup to IOMMU core code
Hi,
here is the third version of this patch-set. Older versions can be found
here:
v1: https://lore.kernel.org/lkml/20200407183742.4344-1-joro at 8bytes.org/
(Has some more introductory text)
v2: https://lore.kernel.org/lkml/20200414131542.25608-1-joro at 8bytes.org/
Changes v2 -> v3:
* Rebased v5.7-rc3
* Added a missing iommu_group_put() as reported by Lu Baolu.
* Added a
2020 Apr 07
41
[RFC PATCH 00/34] iommu: Move iommu_group setup to IOMMU core code
Hi,
here is a patch-set to remove all calls of iommu_group_get_for_dev() from
the IOMMU drivers and move the per-device group setup and default domain
allocation into the IOMMU core code.
This eliminates some ugly back and forth between IOMMU core code and the
IOMMU drivers, where the driver called iommu_group_get_for_dev() which itself
called back into the driver.
The patch-set started as a
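A minimal standalone sketch of the flow the series establishes: the core walks device probe and then performs group setup and default-domain allocation itself, so the driver no longer calls iommu_group_get_for_dev() (which used to call back into the driver). Names and types are simplified stand-ins:

#include <stdio.h>

struct device { const char *name; };

/* Driver side: just report the device; no group handling any more. */
static int driver_probe_device(struct device *dev)
{
    printf("driver probes %s\n", dev->name);
    return 0;
}

/* Core side: owns per-device group setup and default domain allocation,
 * instead of the driver requesting them and being called back. */
static int core_probe_device(struct device *dev)
{
    if (driver_probe_device(dev))
        return -1;
    printf("core sets up group and default domain for %s\n", dev->name);
    return 0;
}

int main(void)
{
    struct device dev = { "dev0" };
    return core_probe_device(&dev);
}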
2019 Jul 17
17
[PATCH V3 00/15] Packed virtqueue support for vhost
Hi all:
This series implements packed virtqueues, which were described
at [1]. In this version we try to address the performance regression
seen with V2. The root cause is that packed virtqueues need more
userspace memory accesses, which turn out to be very expensive.
Thanks to the help of 7f466032dc9e ("vhost: access vq metadata
through kernel virtual address"), such overhead could be
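A hedged standalone sketch of the idea credited to 7f466032dc9e above: map the vq metadata once and read it through a kernel pointer instead of paying a userspace access per read. copy_from_user() and the pinned mapping are both stubbed here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t user_ring[4] = { 1, 2, 3, 4 };   /* pretend: userspace memory */

/* Slow path: one "userspace access" per read; memcpy() stands in
 * for copy_from_user(). */
static uint16_t read_via_uaccess(int i)
{
    uint16_t v;
    memcpy(&v, &user_ring[i], sizeof(v));
    return v;
}

int main(void)
{
    /* Fast path: translate/pin once, then plain loads through the mapping. */
    const uint16_t *kva = user_ring;             /* stands in for the kernel VA */
    uint32_t sum = 0;
    int i;

    for (i = 0; i < 4; i++)
        sum += kva[i];
    printf("sum=%u, slow-path sample=%u\n",
           (unsigned)sum, (unsigned)read_via_uaccess(0));
    return 0;
}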