search for: usleep_range

Displaying 19 results from an estimated 62 matches for "usleep_range".

2014 Mar 24
2
[PATCH 06/12] drm/nouveau/ibus: add GK20A support
...;subdev/ibus.h> > + > +struct nvea_ibus_priv { > + struct nouveau_ibus base; > +}; > + > +static void > +nvea_ibus_init_priv_ring(struct nvea_ibus_priv *priv) > +{ > + nv_mask(priv, 0x137250, 0x3f, 0); > + > + nv_mask(priv, 0x000200, 0x20, 0); > + udelay(20); usleep_range()? > +static void > +nvea_ibus_intr(struct nouveau_subdev *subdev) > +{ [...] > + /* Acknowledge interrupt */ > + nv_mask(priv, 0x12004c, 0x2, 0x2); > + > + while (--retry >= 0) { > + command = nv_rd32(priv, 0x12004c) & 0x3f; > + if (command == 0) > + brea...
2019 Sep 06
2
[PATCH 08/18] virtiofs: Drain all pending requests during ->remove time
...> +	WARN_ON(fsvq->in_flight < 0);
> +
> +	/* Wait for in flight requests to finish.*/
> +	while (1) {
> +		spin_lock(&fsvq->lock);
> +		if (!fsvq->in_flight) {
> +			spin_unlock(&fsvq->lock);
> +			break;
> +		}
> +		spin_unlock(&fsvq->lock);
> +		usleep_range(1000, 2000);
> +	}

I think all contexts that call this allow sleeping so we could avoid
usleep here.
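Stefan's observation is that the loop already requires a sleepable caller; a sketch of how that contract is usually made explicit with might_sleep() (the helper name is hypothetical, the locking mirrors the quoted loop):

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

static void virtio_fs_drain_queue_sketch(struct virtio_fs_vq *fsvq)
{
	/* documents, and with CONFIG_DEBUG_ATOMIC_SLEEP enforces, that
	 * every caller runs in process context */
	might_sleep();

	for (;;) {
		spin_lock(&fsvq->lock);
		if (!fsvq->in_flight) {
			spin_unlock(&fsvq->lock);
			break;
		}
		spin_unlock(&fsvq->lock);
		usleep_range(1000, 2000);	/* 1-2 ms between re-checks */
	}
}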
2014 Apr 02
1
[PATCH 06/12] drm/nouveau/ibus: add GK20A support
...+};
>>> +
>>> +static void
>>> +nvea_ibus_init_priv_ring(struct nvea_ibus_priv *priv)
>>> +{
>>> +	nv_mask(priv, 0x137250, 0x3f, 0);
>>> +
>>> +	nv_mask(priv, 0x000200, 0x20, 0);
>>> +	udelay(20);
>>
>> usleep_range()?
>
> Sure.
>
>>
>>> +static void
>>> +nvea_ibus_intr(struct nouveau_subdev *subdev)
>>> +{
>> [...]
>>> +	/* Acknowledge interrupt */
>>> +	nv_mask(priv, 0x12004c, 0x2, 0x2);
>>> +
>>> +	while (--retr...
2019 Sep 06
1
[PATCH 08/18] virtiofs: Drain all pending requests during ->remove time
...finish.*/
> > > +	while (1) {
> > > +		spin_lock(&fsvq->lock);
> > > +		if (!fsvq->in_flight) {
> > > +			spin_unlock(&fsvq->lock);
> > > +			break;
> > > +		}
> > > +		spin_unlock(&fsvq->lock);
> > > +		usleep_range(1000, 2000);
> > > +	}
> >
> > I think all contexts that call this allow sleeping so we could avoid
> > usleep here.
>
> usleep_range() is supposed to be used from non-atomic context.
>
> https://github.com/torvalds/linux/blob/master/Documentation/timers/t...
2014 Apr 02
0
[PATCH 06/12] drm/nouveau/ibus: add GK20A support
...+	struct nouveau_ibus base;
>> +};
>> +
>> +static void
>> +nvea_ibus_init_priv_ring(struct nvea_ibus_priv *priv)
>> +{
>> +	nv_mask(priv, 0x137250, 0x3f, 0);
>> +
>> +	nv_mask(priv, 0x000200, 0x20, 0);
>> +	udelay(20);
>
> usleep_range()?

Sure.

>
>> +static void
>> +nvea_ibus_intr(struct nouveau_subdev *subdev)
>> +{
> [...]
>> +	/* Acknowledge interrupt */
>> +	nv_mask(priv, 0x12004c, 0x2, 0x2);
>> +
>> +	while (--retry >= 0) {
>> +		command = nv_...
2016 Jul 06
0
[PATCH] ibus/gk20a: use udelay() in interrupt context
gk20a_ibus_init_ibus_ring() can be called from gk20a_ibus_intr(), in
non-interruptible context. Replace use of usleep_range() with udelay().

Reported-by: Thierry Reding <treding at nvidia.com>
Signed-off-by: Alexandre Courbot <acourbot at nvidia.com>
---
 drm/nouveau/nvkm/subdev/ibus/gk20a.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drm/nouve...
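The rule this patch applies, reduced to a sketch (not the verbatim hunk): anything reachable from interrupt or other atomic context must busy-wait, because sleeping there is a bug:

#include <linux/delay.h>

/* hypothetical helper illustrating the constraint */
static void ring_settle(bool atomic_ctx)
{
	if (atomic_ctx)
		udelay(20);		/* busy-waits; safe in IRQ context */
	else
		usleep_range(20, 30);	/* sleeps; process context only */
}

Since gk20a_ibus_init_ibus_ring() is shared between probe and the interrupt handler, the whole function has to honor the stricter context, hence the unconditional udelay().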
2019 Sep 05
0
[PATCH 08/18] virtiofs: Drain all pending requests during ->remove time
...s_drain_queue(struct virtio_fs_vq *fsvq)
+{
+	WARN_ON(fsvq->in_flight < 0);
+
+	/* Wait for in flight requests to finish.*/
+	while (1) {
+		spin_lock(&fsvq->lock);
+		if (!fsvq->in_flight) {
+			spin_unlock(&fsvq->lock);
+			break;
+		}
+		spin_unlock(&fsvq->lock);
+		usleep_range(1000, 2000);
+	}
+
+	flush_work(&fsvq->done_work);
+	flush_delayed_work(&fsvq->dispatch_work);
+}
+
+static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
+{
+	struct virtio_fs_forget *forget;
+
+	spin_lock(&fsvq->lock);
+	while (1) {
+		forget = list_first_en...
2019 Sep 06
0
[PATCH 08/18] virtiofs: Drain all pending requests during ->remove time
...> > +	/* Wait for in flight requests to finish.*/
> > +	while (1) {
> > +		spin_lock(&fsvq->lock);
> > +		if (!fsvq->in_flight) {
> > +			spin_unlock(&fsvq->lock);
> > +			break;
> > +		}
> > +		spin_unlock(&fsvq->lock);
> > +		usleep_range(1000, 2000);
> > +	}
>
> I think all contexts that call this allow sleeping so we could avoid
> usleep here.

usleep_range() is supposed to be used from non-atomic context.

https://github.com/torvalds/linux/blob/master/Documentation/timers/timers-howto.rst

What construct you are...
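The referenced timers-howto document reduces to a short decision table; a sketch of it as code (thresholds taken from that document, helper name hypothetical):

#include <linux/delay.h>

static void wait_approx_us(unsigned int us, bool atomic_ctx)
{
	if (atomic_ctx)
		udelay(us);		/* atomic context may not sleep */
	else if (us < 10)
		udelay(us);		/* hrtimer setup costs more than this */
	else if (us < 10 * 1000)
		usleep_range(us, us * 2);	/* hrtimer-backed, coalescable */
	else
		msleep(us / 1000);	/* jiffies resolution is fine >= 10 ms */
}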
2020 Sep 01
3
[PATCH v3] drm/nouveau/kms/nv50-: Program notifier offset before requesting disp caps
...F(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE));
+
+	core507d_read_caps(disp, NV50_DISP_CAPS_NTFY1);
+
+	time = nvif_msec(core->chan.base.device, 2000ULL,
+		if (NVBO_TD32(bo, NV50_DISP_CAPS_NTFY1,
+			      NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
+			break;
+		usleep_range(1, 2);
+	);
+	if (time < 0)
+		NV_ERROR(drm, "core caps notifier timeout\n");
+
+	return 0;
+}
+
 int core507d_init(struct nv50_core *core)
 {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index b17c03529c784..8a2005adb0e2f...
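nvif_msec() is nouveau's own bounded-poll macro; the generic kernel spelling of the same pattern is readl_poll_timeout() from <linux/iopoll.h>. A sketch, assuming a plain MMIO done-bit rather than the notifier buffer the patch actually reads:

#include <linux/iopoll.h>

static int wait_caps_done(void __iomem *reg, u32 done_bit)
{
	u32 val;

	/* re-read *reg until the bit is set, sleeping ~1-2 us per pass,
	 * and give up after 2 seconds, mirroring the values above */
	return readl_poll_timeout(reg, val, val & done_bit,
				  2, 2 * 1000 * 1000);
}

It returns 0 on success and -ETIMEDOUT otherwise, so the caller keeps the same error-log-on-timeout shape.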
2015 Jan 06
2
[PATCH 3/11] memory: tegra: add flush operation for Tegra124 memory clients
...|= BIT(hr_client->bit);
> +	else
> +		val &= ~BIT(hr_client->bit);
> +	mc_writel(mc, val, hr_client->ctrl);
> +	mc_readl(mc, hr_client->ctrl);
> +
> +	/* poll till the flush is done */
> +	if (enable) {
> +		do {
> +			udelay(10);

This should probably be usleep_range(10, 20) or something.

Would it be difficult to implement this for Tegra30 and Tegra114?

Thierry
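Thierry's suggestion, plus a bound on the loop, as one sketch; mc_readl() comes from the quoted driver, while the done-bit parameter and the 100 ms budget are assumptions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int mc_flush_wait(struct tegra_mc *mc, u32 reg, u32 done_bit)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (!(mc_readl(mc, reg) & done_bit)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* never poll forever */
		usleep_range(10, 20);		/* the suggested range */
	}

	return 0;
}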
2020 Sep 04
3
[PATCH v5 1/2] drm/nouveau/kms/nv50-: Program notifier offset before requesting disp caps
...OTIFIER_1, CAPABILITIES_1, DONE, FALSE));
+
+	ret = core507d_read_caps(disp);
+	if (ret < 0)
+		return ret;
+
+	time = nvif_msec(core->chan.base.device, 2000ULL,
+		if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+			      NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
+			break;
+		usleep_range(1, 2);
+	);
+	if (time < 0)
+		NV_ERROR(drm, "core caps notifier timeout\n");
+
+	return 0;
+}
+
 int core507d_init(struct nv50_core *core)
 {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index b17c03529c784..8564d4dffaff0...
2020 Sep 01
0
[PATCH v4] drm/nouveau/kms/nv50-: Program notifier offset before requesting disp caps
..._1, DONE, FALSE));
+
+	ret = core507d_read_caps(disp, NV50_DISP_CAPS_NTFY1);
+	if (ret < 0)
+		return ret;
+
+	time = nvif_msec(core->chan.base.device, 2000ULL,
+		if (NVBO_TD32(bo, NV50_DISP_CAPS_NTFY1,
+			      NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
+			break;
+		usleep_range(1, 2);
+	);
+	if (time < 0)
+		NV_ERROR(drm, "core caps notifier timeout\n");
+
+	return 0;
+}
+
 int core507d_init(struct nv50_core *core)
 {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index b17c03529c784..45505a18aca17...
2015 Jan 06
1
[PATCH 3/11] memory: tegra: add flush operation for Tegra124 memory clients
...> > > +	mc_writel(mc, val, hr_client->ctrl);
> > > +	mc_readl(mc, hr_client->ctrl);
> > > +
> > > +	/* poll till the flush is done */
> > > +	if (enable) {
> > > +		do {
> > > +			udelay(10);
> >
> > This should probably be usleep_range(10, 20) or something.
>
> Maybe no. We might need some spin lock here to ensure only one flushing
> operation requested and no race could happen.

We should use a mutex, then. There's no saying how long this will take
and busy-looping indefinitely is a bad idea. Though it seems to me like...
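A sketch of the serialization Thierry settles on: because the wait sleeps, a mutex (not a spinlock) can guard it while still ensuring only one flush operation at a time. It reuses the bounded-poll helper sketched above; the lock name is illustrative:

#include <linux/mutex.h>

static DEFINE_MUTEX(mc_flush_lock);	/* one flush request at a time */

static int mc_flush(struct tegra_mc *mc, u32 reg, u32 done_bit)
{
	int err;

	mutex_lock(&mc_flush_lock);	/* sleeping allowed, so no spinlock */
	err = mc_flush_wait(mc, reg, done_bit);
	mutex_unlock(&mc_flush_lock);

	return err;
}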
2014 Jul 09
0
[PATCH 10/17] drm/qxl: rework to new fence interface
...0; count < 11; count++) {
+		if (!qxl_queue_garbage_collect(qdev, true))
+			break;
+
+		if (fence_is_signaled_locked(fence))
+			goto signaled;
+	}
+
+	if (fence_is_signaled_locked(fence))
+		goto signaled;
+
+	if (have_drawable_releases || sc < 4) {
+		if (sc > 2)
+			/* back off */
+			usleep_range(500, 1000);
+
+		if (time_after(jiffies, end))
+			return 0;
+
+		if (have_drawable_releases && sc > 300) {
+			FENCE_WARN(fence, "failed to wait on release %d "
+				   "after spincount %d\n",
+				   fence->context & ~0xf0000000, sc);
+			goto signaled;
+		}...
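The shape of this wait is a two-phase escalation: a few cheap re-checks first, then a sleeping back-off so a slow host does not pin a CPU. A generic sketch of the idea (names hypothetical, limits mirror the hunk):

#include <linux/delay.h>

static bool wait_with_backoff(bool (*done)(void *data), void *data)
{
	int i;

	/* phase 1: a handful of immediate re-checks */
	for (i = 0; i < 4; i++)
		if (done(data))
			return true;

	/* phase 2: back off with a real sleep, as qxl does above */
	for (i = 0; i < 300; i++) {
		if (done(data))
			return true;
		usleep_range(500, 1000);
	}

	return false;	/* caller decides whether to warn or bail */
}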
2019 Oct 30
6
[PATCH 0/3] virtiofs: Small Cleanups for 5.5
Hi Miklos,

Here are a few small cleanups for virtiofs for 5.5. I had received some
comments from Michael Tsirkin on the original virtiofs patches, and these
cleanups are the result of those comments.

Thanks
Vivek

Vivek Goyal (3):
  virtiofs: Use a common function to send forget
  virtiofs: Do not send forget request "struct list_head" element
  virtiofs: Use completions while waiting for queue to be drained
2020 Apr 21
0
[PATCH 1/1] drm/qxl: add mutex_lock/mutex_unlock to ensure the order in which resources are released.
...the qxl_release_fence_buffer_objects() +
qxl_push_{cursor,command}_ring_release() calls to close that race window.

Can you try that and see if it fixes the bug for you?

>  	if (flush)
> -		flush_work(&qdev->gc_work);
> +		//can't flush work, it may lead to deadlock
> +		usleep_range(500, 1000);
> +

The commit message doesn't explain this chunk.

take care,
  Gerd
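A sketch of the ordering Gerd is proposing, with a single lock held across both calls so nothing can slip between fencing and pushing; the lock name and the trailing arguments are assumptions, not the real qxl API surface:

static void qxl_fence_and_push(struct qxl_device *qdev,
			       struct qxl_release *release, uint32_t type)
{
	mutex_lock(&qdev->release_mutex);	/* hypothetical lock */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, type, false);
	mutex_unlock(&qdev->release_mutex);
}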
2018 Feb 14
1
[vhost:vhost 22/23] drivers/firmware/qemu_fw_cfg.c:130:36: sparse: incorrect type in initializer (different base types)
...e is sync today, but spec says it may become async */
   105	static void fw_cfg_wait_for_control(struct fw_cfg_dma *d)
   106	{
   107		do {
 > 108			u32 ctrl = be32_to_cpu(READ_ONCE(d->control));
   109
   110			if ((ctrl & ~FW_CFG_DMA_CTL_ERROR) == 0)
   111				return;
   112
   113			usleep_range(50, 100);
   114		} while (true);
   115	}
   116
   117	static ssize_t fw_cfg_dma_transfer(void *address, u32 length, u32 control)
   118	{
   119		phys_addr_t dma;
   120		struct fw_cfg_dma *d = NULL;
   121		ssize_t ret = length;
   122
   123		d = kmalloc(sizeof(*d), GFP_KERNEL);
   124		if...
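The sparse message in the subject is its endianness checking: be32_to_cpu() wants a __be32, not a plain u32. A sketch of the annotation that satisfies it, assuming the descriptor fields are big-endian per the fw_cfg DMA interface:

#include <linux/types.h>

struct fw_cfg_dma_sketch {
	__be32 control;	/* device byte order; be32_to_cpu(READ_ONCE(...))
			 * now type-checks under sparse */
	__be32 length;
	__be64 address;
};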
2019 Oct 30
0
[PATCH 3/3] virtiofs: Use completions while waiting for queue to be drained
...uld not be any
+	 * waiters waiting for completion.
+	 */
+	reinit_completion(&fsvq->in_flight_zero);
+	spin_unlock(&fsvq->lock);
+	wait_for_completion(&fsvq->in_flight_zero);
+	} else {
 		spin_unlock(&fsvq->lock);
-		/* TODO use completion instead of timeout */
-		usleep_range(1000, 2000);
 	}
 
 	flush_work(&fsvq->done_work);
 	flush_delayed_work(&fsvq->dispatch_work);
 }
 
-static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
+static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
 {
 	struct virtio_fs_vq *fsvq;
 	int i;
 
@@ -141,6 +1...
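For reference, the completion handshake this patch switches to, reduced to a self-contained sketch (field names follow the patch; in_flight_zero must have been set up once with init_completion() at queue creation):

#include <linux/completion.h>
#include <linux/spinlock.h>

/* waiter: process context, no polling */
static void drain_wait(struct virtio_fs_vq *fsvq)
{
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}
}

/* completer: whoever retires the last in-flight request wakes the waiter */
static void request_done(struct virtio_fs_vq *fsvq)
{
	spin_lock(&fsvq->lock);
	if (--fsvq->in_flight == 0)
		complete(&fsvq->in_flight_zero);
	spin_unlock(&fsvq->lock);
}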
2012 Dec 12
1
kernel BUG at fs/btrfs/extent_io.c:4052 (kernel 3.5.3)
...s_end_transaction+0x9a/0x350 [btrfs]
Dec 11 17:49:04 SANOS1 kernel: [<ffffffffa023d880>] btrfs_end_transaction+0x10/0x20 [btrfs]
Dec 11 17:49:04 SANOS1 kernel: [<ffffffffa02433c5>] btrfs_finish_ordered_io+0x175/0x400 [btrfs]
Dec 11 17:49:04 SANOS1 kernel: [<ffffffff8104ebd0>] ? usleep_range+0x40/0x40
Dec 11 17:49:04 SANOS1 kernel: [<ffffffffa0243660>] finish_ordered_fn+0x10/0x20 [btrfs]
Dec 11 17:49:04 SANOS1 kernel: [<ffffffffa026cd97>] worker_loop+0x157/0x550 [btrfs]
Dec 11 17:49:04 SANOS1 kernel: [<ffffffffa026cc40>] ? btrfs_queue_worker+0x310/0x310 [btrfs]
De...