Displaying 20 results from an estimated 22 matches for "ctxs".
2017 Feb 21
3
What is the proper usage of LLVMContext?
....)
}
Q2) In what situation do I need to create multiple `LLVMContext`s?
I don't know of a situation that uses multiple `LLVMContext`s.
For example,
{
  {
    llvm::LLVMContext ctx1;
    // some code
  }
  {
    llvm::LLVMContext ctx2;
    // some code
  }
  {
    llvm::LLVMContext ctx3;
    // some code
  }
}
or
{
  llvm::LLVMContext ctxs[] = {....}
  // some code
}
I'd like to know the appropriate usage of LLVMContext. Thank you very much
for reading.
Sincerely,
Ryo
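
For context (this is not from the thread itself): below is a minimal sketch of the usual single-context pattern that Q2 is contrasting with, assuming the LLVM C++ API headers are available; the module name "demo" and function "f" are purely illustrative. One LLVMContext owns the types, constants, and metadata for every Module built against it.

#include <memory>
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext ctx;             // owns types, constants, and metadata
  auto mod = std::make_unique<llvm::Module>("demo", ctx);
  llvm::IRBuilder<> builder(ctx);

  // Build: define void @f() { entry: ret void }
  auto *fnTy = llvm::FunctionType::get(builder.getVoidTy(), /*isVarArg=*/false);
  auto *fn = llvm::Function::Create(fnTy, llvm::Function::ExternalLinkage,
                                    "f", mod.get());
  builder.SetInsertPoint(llvm::BasicBlock::Create(ctx, "entry", fn));
  builder.CreateRetVoid();

  mod->print(llvm::outs(), nullptr); // print the module's textual IR
  return 0;
}

Separate contexts, as in the ctx1/ctx2/ctx3 example above, are mainly useful when IR must be built and owned independently -- typically one context per thread, since an LLVMContext is not thread-safe.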
2017 Nov 21
2
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
...;
> static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
> {
> - cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
> - &hctx->cpuhp_dead);
> + cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_PREPARE, &hctx->cpuhp);
> }
>
> /* hctx->ctxs will be freed in queue's release handler */
> @@ -2039,7 +2050,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
> hctx->queue = q;
> hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
>
> - cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->...
2017 Nov 21
0
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
..._queue_reinit(struct request_queue *q)
* we should change hctx numa_node according to the new topology (this
* involves freeing and re-allocating memory, worth doing?)
*/
- blk_mq_map_swqueue(q);
+ blk_mq_map_swqueue(q, online_mask);
blk_mq_sysfs_register(q);
blk_mq_debugfs_register_hctxs(q);
}
+/*
+ * New online cpumask which is going to be set in this hotplug event.
+ * Declare this cpumask as global, as cpu-hotplug operations are invoked
+ * one-by-one and dynamically allocating this could result in a failure.
+ */
+static struct cpumask cpuhp_online_new;
+
+static void blk_mq_q...
2017 Nov 21
2
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
On 11/21/2017 11:12 AM, Christian Borntraeger wrote:
>
>
> On 11/21/2017 07:09 PM, Jens Axboe wrote:
>> On 11/21/2017 10:27 AM, Jens Axboe wrote:
>>> On 11/21/2017 03:14 AM, Christian Borntraeger wrote:
>>>> Bisect points to
>>>>
>>>> 1b5a7455d345b223d3a4658a9e5fce985b7998c1 is the first bad commit
>>>> commit
2017 Nov 21
0
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
...igned int cpu, struct hlist_node *node)
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
- cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
- &hctx->cpuhp_dead);
+ cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_PREPARE, &hctx->cpuhp);
}
/* hctx->ctxs will be freed in queue's release handler */
@@ -2039,7 +2050,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue = q;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+ cpuhp_st...
2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...ve_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOW...
2017 Nov 21
2
4.14: WARNING: CPU: 4 PID: 2895 at block/blk-mq.c:1144 with virtio-blk (also 4.12 stable)
...> * we should change hctx numa_node according to the new topology (this
> * involves freeing and re-allocating memory, worth doing?)
> */
> - blk_mq_map_swqueue(q);
> + blk_mq_map_swqueue(q, online_mask);
>
> blk_mq_sysfs_register(q);
> blk_mq_debugfs_register_hctxs(q);
> }
>
> +/*
> + * New online cpumask which is going to be set in this hotplug event.
> + * Declare this cpumask as global, as cpu-hotplug operations are invoked
> + * one-by-one and dynamically allocating this could result in a failure.
> + */
> +static struct cpumask cp...
2009 Feb 09
9
[Bug 20023] New: nv20: unwanted solid fills during busyloop rendering
http://bugs.freedesktop.org/show_bug.cgi?id=20023
Summary: nv20: unwanted solid fills during busyloop rendering
Product: Mesa
Version: CVS
Platform: x86-64 (AMD64)
OS/Version: Linux (All)
Status: ASSIGNED
Severity: minor
Priority: medium
Component: Drivers/DRI/nouveau
AssignedTo: pq at iki.fi
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
.../
Nit, since we're messing with this, it can now be
"native_flush_tlb_multi()" since it is a function.
> switch_mm_irqs_off(NULL, &init_mm, NULL);
> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
> this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
> }
>
> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
> +static void flush_tlb_func_local(void *info)
> {
> const struct flush_tlb_info *f = info;
> + enum tlb_flush_reason reason;
> +
> + rea...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...ssing with this, it can now be
> "native_flush_tlb_multi()" since it is a function.
Sure.
>
>> switch_mm_irqs_off(NULL, &init_mm, NULL);
>> @@ -635,9 +635,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
>> this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
>> }
>>
>> -static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
>> +static void flush_tlb_func_local(void *info)
>> {
>> const struct flush_tlb_info *f = info;
>> + enum tlb_flush_reason r...
2019 May 31
2
[RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...ve_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -634,9 +634,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOW...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...e_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi() skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -635,7 +635,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(void *info)
+static void __flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
enum tlb_flush_reason reason;
@@ -645,6 +645,11 @@ static void flush_tlb_func_local(void *info)
flush_tlb_func_com...
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...ve_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -634,9 +634,12 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(void *info)
{
const struct flush_tlb_info *f = info;
+ enum tlb_flush_reason reason;
+
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOW...