Displaying 20 results from an estimated 33 matches for "node_idx".
2010 Jun 16
2
[PATCH] ocfs2/dlm: check dlm_state under spinlock
...goto unlock_respond;
+ goto unlock_domain_respond;
/*
* There is a small window where the joining node may not see the
@@ -811,7 +811,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
"have node %u in its nodemap\n",
query->node_idx, nodenum);
packet.code = JOIN_DISALLOW;
- goto unlock_respond;
+ goto unlock_domain_respond;
}
}
nodenum++;
@@ -821,9 +821,9 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
* to be put in someone's domain map.
* Also, explicitly disall...
2009 Feb 03
5
[PATCH 1/4] ocfs2/dlm: Retract fix for race between purge and migrate
Mainline commit d4f7e650e55af6b235871126f747da88600e8040 attempts to delay
the dlm_thread from sending the drop ref message if the lockres is being
migrated. The problem is that we make the dlm_thread wait for the migration
to complete. This causes a deadlock as dlm_thread also participates in the
lockres migration process.
A better fix for the original oss bugzilla#1012 is in testing.
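To make the circular wait concrete, here is a minimal user-space sketch. It is not ocfs2 code: the thread, flag, and names are hypothetical, and the program deliberately hangs because the only thread that could signal completion is the one blocked waiting for it, which is the shape of the dlm_thread/migration deadlock described above.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int migration_done;		/* would be set by a migration step ... */

static void *worker(void *arg)		/* ... that only this thread can run */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!migration_done)			/* waits forever: the would-be  */
		pthread_cond_wait(&done, &lock);	/* setter is this same thread */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(&t, NULL);		/* never returns: self-dependency */
	return 0;
}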
2010 Apr 14
2
[PATCH 1/2] ocfs2/dlm: Make o2dlm domain join/leave messages KERN_NOTICE
..._NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);
while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
@@ -534,7 +534,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
node = exit_msg->node_idx;
- printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);
spin_lock(&dlm->spinlock);
clear_bit(node, dlm->domain_map);
@@ -904,7 +904,7 @@ static int dlm_asse...
2012 Nov 02
1
[PATCH] ocfs2:fix memory leak in dlm_add_migration_mle
...msg, u32 len, void *data,
u32 flags;
int master_request = 0, have_lockres_ref = 0;
int ret = 0;
+ int bit;
if (!dlm_grab(dlm))
return 0;
@@ -1770,7 +1772,11 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
"MLE for it! (%.*s)\n", assert->node_idx,
namelen, name);
} else {
- int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
+ spin_lock(&mle->spinlock);
+ mle->assert_master = 1;
+ spin_unlock(&mle->spinlock);
+
+ bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
if (bit >= O2NM_MAX...
2010 Aug 26
1
[PATCH 2/5] ocfs2/dlm: add lockres as parameter to dlm_new_lock()
...struct dlm_lock_resource *res)
{
struct dlm_lock *lock;
int kernel_allocated = 0;
@@ -502,22 +503,6 @@ int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
goto leave;
}
- status = DLM_SYSERR;
- newlock = dlm_new_lock(create->requested_type,
- create->node_idx,
- be64_to_cpu(create->cookie), NULL);
- if (!newlock) {
- dlm_error(status);
- goto leave;
- }
-
- lksb = newlock->lksb;
-
- if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
- lksb->flags |= DLM_LKSB_GET_LVB;
- mlog(0, "set DLM_LKSB_GET_LVB flag\n");
- }
-
s...
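The hunk only shows code being removed from dlm_create_lock_handler(); per the subject line, dlm_new_lock() gains a lockres argument. The prototype below is a guess at the shape of that change: the existing parameters are inferred from the removed call above, and the position of the new lockres parameter is an assumption, not taken from the patch.

struct dlm_lock *dlm_new_lock(int type, u8 node, u64 cookie,
			      struct dlm_lockstatus *lksb,	/* inferred: the NULL in the old call */
			      struct dlm_lock_resource *res);	/* new: lockres passed in (assumed position) */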
2014 Feb 26
1
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
On Wed, Feb 26, 2014 at 10:14:21AM -0500, Waiman Long wrote:
> +struct qnode {
> + u32 wait; /* Waiting flag */
> + struct qnode *next; /* Next queue node addr */
> +};
> +
> +struct qnode_set {
> + struct qnode nodes[MAX_QNODES];
> + int node_idx; /* Current node to use */
> +};
> +
> +/*
> + * Per-CPU queue node structures
> + */
> +static DEFINE_PER_CPU_ALIGNED(struct qnode_set, qnset) = { {{0}}, 0 };
So I've not yet wrapped my head around any of this; and I see a later
patch adds some paravirt gunk to this, but it...
2014 Feb 26
1
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
On Wed, Feb 26, 2014 at 10:14:21AM -0500, Waiman Long wrote:
> +static void put_qnode(void)
> +{
> + struct qnode_set *qset = this_cpu_ptr(&qnset);
> +
> + qset->node_idx--;
> +}
That very much wants to be: this_cpu_dec().
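That is, operate on the per-CPU field directly instead of materialising a pointer just to decrement one member; a sketch of the suggested form:

static void put_qnode(void)
{
	/* decrement this CPU's qnset.node_idx without going through this_cpu_ptr() */
	this_cpu_dec(qnset.node_idx);
}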
2014 Sep 11
1
May be deadlock for wrong locking order, patch request reviewed, thanks
...pin_lock(&res->spinlock);
found = dlm_find_mle(dlm, &tmpmle, name, namelen);
if (!found) {
mlog(ML_ERROR, "no mle found for this lock!\n");
*************** way_up_top:
*** 1551,1558 ****
set_bit(request->node_idx, tmpmle->maybe_map);
spin_unlock(&tmpmle->spinlock);
- spin_unlock(&dlm->master_lock);
spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->master_lock);
/* keep the mle attached to heartbeat events */...
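For context, the hazard behind "wrong locking order" reports is an AB-BA inversion: two paths taking the same pair of locks in opposite orders. The fragment below reuses the lock names from the snippet purely to illustrate the rule; it is not the actual o2dlm call path, and which lock nests inside which is an assumption here.

/* Sketch only: both paths must agree on one nesting order, otherwise a
 * thread in path_a and a thread in path_b can each end up holding the
 * lock the other is spinning on. */
static void path_a(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	spin_lock(&dlm->master_lock);	/* same order as path_b */
	/* ... */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&res->spinlock);
}

static void path_b(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);	/* taking master_lock first here would */
	spin_lock(&dlm->master_lock);	/* reintroduce the AB-BA deadlock      */
	/* ... */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&res->spinlock);
}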
2014 Feb 26
0
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
...nlock structure
+ * in mcs_spinlock.h file. This structure is retained for future extension
+ * where new fields may be added.
+ */
+struct qnode {
+ u32 wait; /* Waiting flag */
+ struct qnode *next; /* Next queue node addr */
+};
+
+struct qnode_set {
+ struct qnode nodes[MAX_QNODES];
+ int node_idx; /* Current node to use */
+};
+
+/*
+ * Per-CPU queue node structures
+ */
+static DEFINE_PER_CPU_ALIGNED(struct qnode_set, qnset) = { {{0}}, 0 };
+
+/*
+ ************************************************************************
+ * The following optimized codes are for architectures that support:...
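The node_idx field implies a small per-CPU stack of queue nodes, presumably one slot per nesting level (task, softirq, hardirq, NMI). The allocation side that would pair with the put_qnode() quoted earlier might look like the sketch below; this is illustrative, not the patch's actual code.

static struct qnode *get_qnode(void)
{
	struct qnode_set *qset = this_cpu_ptr(&qnset);

	/* hand out the next free slot, so nested lockers on this CPU
	 * (e.g. an interrupt arriving while a lock is queued) get
	 * distinct nodes */
	return &qset->nodes[qset->node_idx++];
}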
2014 Feb 27
0
[PATCH v5 1/8] qspinlock: Introducing a 4-byte queue spinlock implementation
...nlock structure
+ * in mcs_spinlock.h file. This structure is retained for future extension
+ * where new fields may be added.
+ */
+struct qnode {
+ u32 wait; /* Waiting flag */
+ struct qnode *next; /* Next queue node addr */
+};
+
+struct qnode_set {
+ struct qnode nodes[MAX_QNODES];
+ int node_idx; /* Current node to use */
+};
+
+/*
+ * Per-CPU queue node structures
+ */
+static DEFINE_PER_CPU_ALIGNED(struct qnode_set, qnset) = { {{0}}, 0 };
+
+/*
+ ************************************************************************
+ * The following optimized codes are for architectures that support:...
2009 Feb 03
10
Convert mle list to a hash
These patches convert the mle list to a hash. The same patches apply on
ocfs2 1.4 too.
Currently, we use the same number of hash pages for mles and lockres'.
This will be addressed in a future patch that will make both of them
configurable.
Sunil
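The shape of such a conversion is a bucketed lookup keyed on the lock name instead of a walk of one global list. The sketch below uses hypothetical field and helper names (hash_node, mname, mnamelen, MLE_HASH_BUCKETS) together with the stock kernel hlist and jhash helpers; it shows the idea, not these patches.

#include <linux/jhash.h>
#include <linux/list.h>

#define MLE_HASH_BUCKETS 1024

static struct hlist_head mle_hash[MLE_HASH_BUCKETS];

static struct dlm_master_list_entry *mle_lookup(const char *name, unsigned int len)
{
	struct dlm_master_list_entry *mle;
	unsigned int bucket = jhash(name, len, 0) % MLE_HASH_BUCKETS;

	/* assumes the mle gained an hlist_node member, called hash_node here */
	hlist_for_each_entry(mle, &mle_hash[bucket], hash_node)
		if (mle->mnamelen == len && !memcmp(mle->mname, name, len))
			return mle;	/* scan one bucket, not every mle */
	return NULL;
}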
2007 May 17
1
[PATCH] ocfs: use list_for_each_entry where benefical
...struct dlm_ctxt *dlm = data;
struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
- struct list_head *iter;
struct dlm_reco_node_data *ndata = NULL;
int ret = -EINVAL;
@@ -1000,8 +989,7 @@ int dlm_reco_data_done_handler(struct o2
dlm->reco.dead_node, done->node_idx, dlm->node_num);
spin_lock(&dlm_reco_state_lock);
- list_for_each(iter, &dlm->reco.node_data) {
- ndata = list_entry (iter, struct dlm_reco_node_data, list);
+ list_for_each_entry(ndata, &dlm->reco.node_data, list) {
if (ndata->node_num != done->node_idx)
cont...
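The conversion the subject describes is mechanical: drop the separate struct list_head iterator plus the list_entry() call and let list_for_each_entry() do both. A generic sketch of the before/after shape (struct foo, foo_list and use() are placeholders, not the dlm recovery types):

/* before */
struct list_head *iter;
struct foo *f;
list_for_each(iter, &foo_list) {
	f = list_entry(iter, struct foo, list);
	use(f);
}

/* after: the iterator variable and list_entry() fold into the macro */
struct foo *f;
list_for_each_entry(f, &foo_list, list) {
	use(f);
}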
2009 Apr 17
26
OCFS2 1.4: Patches backported from mainline
Please review the list of patches being applied to the ocfs2 1.4 tree.
All patches list the mainline commit hash.
Thanks
Sunil
2014 Apr 02
0
[PATCH v8 01/10] qspinlock: A generic 4-byte queue spinlock implementation
...mcs_spinlock structure
+ * in mcs_spinlock.h file. It is retained for future extension where new
+ * fields may be added.
+ */
+struct qnode {
+ u32 qhead; /* Queue head flag */
+ struct qnode *next; /* Next queue node addr */
+};
+
+struct qnode_set {
+ struct qnode nodes[MAX_QNODES];
+ int node_idx; /* Current node to use */
+};
+
+/*
+ * Per-CPU queue node structures
+ */
+static DEFINE_PER_CPU_ALIGNED(struct qnode_set, qnset) = { { { 0 } }, 0 };
+
+/*
+ ************************************************************************
+ * Inline functions used by the queue_spin_lock_slowpath() functi...
2009 Feb 26
13
o2dlm mle hash patches - round 2
The changes from the last drop are:
1. Patch 11 removes struct dlm_lock_name.
2. Patch 12 is an unrelated bugfix. Actually, it is related to a bugfix
that we are retracting in mainline currently. The patch may need more testing.
While I did hit the condition in my testing, Marcos hasn't. I am sending it
because it can be queued for 2.6.30. Give us more time to test.
3. Patch 13 will be useful
2014 Feb 26
22
[PATCH v5 0/8] qspinlock: a 4-byte queue spinlock with PV support
v4->v5:
- Move the optimized 2-task contending code to the generic file to
enable more architectures to use it without code duplication.
- Address some of the style-related comments by PeterZ.
- Allow the use of unfair queue spinlock in a real para-virtualized
execution environment.
- Add para-virtualization support to the qspinlock code by ensuring
that the lock holder and queue